| max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
vitrage/evaluator/template_data.py | HoonMinJeongUm/Hunmin-vitrage | 0 | 5100 |
# Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import namedtuple
ActionSpecs = namedtuple(
'ActionSpecs', ['id', 'type', 'targets', 'properties'])
EdgeDescription = namedtuple('EdgeDescription', ['edge', 'source', 'target'])
ENTITY = 'entity'
RELATIONSHIP = 'relationship'
class Scenario(object):
def __init__(self, id, version, condition, actions, subgraphs, entities,
relationships, enabled=False):
self.id = id
self.version = version
self.condition = condition
self.actions = actions
self.subgraphs = subgraphs
self.entities = entities
self.relationships = relationships
self.enabled = enabled
def __eq__(self, other):
return self.id == other.id and \
self.condition == other.condition and \
self.actions == other.actions and \
self.subgraphs == other.subgraphs and \
self.entities == other.entities and \
self.relationships == other.relationships
# noinspection PyAttributeOutsideInit
class TemplateData(object):
def __init__(self, name, template_type, version, entities,
relationships, scenarios):
self.name = name
self.template_type = template_type
self.version = version
self.entities = entities
self.relationships = relationships
self.scenarios = scenarios
@property
def name(self):
return self._name
@name.setter
def name(self, template_name):
self._name = template_name
@property
def template_type(self):
return self._template_type
@template_type.setter
def template_type(self, template_type):
self._template_type = template_type
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def entities(self):
return self._entities
@entities.setter
def entities(self, entities):
self._entities = entities
@property
def relationships(self):
return self._relationships
@relationships.setter
def relationships(self, relationships):
self._relationships = relationships
@property
def scenarios(self):
return self._scenarios
@scenarios.setter
def scenarios(self, scenarios):
self._scenarios = scenarios
| 1.9375 | 2 |
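A minimal usage sketch for the data classes above, assuming the module is importable as `vitrage.evaluator.template_data`; the field values are illustrative only, not taken from a real Vitrage template.

```python
from vitrage.evaluator.template_data import ActionSpecs, Scenario, TemplateData

# One action wrapped in one scenario, then bundled into a TemplateData object.
action = ActionSpecs(
    id='raise_alarm_1',
    type='raise_alarm',
    targets={'target': 'host'},
    properties={'severity': 'WARNING'})

scenario = Scenario(
    id='scenario_1', version='2', condition=None, actions=[action],
    subgraphs=[], entities={}, relationships={})

template = TemplateData(
    name='host_high_cpu', template_type='standard', version='2',
    entities={}, relationships={}, scenarios=[scenario])

print(template.name, len(template.scenarios))  # host_high_cpu 1
```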
scripts/summarize-kmer-counts.py | rpetit3/anthrax-metagenome-study | 0 | 5101 | #! /usr/bin/env python3
"""Parse through the simulated sequencing group specific kmer counts."""
import argparse as ap
from collections import OrderedDict
import glob
import gzip
import os
import sys
import time
import numpy as np
import multiprocessing as mp
SAMPLES = OrderedDict()
KMERS = {}
HAMMING = OrderedDict()
SAMPLE_COLS = [
'sample', 'is_bcg', 'is_ba', 'has_lethal', 'simulated_coverage', 'group',
'total_kmers', 'tp', 'tn', 'fp', 'fn',
'kmer_cov_min', 'kmer_cov_mean', 'kmer_cov_median', 'kmer_cov_max',
'non_zero_kmer_cov_min', 'non_zero_kmer_cov_mean',
'non_zero_kmer_cov_median', 'non_zero_kmer_cov_max'
]
KMER_COLS = [
'kmer', 'simulated_coverage', 'group', 'hamming_distance',
'tp', 'tn', 'fp', 'fn',
'group_kmer_cov_min',
'group_kmer_cov_mean',
'group_kmer_cov_median',
'group_kmer_cov_max',
'non_zero_group_kmer_cov_min',
'non_zero_group_kmer_cov_mean',
'non_zero_group_kmer_cov_median',
'non_zero_group_kmer_cov_max',
'outgroup_kmer_cov_min',
'outgroup_kmer_cov_mean',
'outgroup_kmer_cov_median',
'outgroup_kmer_cov_max',
'non_zero_outgroup_kmer_cov_min',
'non_zero_outgroup_kmer_cov_mean',
'non_zero_outgroup_kmer_cov_median',
'non_zero_outgroup_kmer_cov_max'
]
def get_group_status(sample, group):
"""Return if a sample is within a group or not."""
within_group = None
if group == 'ba':
within_group = True if SAMPLES[sample]['is_ba'] == 'True' else False
elif group == 'bcg':
within_group = True if SAMPLES[sample]['is_bcg'] == 'True' else False
else:
# lef
within_group = True if SAMPLES[sample]['has_lethal'] else False
return within_group
def get_coverage_stats(coverage):
"""Return summary stats of a set of coverages."""
non_zero = [c for c in coverage if c]
np_array = np.array(coverage)
non_zero_array = np.array(non_zero)
return {
'min': min(coverage) if coverage else 0,
'median': int(np.median(np_array)) if coverage else 0,
'mean': "{0:.4f}".format(np.mean(np_array)) if coverage else 0,
'max': max(coverage) if coverage else 0,
'non_zero_min': min(non_zero_array) if non_zero else 0,
'non_zero_median': int(np.median(non_zero_array)) if non_zero else 0,
'non_zero_mean': int(round(np.mean(non_zero_array))) if non_zero else 0,
'non_zero_max': max(non_zero_array) if non_zero else 0,
}
def reverse_complement(seq):
"""Reverse complement a DNA sequence."""
complement = {
'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G',
'a': 't', 't': 'a', 'g': 'c', 'c': 'g'
}
return ''.join([complement[b] for b in seq[::-1]])
def parse_counts(counts, sample, coverage, group, skip_kmers=False,
filter_kmers=False):
"""Parse kmer counts."""
within_group = get_group_status(sample, group)
sample_row = {'coverages': [], 'tp': 0, 'tn': 0, 'fp': 0, 'fn': 0}
with gzip.open(counts, 'r') as count_handle:
for line in count_handle:
kmer, count = line.decode().rstrip().split()
count = int(count)
parse = True
if filter_kmers:
parse = kmer in KMERS or reverse_complement(kmer) in KMERS
elif not skip_kmers:
if kmer not in KMERS:
kmer = reverse_complement(kmer)
if within_group:
KMERS[kmer][coverage]['group_coverages'].append(count)
if count:
KMERS[kmer][coverage]['tp'] += 1
else:
KMERS[kmer][coverage]['fn'] += 1
else:
KMERS[kmer][coverage]['outgroup_coverages'].append(count)
if count:
KMERS[kmer][coverage]['fp'] += 1
else:
KMERS[kmer][coverage]['tn'] += 1
if parse:
sample_row['coverages'].append(count)
if within_group:
if count:
sample_row['tp'] += 1
else:
sample_row['fn'] += 1
else:
if count:
sample_row['fp'] += 1
else:
sample_row['tn'] += 1
coverage_stats = get_coverage_stats(sample_row['coverages'])
SAMPLES[sample]['results'].append({
'simulated_coverage': coverage,
'within_group': within_group,
'tp': sample_row['tp'],
'tn': sample_row['tn'],
'fp': sample_row['fp'],
'fn': sample_row['fn'],
'kmer_cov_min': coverage_stats['min'],
'kmer_cov_mean': coverage_stats['mean'],
'kmer_cov_median': coverage_stats['median'],
'kmer_cov_max': coverage_stats['max'],
'non_zero_kmer_cov_min': coverage_stats['non_zero_min'],
'non_zero_kmer_cov_mean': coverage_stats['non_zero_mean'],
'non_zero_kmer_cov_median': coverage_stats['non_zero_median'],
'non_zero_kmer_cov_max': coverage_stats['non_zero_max'],
})
def parse_kmers(kmers, coverages, skip_kmers=False, has_hamming=True):
with open(kmers, 'r') as kmer_handle:
for line in kmer_handle:
if line.startswith(">"):
line = line.rstrip().replace(">", "")
kmer, distance = line.split("-")
if not has_hamming:
distance = False
KMERS[kmer] = OrderedDict()
HAMMING[kmer] = distance
if not skip_kmers:
for coverage in coverages:
KMERS[kmer][coverage] = {
'group_coverages': [], 'outgroup_coverages': [],
'tp': 0, 'tn': 0, 'fp': 0, 'fn': 0
}
def parse_summary(summary):
"""Parse Summary file."""
cols = None
with open(summary, 'r') as summary_handle:
# Column Names:
# accession, gi, is_bcg, is_ba, species, genome_size, description
for line in summary_handle:
line = line.rstrip()
if line.startswith('#'):
cols = line.replace('#', '').split('\t')
else:
row = dict(zip(cols, line.split('\t')))
SAMPLES[row['accession']] = row
if row['accession'] == 'NZ_CP009941':
# NZ_CP009941 - Bacillus cereus w/ lef on chromosome
SAMPLES[row['accession']]['has_lethal'] = True
else:
SAMPLES[row['accession']]['has_lethal'] = False
SAMPLES[row['accession']]['results'] = []
def print_sample_summary(file_output):
"""Print the final per sample summaries."""
with open(file_output, 'w') as output_handle:
output_handle.write(("\t".join(SAMPLE_COLS)))
output_handle.write("\n")
for sample in SAMPLES:
if SAMPLES[sample]['results']:
for result in SAMPLES[sample]['results']:
row = {
'sample': sample,
'is_bcg': SAMPLES[sample]['is_bcg'],
'is_ba': SAMPLES[sample]['is_ba'],
'has_lethal': SAMPLES[sample]['has_lethal'],
'simulated_coverage': result['simulated_coverage'],
'group': args.group,
'within_group': result['within_group'],
'total_kmers': total_kmers,
'tp': result['tp'],
'tn': result['tn'],
'fp': result['fp'],
'fn': result['fn'],
'kmer_cov_min': result['kmer_cov_min'],
'kmer_cov_mean': result['kmer_cov_mean'],
'kmer_cov_median': result['kmer_cov_median'],
'kmer_cov_max': result['kmer_cov_max'],
'non_zero_kmer_cov_min': result['non_zero_kmer_cov_min'],
'non_zero_kmer_cov_mean': result['non_zero_kmer_cov_mean'],
'non_zero_kmer_cov_median': result['non_zero_kmer_cov_median'],
'non_zero_kmer_cov_max': result['non_zero_kmer_cov_max']
}
output_handle.write(("\t".join([
str(row[col]) for col in SAMPLE_COLS
])))
output_handle.write("\n")
def print_kmer_summary(file_output):
"""Print the final per kmer summaries."""
with open(file_output, 'w') as output_handle:
output_handle.write(("\t".join(KMER_COLS)))
output_handle.write("\n")
for kmer, coverages in KMERS.items():
for coverage in coverages:
within_group = get_coverage_stats(
KMERS[kmer][coverage]['group_coverages']
)
outgroup = get_coverage_stats(
KMERS[kmer][coverage]['outgroup_coverages']
)
row = {
'kmer': kmer,
'simulated_coverage': coverage,
'group': args.group,
'hamming_distance': HAMMING[kmer],
'tp': KMERS[kmer][coverage]['tp'],
'tn': KMERS[kmer][coverage]['tn'],
'fp': KMERS[kmer][coverage]['fp'],
'fn': KMERS[kmer][coverage]['fn'],
'group_kmer_cov_min': within_group['min'],
'group_kmer_cov_mean': within_group['mean'],
'group_kmer_cov_median': within_group['median'],
'group_kmer_cov_max': within_group['max'],
'non_zero_group_kmer_cov_min': within_group['non_zero_min'],
'non_zero_group_kmer_cov_mean': within_group['non_zero_mean'],
'non_zero_group_kmer_cov_median': within_group['non_zero_median'],
'non_zero_group_kmer_cov_max': within_group['non_zero_max'],
'outgroup_kmer_cov_min': outgroup['min'],
'outgroup_kmer_cov_mean': outgroup['mean'],
'outgroup_kmer_cov_median': outgroup['median'],
'outgroup_kmer_cov_max': outgroup['max'],
'non_zero_outgroup_kmer_cov_min': outgroup['non_zero_min'],
'non_zero_outgroup_kmer_cov_mean': outgroup['non_zero_mean'],
'non_zero_outgroup_kmer_cov_median': outgroup['non_zero_median'],
'non_zero_outgroup_kmer_cov_max': outgroup['non_zero_max'],
}
output_handle.write(("\t".join([
str(row[col]) for col in KMER_COLS
])))
output_handle.write("\n")
def read_lines(input_file):
"""Return lines in a text file as a list."""
lines = []
with open(input_file, 'r') as input_handle:
for line in input_handle:
lines.append(line.rstrip())
return lines
def parse_filter_kmers(kmers):
with open(kmers, 'r') as kmer_handle:
for line in kmer_handle:
if line.startswith(">"):
line = line.rstrip().replace(">", "")
KMERS[line.split("-")[0]] = True
if __name__ == '__main__':
parser = ap.ArgumentParser(
prog='summarize-kmer-counts.py', conflict_handler='resolve',
description=("Summarize kmer counts of each simulation.")
)
parser.add_argument('summary', type=str, metavar="SUMMARY",
help='Summary of Bacillus genomes.')
parser.add_argument('directory', type=str, metavar="SIMULATION_DIR",
help='Directory with group specific 31-mer counts.')
parser.add_argument('group', type=str, metavar="GROUP",
help='Which group to parse (ba, bcg or lef).')
parser.add_argument('kmers', type=str, metavar="KMERS",
help='Group specific k-mers.')
parser.add_argument('coverages', type=str, metavar="COVERAGES",
help=('Coverages to subsample to.'))
parser.add_argument('outdir', type=str, metavar="OUTDIR",
help='Directory to output to.')
parser.add_argument('--cpu', default=1, type=int, metavar="INT",
help='Number of cores to use (Default: 1)')
parser.add_argument('--single_sample', type=str, metavar="STR",
help='Process a single sample.')
parser.add_argument('--skip_kmers', action='store_true', default=False,
help='Skip kmer processing.')
parser.add_argument('--filter', action='store_true', default=False,
help='Filter counts based on input kmers.')
args = parser.parse_args()
if args.group not in ['ba', 'bcg', 'lef']:
raise Exception("GROUPS must be 'ba', 'bcg' or 'lef'")
coverages = read_lines(args.coverages)
print("Parsing Summary")
parse_summary(args.summary)
print("Parsing Kmers")
if args.filter:
print("Filtering Kmers")
args.skip_kmers = True
parse_filter_kmers(args.kmers)
else:
print("Parsing Kmers")
parse_kmers(args.kmers, coverages, skip_kmers=args.skip_kmers,
has_hamming=False if args.group == 'lef' else True)
total_kmers = len(KMERS)
current = 1
samples = list(SAMPLES.keys())
if args.single_sample:
samples = [args.single_sample]
total = len(samples)
for sample in samples:
path = "{0}/{1}".format(args.directory, sample)
if os.path.exists(path):
print("Working on {0} ({1} of {2})".format(sample, current, total))
current += 1
count_files = sorted(glob.glob(
"{0}/*-{1}.txt.gz".format(path, args.group)
))
for count_file in count_files:
coverage = os.path.basename(count_file).split('-')[1]
parse_counts(count_file, sample, coverage, args.group,
skip_kmers=args.skip_kmers,
filter_kmers=args.filter)
print("Output sample summary")
if args.single_sample:
print_sample_summary("{0}/count-summary-{1}-{2}.txt".format(
args.outdir, args.single_sample, args.group
))
else:
print_sample_summary("{0}/count-summary-sample-{1}.txt".format(
args.outdir, args.group
))
if not args.skip_kmers:
print("Output kmer summary")
if args.single_sample:
print_kmer_summary("{0}/count-summary-kmer-{1}-{2}.txt".format(
args.outdir, args.single_sample, args.group
))
else:
print_kmer_summary("{0}/count-summary-kmer-{1}.txt".format(
args.outdir, args.group
))
| 2.1875 | 2 |
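The two pure helpers in the script (canonical k-mer normalization and coverage summary statistics) can be exercised on their own. A small sketch follows; the helper bodies are copied or condensed here because the script itself is not an importable module, and `summarize` is a simplified stand-in for `get_coverage_stats`.

```python
import numpy as np

def reverse_complement(seq):
    """Reverse complement a DNA sequence (same logic as in the script)."""
    complement = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G',
                  'a': 't', 't': 'a', 'g': 'c', 'c': 'g'}
    return ''.join([complement[b] for b in seq[::-1]])

# A k-mer and its reverse complement are counted as the same canonical k-mer.
assert reverse_complement("ATGC") == "GCAT"

def summarize(coverage):
    """Condensed version of get_coverage_stats(): min/median/mean/max over counts."""
    arr = np.array(coverage)
    return {'min': int(arr.min()), 'median': int(np.median(arr)),
            'mean': float(arr.mean()), 'max': int(arr.max())}

print(summarize([0, 0, 3, 7, 10]))  # {'min': 0, 'median': 3, 'mean': 4.0, 'max': 10}
```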
movies/exceptions.py | te0dor/netguru-movies | 0 | 5102 |
from marshmallow.exceptions import ValidationError
class ObjectDoesNotExist(Exception):
"""Exception if not found results"""
pass
class CommunicationError(Exception):
"""Exception for diferents problem with communications."""
pass
__all__ = ('ValidationError', 'ObjectDoesNotExist', 'CommunicationError')
| 2.3125 | 2 |
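A small sketch of how these exceptions might be raised and handled, assuming the package is importable as `movies`; `get_movie` is a hypothetical helper, not part of the repository.

```python
from movies.exceptions import ObjectDoesNotExist

def get_movie(catalog, title):
    # Hypothetical lookup that maps a missing key to the domain exception.
    try:
        return catalog[title]
    except KeyError:
        raise ObjectDoesNotExist('Movie "{}" not found'.format(title))

try:
    get_movie({}, 'Alien')
except ObjectDoesNotExist as exc:
    print(exc)  # Movie "Alien" not found
```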
music_api/apps/music_app/admin.py | fejiroofficial/Simple_music | 0 | 5103 | from django.contrib import admin
from .models import Songs
admin.site.register(Songs)
# Register your models here.
| 1.390625 | 1 |
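An equivalent, slightly richer registration using a `ModelAdmin` subclass; a sketch only, since apart from the implicit `id` primary key the `Songs` field names are not visible here, so `list_display` stays minimal.

```python
from django.contrib import admin
from .models import Songs

@admin.register(Songs)  # replaces the plain admin.site.register(Songs) call
class SongsAdmin(admin.ModelAdmin):
    list_display = ('id',)  # extend with real field names, e.g. ('id', 'title')
    ordering = ('id',)
```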
scripts/generate_image_series.py | JIC-Image-Analysis/senescence-in-field | 0 | 5104 | # Draw image time series for one or more plots
from jicbioimage.core.image import Image
import dtoolcore
import click
from translate_labels import rack_plot_to_image_plot
from image_utils import join_horizontally, join_vertically
def identifiers_where_match_is_true(dataset, match_function):
return [i for i in dataset.identifiers if match_function(i)]
def generate_image_series_for_plot(rack, plot):
n_image, n_plot = rack_plot_to_image_plot(rack, plot)
# n_image, n_plot = 55, 24
print "{}_{}".format(n_image, n_plot)
dataset_uri = 'file:/Users/hartleym/data_intermediate/separate_plots'
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
plot_number_overlay = dataset.get_overlay('plot_number')
ordering_overlay = dataset.get_overlay('ordering')
date_overlay = dataset.get_overlay('date')
def is_match(i):
try:
ordering_as_int = int(ordering_overlay[i])
except TypeError:
return False
if ordering_as_int != n_image:
return False
if int(plot_number_overlay[i]) != n_plot:
return False
return True
identifiers = identifiers_where_match_is_true(dataset, is_match)
def sort_identifiers_by_date(identifiers):
dates_and_identifiers = [(date_overlay[i], i) for i in identifiers]
sorted_dates_and_identifiers = sorted(dates_and_identifiers)
_, sorted_identifiers = zip(*sorted_dates_and_identifiers)
return(sorted_identifiers)
sorted_identifiers = sort_identifiers_by_date(identifiers)
def identifiers_to_joined_image(identifiers):
images = []
for identifier in identifiers:
image_fpath = dataset.item_content_abspath(identifier)
image = Image.from_file(image_fpath)
images.append(image)
return join_horizontally(images)
result = identifiers_to_joined_image(sorted_identifiers)
output_fname = 'example_from_tobin.png'
with open(output_fname, 'wb') as fh:
fh.write(result.png())
@click.command()
def main():
# Early leaf senescence
# generate_image_series_for_plot(3, 16)
# generate_image_series_for_plot(7, 9)
# generate_image_series_for_plot(9, 1)
# Late leaf senescence
generate_image_series_for_plot(7, 15)
if __name__ == '__main__':
main()
| 2.390625 | 2 |
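The date-ordering step is the heart of the series assembly; below is a standalone sketch of it with the dtoolcore overlay mocked as a plain dict (identifiers and dates are made up).

```python
# Overlay mocked as a dict: identifier -> ISO date string.
date_overlay = {'id1': '2016-07-20', 'id2': '2016-06-14', 'id3': '2016-08-02'}
identifiers = ['id1', 'id2', 'id3']

def sort_identifiers_by_date(identifiers):
    # ISO date strings sort lexicographically, which is also chronological order.
    dates_and_identifiers = [(date_overlay[i], i) for i in identifiers]
    _, sorted_identifiers = zip(*sorted(dates_and_identifiers))
    return sorted_identifiers

print(sort_identifiers_by_date(identifiers))  # ('id2', 'id1', 'id3')
```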
pytpp/properties/response_objects/system_status.py | Venafi/pytpp | 4 | 5105 |
from pytpp.properties.response_objects.dataclasses import system_status
from pytpp.tools.helpers.date_converter import from_date_string
class SystemStatus:
@staticmethod
def Engine(response_object: dict):
if not isinstance(response_object, dict):
response_object = {}
return system_status.Engine(
dn=response_object.get('DN'),
display_name=response_object.get('DisplayName'),
guid=response_object.get('Guid'),
id=response_object.get('Id'),
name=response_object.get('Name'),
)
@staticmethod
def Services(response_object: dict):
if not isinstance(response_object, dict):
response_object = {}
return system_status.Services(
vplatform=SystemStatus.Service(response_object.get('vPlatform')),
log_server=SystemStatus.Service(response_object.get('logServer')),
iis=SystemStatus.Service(response_object.get('iis')),
)
@staticmethod
def Service(response_object: dict):
if not isinstance(response_object, dict):
response_object = {}
return system_status.Service(
modules=response_object.get('modules'),
time_since_first_seen=from_date_string(response_object.get('timeSinceFirstSeen'), duration_format=True),
time_since_last_seen=from_date_string(response_object.get('timeSinceLastSeen'), duration_format=True),
status=response_object.get('Status'),
)
@staticmethod
def SystemStatus(response_object: dict):
if not isinstance(response_object, dict):
response_object = {}
return system_status.SystemStatus(
engine_name=response_object.get('engineName'),
services=SystemStatus.Services(response_object.get('services')),
version=response_object.get('version'),
)
@staticmethod
def Task(response_object: dict):
if not isinstance(response_object, dict):
response_object = {}
return system_status.Task(
display_name=response_object.get('DisplayName'),
name=response_object.get('Name'),
start_time=from_date_string(response_object.get('StartTime')),
stop_time=from_date_string(response_object.get('StopTime')),
warning_count=response_object.get('WarningCount'),
)
@staticmethod
def UpgradeInfo(response_object: dict):
if not isinstance(response_object, dict):
response_object = {}
return system_status.UpgradeInfo(
id=response_object.get('Id'),
start_time=from_date_string(response_object.get('StartTime')),
versions=response_object.get('Versions'),
)
@staticmethod
def UpgradeStatus(response_object: dict):
if not isinstance(response_object, dict):
response_object = {}
return system_status.UpgradeStatus(
engine=SystemStatus.Engine(response_object.get('Engine')),
status=response_object.get('Status'),
upgrade_start_time=from_date_string(response_object.get('UpgradeStartTime')),
upgrade_stop_time=from_date_string(response_object.get('UpgradeStopTime')),
tasks_completed=[SystemStatus.Task(t) for t in response_object.get('TasksCompleted', [])],
tasks_pending=[SystemStatus.Task(t) for t in response_object.get('TasksPending', [])],
tasks_running=[SystemStatus.Task(t) for t in response_object.get('TasksRunning', [])],
)
@staticmethod
def UpgradeSummary(response_object: dict):
if not isinstance(response_object, dict):
response_object = {}
return system_status.UpgradeSummary(
status=response_object.get('Status'),
upgrade_start_time=from_date_string(response_object.get('UpgradeStartTime')),
upgrade_stop_time=from_date_string(response_object.get('UpgradeStopTime')),
completed_tasks=response_object.get('CompletedTasks'),
target_version=response_object.get('TargetVersion'),
engines_complete=response_object.get('EnginesComplete'),
engines_running=response_object.get('EnginesRunning'),
engines_blocked=response_object.get('EnginesBlocked'),
engines_in_error=response_object.get('EnginesInError'),
engines_pending_install=response_object.get('EnginesPendingInstall'),
)
| 2.375 | 2 |
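These static methods are thin deserializers: any key missing from the response dict simply becomes `None`. A minimal sketch, assuming `pytpp` is installed and the generated dataclass exposes the attribute names used in the constructor calls above.

```python
from pytpp.properties.response_objects.system_status import SystemStatus

engine = SystemStatus.Engine({'DN': r'\VED\Engines\TPP01', 'Name': 'TPP01'})
print(engine.name)   # TPP01
print(engine.guid)   # None -- key absent from the response dict

# Passing something that is not a dict falls back to an all-None object.
empty = SystemStatus.Engine(None)
print(empty.dn)      # None
```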
src/data/dataModule.py | mikkelfo/Title-prediction-from-abstract | 0 | 5106 |
from typing import Optional
import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf
from torch.utils.data import DataLoader, random_split
from transformers import T5Tokenizer
from src.data.PaperDataset import PaperDataset
class ArvixDataModule(pl.LightningDataModule):
def __init__(self, config: str = "src/data/config.yaml") -> None:
super().__init__()
self.config = OmegaConf.load(config)
def prepare_data(self) -> None:
# Add tokenizing
tokenizer = T5Tokenizer.from_pretrained("t5-base")
titles, abstracts = torch.load("data/processed/data.pt").T
tokenized_abstracts = tokenizer.batch_encode_plus(
abstracts, padding=True, truncation=True, return_tensors="pt"
)
tokenized_titles = tokenizer.batch_encode_plus(
titles, padding=True, truncation=True, return_tensors="pt"
)
self.data = PaperDataset(tokenized_abstracts, tokenized_titles)
def setup(self, stage: Optional[str] = None):
train, val, test = random_split(
self.data,
[self.config.n_train, self.config.n_val, self.config.n_test],
generator=torch.Generator().manual_seed(1337),
)
if stage == "fit" or stage is None:
self.train_set = train
self.val_set = val
if stage == "test":
self.test_set = test
def train_dataloader(self) -> DataLoader:
return DataLoader(self.train_set, batch_size=32, num_workers=4)
def val_dataloader(self) -> DataLoader:
return DataLoader(self.val_set, batch_size=32, num_workers=4)
def test_dataloader(self) -> DataLoader:
return DataLoader(self.test_set, batch_size=32, num_workers=4)
if __name__ == "__main__":
dm = ArvixDataModule()
| 2.171875 | 2 |
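A minimal sketch of driving the datamodule by hand, assuming `data/processed/data.pt` and `src/data/config.yaml` exist as the code above expects; normally a `pytorch_lightning.Trainer` would call these hooks itself.

```python
from src.data.dataModule import ArvixDataModule

dm = ArvixDataModule()   # reads n_train/n_val/n_test from the config
dm.prepare_data()        # loads data.pt and tokenizes with the T5 tokenizer
dm.setup(stage="fit")    # deterministic train/val split (seeded generator)
batch = next(iter(dm.train_dataloader()))
```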
shs/gui/RootFrame.py | ansobolev/shs | 1 | 5107 | # -*- coding: utf-8 -*-
import os
import sys
import time
import subprocess
import wx
import ConfigParser
from wx.lib.mixins.listctrl import getListCtrlSelection
from wx.lib.pubsub import pub
from gui.RootGUI import RootGUI
from StepsDialog import StepsDialog
from PlotFrame import PlotFuncFrame, PlotCorrFrame
import interface
import mbox
class RootFrame(RootGUI):
calcs = []
plot_frame = None
def __init__(self, *args, **kwds):
super(RootFrame, self).__init__(*args, **kwds)
# set root
self.root = self.set_root()
# initialize choices
self.propChoices = interface.dataClasses()
calc_data_types = self.propChoices.types()
calc_data_classes = self.propChoices.classes(calc_data_types[0])
corr_classes = self.propChoices.classes("Histogram")
self.propType.SetItems(calc_data_types)
self.propChoice.SetItems(calc_data_classes)
self.xCorr.SetItems(corr_classes)
self.yCorr.SetItems(corr_classes)
self.propType.SetSelection(0)
self.propChoice.SetSelection(0)
self.xCorr.SetSelection(0)
self.yCorr.SetSelection(0)
# initialize calc tree
self.build_tree(self.root, self.typeRBox.GetItemLabel(self.typeRBox.GetSelection()))
# initialize calc list
self.calcList.InsertColumn(0, 'Directory', width=180)
self.calcList.InsertColumn(1, 'Type', width=70)
self.calcList.InsertColumn(2, 'NSteps', width=100)
def set_root(self):
"""
Sets root directory for the GUI based on the config file
:return: Root directory
"""
config_dir = os.path.expanduser("~/.local/shs")
config_file = os.path.join(config_dir, "shs_gui.cfg")
# check the file and create one if it's not there
if not os.path.isfile(config_file):
os.makedirs(config_dir)
open(config_file, 'w').close()
config = ConfigParser.ConfigParser()
config.read(config_file)
# if config exists and has needed option
if config.has_option("general", "root_dir"):
return config.get("general", "root_dir")
# make config
if not config.has_section("general"):
config.add_section("general")
dlg = wx.DirDialog(self, "Select root directory")
if dlg.ShowModal() == wx.ID_OK:
root_dir = dlg.GetPath()
config.set("general", "root_dir", root_dir)
else:
sys.exit(1)
with open(config_file, 'w') as f:
config.write(f)
return root_dir
def build_tree(self, root, calc_type):
"""
Adds a new root element and then its children
:param root: root directory for the tree
:param calc_type: calculation type
"""
self.calcTree.DeleteAllItems()
r = len(root.split(os.sep))
ids = {root: self.calcTree.AddRoot(root)}
for (dir_path, dir_names, file_names) in os.walk(root):
if interface.isCalcOfType(calc_type, dn=dir_names, fn=file_names):
# find the number of steps in MDE file, quickly
nsteps = interface.GetNumMDESteps(dir_path)
ancdirs = dir_path.split(os.sep)[r:]
if nsteps is not None:
ancdirs[-1] += ' [%i]' % nsteps
ad = root
for ancdir in ancdirs:
d = os.path.join(ad, ancdir)
if not d in ids:
ids[d] = self.calcTree.AppendItem(ids[ad], ancdir)
self.calcTree.SortChildren(ids[ad])
ad = d
def get_selection_dir(self):
item = self.calcTree.GetSelection()
parent = self.calcTree.GetItemParent(item)
path = [self.calcTree.GetItemText(item)]
while parent.IsOk():
path.append(self.calcTree.GetItemText(parent))
parent = self.calcTree.GetItemParent(parent)
# calculation directory
calc_dir = os.sep.join(path[::-1]).split()[0]
return calc_dir
# return os.sep.join((self.root, calc_dir))
def onSelChange(self, event):
# calculation type
ctype = self.typeRBox.GetItemLabel(self.typeRBox.GetSelection())
# calculation directory
cdir = self.get_selection_dir()
if interface.isCalcOfType(ctype, dir=cdir):
self.enqueueBtn.Enable()
else:
self.enqueueBtn.Enable(False)
def propTypeChange(self, event):
# property type
pt_num = self.propType.GetSelection()
pt = self.propType.GetItems()[pt_num]
self.propChoice.SetItems(self.propChoices.classes(pt))
self.propChoice.SetSelection(0)
def typeChange(self, event):
ctype = self.typeRBox.GetItemLabel(self.typeRBox.GetSelection())
self.build_tree(self.root, ctype)
def upBtnPress(self, event):
# selection indices
sind = getListCtrlSelection(self.calcList)
if sind:
# number of deleted strings
ds = 0
for si in sind:
self.calcs.pop(si - ds)
self.calcList.DeleteItem(si - ds)
ds += 1
return 0
return 1
def downBtnPress(self, event):
# current list count
clc = self.calcList.GetItemCount()
# calculation type
ctype = self.typeRBox.GetItemLabel(self.typeRBox.GetSelection())
# calculation directory
cdir = self.get_selection_dir()
if not interface.isCalcOfType(ctype, dir=cdir):
mbox.NoResults(cdir, ctype)
return 1
# init steps range
r = None
if ctype in ('.output', '.ANI'):
# enter dialog
dlg = StepsDialog(None)
if dlg.ShowModal() == wx.ID_OK:
r = dlg.GetRange()
dlg.Destroy()
self.calcs.append(interface.getCalc(cdir, ctype, r))
self.calcList.InsertStringItem(clc, cdir[len(self.root)+1:])
self.calcList.SetStringItem(clc, 1, ctype)
self.calcList.SetStringItem(clc, 2, str(len(r)) if r is not None else '')
return 0
def on_enqueue_press(self, _):
from sshutils import getMount, getDevice, getRemoteDir
# on which device are we?
calc_dir = self.get_selection_dir()
mount_path = getMount(calc_dir)
device_name, device_type = getDevice(mount_path)
if 'ssh' in device_type:
user, host_dir = device_name.split('@')
hostname, remote_mount_path = host_dir.split(':')
remote_dir = getRemoteDir(calc_dir, mount_path, remote_mount_path)
self.enqueue_remote(remote_dir, hostname, user)
else:
self.enqueue_local(calc_dir)
@staticmethod
def enqueue_local(calc_dir):
"""
Enqueue a task on a local filesystem
:param calc_dir: calculation directory on a local filesystem
:return: error_code (0 is OK)
"""
import distutils.spawn
# find which queue system is implemented on cluster (qstat - PBS, sinfo - SLURM)
if distutils.spawn.find_executable('qstat') is not None:
q = 'pbs'
elif distutils.spawn.find_executable('sinfo') is not None:
q = 'slurm'
else:
mbox.JobSubmit(None, ())
return -1
comm = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', q, q + '.sh'))
submit = subprocess.Popen(['/bin/bash', comm, '-d=' + calc_dir], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
mbox.JobSubmit(q, submit.communicate())
@staticmethod
def enqueue_remote(calc_dir, host, user):
"""
Enqueue a task on a remote filesystem
:param calc_dir: calculation directory on a remote filesystem
:param host: host where to enqueue a task
:param user: user of a remote system who enqueues a task
:return: error code (0 is OK)
"""
from sshutils import getSSHClient, getQueue, copyFile, removeFile, runCommand
ssh = getSSHClient(host, user)
# find which queue system is implemented on cluster (qstat - PBS, sinfo - SLURM)
q = getQueue(ssh)
if q is None:
mbox.JobSubmit(None, ())
return None
# queue putter on a local machine
local_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', q))
putter = q + '.sh'
sftp = copyFile(ssh, putter, local_dir, calc_dir)
remote_file = os.path.join(calc_dir, putter)
stdout, stderr = runCommand(ssh, 'bash ' + remote_file + ' -d=' + calc_dir)
mbox.JobSubmit(q, ('\n'.join(stdout.readlines()), '\n'.join(stderr.readlines())))
removeFile(sftp, remote_file)
ssh.close()
def plotBtnPress(self, event):
if self.noteBook.GetSelection() == 0:
self.plot_property()
else:
self.plot_correlation()
def plot_property(self):
# plot options - get all the data to plot
ptype = self.propType.GetItems()[self.propType.GetSelection()]
pchoice = self.propChoice.GetItems()[self.propChoice.GetSelection()]
data_class = self.propChoices.dataClass(ptype, pchoice)
leg = [self.calcList.GetItemText(i) for i in getListCtrlSelection(self.calcList)]
t1 = time.clock()
plot_data = interface.getData(ptype, data_class, leg,
[self.calcs[i] for i in getListCtrlSelection(self.calcList)])
self.SetStatusText('Calculation time: %7.2f s.' % (time.clock() - t1))
msg = plot_data
try:
self.plot_frame.Raise()
except (AttributeError, wx.PyDeadObjectError):
self.plot_frame = PlotFuncFrame(self)
self.plot_frame.Show()
pub.sendMessage('data.plot', message=msg)
def plot_correlation(self):
# correlate options - get all the data to plot
xchoice = self.xCorr.GetSelection()
ychoice = self.yCorr.GetSelection()
leg = [self.calcList.GetItemText(i) for i in getListCtrlSelection(self.calcList)]
data, info = interface.getCorr(xchoice, ychoice, [self.calcs[i] for i in getListCtrlSelection(self.calcList)])
msg = [leg, data, info]
try:
self.plot_frame.Raise()
except (AttributeError, wx.PyDeadObjectError):
self.plot_frame = PlotCorrFrame(self)
self.plot_frame.Show()
pub.sendMessage('corr.plot', message=msg)
| 2.125 | 2 |
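The `set_root()` logic above mixes config parsing with a wx dialog; here is a standalone sketch of just the config lookup (Python 2, matching the module's imports; `read_root_dir` and its default value are hypothetical).

```python
import os
import ConfigParser  # Python 2 stdlib name, as used in RootFrame.py

def read_root_dir(default="~/calcs"):
    config_file = os.path.join(os.path.expanduser("~/.local/shs"), "shs_gui.cfg")
    config = ConfigParser.ConfigParser()
    config.read(config_file)  # a missing file is silently ignored
    if config.has_option("general", "root_dir"):
        return config.get("general", "root_dir")
    return os.path.expanduser(default)
```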
saleor/order/migrations/0015_auto_20170206_0407.py | acabezasg/urpi-master | 6 | 5108 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-06 10:07
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_prices.models
class Migration(migrations.Migration):
dependencies = [
('order', '0014_auto_20161028_0955'),
]
operations = [
migrations.AlterModelOptions(
name='deliverygroup',
options={'verbose_name': 'Delivery Group', 'verbose_name_plural': 'Delivery Groups'},
),
migrations.AlterModelOptions(
name='order',
options={'ordering': ('-last_status_change',), 'verbose_name': 'Order', 'verbose_name_plural': 'Orders'},
),
migrations.AlterModelOptions(
name='ordereditem',
options={'verbose_name': 'Ordered item', 'verbose_name_plural': 'Ordered items'},
),
migrations.AlterModelOptions(
name='orderhistoryentry',
options={'ordering': ('date',), 'verbose_name': 'Order history entry', 'verbose_name_plural': 'Order history entries'},
),
migrations.AlterModelOptions(
name='ordernote',
options={'verbose_name': 'Order note', 'verbose_name_plural': 'Order notes'},
),
migrations.AlterModelOptions(
name='payment',
options={'ordering': ('-pk',), 'verbose_name': 'Payment', 'verbose_name_plural': 'Payments'},
),
migrations.AlterField(
model_name='deliverygroup',
name='last_updated',
field=models.DateTimeField(auto_now=True, null=True, verbose_name='last updated'),
),
migrations.AlterField(
model_name='deliverygroup',
name='shipping_method_name',
field=models.CharField(blank=True, default=None, editable=False, max_length=255, null=True, verbose_name='shipping method name'),
),
migrations.AlterField(
model_name='deliverygroup',
name='tracking_number',
field=models.CharField(blank=True, default='', max_length=255, verbose_name='tracking number'),
),
migrations.AlterField(
model_name='order',
name='billing_address',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='account.Address', verbose_name='billing address'),
),
migrations.AlterField(
model_name='order',
name='discount_amount',
field=django_prices.models.MoneyField(blank=True, currency=settings.DEFAULT_CURRENCY, decimal_places=2, max_digits=12, null=True, verbose_name='discount amount'),
),
migrations.AlterField(
model_name='order',
name='discount_name',
field=models.CharField(blank=True, default='', max_length=255, verbose_name='discount name'),
),
migrations.AlterField(
model_name='order',
name='shipping_address',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='account.Address', verbose_name='shipping address'),
),
migrations.AlterField(
model_name='order',
name='total_net',
field=django_prices.models.MoneyField(blank=True, currency=settings.DEFAULT_CURRENCY, decimal_places=2, max_digits=12, null=True, verbose_name='total net'),
),
migrations.AlterField(
model_name='order',
name='total_tax',
field=django_prices.models.MoneyField(blank=True, currency=settings.DEFAULT_CURRENCY, decimal_places=2, max_digits=12, null=True, verbose_name='total tax'),
),
migrations.AlterField(
model_name='order',
name='tracking_client_id',
field=models.CharField(blank=True, editable=False, max_length=36, verbose_name='tracking client id'),
),
migrations.AlterField(
model_name='order',
name='user_email',
field=models.EmailField(blank=True, default='', editable=False, max_length=254, verbose_name='user email'),
),
migrations.AlterField(
model_name='order',
name='voucher',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='discount.Voucher', verbose_name='voucher'),
),
migrations.AlterField(
model_name='ordereditem',
name='delivery_group',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='items', to='order.DeliveryGroup', verbose_name='delivery group'),
),
migrations.AlterField(
model_name='ordereditem',
name='stock',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='product.Stock', verbose_name='stock'),
),
migrations.AlterField(
model_name='orderhistoryentry',
name='comment',
field=models.CharField(blank=True, default='', max_length=100, verbose_name='comment'),
),
migrations.AlterField(
model_name='orderhistoryentry',
name='order',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='history', to='order.Order', verbose_name='order'),
),
migrations.AlterField(
model_name='orderhistoryentry',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user'),
),
migrations.AlterField(
model_name='payment',
name='order',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='payments', to='order.Order', verbose_name='order'),
),
]
| 1.523438 | 2 |
testrail_client/api/configurations.py | tonybearpan/testrail-lib | 0 | 5109 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .base import TestRailAPIBase
class Config(TestRailAPIBase):
"""
Use the following API methods to request details
about configurations and to create or modify configurations.
"""
def __repr__(self):
return '<TestRailAPI config>'
def get(self, project_id):
"""
Returns a list of available configurations,
grouped by configuration groups (requires TestRail 3.1 or later).
:param project_id: The ID of the project
"""
return self._get('get_configs/{}'.format(project_id))
def add(self, config_group_id, name=''):
"""
Creates a new configuration (requires TestRail 5.2 or later).
:param config_group_id: The ID of the configuration group
the configuration should be added to
:param name: str, The name of the configuration (required)
"""
return self._post('add_config/{}'.format(config_group_id),
json=dict(name=name))
def update(self, config_group_id, name=''):
"""
Updates an existing configuration (requires TestRail 5.2 or later).
:param config_group_id: The ID of the configuration group
the configuration should be added to
:param name: str, The name of the configuration (required)
"""
return self._post('update_config/{}'.format(config_group_id),
json=dict(name=name))
def delete(self, config_id):
"""
Deletes an existing configuration (requires TestRail 5.2 or later).
:param config_id:
"""
return self._post('delete_config/{}'.format(config_id))
def add_group(self, project_id, name=''):
"""
Creates a new configuration group (requires TestRail 5.2 or later).
:param project_id: The ID of the project the configuration group should be added to
:param name: The name of the configuration group (required)
"""
return self._post('add_config_group/{}'.format(project_id),
json=dict(name=name))
def update_group(self, config_group_id, name):
"""
Updates an existing configuration group (requires TestRail 5.2 or later).
:param config_group_id: The ID of the configuration group
:param name: The name of the configuration group
"""
return self._post('update_config_group/{}'.format(config_group_id),
json=dict(name=name))
def delete_group(self, config_group_id):
"""
Deletes an existing configuration (requires TestRail 5.2 or later).
:param config_group_id: The ID of the configuration
"""
return self._post('delete_config_group/{}'.format(config_group_id))
def priority(self):
"""
Returns a list of available priorities.
"""
return self._get('get_priorities')
def template(self, project_id):
"""
Returns a list of available templates (requires TestRail 5.2 or later).
:param project_id:The ID of the project
"""
return self._get('get_templates/{}'.format(project_id))
| 2.3125 | 2 |
tests/asserts_wrapper.py | QARancher/k8s_client | 0 | 5110 | def assert_not_none(actual_result, message=""):
if not message:
message = f"{actual_result} resulted with None"
assert actual_result, message
def assert_equal(actual_result, expected_result, message=""):
if not message:
message = f"{actual_result} is not equal to expected " \
f"result {expected_result}"
assert actual_result == expected_result, message
def assert_in_list(searched_list, wanted_element, message=""):
if not message:
message = f"Failed to find '{wanted_element}' in list {searched_list}"
assert wanted_element in searched_list, message
def assert_not_in_list(searched_list, unwanted_element, message=""):
if not message:
message = f"'{unwanted_element}' found in list {searched_list} \n " \
f"although it should not be"
assert unwanted_element not in searched_list, message
def assert_of_type(wanted_type, wanted_object, message=""):
if not message:
message = f"{wanted_object} is not of type: {wanted_type}"
assert isinstance(wanted_object, wanted_type), message
| 3.265625 | 3 |
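Usage sketch for the helpers above, assuming the `tests` package is importable from the project root; the failure message in the last call is illustrative.

```python
from tests.asserts_wrapper import (
    assert_equal, assert_in_list, assert_not_in_list, assert_of_type)

assert_equal(2 + 2, 4)
assert_in_list(['pod-a', 'pod-b'], 'pod-a')
assert_of_type(dict, {'name': 'pod-a'})

try:
    assert_not_in_list(['pod-a'], 'pod-a', message="pod-a should have been evicted")
except AssertionError as exc:
    print(exc)  # pod-a should have been evicted
```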
srl/simulation_test.py | google/simple-reinforcement-learning | 60 | 5111 |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
from srl import movement
from srl import simulation
from srl import world
class TestSimulation(unittest.TestCase):
def test_in_terminal_state(self):
w = world.World.parse('@^')
sim = simulation.Simulation(world.Static(w))
self.assertFalse(sim.in_terminal_state)
sim.act(movement.ACTION_RIGHT)
self.assertTrue(sim.in_terminal_state)
def test_act_accumulates_score(self):
w = world.World.parse('@.')
sim = simulation.Simulation(world.Static(w))
sim.act(movement.ACTION_RIGHT)
sim.act(movement.ACTION_LEFT)
self.assertEqual(-2, sim.score)
def test_to_array(self):
w = world.World.parse('$.@^#')
sim = simulation.Simulation(world.Static(w))
self.assertTrue(
(np.array([[2, 3, 4, 5, 1]], dtype=np.int8) == sim.to_array())
.all())
| 2.53125 | 3 |
src/xmltollvm.py | Tejvinder/thesis-ghidra | 101 | 5112 | from llvmlite import ir
import xml.etree.ElementTree as et
int32 = ir.IntType(32)
int64 = ir.IntType(64)
int1 = ir.IntType(1)
void_type = ir.VoidType()
function_names = []
registers, functions, uniques, extracts = {}, {}, {}, {}
internal_functions = {}
memory = {}
flags = ["ZF", "CF", "OF", "SF"]
pointers = ["RSP", "RIP", "RBP", "EBP", "ESP"]
def lift(filename):
root = et.parse(filename).getroot()
module = ir.Module(name="lifted")
for register in root.find('globals').findall('register'):
if register.get('name') in flags:
var = ir.GlobalVariable(module, ir.IntType(1), register.get('name'))
var.initializer = ir.Constant(ir.IntType(1), None)
var.linkage = 'internal'
registers[register.get('name')] = var
elif register.get('name') in pointers:
var = ir.GlobalVariable(module, ir.PointerType(ir.IntType(8)), register.get('name'))
var.initializer = ir.Constant(ir.PointerType(ir.IntType(8)), None)
var.linkage = 'internal'
registers[register.get('name')] = var
else:
var = ir.GlobalVariable(module, ir.IntType(8 * int(register.get('size'))), register.get('name'))
var.initializer = ir.Constant(ir.IntType(8 * int(register.get('size'))), None)
var.linkage = 'internal'
registers[register.get('name')] = var
for memory_location in root.find('memory').findall('memory'):
var = ir.GlobalVariable(module, ir.IntType(8 * int(memory_location.get('size'))), memory_location.get('name'))
var.initializer = ir.Constant(ir.IntType(8 * int(memory_location.get('size'))), None)
var.linkage = 'internal'
memory[memory_location.get('name')] = var
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, "intra_function_branch")
internal_functions["intra_function_branch"] = ir_func
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, "call_indirect")
internal_functions["call_indirect"] = ir_func
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, "bit_extraction")
internal_functions["bit_extraction"] = ir_func
for function in root.findall('function'):
name = function.get('name')
x = 1
while name in function_names:
name = name + "_" + str(x)
x += 1
function_names.append(name)
address = function.get('address')
functions[address] = [build_function(name, module), function]
for address in functions:
ir_func, function = functions[address]
populate_func(ir_func, function)
return module
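# Usage sketch (illustrative): lift a Ghidra-exported XML translation unit into
# LLVM IR text. Assumes llvmlite is installed and "program.xml" was produced by
# the exporter that generates the <globals>/<memory>/<function> layout parsed above.
#
#     module = lift("program.xml")
#     print(str(module))   # textual LLVM IR for all lifted functions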
def populate_func(ir_func, function):
builders, blocks = build_cfg(function, ir_func)
if blocks == {}:
return
populate_cfg(function, builders, blocks)
def build_function(name, module):
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, name)
return ir_func
def build_cfg(function, ir_func):
builders, blocks = {}, {}
instructions = function.find("instructions")
if instructions:
block = ir_func.append_basic_block("entry")
blocks["entry"] = block
builders["entry"] = ir.IRBuilder(block)
for instruction in instructions:
address = instruction.find("address").text
block = ir_func.append_basic_block(address)
blocks[address] = block
builders[address] = ir.IRBuilder(block)
return builders, blocks
# noinspection DuplicatedCode
def populate_cfg(function, builders, blocks):
builder = builders["entry"]
stack_size = 10 * 1024 * 1024
stack = builder.alloca(ir.IntType(8), stack_size, name="stack")
stack_top = builder.gep(stack, [ir.Constant(int64, stack_size - 8)], name="stack_top")
builder.store(stack_top, registers["RSP"])
builder.branch(list(blocks.values())[1])
block_iterator = 1
instr = 0
quiter = False
for instruction in function.find("instructions"):
if quiter:
break
address = instruction.find("address").text
if address in builders:
builder = builders[address]
pcodes = instruction.find("pcodes")
pc = 0
no_branch = True
for pcode in pcodes:
pc += 1
mnemonic = pcode.find("name")
if mnemonic.text == "COPY":
output = pcode.find("output")
if output.text in flags and pcode.find("input_0").get("storage") == "constant":
source = ir.Constant(ir.IntType(1), int(pcode.find("input_0").text, 0))
else:
source = fetch_input_varnode(builder, pcode.find("input_0"))
update_output(builder, pcode.find("output"), source)
elif mnemonic.text == "LOAD":
input_1 = pcode.find("input_1")
output = pcode.find("output")
rhs = fetch_input_varnode(builder, input_1)
if input_1.get("storage") == "unique" and output.get("storage") == "unique":
# This is incorrect. This is treating it as a copy, should load the memory address in the input 1
update_output(builder, output, rhs)
else:
if input_1.text in pointers:
rhs = builder.gep(rhs, [ir.Constant(int64, 0)])
result = builder.load(rhs)
update_output(builder, output, result)
elif mnemonic.text == "STORE":
input_1 = pcode.find("input_1") # target
input_2 = pcode.find("input_2") # source
rhs = fetch_input_varnode(builder, input_2)
lhs = fetch_output_varnode(input_1)
lhs2 = builder.gep(lhs, [ir.Constant(int64, 0)])
if lhs2.type != rhs.type.as_pointer():
lhs2 = builder.bitcast(lhs2, rhs.type.as_pointer())
builder.store(rhs, lhs2)
elif mnemonic.text == "BRANCH":
value = pcode.find("input_0").text[2:-2]
if value in functions:
target = functions[value][0]
builder.call(target, [])
elif value in blocks:
target = blocks[value]
builder.branch(target)
no_branch = False
else:
# weird jump into some label in another function
# might be solved with callbr instruction?
builder.call(internal_functions["intra_function_branch"], [])
elif mnemonic.text == "CBRANCH":
true_target = blocks[pcode.find("input_0").text[2:-2]]
false_target = list(blocks.values())[block_iterator + 1]
condition = fetch_input_varnode(builder, pcode.find("input_1"))
no_branch = False
builder.cbranch(condition, true_target, false_target)
elif mnemonic.text == "BRANCHIND":
no_branch = False
target = fetch_input_varnode(builder, pcode.find("input_0"))
if not target.type.is_pointer:
target = builder.inttoptr(target, target.type.as_pointer())
builder.branch_indirect(target)
elif mnemonic.text == "CALL":
target = functions[pcode.find("input_0").text[2:-2]][0]
builder.call(target, [])
elif mnemonic.text == "CALLIND":
# target = pcode.find("input_0").text[2:-2]
builder.call(internal_functions["call_indirect"], [])
elif mnemonic.text == "USERDEFINED":
raise Exception("Not implemented")
elif mnemonic.text == "RETURN":
input_1 = pcode.find("input_1")
no_branch = False
if input_1 is None:
builder.ret_void()
else:
raise Exception("Return value being passed")
elif mnemonic.text == "PIECE":
raise Exception("PIECE operation needs to be tested")
elif mnemonic.text == "SUBPIECE":
output = pcode.find("output")
input_0 = pcode.find("input_0")
input_1 = pcode.find("input_1")
if input_1.text == "0x0":
val = fetch_input_varnode(builder, input_0)
result = builder.trunc(val, ir.IntType(int(output.get("size")) * 8))
update_output(builder, output, result)
else:
builder.call(internal_functions['bit_extraction'], [])
elif mnemonic.text == "INT_EQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('==', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_NOTEQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('!=', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_LESS":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('<', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SLESS":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_signed('<', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_LESSEQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('<=', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SLESS_EQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_signed('<=', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_ZEXT":
rhs = fetch_input_varnode(builder, pcode.find("input_0"))
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, rhs.type.pointee)
output = builder.zext(rhs, ir.IntType(int(pcode.find("output").get("size")) * 8))
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SEXT":
rhs = fetch_input_varnode(builder, pcode.find("input_0"))
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, rhs.type.pointee)
output = builder.sext(rhs, ir.IntType(int(pcode.find("output").get("size")) * 8))
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_ADD":
input_0 = pcode.find("input_0")
input_1 = pcode.find("input_1")
lhs = fetch_input_varnode(builder, input_0)
rhs = fetch_input_varnode(builder, input_1)
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
if input_0.text in pointers and input_1.get("storage") == "constant":
result = builder.gep(lhs, [ir.Constant(int64, int(input_1.text, 16))])
else:
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
result = builder.add(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SUB":
input_0 = pcode.find("input_0")
input_1 = pcode.find("input_1")
lhs = fetch_input_varnode(builder, input_0)
rhs = fetch_input_varnode(builder, input_1)
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
if input_0.text in pointers and input_1.get("storage") == "constant":
result = builder.gep(lhs, [ir.Constant(int64, -int(input_1.text, 16))])
else:
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
result = builder.sub(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_CARRY":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.uadd_with_overflow(lhs, rhs)
result = builder.extract_value(result, 1)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SCARRY":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.sadd_with_overflow(lhs, rhs)
result = builder.extract_value(result, 1)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SBORROW":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.sadd_with_overflow(lhs, rhs)
result = builder.extract_value(result, 1)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_2COMP":
val = fetch_input_varnode(builder, pcode.find("input_0"))
result = builder.not_(val)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_NEGATE":
val = fetch_input_varnode(builder, pcode.find("input_0"))
result = builder.neg(val)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_XOR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.xor(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_AND":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.and_(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_OR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.or_(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_LEFT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = check_shift_inputs(builder, lhs, rhs, target)
output = builder.shl(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_RIGHT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = check_shift_inputs(builder, lhs, rhs, target)
output = builder.lshr(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SRIGHT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = check_shift_inputs(builder, lhs, rhs, target)
output = builder.ashr(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_MULT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.mul(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_DIV":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.div(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_REM":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.urem(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SDIV":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.sdiv(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SREM":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.srem(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "BOOL_NEGATE":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
                # boolean negate: pcode booleans are 0/1 values, so flip the low bit
                result = builder.xor(lhs, ir.Constant(lhs.type, 1))
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "BOOL_XOR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
result = builder.xor(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "BOOL_AND":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
result = builder.and_(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "BOOL_OR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
result = builder.or_(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "FLOAT_EQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_NOTEQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_LESS":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_LESSEQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_ADD":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_SUB":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_MULT":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_DIV":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_NEG":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_ABS":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_SQRT":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_CEIL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_FLOOR":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_ROUND":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_NAN":
raise Exception("Not implemented")
elif mnemonic.text == "INT2FLOAT":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT2FLOAT":
raise Exception("Not implemented")
elif mnemonic.text == "TRUNC":
raise Exception("Not implemented")
elif mnemonic.text == "CPOOLREF":
raise Exception("Not implemented")
elif mnemonic.text == "NEW":
raise Exception("Not implemented")
elif mnemonic.text == "MULTIEQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "INDIRECT":
raise Exception("Not implemented")
elif mnemonic.text == "PTRADD":
raise Exception("Not implemented")
elif mnemonic.text == "PTRSUB":
raise Exception("Not implemented")
elif mnemonic.text == "CAST":
raise Exception("Not implemented")
else:
raise Exception("Not a standard pcode instruction")
block_iterator += 1
instr += 1
if block_iterator < len(blocks) and no_branch:
builder.branch(list(blocks.values())[block_iterator])
def fetch_input_varnode(builder, name):
var_type = name.get("storage")
var_size = int(name.get("size")) * 8
if var_type == "register":
return builder.load(registers[name.text])
elif var_type == "unique":
if name.text not in list(uniques.keys()):
raise Exception("Temporary variable referenced before defined")
return uniques[name.text]
elif var_type == "constant":
var = ir.Constant(ir.IntType(var_size), int(name.text, 0))
return var
elif var_type == "memory":
return memory[name.text]
def update_output(builder, name, output):
var_type = name.get("storage")
if var_type == "register":
reg = registers[name.text]
if reg.type != output.type.as_pointer():
reg = builder.bitcast(reg, output.type.as_pointer())
builder.store(output, reg)
elif var_type == "unique":
uniques[name.text] = output
def fetch_output_varnode(name):
var_type = name.get("storage")
if var_type == "register":
return registers[name.text]
elif var_type == "unique":
if name.text not in uniques:
uniques[name.text] = None
return uniques[name.text]
def int_check_inputs(builder, lhs, rhs, target):
if lhs.type != target:
if lhs.type.is_pointer:
lhs2 = lhs
lhs = builder.ptrtoint(lhs, target)
if lhs2 == rhs:
rhs = lhs
if rhs.type != target and lhs != rhs:
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, target)
return lhs, rhs
def check_shift_inputs(builder, lhs, rhs, target):
if lhs.type != target:
if lhs.type.is_pointer:
lhs = builder.ptrtoint(lhs, target)
else:
lhs = builder.zext(lhs, target)
if rhs.type != target:
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, target)
else:
rhs = builder.zext(rhs, target)
return lhs, rhs
def int_comparison_check_inputs(builder, lhs, rhs):
# For integer comparison operations. We assume rhs is the correct type.
if lhs.type.is_pointer:
lhs = builder.ptrtoint(lhs, rhs.type)
return lhs, rhs | 2.296875 | 2 |
modules/WPSeku/modules/discovery/generic/wplisting.py | Farz7/Darkness | 18 | 5113 | #!/usr/bin/env python
# -*- Coding: UTF-8 -*-
#
# WPSeku: Wordpress Security Scanner
#
# @url: https://github.com/m4ll0k/WPSeku
# @author: <NAME> (M4ll0k)
import re
from lib import wphttp
from lib import wpprint
class wplisting:
chk = wphttp.UCheck()
out = wpprint.wpprint()
def __init__(self,agent,proxy,redir,time,url,cookie):
self.url = url
self.cookie = cookie
self.req = wphttp.wphttp(
agent=agent,proxy=proxy,
redir=redir,time=time
)
def run(self):
paths = ['/wp-admin','/wp-includes','/wp-content/uploads',
'/wp-content/plugins','/wp-content/themes'
]
try:
for path in paths:
url = wplisting.chk.path(self.url,path)
resp = self.req.send(url,c=self.cookie)
if resp.status_code == 200 and resp._content != None:
if resp.url == url:
wplisting.out.plus('Dir {} listing enabled under: {}'.format(path,resp.url))
        except Exception as e:
pass | 2.015625 | 2 |
tw2/jit/widgets/__init__.py | toscawidgets/tw2.jit | 1 | 5114 | <gh_stars>1-10
from tw2.jit.widgets.chart import (AreaChart, BarChart, PieChart)
from tw2.jit.widgets.graph import (ForceDirectedGraph, RadialGraph)
from tw2.jit.widgets.tree import (SpaceTree, HyperTree, Sunburst,
Icicle, TreeMap)
from tw2.jit.widgets.ajax import AjaxRadialGraph
from tw2.jit.widgets.sqla import SQLARadialGraph
| 1.296875 | 1 |
bot.py | tiianprb/TikTok-Downloader-Bot | 0 | 5115 | <filename>bot.py
import json, requests, os, shlex, asyncio, uuid, shutil
from typing import Tuple
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery
# Configs
API_HASH = os.environ['API_HASH']
APP_ID = int(os.environ['APP_ID'])
BOT_TOKEN = os.environ['BOT_TOKEN']
downloads = './downloads/{}/'
#Button
START_BUTTONS=[
[
InlineKeyboardButton('Source', url='https://github.com/X-Gorn/TikTokDL'),
InlineKeyboardButton('Project Channel', url='https://t.me/xTeamBots'),
],
[InlineKeyboardButton('Author', url='https://t.me/xgorn')],
]
DL_BUTTONS=[
[
InlineKeyboardButton('No Watermark', callback_data='nowm'),
InlineKeyboardButton('Watermark', callback_data='wm'),
],
[InlineKeyboardButton('Audio', callback_data='audio')],
]
# Running bot
xbot = Client('TikTokDL', api_id=APP_ID, api_hash=API_HASH, bot_token=BOT_TOKEN)
# Helpers
# Thanks to FridayUB
async def run_cmd(cmd: str) -> Tuple[str, str, int, int]:
args = shlex.split(cmd)
process = await asyncio.create_subprocess_exec(
*args, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await process.communicate()
return (
stdout.decode("utf-8", "replace").strip(),
stderr.decode("utf-8", "replace").strip(),
process.returncode,
process.pid,
)
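# Illustrative only: run_cmd must be awaited from an async context, e.g.
#     stdout, stderr, returncode, pid = await run_cmd('ffmpeg -version')
# (the command string above is just an example, not one the bot issues itself).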
# Start
@xbot.on_message(filters.command('start') & filters.private)
async def _start(bot, update):
await update.reply_text(f"I'm TikTokDL!\nYou can download tiktok video/audio using this bot", True, reply_markup=InlineKeyboardMarkup(START_BUTTONS))
# Downloader for tiktok
@xbot.on_message(filters.regex(pattern='.*http.*') & filters.private)
async def _tiktok(bot, update):
url = update.text
session = requests.Session()
resp = session.head(url, allow_redirects=True)
if not 'tiktok.com' in resp.url:
return
await update.reply('Select the options below', True, reply_markup=InlineKeyboardMarkup(DL_BUTTONS))
# Callbacks
@xbot.on_callback_query()
async def _callbacks(bot, cb: CallbackQuery):
if cb.data == 'nowm':
dirs = downloads.format(uuid.uuid4().hex)
os.makedirs(dirs)
cbb = cb
update = cbb.message.reply_to_message
await cb.message.delete()
url = update.text
session = requests.Session()
resp = session.head(url, allow_redirects=True)
if '?' in resp.url:
tt = resp.url.split('?', 1)[0]
else:
tt = resp.url
ttid = dirs+tt.split('/')[-1]
r = requests.get('https://api.reiyuura.me/api/dl/tiktok?url='+tt)
result = r.text
rs = json.loads(result)
link = rs['result']['nowm']
resp = session.head(link, allow_redirects=True)
r = requests.get(resp.url, allow_redirects=True)
open(f'{ttid}.mp4', 'wb').write(r.content)
await bot.send_video(update.chat.id, f'{ttid}.mp4',)
shutil.rmtree(dirs)
elif cb.data == 'wm':
dirs = downloads.format(uuid.uuid4().hex)
os.makedirs(dirs)
cbb = cb
update = cbb.message.reply_to_message
await cb.message.delete()
url = update.text
session = requests.Session()
resp = session.head(url, allow_redirects=True)
if '?' in resp.url:
tt = resp.url.split('?', 1)[0]
else:
tt = resp.url
ttid = dirs+tt.split('/')[-1]
r = requests.get('https://api.reiyuura.me/api/dl/tiktok?url='+tt)
result = r.text
rs = json.loads(result)
link = rs['result']['wm']
resp = session.head(link, allow_redirects=True)
r = requests.get(resp.url, allow_redirects=True)
open(f'{ttid}.mp4', 'wb').write(r.content)
await bot.send_video(update.chat.id, f'{ttid}.mp4',)
shutil.rmtree(dirs)
elif cb.data == 'audio':
dirs = downloads.format(uuid.uuid4().hex)
os.makedirs(dirs)
cbb = cb
update = cbb.message.reply_to_message
await cb.message.delete()
url = update.text
session = requests.Session()
resp = session.head(url, allow_redirects=True)
if '?' in resp.url:
tt = resp.url.split('?', 1)[0]
else:
tt = resp.url
ttid = dirs+tt.split('/')[-1]
r = requests.get('https://api.reiyuura.me/api/dl/tiktok?url='+tt)
result = r.text
rs = json.loads(result)
link = rs['result']['wm']
resp = session.head(link, allow_redirects=True)
r = requests.get(resp.url, allow_redirects=True)
open(f'{ttid}.mp4', 'wb').write(r.content)
        cmd = f'ffmpeg -i "{ttid}.mp4" -vn -ar 44100 -ac 2 -ab 192k -f mp3 "{ttid}.mp3"'
await run_cmd(cmd)
await bot.send_audio(update.chat.id, f'{ttid}.mp3',)
shutil.rmtree(dirs)
xbot.run()
| 2.421875 | 2 |
frontend-gui/rpanel.py | skyu0221/660-iot | 0 | 5116 | import wx
import wx.adv
import random
import util
import config
import time
import datetime
import threading
import requests
import json
from functools import partial
class RequesterThread(threading.Thread):
# https://www.oreilly.com/library/view/python-cookbook/0596001673/ch06s03.html
def __init__(self, name, parent_thread, parent_panel):
threading.Thread.__init__(self, name=name)
self._stopevent = threading.Event()
self.parent_panel = parent_panel
self.parent_thread = parent_thread
def run(self):
while (not self._stopevent.is_set()) and self.parent_thread.is_alive():
print("hello")
# print(self.parent_panel.info_widget_dict)
# print(self.parent_panel.info)
# chnage to real time
end = datetime.datetime.now()
start = end - datetime.timedelta(minutes=1)
self.parent_panel.info["start"] = util.convert_to_GMT_zone(start)
self.parent_panel.info["end"] = util.convert_to_GMT_zone(end)
self.parent_panel._send_request(self.parent_panel.info)
self._stopevent.wait(5.0)
def join(self, timeout=None):
self._stopevent.set()
print("thread stop")
threading.Thread.join(self, timeout)
class RightPanel(wx.Panel):
def __init__(self, parent, info={}):
wx.Panel.__init__(self, parent=parent)
self.drop_down_menu_ID = None
self.result_visual_ID = None
self.info = info
self._init_UI()
def _init_UI(self):
self.SetBackgroundColour("#BAB86C")
font = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT)
font.SetPointSize(20)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
# add question label
st1 = wx.StaticText(self, label='Question')
st1.SetFont(font)
hbox1.Add(st1, proportion=2, flag=wx.RIGHT, border=10)
# add drop down menu
question_list = [
"1. How many people are in the building?",
"2. How many people are in a specific room?",
"3. Where is someone?",
# "4. Which room has someone visited?",
"4. What is the utilization of a specific room?"
]
drop_down_menu = wx.ComboBox(self, choices=question_list)
hbox1.Add(drop_down_menu, proportion=8, flag=wx.TOP, border=5)
vbox1 = wx.BoxSizer(wx.VERTICAL)
# add result label
# st2 = wx.StaticText(self, label='Result')
# st2.SetFont(font)
# vbox1.Add(st2, proportion=1, flag=wx.ALIGN_CENTER, border=1)
# add canvas panel
# canvas_panel = CanvasPanel(self)
# vbox1.Add(canvas_panel, proportion=9, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=10)
result_panel = ResultPanel(self)
# result_panel.SetBackgroundColour("#000000")
vbox1.Add(result_panel, proportion=9, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=10)
vbox.Add(hbox1, proportion=1, flag=wx.EXPAND|wx.ALL, border=10)
vbox.Add(vbox1, proportion=9, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=10)
self.SetSizer(vbox)
# listen combo
drop_down_menu.Bind(wx.EVT_COMBOBOX, partial(self.on_selection,
combo_box=drop_down_menu,
panel=result_panel))
def on_selection(self, event, combo_box, panel):
# print(self.drop_down_menu.GetValue())
print(combo_box.GetValue())
panel.init_question_UI(combo_box.GetValue()[0])
# st2 = wx.StaticText(self, label=combo_box.GetValue())
# st2.SetFont(font)
# sizer1.Add(st2, proportion=1, flag=wx.ALIGN_CENTER, border=1)
class ResultPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
# self._init_UI()
self._q_dict = {"1": self._q1_panel,
"2": self._q2_panel,
"3": self._q3_panel,
# "4": self._q4_panel,
"4": self._q5_panel,}
self.info_widget_dict = {"feeder": {}, "consumer": {}}
self.worker = None
self.server = config.SERVER
self._set_font()
def _set_font(self):
self.font = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT)
self.font.SetPointSize(12)
self.font.MakeBold()
def init_question_UI(self, q_idx):
# clean the panel
for child in self.GetChildren():
child.Destroy()
# stop the worker
if self.worker:
# print("the worker has been stop")
self.worker.join()
self.worker = None
self.info_widget_dict["feeder"].clear()
self.info_widget_dict["consumer"].clear()
decorate_panel = self._q_dict[q_idx]
decorate_panel()
def add_date_time_picker_layout(self):
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
# Start
start_label = wx.StaticText(self, label="START TIME")
start_label.SetFont(self.font)
dpc1 = wx.adv.DatePickerCtrl(self, -1, wx.DefaultDateTime)
tpc1 = wx.adv.TimePickerCtrl(self, -1, wx.DefaultDateTime)
hbox1.Add(start_label, proportion=2, flag=wx.RIGHT|wx.TOP, border=4)
hbox1.Add(dpc1, proportion=3, flag=wx.RIGHT, border=5)
hbox1.Add(tpc1, proportion=3, flag=wx.RIGHT, border=5)
vbox.Add(hbox1, proportion=0, flag=wx.ALL, border=5)
# End
end_label = wx.StaticText(self, label="END TIME")
end_label.SetFont(self.font)
dpc2 = wx.adv.DatePickerCtrl(self, -1, wx.DefaultDateTime)
tpc2 = wx.adv.TimePickerCtrl(self, -1, wx.DefaultDateTime)
hbox2.Add(end_label, proportion=2, flag=wx.RIGHT|wx.TOP, border=4)
hbox2.Add(dpc2, proportion=3, flag=wx.RIGHT, border=5)
hbox2.Add(tpc2, proportion=3, flag=wx.RIGHT, border=5)
vbox.Add(hbox2, proportion=0, flag=wx.ALL, border=5)
# Real time box
real_label = wx.StaticText(self, label="REAL TIME")
real_label.SetFont(self.font)
cb = wx.CheckBox(self)
hbox3.Add(real_label, proportion=2, flag=wx.RIGHT|wx.TOP, border=4)
hbox3.Add(cb, proportion=3, flag=wx.RIGHT|wx.TOP, border=5)
vbox.Add(hbox3, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["feeder"]["start_date"] = dpc1
self.info_widget_dict["feeder"]["start_time"] = tpc1
self.info_widget_dict["feeder"]["end_date"] = dpc2
self.info_widget_dict["feeder"]["end_time"] = tpc2
self.info_widget_dict["feeder"]["real_time"] = cb
# self.SetBackgroundColour("#000000")
# r = lambda: random.randint(0,255)
# color = '#%02X%02X%02X' % (r(),r(),r())
return vbox
def _add_confirm_button(self, sizer, question_index):
"""
question_index => {1, 2, 3, 4}
"""
comfirm_btn = wx.Button(self, id=-1, label="Confirm")
sizer.Add(comfirm_btn, proportion=0, flag=wx.TOP|wx.LEFT, border=5)
# self.Bind(wx.EVT_BUTTON, self.OnClick, comfirm_btn)
self.Bind(wx.EVT_BUTTON, lambda event: self.OnClick(event, question_index), comfirm_btn)
def _add_result_label(self, sizer):
result_label = wx.StaticText(self, label="RESULT")
font = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT)
font.SetPointSize(20)
font.MakeBold()
result_label.SetFont(font)
sizer.Add(result_label, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL, border=20)
def OnClick(self, event, question_index):
info = {}
# handle date and time
if question_index in [1, 2, 3, 4]:
start_date = self.info_widget_dict["feeder"]["start_date"].GetValue()
start_time = self.info_widget_dict["feeder"]["start_time"].GetValue()
end_date = self.info_widget_dict["feeder"]["end_date"].GetValue()
end_time = self.info_widget_dict["feeder"]["end_time"].GetValue()
info["start"] = util.combine_datetime(start_date, start_time)
info["end"] = util.combine_datetime(end_date, end_time)
# print("start time = {}".format(info["start"]))
# print("end time = {}".format(info["end"]))
if_real_time = self.info_widget_dict["feeder"]["real_time"].GetValue()
if question_index == 1:
# requester send request to server
pass
elif question_index == 2:
# requester send request to server
room = self.info_widget_dict["feeder"]["room_select"].GetValue()
print(room)
info["room"] = room
elif question_index == 3:
# requester send request to server
name = self.info_widget_dict["feeder"]["name_select"].GetValue()
print(name)
info["name"] = name
else: # question_index = 4
name = self.info_widget_dict["feeder"]["name_select"].GetValue()
print(name)
info["name"] = name
else: # question_index == 5
if_real_time = False
date = self.info_widget_dict["feeder"]["date_picker"].GetValue()
time = self.info_widget_dict["feeder"]["time_picker"].GetValue()
room = self.info_widget_dict["feeder"]["room_select"].GetValue()
info["date"] = util.combine_datetime(date, time)
info["room"] = room
# requester send request to server
info["question_index"] = question_index
self.info = info
if if_real_time:
if not self.worker:
                self.worker = RequesterThread(name="question_{}_requester".format(question_index), parent_thread=threading.currentThread(), parent_panel=self)
self.worker.start()
print("start worker")
else:
# first check if the worker is working
if self.worker:
self.worker.join()
self.worker = None
self._send_request(info)
def _request_handle(self, url, body={}, params={}, METHOD="post"):
# https://stackoverflow.com/questions/15900338/python-request-post-with-param-data
print("url", url)
print("body", body)
print("params", params)
resp = {}
if METHOD == "post":
r = requests.post(url, data=body)
else:
r = requests.get(url, params=params)
print(r.status_code)
if r.status_code == 200:
resp = r.json()
print(resp)
print(type(resp))
return resp
def _send_request(self, info):
question_index = int(info["question_index"])
if question_index == 1:
## get ##
url = self.server + "/people_building/"
body = {"start": info["start"], "end": info["end"]}
# body = {'start': '2020-04-05 21:00:00', 'end': '2020-04-05 21:10:00'}
response = self._request_handle(url=url, body=body, METHOD="post")
try:
occu = str(response['count'])
except:
occu = str(0)
## received##
self.info_widget_dict["consumer"]["occu_label"].SetLabel(occu)
elif question_index == 2:
## get ##
url = self.server + "/people_room/"
body = {"room": info["room"],
"start": info["start"],
"end": info["end"],
# 'start': '2020-04-05 21:00:00', 'end': '2020-04-05 21:10:00'
}
response = self._request_handle(url=url, body=body, METHOD="post")
try:
occu = str(response['count'])
occupancy_info = response['occupancy_info']
except:
occu = str(0)
occupancy_info = []
## received ##
self.info_widget_dict["consumer"]["occu_label"].SetLabel(occu)
nlb = self.info_widget_dict["consumer"]["name_list"]
nlb.Clear()
for name in occupancy_info:
nlb.Append(name)
elif question_index == 3:
## get ##
url = self.server + "/person_room/"
body = {"name": info["name"],
"start": info["start"],
"end": info["end"],
# 'start': '2020-04-05 21:00:00', 'end': '2020-04-05 21:10:00'
}
response = self._request_handle(url=url, body=body, METHOD="post")
try:
room_list = response['room']
count = str(len(room_list))
except:
count = str(0)
room_list = []
## received ##
self.info_widget_dict["consumer"]["count_label"].SetLabel(count)
rlb = self.info_widget_dict["consumer"]["room_list"]
rlb.Clear()
for name in room_list:
rlb.Append(name)
elif question_index == 4:
## get ##
url = self.server + "question/4"
body = {"name": info["name"],
# "start_time": info["start"],
# "end_time": info["end"],
"time": info["start"],
}
response = self._request_handle(url=url, body=body, METHOD="post")
count = str(random.randint(0, 20))
room_list = ["Room_1_1_140", "Room_1_1_141"]
## received ##
self.info_widget_dict["consumer"]["count_label"].SetLabel(count)
rlb = self.info_widget_dict["consumer"]["room_list"]
rlb.Clear()
for name in room_list:
rlb.Append(name)
elif question_index == 5:
## get ##
url = self.server + "/utilization/"
body = {"room": info["room"],
"date": info["date"],
# 'date': '2020-04-05 20:00:00'
}
response = self._request_handle(url=url, body=body, METHOD="post")
# self.request_handle(url, body, METHOD="post")
try:
response = json.loads(response)
utilization = "{:.2f}".format(response["utilization"]*100) + "%"
except:
utilization = "0%"
## received##
self.info_widget_dict["consumer"]["utilization_label"].SetLabel(utilization)
def _q1_panel(self):
print("q1")
main_vbox = self.add_date_time_picker_layout()
# confirm button
self._add_confirm_button(main_vbox, 1)
# add result label
self._add_result_label(main_vbox)
# add result widget
hbox = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label="Occupancy")
label.SetFont(self.font)
hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
occu_label = wx.StaticText(self, label="__")
occu_label.SetFont(self.font)
hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
main_vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["consumer"]["occu_label"] = occu_label
self.SetSizer(main_vbox)
# https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small
self.Fit()
self.GetParent().SendSizeEvent()
def _q2_panel(self):
print("q2")
main_vbox = self.add_date_time_picker_layout()
# Room Info
room_hbox = wx.BoxSizer(wx.HORIZONTAL)
room_label = wx.StaticText(self, label="Room")
room_label.SetFont(self.font)
room_hbox.Add(room_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
room_list = [
"",
"Room_1_1_140",
"Room_1_1_141",
"Room_1_1_142",
"Room_1_1_143",
"Room_1_1_144",
"Room_1_1_150",
"Room_1_1_184"]
room_combobox = wx.ComboBox(self, choices=room_list)
room_hbox.Add(room_combobox, proportion=8, flag=wx.TOP, border=5)
# room_info = wx.TextCtrl(self)
# room_hbox.Add(room_combobox, proportion=8, flag=wx.TOP, border=5)
main_vbox.Add(room_hbox, proportion=0, flag=wx.ALL, border=5)
# confirm button
self._add_confirm_button(main_vbox, 2)
# add result label
self._add_result_label(main_vbox)
        # add widget information to dict
self.info_widget_dict["feeder"]["room_select"] = room_combobox
# add result widget
# add count
hbox = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label="Occupancy")
label.SetFont(self.font)
hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
occu_label = wx.StaticText(self, label="__")
occu_label.SetFont(self.font)
hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
main_vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5)
# add name list
namelb = wx.ListBox(self)
main_vbox.Add(namelb, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["consumer"]["occu_label"] = occu_label
self.info_widget_dict["consumer"]["name_list"] = namelb
self.SetSizer(main_vbox)
# https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small
self.Fit()
self.GetParent().SendSizeEvent()
def _q3_panel(self):
print("q3")
vbox = self.add_date_time_picker_layout()
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
name_label = wx.StaticText(self, label="Name")
name_label.SetFont(self.font)
hbox1.Add(name_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
name_text_ctrl = wx.TextCtrl(self)
name_text_ctrl.AppendText('Please enter unique name')
hbox1.Add(name_text_ctrl, proportion=8, flag=wx.TOP, border=5)
vbox.Add(hbox1, proportion=0, flag=wx.ALL, border=5)
# confirm button
self._add_confirm_button(vbox, 3)
# add result label
self._add_result_label(vbox)
        # add widget information to dict
self.info_widget_dict["feeder"]["name_select"] = name_text_ctrl
# add result widget
# add count
hbox = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label="Room Count")
label.SetFont(self.font)
hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
occu_label = wx.StaticText(self, label="__")
occu_label.SetFont(self.font)
hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5)
# add name list
roomlb = wx.ListBox(self)
vbox.Add(roomlb, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["consumer"]["count_label"] = occu_label
self.info_widget_dict["consumer"]["room_list"] = roomlb
self.SetSizer(vbox)
# https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small
self.Fit()
self.GetParent().SendSizeEvent()
def _q4_panel(self):
print("q4")
main_vbox = self.add_date_time_picker_layout()
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
name_label = wx.StaticText(self, label="Name")
name_label.SetFont(self.font)
hbox1.Add(name_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
name_text_ctrl = wx.TextCtrl(self)
name_text_ctrl.AppendText('Please enter unique name')
hbox1.Add(name_text_ctrl, proportion=8, flag=wx.TOP, border=5)
main_vbox.Add(hbox1, proportion=0, flag=wx.ALL, border=5)
# confirm button
self._add_confirm_button(main_vbox, 4)
# add result label
self._add_result_label(main_vbox)
        # add widget information to dict
self.info_widget_dict["feeder"]["name_select"] = name_text_ctrl
# add result widget
# add count
hbox = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label="Room Count")
label.SetFont(self.font)
hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
occu_label = wx.StaticText(self, label="__")
occu_label.SetFont(self.font)
hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
main_vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5)
# add name list
roomlb = wx.ListBox(self)
main_vbox.Add(roomlb, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["consumer"]["count_label"] = occu_label
self.info_widget_dict["consumer"]["room_list"] = roomlb
self.SetSizer(main_vbox)
# https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small
self.Fit()
self.GetParent().SendSizeEvent()
def _q5_panel(self):
print("q5")
vbox = wx.BoxSizer(wx.VERTICAL)
# datetime
date_hbox = wx.BoxSizer(wx.HORIZONTAL)
date_label = wx.StaticText(self, label="Datetime")
date_label.SetFont(self.font)
dpc = wx.adv.DatePickerCtrl(self, -1, wx.DefaultDateTime)
tpc = wx.adv.TimePickerCtrl(self, -1, wx.DefaultDateTime)
date_hbox.Add(date_label, proportion=2, flag=wx.RIGHT|wx.TOP, border=4)
date_hbox.Add(dpc, proportion=3, flag=wx.RIGHT, border=5)
date_hbox.Add(tpc, proportion=3, flag=wx.RIGHT, border=5)
vbox.Add(date_hbox, proportion=0, flag=wx.ALL, border=5)
# Room Info
room_hbox = wx.BoxSizer(wx.HORIZONTAL)
room_label = wx.StaticText(self, label="Room")
room_label.SetFont(self.font)
room_hbox.Add(room_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
room_list = [
"",
"Room_1_1_140",
"Room_1_1_141",
"Room_1_1_142",
"Room_1_1_143",
"Room_1_1_144",
"Room_1_1_150",
"Room_1_1_184"]
room_combobox = wx.ComboBox(self, choices=room_list)
room_hbox.Add(room_combobox, proportion=8, flag=wx.TOP, border=5)
vbox.Add(room_hbox, proportion=0, flag=wx.ALL, border=5)
# confirm button
self._add_confirm_button(vbox, 5)
# add result label
self._add_result_label(vbox)
        # add widget information to dict
self.info_widget_dict["feeder"]["date_picker"] = dpc
self.info_widget_dict["feeder"]["time_picker"] = tpc
self.info_widget_dict["feeder"]["room_select"] = room_combobox
# add result widget
hbox = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label="Utilization")
label.SetFont(self.font)
hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
occu_label = wx.StaticText(self, label="__")
occu_label.SetFont(self.font)
hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["consumer"]["utilization_label"] = occu_label
self.SetSizer(vbox)
# https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small
self.Fit()
self.GetParent().SendSizeEvent()
| 2.28125 | 2 |
webapp/search.py | henchan/memfinity | 0 | 5117 | <gh_stars>0
"""High-level search API.
This module implements application-specific search semantics on top of
App Engine's search API. There are two chief operations: querying for
entities, and managing entities in the search facility.
Add and remove Card entities in the search facility:
insert_cards([models.Card])
delete_cards([models.Card])
Query for Card entities:
query_cards(query_string, limit=20) -> search.SearchResults
The results items will have the following fields:
user_key, user_nickname, front, back, info, tag (repeated), added,
modified, source_url
The query_string is free-form, as a user would enter it, and passes
through a custom query processor before the query is submitted to App
Engine. Notably, pass @username to restrict the query to entities
authored by username, and #tag to restrict the query to only documents
matching the given tag. Multiple @usernames or #tags result in an OR
query.
"""
import re
from google.appengine.api import search
from google.appengine.ext import ndb
QUERY_LIMIT = 20
CARD_INDEX_NAME = 'cards'
# Increase this value when _card2doc changes its format so that
# queries can determine the data available on returned documents.
CARD_DOCUMENT_VERSION = '1'
# Ensure we're under the 2000 character limit from
# https://developers.google.com/appengine/docs/python/search/query_strings
MAX_QUERY_LEN = 200
# TODO(chris): it would be better if this module didn't know about
# specific entity types, but instead defined a protocol to get
# metadata from an entity and generate a document.
def insert_cards(cards):
"""Insert or update models.Card entities in the search facility."""
# TODO(chris): should we allow more than 200 cards per call?
assert len(cards) <= 200, len(cards)
card_docs = map(_card2doc, cards)
index = search.Index(name=CARD_INDEX_NAME)
index.put(card_docs)
def delete_cards(cards):
"""Delete models.Card entities from the search facility."""
index = search.Index(name=CARD_INDEX_NAME)
card_doc_ids = map(_card2docid, cards)
index.delete(card_doc_ids)
def query_cards(query_str, limit=QUERY_LIMIT, web_safe_cursor=None,
ids_only=False, user_key=None):
"""Return the search.SearchResults for a query.
ids_only is useful because the returned document IDs are url-safe
keys for models.Card entities.
"""
if web_safe_cursor:
cursor = search.Cursor(web_safe_string=web_safe_cursor)
else:
cursor = None
index = search.Index(name=CARD_INDEX_NAME)
query_processor = _QueryProcessor(
query_str,
name_field='user_nickname',
tag_field='tag',
private_field='private',
user_key_field='user_key',
query_options=search.QueryOptions(limit=limit, cursor=cursor,
ids_only=ids_only),
user_key=user_key)
search_results = index.search(query_processor.query())
# TODO(chris): should this return partially-instantiated
# models.Card instances instead of leaking implementation details
# like we do now?
return search_results
def _card2doc(card):
# TODO(chris): should we include all fields that would be needed
# for rendering a search results item to avoid entity lookup?
tag_fields = [search.AtomField(name='tag', value=tag) for tag in card.tags]
doc = search.Document(
doc_id=_card2docid(card),
fields=[
search.AtomField(name='doc_version', value=CARD_DOCUMENT_VERSION),
search.AtomField(name='user_key', value=card.user_key.urlsafe()),
# TODO(chris): is user_nickname always a direct-match
# shortname, e.g., @chris?
search.AtomField(name='user_nickname', value=card.user_nickname),
# TODO(chris): support HtmlField for richer cards?
search.TextField(name='front', value=card.front),
search.TextField(name='back', value=card.back),
search.TextField(name='info', value=card.info),
search.DateField(name='added', value=card.added),
search.DateField(name='modified', value=card.modified),
search.AtomField(name='source_url', value=card.source_url),
search.AtomField(name='private', value="1" if card.private else "0"),
] + tag_fields)
return doc
def _card2docid(card):
# We set the search.Document's ID to the entity key it mirrors.
return card.key.urlsafe()
def _sanitize_user_input(query_str):
# The search API puts special meaning on certain inputs and we
# don't want to expose the internal query language to users so
# we strictly restrict inputs. The rules are:
#
# Allowed characters for values are [a-zA-Z0-9._-].
# @name is removed and 'name' values returned as a list.
# #tag is removed and 'tag' values returned as a list.
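    # Illustrative example of the rules above (not taken from real data):
    #     "machine @chris #ml AND" -> terms=['machine'], names=['chris'], tags=['ml']
    # ('AND' is dropped as a reserved search keyword).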
terms, names, tags = [], [], []
for token in query_str.split():
# TODO(chris): allow international characters.
sane_token = re.sub(r'[^a-zA-Z0-9._-]+', '', token)
if sane_token:
            if sane_token in ('AND', 'OR'):
continue # ignore special search keywords
elif token.startswith('@'):
names.append(sane_token)
elif token.startswith('#'):
tags.append(sane_token)
else:
terms.append(sane_token)
return terms, names, tags
class _QueryProcessor(object):
"""Simple queries, possibly with @name and #tag tokens.
name_field is the field @name tokens should apply to.
tag_field is the name of the field #tag tokens should apply to.
"""
def __init__(self, query_str,
name_field, tag_field, private_field, user_key_field,
query_options=None, user_key=None):
self.query_str = query_str
self.name_field = name_field
self.tag_field = tag_field
self.private_field = private_field
self.user_key_field = user_key_field
self.query_options = query_options
self.user_key = user_key
def _sanitize_user_input(self):
query_str = self.query_str[:MAX_QUERY_LEN]
return _sanitize_user_input(query_str)
def _build_query_string(self):
terms, names, tags = self._sanitize_user_input()
# Our simply query logic is to OR together all terms from the
# user, then AND in the name or tag filters (plus a privacy clause).
parts = []
if terms:
parts.append(' OR '.join(terms))
if names:
parts.append('%s: (%s)' % (self.name_field, ' OR '.join(names)))
if tags:
parts.append('%s: (%s)' % (self.tag_field, ' OR '.join(tags)))
# Don't return cards that other users have marked private...
privacy = '%s: 0' % self.private_field
if self.user_key:
# ... but always show the user their own cards in results.
privacy += ' OR %s: (%s)' % (self.user_key_field, self.user_key)
parts.append('(' + privacy + ')')
return ' AND '.join(parts)
def query(self):
query = search.Query(
query_string=self._build_query_string(),
options=self.query_options)
return query
| 3 | 3 |
Bot/Bot/board.py | Baidi96/AI-Agent-for-Light-Rider | 1 | 5118 | import copy
import sys
PLAYER1, PLAYER2, EMPTY, BLOCKED = [0, 1, 2, 3]
S_PLAYER1, S_PLAYER2, S_EMPTY, S_BLOCKED, = ['0', '1', '.', 'x']
CHARTABLE = [(PLAYER1, S_PLAYER1), (PLAYER2, S_PLAYER2), (EMPTY, S_EMPTY), (BLOCKED, S_BLOCKED)]
DIRS = [
((-1, 0), "up"),
((1, 0), "down"),
((0, 1), "right"),
((0, -1), "left")
]
#the information of the whole grid
class Board:
def __init__(self, width, height):
self.width = width
self.height = height
self.cell = [[EMPTY for col in range (0, width)] for row in range(0, height)]
def parse_cell_char(self, players, row, col, char):
result = -1
if char == S_PLAYER1:
players[0].row = row;
players[0].col = col;
elif char == S_PLAYER2:
players[1].row = row;
players[1].col = col;
for (i, symbol) in CHARTABLE:
if symbol == char:
result = i
break
return result
def parse_cell(self, players, row, col, data):
cell = []
for char in data:
item = self.parse_cell_char(players, row, col, char)
cell.append(item)
return cell
def parse(self, players, data):
cells = data.split(',')
col = 0
row = 0
for cell in cells:
if (col >= self.width):
col = 0
row +=1
self.cell[row][col] = self.parse_cell(players, row, col, cell)
col += 1
def in_bounds (self, row, col):
return row >= 0 and col >= 0 and col < self.width and row < self.height
def is_legal(self, row, col, my_id):
enemy_id = my_id ^ 1
return (self.in_bounds(row, col)) and (not BLOCKED == self.cell[row][col]) and (not enemy_id == self.cell[row][col])
    def is_legal_tuple(self, loc, my_id):
        row, col = loc
        return self.is_legal(row, col, my_id)
    def get_adjacent(self, row, col, my_id):
        result = []
        for (o_row, o_col), _ in DIRS:
            t_row, t_col = o_row + row, o_col + col
            if self.is_legal(t_row, t_col, my_id):
result.append((t_row, t_col))
return result
def legal_moves(self, my_id, players):
my_player = players[my_id]
result = []
for ((o_row, o_col), order) in DIRS:
t_row = my_player.row + o_row
t_col = my_player.col + o_col
if self.is_legal(t_row, t_col, my_id):
result.append(((o_row, o_col), order))
else:
pass
return result
def update_cell(self, row, col, data):
self.cell[row][col] = data
def output_cell(self, cell):
done = False
for (i, symbol) in CHARTABLE:
if i == cell:
if not done:
sys.stderr.write(symbol)
done = True
break
if not done:
sys.stderr.write("!")
done = True
def output(self):
for row in self.cell:
sys.stderr.write("\n")
for cell in row:
self.output_cell(cell)
sys.stderr.write("\n")
sys.stderr.flush()
def tostring(self):
res = ""
for row in xrange(self.height):
for col in xrange(self.width):
res += str(self.cell[row][col])
res += ","
return res
| 2.953125 | 3 |
baekjoon/1012.py | wonnerky/coteMaster | 0 | 5119 | import sys
sys.setrecursionlimit(10000)
def dfs(r, c):
global visit
visit[r][c] = True
mov = [(-1, 0), (0, -1), (1, 0), (0, 1)]
for i in range(4):
dr, dc = mov[i]
nr, nc = r + dr, c + dc
if 0 <= nr < N and 0 <= nc < M and visit[nr][nc] == False and board[nr][nc] == 1:
dfs(nr, nc)
T = int(input())
for _ in range(T):
M, N, K = map(int, input().split())
board = [[0] * M for _ in range(N)]
for _ in range(K):
c, r = map(int, input().split())
board[r][c] = 1
visit = [[False] * M for _ in range(N)]
cnt = 0
for r in range(N):
for c in range(M):
if not visit[r][c] and board[r][c] == 1:
cnt += 1
dfs(r, c)
    # for ele in visit:
    #     print(ele)
    # print()
print(cnt) | 2.71875 | 3 |
collection/cp/algorithms-master/python/binary_tree.py | daemonslayer/Notebook | 1 | 5120 | <reponame>daemonslayer/Notebook
"""
Binary Tree and basic properties
1. In-Order Traversal
2. Pre-Order Traversal
3. Post-Order Traversal
4. Level-Order Traversal
"""
from collections import deque
class BinaryTree(object):
"""
Representation of a general binary tree
data: value of element
left: Left subtree
right: Right subtree
"""
def __init__(self, data, left=None, right=None):
if data is None:
raise ValueError('data cannot be null')
self.data = data
self.left = left
self.right = right
def insert(self, data):
raise NotImplementedError('Method insert is not Implemented')
def delete(self, data):
raise NotImplementedError('Method delete is not implemented')
def inorder_traversal(self, write=True):
"""
Return list of node data as inorder traversal. If write is True then print as well.
        This is an iterative tree inorder traversal.
Algorithm:
1. Create a stack of nodes node_stack
2. Mark root as current
3. While current is not none or node_stack is not empty
            a. While current is not empty push current to node_stack and reassign current to current->left
            b. If current is empty and node_stack is not empty then pop the top of the stack and append its data
            c. Mark current as popped_node->right
"""
traversal_lis = []
node_stack = []
current = self
while current or node_stack:
while current:
node_stack.append(current)
current = current.left
if node_stack:
node = node_stack.pop()
traversal_lis.append(node.data)
current = node.right
if write:
for item in traversal_lis:
print(item, end=' ')
return traversal_lis
def preorder_traversal(self, write=True):
"""
Return list of node data as preorder traversal. If write is true then print as well.
Algorithm:
1. Create stack of nodes as node_stack
2. Mark root as current
3. While current is not none or node_stack is not empty
a. While current is not empty
i. Push current to node_stack
ii. Add current->data to traversal_list
iii. Reassign current to current->left
b. If node_stack is not empty then pop the topmost node from node_stack and assign current to
poped_node->right
"""
traversal_lis = []
node_stack = []
current = self
while current or node_stack:
while current:
node_stack.append(current)
traversal_lis.append(current.data)
current = current.left
if node_stack:
node = node_stack.pop()
current = node.right
if write:
for item in traversal_lis:
print(item, end=' ')
return traversal_lis
def postorder_traversal(self, write=True):
"""
Return list of node data as postorder traversal. If write is true then print as well.
Algorithm:
1. Create stack of nodes as node_stack
2. Mark root as current
3. While current is not None or node_stack is not empty
a. While current is not None
i. Push current to node_stack
ii. Append current->data to traversal_list
                iii. Reassign current as current->right (we visit right before left here, collecting
                     nodes in root-right-left order)
            b. If node_stack is not empty then pop the top node and assign popped_node->left to current
        4. Reverse traversal_lis so the root-right-left visit order becomes left-right-root (postorder)
        """
traversal_lis = []
node_stack = []
current = self
while current or node_stack:
while current:
node_stack.append(current)
traversal_lis.append(current.data)
current = current.right
if node_stack:
node = node_stack.pop()
                current = node.left
        # nodes were collected in root-right-left order; reverse to obtain postorder
        traversal_lis.reverse()
        if write:
for item in traversal_lis:
print(item, end=' ')
return traversal_lis
def levelorder_traversal(self, write=True):
"""
Return list of node data as level order traversal. If write is true then print as well.
Algorithm:
1. Maintain a queue of nodes to process as node_queue
2. Push root to node_queue
3. While node_queue is not empty
a. Get top node of node_queue as top
b. Push top->data to traversal_list
c. Append top->left and top->right into node_queue if they are not null
"""
traversal_list = []
node_queue = deque()
node_queue.append(self)
while node_queue:
top = node_queue.popleft()
traversal_list.append(top.data)
if top.left:
node_queue.append(top.left)
if top.right:
node_queue.append(top.right)
if write:
for item in traversal_list:
print(item, end=' ')
return traversal_list
def main():
"""
Tree Structure:
1
/ \
2 3
/ \
4 5
"""
tree = BinaryTree(1)
tree.left = BinaryTree(2)
tree.right = BinaryTree(3)
tree.left.left = BinaryTree(4)
tree.left.right = BinaryTree(5)
assert tree.inorder_traversal(write=False) == [4, 2, 5, 1, 3]
assert tree.preorder_traversal(write=False) == [1, 2, 4, 5, 3]
    assert tree.postorder_traversal(write=False) == [4, 5, 2, 3, 1]
assert tree.levelorder_traversal(write=False) == [1, 2, 3, 4, 5]
if __name__ == '__main__':
main()
| 3.84375 | 4 |
custom_components/vaddio_conferenceshot/const.py | rohankapoorcom/vaddio_conferenceshot | 0 | 5121 | import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PATH, CONF_USERNAME
DOMAIN = "vaddio_conferenceshot"
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
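# Illustrative: DATA_SCHEMA validates mappings such as
#     {"host": "192.168.1.10", "username": "admin", "password": "hunter2"}
# (placeholder values shown here, not real credentials).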
SERVICE_RECALL_PRESET = "move_to_preset"
ATTR_PRESET_ID = "preset"
| 1.859375 | 2 |
lightonml/opu.py | lightonai/lightonml | 27 | 5122 | <gh_stars>10-100
# Copyright (c) 2020 LightOn, All Rights Reserved.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
"""
This module contains the OPU class
"""
import time
from math import sqrt
import pkg_resources
from lightonml.encoding.base import NoEncoding, NoDecoding
import warnings
from typing import Optional, Union, Tuple, TYPE_CHECKING
import numpy as np
from contextlib import ExitStack
import attr
import inspect
import lightonml
from lightonml.internal.config import get_host_option, opu_version
from lightonml.internal import config, output_roi, utils, types
from lightonml.internal.user_input import OpuUserInput, InputTraits
from lightonml.internal.simulated_device import SimulatedOpuDevice
from lightonml.context import ContextArray
from lightonml.internal.settings import OpuSettings, TransformSettings
from lightonml.internal.runner import TransformRunner, FitTransformRunner
from lightonml.internal.types import InputRoiStrategy, IntOrTuple, TransformOutput, AcqState
from lightonml.types import OutputRescaling
# Import lightonopu only for typechecking, as it's an optional module and may not be present
if TYPE_CHECKING:
from lightonopu.internal.device import OpuDevice
# noinspection PyPep8Naming
class OPU:
"""Interface to the OPU.
.. math:: \\mathbf{y} = \\lvert \\mathbf{R} \\mathbf{x} \\rvert^2 \\mbox{ (non-linear transform, the default)}
.. math:: \\mathbf{y} = \\mathbf{R}\\mathbf{x} \\mbox{ (linear transform)}
Main methods are `transform`, `linear_transform`, `fit1d` and `fit2d`,
and accept NumPy arrays or PyTorch tensors.
The non-linear transform (`transform`) is a native operation for the OPU, and performs at a higher
speed than `linear_transform`.
Acquiring/releasing hardware device resources is done by open/close and a
context-manager interface.
Unless `open_at_init=False`, these resources are acquired automatically at init.
    If another process or kernel has not released the resources, an error will be
    raised; call `close()` on the OPU object (or shut down the kernel holding it)
    to release the resources.
Parameters
----------
n_components : int,
dimensionality of the target projection space.
opu_device : OpuDevice or SimulatedOpuDevice, optional
optical processing unit instance linked to a physical or simulated device.
If not provided, a device is properly instantiated.
If opu_device is of type SimulatedOpuDevice, the random matrix is generated
at __init__, using max_n_features and n_components
max_n_features: int, optional
maximum number of binary features that the OPU will transform
used only if opu_device is a SimulatedOpuDevice,
in order to initiate the random matrix
config_file : str, optional
path to the configuration file (for dev purpose)
config_override: dict, optional
for override of the config_file (for dev purpose)
verbose_level: int, optional
deprecated, use lightonml.set_verbose_level() instead
.. seealso:: `lightonml.set_verbose_level`
input_roi_strategy: types.InputRoiStrategy, optional
describes how to display the features on the input device
.. seealso:: `lightonml.internal.types.InputRoiStrategy`
open_at_init: bool, optional
forces the setting of acquiring hardware resource at init. If
not provided, follow system's setting (usually True)
disable_pbar: bool, optional
disable display of the progress bar when verbose_level is set to 1
simulated: bool, optional
performs the random projection using CPU, in case no OPU is available on your machine
the random matrix is then generated at __init__, using max_n_features and n_components
rescale: types.OutputRescaling, optional,
output rescaling method for `linear_transform`.
Ignored by `transform`.
.. seealso:: `lightonml.types.OutputRescaling`
Attributes
----------
n_components: int
dimensionality of the target projection space.
rescale: types.OutputRescaling,
output rescaling method for `linear_transform`.
Ignored by `transform`.
max_n_features: int
maximum number of binary features that the OPU will transform
writeable only if opu_device is a SimulatedOpuDevice,
in order to initiate or resize the random matrix
device: OpuDevice or SimulatedOpuDevice
underlying hardware that performs transformation (read-only)
input_roi_strategy: types.InputRoiStrategy, optional
describes how to display the features on the input device
"""
def __init__(self, n_components: int = 200000,
opu_device: Optional[Union["OpuDevice", SimulatedOpuDevice]] = None,
max_n_features: int = 1000, config_file: str = "",
config_override: dict = None, verbose_level: int = -1,
input_roi_strategy: types.InputRoiStrategy = types.InputRoiStrategy.full,
open_at_init: bool = None, disable_pbar=False, simulated=False,
rescale: Union[OutputRescaling, str] = OutputRescaling.variance):
self.__opu_config = None
self.__config_file = config_file
self.__config_override = config_override
self._max_n_features = max_n_features
self.disable_pbar = disable_pbar
self.rescale = rescale
# Get trace and print functions
if verbose_level != -1:
warnings.warn("Verbose level arg will removed in 1.3, "
"Use lightonml.set_verbose_level instead",
DeprecationWarning)
lightonml.set_verbose_level(verbose_level)
else:
verbose_level = lightonml.get_verbose_level()
self._debug = lightonml.get_debug_fn()
self._trace = lightonml.get_trace_fn()
self._print = lightonml.get_print_fn()
no_config_msg = "No configuration files for the OPU was found on this machine.\n" \
"You may want to run the OPU in a simulated manner, by passing the " \
"simulated argument to True at init.\n" \
"See https://docs.lighton.ai/notes/get_started.html#Simulating-an-OPU " \
"for more details.\n" \
"See also https://lighton.ai/products for getting access to our technology."
if simulated and opu_device is not None:
raise ValueError("simulated and opu_device arguments are conflicting")
# Device init, or take the one passed as input
if opu_device:
if type(opu_device).__name__ not in ["SimulatedOpuDevice", "OpuDevice"]:
raise TypeError("opu_device must be of type SimulatedOpuDevice or OpuDevice")
self.device = opu_device
elif simulated:
self.device = SimulatedOpuDevice()
else:
# Instantiate device directly
from lightonopu.internal.device import OpuDevice
if not self.__config_file and not config.host_has_opu_config():
# Looks like there's no OPU on this host as we didn't find configuration files
raise RuntimeError(no_config_msg)
opu_type = self.config["type"]
frametime_us = self.config["input"]["frametime_us"]
exposure_us = self.config["output"]["exposure_us"]
seq_nb_prelim = self.config.get("sequence_nb_prelim", 0)
name = self.config["name"]
self.device = OpuDevice(opu_type, frametime_us, exposure_us, seq_nb_prelim,
None, verbose_level, name)
self._base_frametime_us = self.device.frametime_us
self._base_exposure_us = self.device.exposure_us
if self._s.simulated:
# build the random matrix if not done already
self._resize_rnd_matrix(max_n_features, n_components)
else:
# Make sure lightonopu is at 1.4.1 or later, needed for linear_reconstruction
pkg_resources.require("lightonopu>=1.4.1")
# initialize linear_reconstruction library
from lightonopu import linear_reconstruction
linear_reconstruction.init(np.prod(self.device.input_shape))
self._output_roi = output_roi.OutputRoi(self.device.output_shape_max,
self.device.output_roi_strategy,
self._s.allowed_roi, self._s.min_n_components)
# This also sets the output ROI
self.n_components = n_components
self.input_roi_strategy = input_roi_strategy
# Runner initialized when entering fit
self._runner = None # type: Optional[TransformRunner]
# ExitStack for device acquisition, initialized when entering fit
self._acq_stack = ExitStack()
self._trace("OPU initialized")
# Open at init, unless relevant host.json option is False
if open_at_init is None:
open_at_init = get_host_option("lightonml_open_at_init", True)
if open_at_init:
self.open()
def _tr_settings(self, no_input=False, **override) -> TransformSettings:
"""Returns transform settings for feeding to TransformRunner"""
init = TransformSettings(self.input_roi_strategy, self.n_components)
settings = attr.evolve(init, **override)
if no_input and self.input_roi_strategy is InputRoiStrategy.auto:
# If no input_roi, replace auto by full strategy
settings.input_roi_strategy = InputRoiStrategy.full
assert settings.input_roi is None
return settings
def fit1d(self, X=None, n_features: int = None,
packed: bool = False, online=False, **override):
"""
Configure OPU transform for 1d vectors
The function can be either called with input vector, for fitting OPU
parameters to it, or just vector dimensions, with ``n_features``.
When input is bit-packed the packed flag must be set to True.
When input vectors must be transformed one by one, performance will
be improved with the online flag set to True.
Parameters
----------
X: np.ndarray or torch.Tensor
Fit will be made on this vector to optimize transform parameters
n_features: int
Number of features for the input, necessary if X parameter isn't provided
packed: bool
Set to true if the input vectors will be already bit-packed
online: bool, optional
Set to true if the transforms will be made one vector after the other
defaults to False
override: dict, optional
keyword args for overriding transform settings (advanced parameters)
"""
return self.__fit(X, n_features, packed, online, False, **override)
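    # Illustrative sketch of the online path (sizes are example values only):
    #     opu.fit1d(n_features=1000, online=True)
    #     y = opu.transform(x)   # vectors are then transformed one at a time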
def fit2d(self, X=None, n_features: Tuple[int, int] = None,
packed: bool = False, online=False, **override):
"""
Configure OPU transform for 2d vectors
The function can be either called with input vector, for fitting OPU
parameters to it, or just vector dimensions, with `n_features`.
When input is bit-packed the packed flag must be set to True.
Number of features must be then provided with `n_features`
When input vectors must be transformed one by one, performance will
be improved with the online flag set to True.
Parameters
----------
X: np.ndarray or torch.Tensor
a 2d input vector, or batch of 2d input_vectors, binary encoded, packed or not
n_features: tuple(int)
Number of features for the input, necessary if X parameter isn't provided, or
if input is bit-packed
packed: bool, optional
whether the input data is in bit-packed representation
if True, each input vector is assumed to be a 1d array, and the "real" number
of features must be provided as n_features
defaults to False
online: bool, optional
Set to true if the transforms will be made one vector after the other
defaults to False
override: dict, optional
keyword args for overriding transform settings (advanced parameters)
"""
return self.__fit(X, n_features, packed, online, True, **override)
def transform(self, X, encoder_cls=NoEncoding, decoder_cls=NoDecoding) -> TransformOutput:
"""
Performs the nonlinear random projections of one or several input vectors.
The `fit1d` or `fit2d` method must be called before, for setting vector dimensions
or online option.
If you need to transform one vector after each other, add `online=True` in the fit function.
Parameters
----------
X: np.ndarray or torch.Tensor
input vector, or batch of input vectors.
Each vector must have the same dimensions as the one given in `fit1d` or `fit2d`.
encoder_cls: encoder.base.BaseTransformer, optional
class or instance of class that transform the input into binary vectors to be processed by the opu.
decoder_cls: encoder.base.BaseTransformer, optional
class or instance of class that transforms the output of the opu back into the appropriate format.
Returns
-------
Y: np.ndarray or torch.Tensor
complete array of nonlinear random projections of X,
of size self.n_components
If input is an ndarray, type is actually ContextArray,
with a context attribute to add metadata
"""
assert self._runner, "Call fit1d or fit2d before transform"
assert self.device.active, "OPU device isn't active, use opu.open() or \"with opu:\""
if inspect.isclass(encoder_cls):
encoder = encoder_cls()
else:
encoder = encoder_cls
X_enc = encoder.transform(X)
user_input = OpuUserInput.from_traits(X_enc, self._runner.traits)
self._debug(str(user_input))
if user_input.is_batch and not self._s.simulated:
# With batch input start acquisition first
assert self.device.acq_state.value != AcqState.online.value, \
"Can't transform a batch of vectors when acquisition is" \
" in online mode, only single vectors"
with self.device.acquiring(n_images=self._s.n_samples_by_pass):
out = self._runner.transform(user_input)
else:
out = self._runner.transform(user_input)
return self._post_transform(out, user_input, encoder, decoder_cls)
def linear_transform(self, X, encoder_cls=NoEncoding, decoder_cls=NoDecoding) -> TransformOutput:
"""
Do a linear transform of X, for Nitro (non-linear) photonic cores.
Parameters
----------
X: np.ndarray or torch.Tensor
input vector, or batch of input vectors.
Each vector must have the same dimensions as the one given in `fit1d` or `fit2d`.
encoder_cls: encoding.base.BaseTransformer, optional
class or instance of class that transform the input into binary vectors to be processed by the opu.
decoder_cls: encoding.base.BaseTransformer, optional
class or instance of class that transforms the output of the opu back into the appropriate format.
Returns
-------
Y: np.ndarray or torch.Tensor
complete array of linear random projections of X,
of size self.n_components
If input is an ndarray, type is actually ContextArray,
with a context attribute to add metadata
"""
assert self._runner, "Call fit1d or fit2d before linear_transform"
traits = self._runner.traits
if traits.packed:
# TODO implement for packed
raise RuntimeError("Linear transform isn't yet implemented for packed input :/")
if inspect.isclass(encoder_cls):
encoder = encoder_cls()
else:
encoder = encoder_cls
X_enc = encoder.transform(X)
user_input = OpuUserInput.from_traits(X_enc, traits)
_, result_ctx = self._raw_linear_transform(X_enc, traits, user_input)
# Decoding, add context, and optional convert back to torch if needed
output = self._post_transform(result_ctx, user_input, encoder, decoder_cls)
# Rescale the output, intentionally after the decoding step
if self.rescale is OutputRescaling.variance:
n_features = user_input.n_features_s
output = output / (self._s.stdev * sqrt(n_features))
elif self.rescale is OutputRescaling.norm:
output = output / (self._s.stdev * sqrt(self.n_components))
return output
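    # Minimal sketch of linear_transform usage, under the assumption that the OPU
    # exposes an input_roi_strategy attribute (as suggested by the assert message
    # above); names other than fit1d/linear_transform are assumptions.
    #
    #   opu.input_roi_strategy = InputRoiStrategy.full
    #   opu.fit1d(X)
    #   Y_lin = opu.linear_transform(X)   # linear counterpart of opu.transform(X)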
def transform1d(self, *args, **kwargs):
raise RuntimeError("transform1d is deprecated, you must now use fit1d and transform")
def transform2d(self, *args, **kwargs):
raise RuntimeError("transform2d is deprecated, you must now use fit2d and transform")
def fit_transform1d(self, X, packed: bool = False,
**override) -> ContextArray:
"""Performs the nonlinear random projections of 1d input vector(s).
This function is the one-liner equivalent of `fit1d` and `transform` calls.
.. warning:: when making several transform calls, prefer calling `fit1d`
and then `transform`, or you might encounter an inconsistency in the
transformation matrix.
The input data can be bit-packed, where ``n_features = 8*X.shape[-1]``
Otherwise ``n_features = X.shape[-1]``
If tqdm module is available, it is used for progress display
Parameters
----------
X: np.ndarray or torch.Tensor
a 1d input vector, or batch of 1d input_vectors, binary encoded, packed or not
batch can be 1d or 2d. In all cases ``output.shape[:-1] = X.shape[:-1]``
packed: bool, optional
whether the input data is in bit-packed representation
defaults to False
override: keyword args for overriding transform settings (advanced parameters)
Returns
-------
Y: np.ndarray or torch.Tensor
complete array of nonlinear random projections of X,
of size self.n_components
If input is an ndarray, type is actually ContextArray,
with a context attribute to add metadata
"""
self.fit1d(X, None, packed, False, **override)
return self.transform(X)
def fit_transform2d(self, X, packed: bool = False, n_2d_features=None,
**override) -> ContextArray:
"""Performs the nonlinear random projections of 2d input vector(s).
This function is the one-liner equivalent of `fit2d` and `transform` calls.
.. warning:: when making several transform calls, prefer calling `fit2d`
and then `transform`, or you might encounter an inconsistency in the
transformation matrix.
If tqdm module is available, it is used for progress display
Parameters
----------
X: np.ndarray or torch.Tensor
a 2d input vector, or batch of 2d input_vectors, binary encoded, packed or not
packed: bool, optional
whether the input data is in bit-packed representation
if True, each input vector is assumed to be a 1d array, and the "real" number
of features must be provided as n_2d_features
defaults to False
n_2d_features: list, tuple or np.ndarray of length 2
If the input is bit-packed, specifies the shape of each input vector.
Not needed if the input isn't bit-packed.
override: keyword args for overriding transform settings (advanced parameters)
Returns
-------
Y: np.ndarray or torch.Tensor
complete array of nonlinear random projections of X,
of size self.n_components
If input is an ndarray, type is actually ContextArray,
with a context attribute to add metadata
"""
self.fit2d(X, n_2d_features, packed, False, **override)
return self.transform(X)
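    # Minimal sketch: the one-liner helpers above are equivalent to an explicit
    # fit followed by a transform on the same data.
    #
    #   Y = opu.fit_transform2d(X)   # same result as opu.fit2d(X) then opu.transform(X)
    #   Y = opu.fit_transform1d(V)   # same result as opu.fit1d(V) then opu.transform(V)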
def __fit(self, X, n_features: IntOrTuple,
packed: bool, online: bool, is_2d_features: bool,
**override):
"""Internal working of the fitXd calls
Instantiates a TransformRunner, and starts online acquisition if need be.
"""
if X is not None:
# Input is provided, do the fit with user input
user_input = OpuUserInput.from_input(X, packed, is_2d_features, n_features)
tr_settings = self._tr_settings(no_input=False, **override)
self._runner = FitTransformRunner(self._s, tr_settings, user_input,
device=self.device,
disable_pbar=self.disable_pbar)
else:
# Only dimensions are provided, no fitting happens on input
assert n_features, "either input vector or n_features must be specified"
# tr_settings has no input_roi, since it uses X to compute it
tr_settings = self._tr_settings(no_input=True, **override)
traits = InputTraits(n_features, packed)
self._runner = TransformRunner(self._s, tr_settings, traits,
device=self.device,
disable_pbar=self.disable_pbar)
self._acq_stack.close()
if online:
if self._s.no_single_transform:
raise RuntimeError("Online transform isn't available with this OPU")
# Start acquisition only if online. Batch transform start their own.
self._acq_stack.enter_context(self.device.acquiring(online=True))
@staticmethod
def _post_transform(output, user_input, encoder, decoder_cls):
"""Final steps after transform
1. reshape
2. decode the output
3. convert to tensor if user input was tensor
"""
output = user_input.reshape_output(output)
# If the encoder has a get_params method, its parameters are forwarded to the decoder init
if inspect.isclass(decoder_cls):
if hasattr(encoder, "get_params"):
decoder = decoder_cls(**encoder.get_params())
else:
decoder = decoder_cls()
else:
decoder = decoder_cls
output = decoder.transform(output)
if user_input.is_tensor:
# noinspection PyPackageRequirements,PyUnresolvedReferences
import torch
return torch.from_numpy(output)
else:
return output
def _raw_linear_transform(self, X, traits=None, user_input=None):
"""
Do linear_transform of X, and return both raw OPU output and decoded output in a tuple
"""
if traits is None:
assert self._runner, "Call fit1d or fit2d before linear_transform"
traits = self._runner.traits
if user_input is None:
user_input = OpuUserInput.from_traits(X, traits)
if self._s.simulated:
prepared_X = X
else:
assert self.device.acq_state.value != AcqState.online.value, \
"Can't do linear transform when acquisition is" \
" in online mode, only single vectors"
assert self._runner.t.input_roi_strategy == InputRoiStrategy.full, \
"ROI strategy must be full for linear_transform to be correct.\n" \
"Set input_roi_strategy attribute to InputRoiStrategy.full."
# X2 is now numpy 2D, whatever the initial shape and the type (torch or numpy)
X2 = user_input.reshape_input(raveled_features=True, leave_single_dim=True)
try:
import lightonopu.linear_reconstruction as reconstruction
except ImportError:
raise RuntimeError("Need a lightonopu version with linear_reconstruction module")
start = time.time()
prepared_X = reconstruction.encode_batch(X2)
self._trace(f"Encoding time {time.time() - start} s")
# Restore the dimension after batch encoding to something suitable for formatting
prepared_X = user_input.unravel_features(prepared_X)
# Run the OPU transform
prepared_input = OpuUserInput.from_traits(prepared_X, traits)
start = time.time()
with self.device.acquiring(n_images=self._s.n_samples_by_pass):
rp_opu = self._runner.transform(prepared_input, linear=True)
self._trace(f"Transform time {time.time() - start} s")
if self._s.simulated:
result_ctx = rp_opu
else:
# Decoding forgets about the context, re-add it to result afterwards
start = time.time()
result = reconstruction.decode_batch(rp_opu)
self._trace(f"Decoding time {time.time() - start} s")
result_ctx = ContextArray(result, rp_opu.context)
return rp_opu, result_ctx
def __enter__(self):
"""Context manager interface that acquires hardware resources
used by the OPU device."""
self.__active_before_enter = self.device.active
self.open()
return self
def __exit__(self, *args):
# Don't close if OPU was already active
if not self.__active_before_enter:
self.close()
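    # Minimal sketch of the two equivalent ways to hold the hardware resources,
    # matching open()/close() and the context manager above:
    #
    #   opu.open()
    #   try:
    #       Y = opu.fit_transform1d(X)
    #   finally:
    #       opu.close()
    #
    #   # or, equivalently:
    #   with opu:
    #       Y = opu.fit_transform1d(X)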
def open(self):
"""Acquires hardware resources used by the OPU device
.. seealso:: `close()` or use the context manager interface for
closing at the end of an indented block
"""
if self.device.active:
return
self.device.open()
# initial reservation for giving batch transforms a buffer ready to use
self.device.reserve(self._s.n_samples_by_pass)
if self._s.detect_trigger:
# Detect trigger issue, and take action if needed
issue = utils.detect_trigger_issue(self.device)
if issue:
# noinspection PyProtectedMember,PyUnresolvedReferences
self.device._OpuDevice__opu.nb_prelim = 1
self._debug("trigger issue detected, workaround applied")
else:
self._debug("trigger issue not detected")
self._debug("OPU opened")
def close(self):
"""Releases hardware resources used by the OPU device"""
self._acq_stack.close()
self.device.close()
self._debug("OPU closed")
@property
def config(self):
"""Returns the internal configuration object"""
# Load it when asked first time
if not self.__opu_config:
self.__opu_config = config.load_config(self.__config_file, self._trace)
if self.__config_override is not None:
utils.recurse_update(self.__opu_config, self.__config_override)
return self.__opu_config
@property
def rescale(self):
return self._rescale
@rescale.setter
def rescale(self, value):
# If str it's the enum value
if isinstance(value, str):
self._rescale = OutputRescaling[value.lower()]
else:
assert isinstance(value, OutputRescaling)
self._rescale = value
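    # Minimal sketch: the setter accepts either the OutputRescaling enum or its
    # case-insensitive string name, so these two assignments are equivalent:
    #
    #   opu.rescale = OutputRescaling.variance
    #   opu.rescale = "variance"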
@property
def max_n_components(self):
return self._output_roi.max_components
@property
def n_components(self) -> int:
return self._n_components
@n_components.setter
def n_components(self, value: int):
if self._s.simulated:
self._resize_rnd_matrix(self.max_n_features, value)
else:
self.device.output_roi = self._output_roi.compute_roi(value)
# We used to call device.reserve here, but moved to device.acquiring()
self._n_components = value
@property
def max_n_features(self) -> int:
return self._s.max_n_features
@max_n_features.setter
def max_n_features(self, value: int):
if not self._s.simulated:
raise AttributeError("max_n_feature can't be set if device is real")
self._resize_rnd_matrix(value, self._n_components)
self._max_n_features = value
@property
def _s(self) -> OpuSettings:
"""Returns immutable settings associated with the OPU
Settings are immutable (attrs frozen), so generate it at
each call. Performance impact is negligible"""
# Get default value
pass_default = attr.fields(OpuSettings).n_samples_by_pass.default
# Common settings to both simulated and base
kwargs = {"input_shape": self.device.input_shape,
"output_max_shape": self.device.output_shape_max,
"frametime_us": self._base_frametime_us,
"exposure_us": self._base_exposure_us}
if isinstance(self.device, SimulatedOpuDevice):
# Notice we never query self.config here, in order not to
# need a configuration file for simulated device
return OpuSettings(max_n_features=self._max_n_features,
n_samples_by_pass=pass_default,
simulated=True, **kwargs
)
return OpuSettings(
max_n_features=int(np.prod(self.device.input_shape)),
# Will use defaults of OpuSettings if not found
n_samples_by_pass=self.config.get("n_samples_by_pass", pass_default),
min_batch_size=self.config["input"].get("minimum_batch_size", 0),
allowed_roi=self.config["output"].get("allowed_roi"),
# min_n_components is linked to the minimum output size
min_n_components=self.config["output"].get("minimum_output_size", 0),
ones_range=self.config["ones_range"],
n_tries=self.config.get("n_transform_tries", 5),
detect_trigger=self.config.get("detect_trigger_issue", False),
no_single_transform=self.config.get("no_single_transform", False),
stdev=self.config["output"].get("stdev", 1.),
**kwargs)
def _resize_rnd_matrix(self, n_features: int, n_components: int):
"""Resize device's random matrix"""
assert isinstance(self.device, SimulatedOpuDevice)
rnd_mat = self.device.random_matrix
if rnd_mat is None or rnd_mat.shape != (n_features, n_components):
self._print("OPU: computing the random matrix... ", end='', flush=True)
self.device.build_random_matrix(n_features, n_components)
self._print("OK")
def version(self, devices=False):
"""Returns a multi-line string containing name and versions of the OPU"""
version = []
# Build OPU name
if not self._s.simulated:
version.append(opu_version(self.__opu_config))
# module version
version.append(f"lightonml version {lightonml.__version__}")
try:
# noinspection PyUnresolvedReferences
import lightonopu
version.append(f"lightonopu version {lightonopu.__version__}")
except ImportError:
pass
if devices:
version.append(self.device.versions())
return '\n'.join(version)
def __getstate__(self):
state = self.__dict__.copy()
# Remove logging functions, they can't be pickled
state.pop("_debug")
state.pop("_trace")
state.pop("_print")
# acq stack can't be pickled, will be restored
state.pop("_acq_stack")
# If acquisition is ongoing, close it
if not self._s.simulated:
state["__online_acq"] = self.device.acq_state.value == AcqState.online.value
self._acq_stack.close()
# Device itself is closed on pickling
return state
def __setstate__(self, state):
self.__dict__.update(state)
# Restore logging functions removed at getstate
self._debug = lightonml.get_debug_fn()
self._trace = lightonml.get_trace_fn()
self._print = lightonml.get_print_fn()
self._acq_stack = ExitStack()
# Restore online acquisition if it was the case
if state.get("__online_acq", False):
self._acq_stack.enter_context(self.device.acquiring(online=True))
| 1.882813 | 2 |
example/shovel/bar.py | demiurgestudios/shovel | 202 | 5123 | from shovel import task
@task
def hello(name='Foo'):
'''Prints "Hello, " followed by the provided name.
Examples:
shovel bar.hello
shovel bar.hello --name=Erin
http://localhost:3000/bar.hello?Erin'''
print('Hello, %s' % name)
@task
def args(*args):
'''Echos back all the args you give it.
This exists mostly to demonstrate the fact that shovel
is compatible with variable argument functions.
Examples:
shovel bar.args 1 2 3 4
http://localhost:3000/bar.args?1&2&3&4'''
for arg in args:
print('You said "%s"' % arg)
@task
def kwargs(**kwargs):
'''Echos back all the kwargs you give it.
This exists mostly to demonstrate that shovel is
compatible with keyword argument functions.
Examples:
shovel bar.kwargs --foo=5 --bar 5 --howdy hey
http://localhost:3000/bar.kwargs?foo=5&bar=5&howdy=hey'''
for key, val in kwargs.items():
print('You said "%s" => "%s"' % (key, val)) | 4.03125 | 4 |
scripts/external_libs/scapy-2.4.3/scapy/config.py | timgates42/trex-core | 956 | 5124 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) <NAME> <<EMAIL>>
# This program is published under a GPLv2 license
"""
Implementation of the configuration object.
"""
from __future__ import absolute_import
from __future__ import print_function
import functools
import os
import re
import time
import socket
import sys
from scapy import VERSION, base_classes
from scapy.consts import DARWIN, WINDOWS, LINUX, BSD, SOLARIS
from scapy.error import log_scapy, warning, ScapyInvalidPlatformException
from scapy.modules import six
from scapy.themes import NoTheme, apply_ipython_style
############
# Config #
############
class ConfClass(object):
def configure(self, cnf):
self.__dict__ = cnf.__dict__.copy()
def __repr__(self):
return str(self)
def __str__(self):
s = ""
keys = self.__class__.__dict__.copy()
keys.update(self.__dict__)
keys = sorted(keys)
for i in keys:
if i[0] != "_":
r = repr(getattr(self, i))
r = " ".join(r.split())
wlen = 76 - max(len(i), 10)
if len(r) > wlen:
r = r[:wlen - 3] + "..."
s += "%-10s = %s\n" % (i, r)
return s[:-1]
class Interceptor(object):
def __init__(self, name=None, default=None,
hook=None, args=None, kargs=None):
self.name = name
self.intname = "_intercepted_%s" % name
self.default = default
self.hook = hook
self.args = args if args is not None else []
self.kargs = kargs if kargs is not None else {}
def __get__(self, obj, typ=None):
if not hasattr(obj, self.intname):
setattr(obj, self.intname, self.default)
return getattr(obj, self.intname)
@staticmethod
def set_from_hook(obj, name, val):
int_name = "_intercepted_%s" % name
setattr(obj, int_name, val)
def __set__(self, obj, val):
setattr(obj, self.intname, val)
self.hook(self.name, val, *self.args, **self.kargs)
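# Minimal self-contained sketch of the Interceptor descriptor above: it stores
# the value under a private name and calls hook(name, value) on each assignment.
# The Demo class and log_change hook below are illustrative assumptions.
#
#   def log_change(name, value):
#       print("%s set to %r" % (name, value))
#
#   class Demo(object):
#       verb = Interceptor("verb", 2, log_change)
#
#   d = Demo()
#   d.verb = 3        # prints "verb set to 3"
#   print(d.verb)     # -> 3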
def _readonly(name):
default = Conf.__dict__[name].default
Interceptor.set_from_hook(conf, name, default)
raise ValueError("Read-only value !")
ReadOnlyAttribute = functools.partial(
Interceptor,
hook=(lambda name, *args, **kwargs: _readonly(name))
)
ReadOnlyAttribute.__doc__ = "Read-only class attribute"
class ProgPath(ConfClass):
universal_open = "open" if DARWIN else "xdg-open"
pdfreader = universal_open
psreader = universal_open
svgreader = universal_open
dot = "dot"
display = "display"
tcpdump = "tcpdump"
tcpreplay = "tcpreplay"
hexedit = "hexer"
tshark = "tshark"
wireshark = "wireshark"
ifconfig = "ifconfig"
class ConfigFieldList:
def __init__(self):
self.fields = set()
self.layers = set()
@staticmethod
def _is_field(f):
return hasattr(f, "owners")
def _recalc_layer_list(self):
self.layers = {owner for f in self.fields for owner in f.owners}
def add(self, *flds):
self.fields |= {f for f in flds if self._is_field(f)}
self._recalc_layer_list()
def remove(self, *flds):
self.fields -= set(flds)
self._recalc_layer_list()
def __contains__(self, elt):
if isinstance(elt, base_classes.Packet_metaclass):
return elt in self.layers
return elt in self.fields
def __repr__(self):
return "<%s [%s]>" % (self.__class__.__name__, " ".join(str(x) for x in self.fields)) # noqa: E501
class Emphasize(ConfigFieldList):
pass
class Resolve(ConfigFieldList):
pass
class Num2Layer:
def __init__(self):
self.num2layer = {}
self.layer2num = {}
def register(self, num, layer):
self.register_num2layer(num, layer)
self.register_layer2num(num, layer)
def register_num2layer(self, num, layer):
self.num2layer[num] = layer
def register_layer2num(self, num, layer):
self.layer2num[layer] = num
def __getitem__(self, item):
if isinstance(item, base_classes.Packet_metaclass):
return self.layer2num[item]
return self.num2layer[item]
def __contains__(self, item):
if isinstance(item, base_classes.Packet_metaclass):
return item in self.layer2num
return item in self.num2layer
def get(self, item, default=None):
return self[item] if item in self else default
def __repr__(self):
lst = []
for num, layer in six.iteritems(self.num2layer):
if layer in self.layer2num and self.layer2num[layer] == num:
dir = "<->"
else:
dir = " ->"
lst.append((num, "%#6x %s %-20s (%s)" % (num, dir, layer.__name__,
layer._name)))
for layer, num in six.iteritems(self.layer2num):
if num not in self.num2layer or self.num2layer[num] != layer:
lst.append((num, "%#6x <- %-20s (%s)" % (num, layer.__name__,
layer._name)))
lst.sort()
return "\n".join(y for x, y in lst)
class LayersList(list):
def __init__(self):
list.__init__(self)
self.ldict = {}
def __repr__(self):
return "\n".join("%-20s: %s" % (l.__name__, l.name) for l in self)
def register(self, layer):
self.append(layer)
if layer.__module__ not in self.ldict:
self.ldict[layer.__module__] = []
self.ldict[layer.__module__].append(layer)
def layers(self):
result = []
# This import may feel useless, but it is required for the eval below
import scapy # noqa: F401
for lay in self.ldict:
doc = eval(lay).__doc__
result.append((lay, doc.strip().split("\n")[0] if doc else lay))
return result
class CommandsList(list):
def __repr__(self):
s = []
for l in sorted(self, key=lambda x: x.__name__):
doc = l.__doc__.split("\n")[0] if l.__doc__ else "--"
s.append("%-20s: %s" % (l.__name__, doc))
return "\n".join(s)
def register(self, cmd):
self.append(cmd)
return cmd # return cmd so that method can be used as a decorator
def lsc():
"""Displays Scapy's default commands"""
print(repr(conf.commands))
class CacheInstance(dict, object):
__slots__ = ["timeout", "name", "_timetable", "__dict__"]
def __init__(self, name="noname", timeout=None):
self.timeout = timeout
self.name = name
self._timetable = {}
def flush(self):
self.__init__(name=self.name, timeout=self.timeout)
def __getitem__(self, item):
if item in self.__slots__:
return object.__getattribute__(self, item)
val = dict.__getitem__(self, item)
if self.timeout is not None:
t = self._timetable[item]
if time.time() - t > self.timeout:
raise KeyError(item)
return val
def get(self, item, default=None):
# overloading this method is needed to force the dict to go through
# the timetable check
try:
return self[item]
except KeyError:
return default
def __setitem__(self, item, v):
if item in self.__slots__:
return object.__setattr__(self, item, v)
self._timetable[item] = time.time()
dict.__setitem__(self, item, v)
def update(self, other):
for key, value in six.iteritems(other):
# We only update an element from `other` either if it does
# not exist in `self` or if the entry in `self` is older.
if key not in self or self._timetable[key] < other._timetable[key]:
dict.__setitem__(self, key, value)
self._timetable[key] = other._timetable[key]
def iteritems(self):
if self.timeout is None:
return six.iteritems(self.__dict__)
t0 = time.time()
return ((k, v) for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501
def iterkeys(self):
if self.timeout is None:
return six.iterkeys(self.__dict__)
t0 = time.time()
return (k for k in six.iterkeys(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501
def __iter__(self):
return six.iterkeys(self.__dict__)
def itervalues(self):
if self.timeout is None:
return six.itervalues(self.__dict__)
t0 = time.time()
return (v for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501
def items(self):
if self.timeout is None:
return dict.items(self)
t0 = time.time()
return [(k, v) for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501
def keys(self):
if self.timeout is None:
return dict.keys(self)
t0 = time.time()
return [k for k in six.iterkeys(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501
def values(self):
if self.timeout is None:
return list(six.itervalues(self))
t0 = time.time()
return [v for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501
def __len__(self):
if self.timeout is None:
return dict.__len__(self)
return len(self.keys())
def summary(self):
return "%s: %i valid items. Timeout=%rs" % (self.name, len(self), self.timeout) # noqa: E501
def __repr__(self):
s = []
if self:
mk = max(len(k) for k in six.iterkeys(self.__dict__))
fmt = "%%-%is %%s" % (mk + 1)
for item in six.iteritems(self.__dict__):
s.append(fmt % item)
return "\n".join(s)
class NetCache:
def __init__(self):
self._caches_list = []
def add_cache(self, cache):
self._caches_list.append(cache)
setattr(self, cache.name, cache)
def new_cache(self, name, timeout=None):
c = CacheInstance(name=name, timeout=timeout)
self.add_cache(c)
def __delattr__(self, attr):
raise AttributeError("Cannot delete attributes")
def update(self, other):
for co in other._caches_list:
if hasattr(self, co.name):
getattr(self, co.name).update(co)
else:
self.add_cache(co.copy())
def flush(self):
for c in self._caches_list:
c.flush()
def __repr__(self):
return "\n".join(c.summary() for c in self._caches_list)
def _version_checker(module, minver):
"""Checks that module has a higher version that minver.
params:
- module: a module to test
- minver: a tuple of versions
"""
# We could use LooseVersion, but distutils imports imp which is deprecated
version_regexp = r'[a-z]?((?:\d|\.)+\d+)(?:\.dev[0-9]+)?'
version_tags = re.match(version_regexp, module.__version__)
if not version_tags:
return False
version_tags = version_tags.group(1).split(".")
version_tags = tuple(int(x) for x in version_tags)
return version_tags >= minver
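# Minimal sketch of _version_checker: it parses module.__version__ and compares
# it against the minimum version tuple.
#
#   import cryptography
#   _version_checker(cryptography, (1, 7))   # True when cryptography >= 1.7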
def isCryptographyValid():
"""
Check if the cryptography library is present, and if it is recent enough
for most usages in scapy (v1.7 or later).
"""
try:
import cryptography
except ImportError:
return False
return _version_checker(cryptography, (1, 7))
def isCryptographyRecent():
"""
Check if the cryptography library is recent (2.0 and later)
"""
try:
import cryptography
except ImportError:
return False
return _version_checker(cryptography, (2, 0))
def isCryptographyAdvanced():
"""
Check if the cryptography library is present, and if it supports X25519,
ChaCha20Poly1305 and such (v2.0 or later).
"""
try:
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey # noqa: E501
X25519PrivateKey.generate()
except Exception:
return False
else:
return True
def isPyPy():
"""Returns either scapy is running under PyPy or not"""
try:
import __pypy__ # noqa: F401
return True
except ImportError:
return False
def _prompt_changer(attr, val):
"""Change the current prompt theme"""
try:
sys.ps1 = conf.color_theme.prompt(conf.prompt)
except Exception:
pass
try:
apply_ipython_style(get_ipython())
except NameError:
pass
def _set_conf_sockets():
"""Populate the conf.L2Socket and conf.L3Socket
according to the various use_* parameters
"""
from scapy.main import _load
if conf.use_bpf and not BSD:
Interceptor.set_from_hook(conf, "use_bpf", False)
raise ScapyInvalidPlatformException("BSD-like (OSX, *BSD...) only !")
if not conf.use_pcap and SOLARIS:
Interceptor.set_from_hook(conf, "use_pcap", True)
raise ScapyInvalidPlatformException(
"Scapy only supports libpcap on Solaris !"
)
# we are already in an Interceptor hook, use Interceptor.set_from_hook
if conf.use_pcap or conf.use_dnet:
try:
from scapy.arch.pcapdnet import L2pcapListenSocket, L2pcapSocket, \
L3pcapSocket
except (OSError, ImportError):
warning("No libpcap provider available ! pcap won't be used")
Interceptor.set_from_hook(conf, "use_pcap", False)
else:
conf.L3socket = L3pcapSocket
conf.L3socket6 = functools.partial(L3pcapSocket, filter="ip6")
conf.L2socket = L2pcapSocket
conf.L2listen = L2pcapListenSocket
# Update globals
_load("scapy.arch.pcapdnet")
return
if conf.use_bpf:
from scapy.arch.bpf.supersocket import L2bpfListenSocket, \
L2bpfSocket, L3bpfSocket
conf.L3socket = L3bpfSocket
conf.L3socket6 = functools.partial(L3bpfSocket, filter="ip6")
conf.L2socket = L2bpfSocket
conf.L2listen = L2bpfListenSocket
# Update globals
_load("scapy.arch.bpf")
return
if LINUX:
from scapy.arch.linux import L3PacketSocket, L2Socket, L2ListenSocket
conf.L3socket = L3PacketSocket
conf.L3socket6 = functools.partial(L3PacketSocket, filter="ip6")
conf.L2socket = L2Socket
conf.L2listen = L2ListenSocket
# Update globals
_load("scapy.arch.linux")
return
if WINDOWS:
from scapy.arch.windows import _NotAvailableSocket
from scapy.arch.windows.native import L3WinSocket, L3WinSocket6
conf.L3socket = L3WinSocket
conf.L3socket6 = L3WinSocket6
conf.L2socket = _NotAvailableSocket
conf.L2listen = _NotAvailableSocket
# No need to update globals on Windows
return
from scapy.supersocket import L3RawSocket
from scapy.layers.inet6 import L3RawSocket6
conf.L3socket = L3RawSocket
conf.L3socket6 = L3RawSocket6
def _socket_changer(attr, val):
if not isinstance(val, bool):
raise TypeError("This argument should be a boolean")
dependencies = { # Things that will be turned off
"use_pcap": ["use_bpf"],
"use_bpf": ["use_pcap"],
}
restore = {k: getattr(conf, k) for k in dependencies}
del restore[attr] # This is handled directly by _set_conf_sockets
if val: # Only if True
for param in dependencies[attr]:
Interceptor.set_from_hook(conf, param, False)
try:
_set_conf_sockets()
except (ScapyInvalidPlatformException, ImportError) as e:
for key, value in restore.items():
Interceptor.set_from_hook(conf, key, value)
if isinstance(e, ScapyInvalidPlatformException):
raise
def _loglevel_changer(attr, val):
"""Handle a change of conf.logLevel"""
log_scapy.setLevel(val)
class Conf(ConfClass):
"""This object contains the configuration of Scapy.
session : filename where the session will be saved
interactive_shell : can be "ipython", "python" or "auto". Default: Auto
stealth : if 1, prevents any unwanted packet from going out (ARP, DNS, ...)
checkIPID: if 0, doesn't check that IPID matches between IP sent and ICMP IP citation received # noqa: E501
if 1, checks that they either are equal or byte swapped equals (bug in some IP stacks) # noqa: E501
if 2, strictly checks that they are equals
checkIPsrc: if 1, checks IP src in IP and ICMP IP citation match (bug in some NAT stacks) # noqa: E501
checkIPinIP: if True, checks that IP-in-IP layers match. If False, do not
check IP layers that encapsulate another IP layer
check_TCPerror_seqack: if 1, also check that TCP seq and ack match the ones in ICMP citation # noqa: E501
iff : selects the default output interface for srp() and sendp() (default: "eth0")  # noqa: E501
verb : level of verbosity, from 0 (almost mute) to 3 (verbose)
promisc : default mode for listening socket (to get answers if you spoof on a lan) # noqa: E501
sniff_promisc : default mode for sniff()
filter : bpf filter added to every sniffing socket to exclude traffic from analysis # noqa: E501
histfile : history file
padding : includes padding in disassembled packets
except_filter : BPF filter for packets to ignore
debug_match : when 1, store received packets that are not matched into debug.recv  # noqa: E501
route : holds the Scapy routing table and provides methods to manipulate it
warning_threshold : how much time between warnings from the same place
ASN1_default_codec: Codec used by default for ASN1 objects
mib : holds MIB direct access dictionary
resolve : holds list of fields for which resolution should be done
noenum : holds list of enum fields for which conversion to string should NOT be done # noqa: E501
AS_resolver: choose the AS resolver class to use
extensions_paths: path or list of paths where extensions are to be looked for
contribs : a dict which can be used by contrib layers to store local configuration # noqa: E501
debug_tls: when 1, print some TLS session secrets when they are computed.
recv_poll_rate: how often to check for new packets. Defaults to 0.05s.
"""
version = ReadOnlyAttribute("version", VERSION)
session = ""
interactive = False
interactive_shell = ""
stealth = "not implemented"
iface = None
iface6 = None
layers = LayersList()
commands = CommandsList()
dot15d4_protocol = None # Used in dot15d4.py
logLevel = Interceptor("logLevel", log_scapy.level, _loglevel_changer)
checkIPID = False
checkIPsrc = True
checkIPaddr = True
checkIPinIP = True
check_TCPerror_seqack = False
verb = 2
prompt = Interceptor("prompt", ">>> ", _prompt_changer)
promisc = True
sniff_promisc = 1
raw_layer = None
raw_summary = False
default_l2 = None
l2types = Num2Layer()
l3types = Num2Layer()
L3socket = None
L3socket6 = None
L2socket = None
L2listen = None
BTsocket = None
USBsocket = None
min_pkt_size = 60
bufsize = 2**16
histfile = os.getenv('SCAPY_HISTFILE',
os.path.join(os.path.expanduser("~"),
".scapy_history"))
padding = 1
except_filter = ""
debug_match = False
debug_tls = False
wepkey = ""
cache_iflist = {}
route = None # Filed by route.py
route6 = None # Filed by route6.py
auto_fragment = True
debug_dissector = False
color_theme = Interceptor("color_theme", NoTheme(), _prompt_changer)
warning_threshold = 5
prog = ProgPath()
resolve = Resolve()
noenum = Resolve()
emph = Emphasize()
use_pypy = ReadOnlyAttribute("use_pypy", isPyPy())
use_pcap = Interceptor(
"use_pcap",
os.getenv("SCAPY_USE_PCAPDNET", "").lower().startswith("y"),
_socket_changer
)
# XXX use_dnet is deprecated
use_dnet = os.getenv("SCAPY_USE_PCAPDNET", "").lower().startswith("y")
use_bpf = Interceptor("use_bpf", False, _socket_changer)
use_npcap = False
ipv6_enabled = socket.has_ipv6
extensions_paths = "."
stats_classic_protocols = []
stats_dot11_protocols = []
temp_files = []
netcache = NetCache()
geoip_city = None
# can, tls, http are not loaded by default
load_layers = ['bluetooth', 'bluetooth4LE', 'dhcp', 'dhcp6', 'dns',
'dot11', 'dot15d4', 'eap', 'gprs', 'hsrp', 'inet',
'inet6', 'ipsec', 'ir', 'isakmp', 'l2', 'l2tp',
'llmnr', 'lltd', 'mgcp', 'mobileip', 'netbios',
'netflow', 'ntp', 'ppi', 'ppp', 'pptp', 'radius', 'rip',
'rtp', 'sctp', 'sixlowpan', 'skinny', 'smb', 'snmp',
'tftp', 'vrrp', 'vxlan', 'x509', 'zigbee']
contribs = dict()
crypto_valid = isCryptographyValid()
crypto_valid_recent = isCryptographyRecent()
crypto_valid_advanced = crypto_valid_recent and isCryptographyAdvanced()
fancy_prompt = True
auto_crop_tables = True
recv_poll_rate = 0.05
def __getattr__(self, attr):
# Those are loaded on runtime to avoid import loops
if attr == "manufdb":
from scapy.data import MANUFDB
return MANUFDB
if attr == "ethertypes":
from scapy.data import ETHER_TYPES
return ETHER_TYPES
if attr == "protocols":
from scapy.data import IP_PROTOS
return IP_PROTOS
if attr == "services_udp":
from scapy.data import UDP_SERVICES
return UDP_SERVICES
if attr == "services_tcp":
from scapy.data import TCP_SERVICES
return TCP_SERVICES
return object.__getattr__(self, attr)
if not Conf.ipv6_enabled:
log_scapy.warning("IPv6 support disabled in Python. Cannot load Scapy IPv6 layers.") # noqa: E501
for m in ["inet6", "dhcp6"]:
if m in Conf.load_layers:
Conf.load_layers.remove(m)
conf = Conf()
def crypto_validator(func):
"""
This is a decorator to be used for any method relying on the cryptography library. # noqa: E501
Its behaviour depends on the 'crypto_valid' attribute of the global 'conf'.
"""
def func_in(*args, **kwargs):
if not conf.crypto_valid:
raise ImportError("Cannot execute crypto-related method! "
"Please install python-cryptography v1.7 or later.") # noqa: E501
return func(*args, **kwargs)
return func_in
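# Minimal sketch of how crypto_validator is meant to be used: the wrapped
# method raises ImportError when conf.crypto_valid is False. The class and
# method names below are illustrative.
#
#   class KeyExchange(object):
#       @crypto_validator
#       def derive(self, *args):
#           ...  # code that relies on python-cryptography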
| 2.0625 | 2 |
tests/base/test_server.py | Prodigy123/rasa_nlu_zh | 4 | 5125 | <filename>tests/base/test_server.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import tempfile
import pytest
import time
from treq.testing import StubTreq
from rasa_nlu.config import RasaNLUConfig
import json
import io
from tests import utilities
from tests.utilities import ResponseTest
from rasa_nlu.server import RasaNLU
@pytest.fixture(scope="module")
def app(tmpdir_factory):
"""
This fixture makes use of the IResource interface of the Klein application to mock the Rasa HTTP server.
:param tmpdir_factory:
:return:
"""
_, nlu_log_file = tempfile.mkstemp(suffix="_rasa_nlu_logs.json")
_config = {
'write': nlu_log_file,
'port': -1, # unused in test app
"pipeline": "keyword",
"path": tmpdir_factory.mktemp("projects").strpath,
"server_model_dirs": {},
"data": "./data/demo-restaurants.json",
"emulate": "wit",
"max_training_processes": 1
}
config = RasaNLUConfig(cmdline_args=_config)
rasa = RasaNLU(config, testing=True)
return StubTreq(rasa.app.resource())
@pytest.fixture
def rasa_default_train_data():
with io.open('data/examples/rasa/demo-rasa.json',
encoding='utf-8') as train_file:
return json.loads(train_file.read())
@pytest.inlineCallbacks
def test_root(app):
response = yield app.get("http://dummy_uri/")
content = yield response.text()
assert response.code == 200 and content.startswith("hello")
@pytest.inlineCallbacks
def test_status(app):
response = yield app.get("http://dummy_uri/status")
rjs = yield response.json()
assert response.code == 200 and "available_projects" in rjs
assert "default" in rjs["available_projects"]
@pytest.inlineCallbacks
def test_config(app):
response = yield app.get("http://dummy_uri/config")
assert response.code == 200
@pytest.inlineCallbacks
def test_version(app):
response = yield app.get("http://dummy_uri/version")
rjs = yield response.json()
assert response.code == 200 and "version" in rjs
@pytest.mark.parametrize("response_test", [
ResponseTest(
"http://dummy_uri/parse?q=hello",
[{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello"}]
),
ResponseTest(
"http://dummy_uri/parse?query=hello",
[{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello"}]
),
ResponseTest(
"http://dummy_uri/parse?q=hello ńöñàśçií",
[{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello ńöñàśçií"}]
),
ResponseTest(
"http://dummy_uri/parse?q=",
[{"entities": {}, "confidence": 0.0, "intent": None, "_text": ""}]
),
])
@pytest.inlineCallbacks
def test_get_parse(app, response_test):
response = yield app.get(response_test.endpoint)
rjs = yield response.json()
assert response.code == 200
assert len(rjs) == 1
assert all(prop in rjs[0] for prop in ['entities', 'intent', '_text', 'confidence'])
@pytest.mark.parametrize("response_test", [
ResponseTest(
"http://dummy_uri/parse",
[{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello"}],
payload={"q": "hello"}
),
ResponseTest(
"http://dummy_uri/parse",
[{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello"}],
payload={"query": "hello"}
),
ResponseTest(
"http://dummy_uri/parse",
[{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello ńöñàśçií"}],
payload={"q": "hello ńöñàśçií"}
),
])
@pytest.inlineCallbacks
def test_post_parse(app, response_test):
response = yield app.post(response_test.endpoint, data=json.dumps(response_test.payload),
content_type='application/json')
rjs = yield response.json()
assert response.code == 200
assert len(rjs) == 1
assert all(prop in rjs[0] for prop in ['entities', 'intent', '_text', 'confidence'])
@utilities.slowtest
@pytest.inlineCallbacks
def test_post_train(app, rasa_default_train_data):
response = app.post("http://dummy_uri/train", data=json.dumps(rasa_default_train_data),
content_type='application/json')
time.sleep(3)
app.flush()
response = yield response
rjs = yield response.json()
assert response.code == 404, "A project name to train must be specified"
assert "error" in rjs
@utilities.slowtest
@pytest.inlineCallbacks
def test_post_train_internal_error(app, rasa_default_train_data):
response = app.post("http://dummy_uri/train?project=test",
data=json.dumps({"data": "dummy_data_for_triggering_an_error"}),
content_type='application/json')
time.sleep(3)
app.flush()
response = yield response
rjs = yield response.json()
assert response.code == 500, "The training data format is not valid"
assert "error" in rjs
@pytest.inlineCallbacks
def test_model_hot_reloading(app, rasa_default_train_data):
query = "http://dummy_uri/parse?q=hello&project=my_keyword_model"
response = yield app.get(query)
assert response.code == 404, "Project should not exist yet"
train_u = "http://dummy_uri/train?project=my_keyword_model&pipeline=keyword"
response = app.post(train_u,
data=json.dumps(rasa_default_train_data),
content_type='application/json')
time.sleep(3)
app.flush()
response = yield response
assert response.code == 200, "Training should end successfully"
response = yield app.get(query)
assert response.code == 200, "Project should now exist after it got trained"
| 2.171875 | 2 |
bugsnag/configuration.py | ForroKulcs/bugsnag-python | 0 | 5126 | <filename>bugsnag/configuration.py<gh_stars>0
import os
import platform
import socket
import sysconfig
from typing import List, Any, Tuple, Union
import warnings
from bugsnag.sessiontracker import SessionMiddleware
from bugsnag.middleware import DefaultMiddleware, MiddlewareStack
from bugsnag.utils import (fully_qualified_class_name, validate_str_setter,
validate_bool_setter, validate_iterable_setter,
validate_required_str_setter)
from bugsnag.delivery import (create_default_delivery, DEFAULT_ENDPOINT,
DEFAULT_SESSIONS_ENDPOINT)
from bugsnag.uwsgi import warn_if_running_uwsgi_without_threads
try:
from contextvars import ContextVar
_request_info = ContextVar('bugsnag-request', default=None) # type: ignore
except ImportError:
from bugsnag.utils import ThreadContextVar
_request_info = ThreadContextVar('bugsnag-request', default=None) # type: ignore # noqa: E501
__all__ = ('Configuration', 'RequestConfiguration')
class Configuration:
"""
Global app-level Bugsnag configuration settings.
"""
def __init__(self):
self.api_key = os.environ.get('BUGSNAG_API_KEY', None)
self.release_stage = os.environ.get("BUGSNAG_RELEASE_STAGE",
"production")
self.notify_release_stages = None
self.auto_notify = True
self.send_code = True
self.send_environment = False
self.asynchronous = True
self.delivery = create_default_delivery()
self.lib_root = sysconfig.get_path('purelib')
self.project_root = os.getcwd()
self.app_type = None
self.app_version = None
self.params_filters = ["password", "password_confirmation", "cookie",
"authorization"]
self.ignore_classes = [
"KeyboardInterrupt",
"django.http.Http404",
"django.http.response.Http404",
]
self.endpoint = DEFAULT_ENDPOINT
self.session_endpoint = DEFAULT_SESSIONS_ENDPOINT
self.auto_capture_sessions = True
self.traceback_exclude_modules = []
self.middleware = MiddlewareStack()
self.internal_middleware = MiddlewareStack()
self.internal_middleware.append(DefaultMiddleware)
self.internal_middleware.append(SessionMiddleware)
self.proxy_host = None
if not os.getenv("DYNO"):
self.hostname = socket.gethostname()
else:
self.hostname = None
self.runtime_versions = {"python": platform.python_version()}
def configure(self, api_key=None, app_type=None, app_version=None,
asynchronous=None, auto_notify=None,
auto_capture_sessions=None, delivery=None, endpoint=None,
hostname=None, ignore_classes=None, lib_root=None,
notify_release_stages=None, params_filters=None,
project_root=None, proxy_host=None, release_stage=None,
send_code=None, send_environment=None, session_endpoint=None,
traceback_exclude_modules=None):
"""
Validate and set configuration options. Will warn if an option is of an
incorrect type.
"""
if api_key is not None:
self.api_key = api_key
if app_type is not None:
self.app_type = app_type
if app_version is not None:
self.app_version = app_version
if asynchronous is not None:
self.asynchronous = asynchronous
if auto_notify is not None:
self.auto_notify = auto_notify
if auto_capture_sessions is not None:
self.auto_capture_sessions = auto_capture_sessions
if delivery is not None:
self.delivery = delivery
if endpoint is not None:
self.endpoint = endpoint
if hostname is not None:
self.hostname = hostname
if ignore_classes is not None:
self.ignore_classes = ignore_classes
if lib_root is not None:
self.lib_root = lib_root
if notify_release_stages is not None:
self.notify_release_stages = notify_release_stages
if params_filters is not None:
self.params_filters = params_filters
if project_root is not None:
self.project_root = project_root
if proxy_host is not None:
self.proxy_host = proxy_host
if release_stage is not None:
self.release_stage = release_stage
if send_code is not None:
self.send_code = send_code
if send_environment is not None:
self.send_environment = send_environment
if session_endpoint is not None:
self.session_endpoint = session_endpoint
if traceback_exclude_modules is not None:
self.traceback_exclude_modules = traceback_exclude_modules
return self
def get(self, name):
"""
Get a single configuration option
"""
warnings.warn('Using get() to retrieve a Configuration property is ' +
'deprecated in favor of referencing properties directly',
DeprecationWarning)
return getattr(self, name)
@property
def api_key(self):
"""
Unique application identifier
"""
return self._api_key
@api_key.setter # type: ignore
@validate_required_str_setter
def api_key(self, value: str):
self._api_key = value
@property
def app_type(self):
"""
Category for the current application or task
"""
return self._app_type
@app_type.setter # type: ignore
@validate_str_setter
def app_type(self, value: str):
self._app_type = value
@property
def app_version(self):
"""
Release version of the current application
"""
return self._app_version
@app_version.setter # type: ignore
@validate_str_setter
def app_version(self, value: str):
self._app_version = value
@property
def asynchronous(self):
"""
If API requests should be sent asynchronously
"""
return self._asynchronous
@asynchronous.setter # type: ignore
@validate_bool_setter
def asynchronous(self, value: bool):
self._asynchronous = value
if value:
warn_if_running_uwsgi_without_threads()
@property
def auto_capture_sessions(self):
"""
If sessions should be automatically detected and delivered from web
request integrations
"""
return self._auto_capture_sessions
@auto_capture_sessions.setter # type: ignore
@validate_bool_setter
def auto_capture_sessions(self, value: bool):
self._auto_capture_sessions = value
@property
def auto_notify(self):
"""
If uncaught exceptions should be automatically captured and reported
"""
return self._auto_notify
@auto_notify.setter # type: ignore
@validate_bool_setter
def auto_notify(self, value: bool):
self._auto_notify = value
@property
def delivery(self):
"""
Transport mechanism used to make API requests. Implement the Delivery
interface to customize how requests are sent.
"""
return self._delivery
@delivery.setter # type: ignore
def delivery(self, value):
if hasattr(value, 'deliver') and callable(value.deliver):
self._delivery = value
else:
message = ('delivery should implement Delivery interface, got ' +
'{0}. This will be an error in a future release.')
warnings.warn(message.format(type(value).__name__), RuntimeWarning)
@property
def endpoint(self):
"""
Event API endpoint. Set this property if using Bugsnag On-Premise.
>>> config = Configuration()
>>> config.endpoint = 'https://notify.bugsnag.example.co'
"""
return self._endpoint
@endpoint.setter # type: ignore
@validate_required_str_setter
def endpoint(self, value: str):
self._endpoint = value
@property
def hostname(self):
"""
The host name of the application server. This value is automatically
detected for Heroku applications and included in event device metadata.
"""
return self._hostname
@hostname.setter # type: ignore
@validate_str_setter
def hostname(self, value: str):
self._hostname = value
@property
def ignore_classes(self):
"""
Fully qualified class names which should be ignored when capturing
uncaught exceptions and other events. KeyboardInterrupt and Http404
exceptions are ignored by default.
"""
return self._ignore_classes
@ignore_classes.setter # type: ignore
@validate_iterable_setter
def ignore_classes(self, value: Union[List[str], Tuple[str]]):
self._ignore_classes = value
@property
def lib_root(self):
"""
The path to the Python library. Any traceback frame which contains
lib_root as a prefix is considered out-of-project. The prefix is also
stripped to make file names easier to read.
"""
return self._lib_root
@lib_root.setter # type: ignore
@validate_str_setter
def lib_root(self, value: str):
self._lib_root = value
@property
def notify_release_stages(self):
"""
A list of release_stage values which are permitted to capture and send
events and sessions. By default this value is None and all events and
sessions are delivered.
"""
return self._notify_release_stages
@notify_release_stages.setter # type: ignore
@validate_iterable_setter
def notify_release_stages(self, value: List[str]):
self._notify_release_stages = value
@property
def params_filters(self):
"""
A list of filters applied to event metadata to prevent the values from
being sent in events. By default the following keys are filtered:
* authorization
* cookie
* password
* password_confirmation
"""
return self._params_filters
@params_filters.setter # type: ignore
@validate_iterable_setter
def params_filters(self, value: List[str]):
self._params_filters = value
@property
def project_root(self):
"""
The working directory containing the application source code.
Traceback file paths which contain this prefix are considered a part of
the project. This prefix is also stripped to increase file name
readability in traceback lines.
"""
return self._project_root
@project_root.setter # type: ignore
@validate_str_setter
def project_root(self, value: str):
self._project_root = value
@property
def proxy_host(self):
"""
The host name of the proxy to use to deliver requests, if any
"""
return self._proxy_host
@proxy_host.setter # type: ignore
@validate_str_setter
def proxy_host(self, value: str):
self._proxy_host = value
@property
def release_stage(self):
"""
The development phase of the deployed application. This value is used
to differentiate events which occur in production vs development or
staging environments.
"""
return self._release_stage
@release_stage.setter # type: ignore
@validate_str_setter
def release_stage(self, value: str):
self._release_stage = value
@property
def send_code(self):
"""
If the source code lines immediately surrounding traceback locations
should be sent with events
"""
return self._send_code
@send_code.setter # type: ignore
@validate_bool_setter
def send_code(self, value: bool):
self._send_code = value
@property
def send_environment(self):
"""
If the request environment should be automatically collected and
attached to events
"""
return self._send_environment
@send_environment.setter # type: ignore
@validate_bool_setter
def send_environment(self, value: bool):
self._send_environment = value
@property
def session_endpoint(self):
"""
Sessions API endpoint. Set this property if using Bugsnag On-Premise.
>>> config = Configuration()
>>> config.session_endpoint = 'https://sessions.bugsnag.example.co'
"""
return self._session_endpoint
@session_endpoint.setter # type: ignore
@validate_required_str_setter
def session_endpoint(self, value: str):
self._session_endpoint = value
@property
def traceback_exclude_modules(self):
"""
Modules which should be stripped from event tracebacks entirely
"""
return self._traceback_exclude_modules
@traceback_exclude_modules.setter # type: ignore
@validate_iterable_setter
def traceback_exclude_modules(self, value: List[str]):
self._traceback_exclude_modules = value
def should_notify(self) -> bool:
return self.notify_release_stages is None or \
(isinstance(self.notify_release_stages, (tuple, list)) and
self.release_stage in self.notify_release_stages)
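    # Minimal usage sketch: with these (illustrative) settings, should_notify()
    # is True only when release_stage is listed in notify_release_stages.
    #
    #   config = Configuration()
    #   config.configure(api_key="your-api-key",          # placeholder value
    #                    release_stage="staging",
    #                    notify_release_stages=["production", "staging"])
    #   config.should_notify()   # -> True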
def should_ignore(self, exception: BaseException) -> bool:
return self.ignore_classes is not None and \
fully_qualified_class_name(exception) in self.ignore_classes
class RequestConfiguration:
"""
Per-request Bugsnag configuration settings.
"""
@classmethod
def get_instance(cls):
"""
Get this thread's instance of the RequestConfiguration.
"""
try:
instance = _request_info.get()
except LookupError:
instance = None
if instance is None:
instance = RequestConfiguration()
_request_info.set(instance) # type: ignore
return instance
@classmethod
def clear(cls):
"""
Clear this thread's instance of the RequestConfiguration.
"""
_request_info.set(None)
def __init__(self):
self.context = None
self.grouping_hash = None
self.user = {}
self.metadata = {}
# legacy fields
self.user_id = None
self.extra_data = {}
self.request_data = {}
self.environment_data = {}
self.session_data = {}
def get(self, name) -> Any:
"""
Get a single configuration option
"""
return getattr(self, name)
def configure(self, **options):
"""
Set one or more configuration settings.
"""
for name, value in options.items():
setattr(self, name, value)
return self
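    # Minimal usage sketch: attach per-request data through the thread/context
    # local instance; the keys below are illustrative.
    #
    #   req_config = RequestConfiguration.get_instance()
    #   req_config.configure(user={"id": "1"},
    #                        metadata={"request": {"path": "/checkout"}})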
@property
def meta_data(self) -> Any:
warnings.warn('RequestConfiguration.meta_data has been renamed to ' +
'"metadata"', DeprecationWarning)
return self.metadata
| 1.945313 | 2 |
secret_injector/secret.py | failk8s/failk8s-operator | 0 | 5127 | import kopf
from .functions import global_logger, reconcile_secret
@kopf.on.event("", "v1", "secrets")
def injector_secret_event(type, event, logger, **_):
obj = event["object"]
namespace = obj["metadata"]["namespace"]
name = obj["metadata"]["name"]
# If secret already exists, indicated by type being None, the
# secret is added or modified later, do a full reconcilation to
# ensure that if now match will inject the secret.
with global_logger(logger):
if type in (None, "ADDED", "MODIFIED"):
reconcile_secret(name, namespace, obj)
| 2.109375 | 2 |
src/py/gee/utils.py | openforis/collectearthonline | 0 | 5128 | <filename>src/py/gee/utils.py
import datetime
import os
import ee
import math
import sys
import json
from ee.ee_exception import EEException
from gee.inputs import getLandsat, getS1
########## Helper functions ##########
def initialize(ee_account='', ee_key_path=''):
try:
if ee_account and ee_key_path and os.path.exists(ee_key_path):
credentials = ee.ServiceAccountCredentials(ee_account, ee_key_path)
ee.Initialize(credentials)
else:
ee.Initialize()
except Exception as e:
print(e)
def getReducer(reducer):
reducerName = reducer.lower()
if(reducerName == 'min'):
return ee.Reducer.min()
elif (reducerName == 'max'):
return ee.Reducer.max()
elif (reducerName == 'mean'):
return ee.Reducer.mean()
elif (reducerName == 'mode'):
return ee.Reducer.mode()
elif (reducerName == 'first'):
return ee.Reducer.first()
elif (reducerName == 'last'):
return ee.Reducer.last()
elif (reducerName == 'sum'):
return ee.Reducer.sum()
else:
return ee.Reducer.median()
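# Minimal sketch: getReducer maps a (case-insensitive) name onto an ee.Reducer,
# falling back to median for unrecognized names.
#
#   getReducer('Mean')      # -> ee.Reducer.mean()
#   getReducer('whatever')  # -> ee.Reducer.median()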
def reduceIC(imageCollection, reducer):
reducerName = reducer.lower()
if(reducerName == 'min'):
return imageCollection.min()
elif (reducerName == 'max'):
return imageCollection.max()
elif (reducerName == 'mean'):
return imageCollection.mean()
elif (reducerName == 'mode'):
return imageCollection.mode()
elif (reducerName == 'mosaic'):
return imageCollection.mosaic()
elif (reducerName == 'first'):
return imageCollection.first()
elif (reducerName == 'sum'):
return imageCollection.sum()
else:
return imageCollection.median()
def safeParseJSON(val):
if isinstance(val, dict):
return val
else:
try:
return json.loads(val)
except Exception as e:
try:
return json.loads(val.replace("'", "\""))
except Exception as e:
return {}
########## Helper routes ##########
def listAvailableBands(name, assetType):
eeImage = None
if assetType == "imageCollection":
eeImage = ee.ImageCollection(name).first()
else:
eeImage = ee.Image(name)
return {
'bands': eeImage.bandNames().getInfo(),
'imageName': name
}
########## ee.Image ##########
def imageToMapId(image, visParams):
eeImage = ee.Image(image)
mapId = eeImage.getMapId(visParams)
# TODO, just return the URL so it is easier to deduce what the routes return.
return {
'url': mapId['tile_fetcher'].url_format
}
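# Minimal usage sketch: imageToMapId returns a tile URL template suitable for a
# web map client. The asset id and visualization parameters are illustrative.
#
#   initialize()
#   tile = imageToMapId('USGS/SRTMGL1_003',
#                       {'min': 0, 'max': 3000, 'palette': 'blue,green,red'})
#   tile['url']   # a {z}/{x}/{y} tile URL template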
########## ee.ImageCollection ##########
def imageCollectionToMapId(assetId, visParams, reducer, startDate, endDate):
eeCollection = ee.ImageCollection(assetId)
if (startDate and endDate):
eeFilterDate = ee.Filter.date(startDate, endDate)
eeCollection = eeCollection.filter(eeFilterDate)
reducedImage = ee.Image(reduceIC(eeCollection, reducer))
return imageToMapId(reducedImage, visParams)
# TODO, should we allow user to select first cloud free image again?
def firstCloudFreeImageInMosaicToMapId(assetId, visParams, startDate, endDate):
skipCloudMask = False
eeCollection = ee.ImageCollection(assetId)
lowerAsset = assetId.lower()
if("b2" not in visParams["bands"].lower()):
skipCloudMask = True
elif ("lc8" in lowerAsset):
skipCloudMask = False
elif ("le7" in lowerAsset):
skipCloudMask = False
elif ("lt5" in lowerAsset):
skipCloudMask = False
else:
skipCloudMask = True
if (startDate and endDate):
eeFilterDate = ee.Filter.date(startDate, endDate)
eeCollection = eeCollection.filter(eeFilterDate)
eeFirstImage = ee.Image(eeCollection.mosaic())
try:
if(skipCloudMask == False):
sID = ''
if ("lc8" in lowerAsset):
sID = 'OLI_TIRS'
elif ("le7" in lowerAsset):
sID = 'ETM'
elif ("lt5" in lowerAsset):
sID = 'TM'
scored = ee.Algorithms.Landsat.simpleCloudScore(
eeFirstImage.set('SENSOR_ID', sID))
mask = scored.select(['cloud']).lte(20)
masked = eeFirstImage.updateMask(mask)
values = imageToMapId(masked, visParams)
else:
values = imageToMapId(eeFirstImage, visParams)
except EEException as ine:
imageToMapId(eeFirstImage, visParams)
return values
########## ee.FeatureCollection ##########
def getFeatureCollectionTileUrl(featureCollection, field, matchID, visParams):
fc = ee.FeatureCollection(featureCollection)
single = fc.filter(ee.Filter.equals(field, matchID))
mapId = ee.Image().paint(single, 0, 2).getMapId(visParams)
return mapId['tile_fetcher'].url_format
########## Pre defined ee.ImageCollection ##########
# Index Image Collection
def lsMaskClouds(img, cloudThresh=10):
score = ee.Image(1.0)
# Clouds are reasonably bright in the blue band.
blue_rescale = img.select('blue').subtract(ee.Number(0.1)).divide(
ee.Number(0.3).subtract(ee.Number(0.1)))
score = score.min(blue_rescale)
# Clouds are reasonably bright in all visible bands.
visible = img.select('red').add(
img.select('green')).add(img.select('blue'))
visible_rescale = visible.subtract(ee.Number(0.2)).divide(
ee.Number(0.8).subtract(ee.Number(0.2)))
score = score.min(visible_rescale)
# Clouds are reasonably bright in all infrared bands.
infrared = img.select('nir').add(
img.select('swir1')).add(img.select('swir2'))
infrared_rescale = infrared.subtract(ee.Number(0.3)).divide(
ee.Number(0.8).subtract(ee.Number(0.3)))
score = score.min(infrared_rescale)
# Clouds are reasonably cool in temperature.
temp_rescale = img.select('temp').subtract(ee.Number(300)).divide(
ee.Number(290).subtract(ee.Number(300)))
score = score.min(temp_rescale)
# However, clouds are not snow.
ndsi = img.normalizedDifference(['green', 'swir1'])
ndsi_rescale = ndsi.subtract(ee.Number(0.8)).divide(
ee.Number(0.6).subtract(ee.Number(0.8)))
score = score.min(ndsi_rescale).multiply(100).byte()
mask = score.lt(cloudThresh).rename(['cloudMask'])
img = img.updateMask(mask)
return img.addBands(score)
def s2MaskClouds(img):
qa = img.select('QA60')
# Bits 10 and 11 are clouds and cirrus, respectively.
cloudBitMask = int(math.pow(2, 10))
cirrusBitMask = int(math.pow(2, 11))
# clear if both flags set to zero.
clear = qa.bitwiseAnd(cloudBitMask).eq(0).And(
qa.bitwiseAnd(cirrusBitMask).eq(0))
return img.divide(10000).updateMask(clear).set('system:time_start', img.get('system:time_start'))
def bandPassAdjustment(img):
keep = img.select(['temp'])
bands = ['blue', 'green', 'red', 'nir', 'swir1', 'swir2']
# linear regression coefficients for adjustment
gain = ee.Array([[0.977], [1.005], [0.982], [1.001], [1.001], [0.996]])
bias = ee.Array([[-0.00411], [-0.00093], [0.00094],
[-0.00029], [-0.00015], [-0.00097]])
# Make an Array Image, with a 2-D Array per pixel.
arrayImage2D = img.select(bands).toArray().toArray(1)
# apply correction factors and reproject array to geographic image
componentsImage = ee.Image(gain).multiply(arrayImage2D).add(ee.Image(bias)) \
.arrayProject([0]).arrayFlatten([bands]).float()
# .set('system:time_start',img.get('system:time_start'));
return keep.addBands(componentsImage)
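# The gain/bias pairs above apply a per-band linear adjustment to Sentinel-2 reflectance so the
# collection can be merged with the Landsat TOA collections assembled below.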
def getLandSatMergedCollection():
sensorBandDictLandsatTOA = {'L8': [1, 2, 3, 4, 5, 9, 6],
'L7': [0, 1, 2, 3, 4, 5, 7],
'L5': [0, 1, 2, 3, 4, 5, 6],
'L4': [0, 1, 2, 3, 4, 5, 6],
'S2': [1, 2, 3, 7, 11, 10, 12]}
bandNamesLandsatTOA = ['blue', 'green',
'red', 'nir', 'swir1', 'temp', 'swir2']
metadataCloudCoverMax = 100
lt4 = ee.ImageCollection('LANDSAT/LT4_L1T_TOA') \
.filterMetadata('CLOUD_COVER', 'less_than', metadataCloudCoverMax) \
.select(sensorBandDictLandsatTOA['L4'], bandNamesLandsatTOA).map(lsMaskClouds)
lt5 = ee.ImageCollection('LANDSAT/LT5_L1T_TOA') \
.filterMetadata('CLOUD_COVER', 'less_than', metadataCloudCoverMax) \
.select(sensorBandDictLandsatTOA['L5'], bandNamesLandsatTOA).map(lsMaskClouds)
le7 = ee.ImageCollection('LANDSAT/LE7_L1T_TOA') \
.filterMetadata('CLOUD_COVER', 'less_than', metadataCloudCoverMax) \
.select(sensorBandDictLandsatTOA['L7'], bandNamesLandsatTOA).map(lsMaskClouds)
lc8 = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
.filterMetadata('CLOUD_COVER', 'less_than', metadataCloudCoverMax) \
.select(sensorBandDictLandsatTOA['L8'], bandNamesLandsatTOA).map(lsMaskClouds)
s2 = ee.ImageCollection('COPERNICUS/S2') \
.filterMetadata('CLOUDY_PIXEL_PERCENTAGE', 'less_than', metadataCloudCoverMax) \
.map(s2MaskClouds).select(sensorBandDictLandsatTOA['S2'], bandNamesLandsatTOA) \
.map(bandPassAdjustment)
return ee.ImageCollection(lt4.merge(lt5).merge(le7).merge(lc8).merge(s2))
def filteredImageNDVIToMapId(startDate, endDate):
def calcNDVI(img):
return img.expression('(i.nir - i.red) / (i.nir + i.red)', {'i': img}).rename(['NDVI']) \
.set('system:time_start', img.get('system:time_start'))
eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate)
colorPalette = 'c9c0bf,435ebf,eee8aa,006400'
visParams = {'opacity': 1, 'max': 1,
'min': -1, 'palette': colorPalette}
    ndviImage = ee.Image(eeCollection.map(calcNDVI).mean())
    return imageToMapId(ndviImage, visParams)
def filteredImageEVIToMapId(startDate, endDate):
def calcEVI(img):
return img.expression('2.5 * (i.nir - i.red) / (i.nir + 6.0 * i.red - 7.5 * i.blue + 1)', {'i': img}).rename(['EVI']) \
.set('system:time_start', img.get('system:time_start'))
eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate)
colorPalette = 'F5F5F5,E6D3C5,C48472,B9CF63,94BF3D,6BB037,42A333,00942C,008729,007824,004A16'
visParams = {'opacity': 1, 'max': 1,
'min': -1, 'palette': colorPalette}
eviImage = ee.Image(eeCollection.map(calcEVI).mean())
return imageToMapId(eviImage, visParams)
def filteredImageEVI2ToMapId(startDate, endDate):
def calcEVI2(img):
return img.expression('2.5 * (i.nir - i.red) / (i.nir + 2.4 * i.red + 1)', {'i': img}).rename(['EVI2']) \
.set('system:time_start', img.get('system:time_start'))
eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate)
colorPalette = 'F5F5F5,E6D3C5,C48472,B9CF63,94BF3D,6BB037,42A333,00942C,008729,007824,004A16'
visParams = {'opacity': 1, 'max': 1,
'min': -1, 'palette': colorPalette}
eviImage = ee.Image(eeCollection.map(calcEVI2).mean())
return imageToMapId(eviImage, visParams)
def filteredImageNDMIToMapId(startDate, endDate):
def calcNDMI(img):
return img.expression('(i.nir - i.swir1) / (i.nir + i.swir1)', {'i': img}).rename(['NDMI']) \
.set('system:time_start', img.get('system:time_start'))
eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate)
colorPalette = '0000FE,2E60FD,31B0FD,00FEFE,50FE00,DBFE66,FEFE00,FFBB00,FF6F00,FE0000'
visParams = {'opacity': 1, 'max': 1,
'min': -1, 'palette': colorPalette}
    ndmiImage = ee.Image(eeCollection.map(calcNDMI).mean())
    return imageToMapId(ndmiImage, visParams)
def filteredImageNDWIToMapId(startDate, endDate):
def calcNDWI(img):
return img.expression('(i.green - i.nir) / (i.green + i.nir)', {'i': img}).rename(['NDWI']) \
.set('system:time_start', img.get('system:time_start'))
eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate)
colorPalette = '505050,E8E8E8,00FF33,003300'
visParams = {'opacity': 1, 'max': 1,
'min': -1, 'palette': colorPalette}
    ndwiImage = ee.Image(eeCollection.map(calcNDWI).mean())
    return imageToMapId(ndwiImage, visParams)
def filteredImageByIndexToMapId(startDate, endDate, index):
lowerIndex = index.lower()
if (lowerIndex == 'ndvi'):
return filteredImageNDVIToMapId(startDate, endDate)
elif (lowerIndex == 'evi'):
return filteredImageEVIToMapId(startDate, endDate)
elif (lowerIndex == 'evi2'):
return filteredImageEVI2ToMapId(startDate, endDate)
elif (lowerIndex == 'ndmi'):
return filteredImageNDMIToMapId(startDate, endDate)
elif (lowerIndex == 'ndwi'):
return filteredImageNDWIToMapId(startDate, endDate)
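# Note: index names other than NDVI/EVI/EVI2/NDMI/NDWI fall through the chain above and the
# function returns None.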
def filteredImageCompositeToMapId(assetId, visParams, startDate, endDate, metadataCloudCoverMax, simpleCompositeVariable):
eeCollection = ee.ImageCollection(assetId)
if (startDate and endDate):
eeCollection = eeCollection.filterDate(startDate, endDate)
    eeCollection = eeCollection.filterMetadata(
'CLOUD_COVER',
'less_than',
metadataCloudCoverMax
)
eeMosaicImage = ee.Algorithms.Landsat.simpleComposite(
eeCollection,
simpleCompositeVariable,
10,
40,
True
)
return imageToMapId(eeMosaicImage, visParams)
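# The positional arguments passed to simpleComposite above are, per the Earth Engine API:
# percentile (simpleCompositeVariable), cloudScoreRange (10), maxDepth (40) and asFloat (True).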
def filteredSentinelComposite(visParams, startDate, endDate, metadataCloudCoverMax):
def cloudScore(img):
def rescale(img, exp, thresholds):
return img.expression(exp, {'img': img}).subtract(thresholds[0]).divide(thresholds[1] - thresholds[0])
score = ee.Image(1.0)
score = score.min(rescale(img, 'img.B2', [0.1, 0.3]))
score = score.min(rescale(img, 'img.B4 + img.B3 + img.B2', [0.2, 0.8]))
score = score.min(
rescale(img, 'img.B8 + img.B11 + img.B12', [0.3, 0.8]))
ndsi = img.normalizedDifference(['B3', 'B11'])
return score.min(rescale(ndsi, 'img', [0.8, 0.6]))
def cloudScoreS2(img):
rescale = img.divide(10000)
score = cloudScore(rescale).multiply(100).rename('cloudscore')
return img.addBands(score)
sentinel2 = ee.ImageCollection('COPERNICUS/S2')
f2017s2 = sentinel2.filterDate(startDate, endDate).filterMetadata(
'CLOUDY_PIXEL_PERCENTAGE', 'less_than', metadataCloudCoverMax)
m2017s2 = f2017s2.map(cloudScoreS2)
m2017s3 = m2017s2.median()
return imageToMapId(m2017s3, visParams)
def filteredSentinelSARComposite(visParams, startDate, endDate):
def toNatural(img):
return ee.Image(10).pow(img.divide(10))
def addRatioBands(img):
# not using angle band
vv = img.select('VV')
vh = img.select('VH')
vv_vh = vv.divide(vh).rename('VV/VH')
vh_vv = vh.divide(vv).rename('VH/VV')
return vv.addBands(vh).addBands(vv_vh).addBands(vh_vv)
sentinel1 = ee.ImageCollection('COPERNICUS/S1_GRD')
sentinel1 = sentinel1.filterDate(startDate, endDate) \
.filter(ee.Filter.listContains('transmitterReceiverPolarisation', 'VV')) \
.filter(ee.Filter.listContains('transmitterReceiverPolarisation', 'VH')) \
.filter(ee.Filter.eq('instrumentMode', 'IW'))
sentinel1 = sentinel1.map(toNatural)
sentinel1 = sentinel1.map(addRatioBands)
median = sentinel1.median()
return imageToMapId(median, visParams)
########## Time Series ##########
def getTimeSeriesByCollectionAndIndex(assetId, indexName, scale, coords, startDate, endDate, reducer):
geometry = None
indexCollection = None
if isinstance(coords[0], list):
geometry = ee.Geometry.Polygon(coords)
else:
geometry = ee.Geometry.Point(coords)
if indexName != None:
indexCollection = ee.ImageCollection(assetId).filterDate(
startDate, endDate).select(indexName)
else:
indexCollection = ee.ImageCollection(
assetId).filterDate(startDate, endDate)
def getIndex(image):
theReducer = getReducer(reducer)
if indexName != None:
indexValue = image.reduceRegion(
theReducer, geometry, scale).get(indexName)
else:
indexValue = image.reduceRegion(theReducer, geometry, scale)
date = image.get('system:time_start')
indexImage = ee.Image().set(
'indexValue', [ee.Number(date), indexValue])
return indexImage
def getClipped(image):
return image.clip(geometry)
clippedcollection = indexCollection.map(getClipped)
indexCollection1 = clippedcollection.map(getIndex)
indexCollection2 = indexCollection1.aggregate_array('indexValue')
return indexCollection2.getInfo()
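# The result is a list of [timestamp_ms, value] pairs: value is the reduced band value when
# indexName is given, otherwise a dict of reduced values keyed by band name.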
def getTimeSeriesByIndex(indexName, scale, coords, startDate, endDate, reducer):
bandsByCollection = {
'LANDSAT/LC08/C01/T1_TOA': ['B2', 'B3', 'B4', 'B5', 'B6', 'B7'],
'LANDSAT/LC08/C01/T2_TOA': ['B2', 'B3', 'B4', 'B5', 'B6', 'B7'],
'LANDSAT/LE07/C01/T1_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'],
'LANDSAT/LE07/C01/T2_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'],
'LANDSAT/LT05/C01/T1_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'],
'LANDSAT/LT05/C01/T2_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'],
'LANDSAT/LT04/C01/T1_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'],
'LANDSAT/LT04/C01/T2_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7']
}
indexes = {
'NDVI': '(nir - red) / (nir + red)',
'EVI': '2.5 * (nir - red) / (nir + 6.0 * red - 7.5 * blue + 1)',
'EVI2': '2.5 * (nir - red) / (nir + 2.4 * red + 1)',
'NDMI': '(nir - swir1) / (nir + swir1)',
'NDWI': '(green - nir) / (green + nir)',
'NBR': '(nir - swir2) / (nir + swir2)',
'LSAVI': '((nir - red) / (nir + red + 0.5)) * (1 + 0.5)'
}
def create(name):
def maskClouds(image):
def isSet(types):
""" https://landsat.usgs.gov/collectionqualityband """
typeByValue = {
'badPixels': 15,
'cloud': 16,
'shadow': 256,
'snow': 1024,
'cirrus': 4096
}
anySet = ee.Image(0)
for Type in types:
anySet = anySet.Or(image.select(
'BQA').bitwiseAnd(typeByValue[Type]).neq(0))
return anySet
return image.updateMask(isSet(['badPixels', 'cloud', 'shadow', 'cirrus']).Not())
def toIndex(image):
bands = bandsByCollection[name]
return image.expression(indexes[indexName], {
'blue': image.select(bands[0]),
'green': image.select(bands[1]),
'red': image.select(bands[2]),
'nir': image.select(bands[3]),
'swir1': image.select(bands[4]),
'swir2': image.select(bands[5]),
}).clamp(-1, 1).rename(['index'])
def toIndexWithTimeStart(image):
time = image.get('system:time_start')
image = maskClouds(image)
return toIndex(image).set('system:time_start', time)
#
if startDate and endDate:
return ee.ImageCollection(name).filterDate(startDate, endDate).filterBounds(geometry).map(toIndexWithTimeStart, True)
else:
return ee.ImageCollection(name).filterBounds(geometry).map(toIndexWithTimeStart, True)
def reduceRegion(image):
theReducer = getReducer(reducer)
reduced = image.reduceRegion(
theReducer, geometry=geometry, scale=scale, maxPixels=1e6)
return ee.Feature(None, {
'index': reduced.get('index'),
'timeIndex': [image.get('system:time_start'), reduced.get('index')]
})
geometry = None
if isinstance(coords[0], list) or isinstance(coords[0], tuple):
geometry = ee.Geometry.Polygon(coords)
else:
geometry = ee.Geometry.Point(coords)
collection = ee.ImageCollection([])
for name in bandsByCollection:
collection = collection.merge(create(name))
return ee.ImageCollection(ee.ImageCollection(collection).sort('system:time_start').distinct('system:time_start')) \
.map(reduceRegion) \
.filterMetadata('index', 'not_equals', None) \
.aggregate_array('timeIndex') \
.getInfo()
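# As with the single-collection helper above, the result is a list of [timestamp_ms, index_value]
# pairs, merged across every Landsat collection in bandsByCollection and sorted by time.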
########## Degradation##########
def getDegradationTileUrlByDateS1(geometry, date, visParams):
imDate = datetime.datetime.strptime(date, "%Y-%m-%d")
befDate = imDate - datetime.timedelta(days=1)
aftDate = imDate + datetime.timedelta(days=1)
if isinstance(geometry[0], list):
geometry = ee.Geometry.Polygon(geometry)
else:
geometry = ee.Geometry.Point(geometry)
sentinel1Data = getS1({
"targetBands": ['VV', 'VH', 'VV/VH'],
'region': geometry})
start = befDate.strftime('%Y-%m-%d')
end = aftDate.strftime('%Y-%m-%d')
selectedImage = sentinel1Data.filterDate(start, end).first()
selectedImage = ee.Image(selectedImage)
mapparams = selectedImage.getMapId(visParams)
return mapparams['tile_fetcher'].url_format
def getDegradationPlotsByPointS1(geometry, start, end):
if isinstance(geometry[0], list):
geometry = ee.Geometry.Polygon(geometry)
else:
geometry = ee.Geometry.Point(geometry)
sentinel1Data = getS1({
"targetBands": ['VV', 'VH', 'VV/VH'],
'region': geometry
}).filterDate(start, end)
def myimageMapper(img):
theReducer = ee.Reducer.mean()
indexValue = img.reduceRegion(theReducer, geometry, 30)
date = img.get('system:time_start')
visParams = {'bands': ['VV', 'VH', 'ratioVVVH'],
'min': [-15, -25, .40], 'max': [0, -10, 1], 'gamma': 1.6}
indexImage = ee.Image().set(
'indexValue', [ee.Number(date), indexValue])
return indexImage
lsd = sentinel1Data.map(myimageMapper, True)
indexCollection2 = lsd.aggregate_array('indexValue')
values = indexCollection2.getInfo()
return values
def getDegradationTileUrlByDate(geometry, date, visParams):
imDate = datetime.datetime.strptime(date, "%Y-%m-%d")
startDate = imDate - datetime.timedelta(days=1)
endDate = imDate + datetime.timedelta(days=1)
if isinstance(geometry[0], list):
geometry = ee.Geometry.Polygon(geometry)
else:
geometry = ee.Geometry.Point(geometry)
landsatData = getLandsat({
"start": startDate.strftime('%Y-%m-%d'),
"end": endDate.strftime('%Y-%m-%d'),
"targetBands": ['RED', 'GREEN', 'BLUE', 'SWIR1', 'NIR'],
"region": geometry,
"sensors": {"l4": False, "l5": False, "l7": False, "l8": True}
})
selectedImage = landsatData.first()
unmasked = ee.Image(selectedImage).multiply(10000).toInt16().unmask()
mapparams = unmasked.getMapId(visParams)
return mapparams['tile_fetcher'].url_format
def getDegradationPlotsByPoint(geometry, start, end, band):
if isinstance(geometry[0], list):
geometry = ee.Geometry.Polygon(geometry)
else:
geometry = ee.Geometry.Point(geometry)
landsatData = getLandsat({
"start": start,
"end": end,
"targetBands": [band],
"region": geometry,
"sensors": {"l4": True, "l5": True, "l7": True, "l8": True}
})
def myImageMapper(img):
theReducer = ee.Reducer.mean()
indexValue = img.reduceRegion(theReducer, geometry, 30)
date = img.get('system:time_start')
indexImage = ee.Image().set(
'indexValue',
[ee.Number(date), indexValue]
)
return indexImage
lsd = landsatData.map(myImageMapper, True)
indexCollection2 = lsd.aggregate_array('indexValue')
values = indexCollection2.getInfo()
return values
########## Stats ##########
def getStatistics(extent):
extentGeom = ee.Geometry.Polygon(extent)
elev = ee.Image('USGS/GTOPO30')
minmaxElev = elev.reduceRegion(
ee.Reducer.minMax(), extentGeom, 1000, maxPixels=500000000)
minElev = minmaxElev.get('elevation_min').getInfo()
maxElev = minmaxElev.get('elevation_max').getInfo()
ciesinPopGrid = ee.Image('CIESIN/GPWv4/population-count/2020')
popDict = ciesinPopGrid.reduceRegion(
ee.Reducer.sum(), extentGeom, maxPixels=500000000)
pop = popDict.get('population-count').getInfo()
pop = int(pop)
return {
'minElev': minElev,
'maxElev': maxElev,
'pop': pop
}
| 2.0625 | 2 |
userManagement/management/urls.py | shubhamguptaorg/user_managementl | 0 | 5129 | from django.contrib import admin
from django.urls import path,include
from django.views.generic import TemplateView
from .views import Index,SignUp,UserDashboard,AdminDashboard,logout,showAdminData,deleteuser,activeUser,deactiveUser,UserDetailEdit,uploadImage
# from .views import Index,UserDashboard,SignUp,AdminDashboard
app_name='management'
urlpatterns = [
# path('',homepage,name="index"),
path('',Index.as_view(), name='index'),
path('signup',SignUp.as_view(),name="signup"),
path('userdashboard',UserDashboard.as_view(),name="userDashboard"),
path('admindashboard',AdminDashboard.as_view(),name="adminDashboard"),
path('admindashboard/showuserdata/',showAdminData.as_view(),name='showAdminData'),
path('admindashboard/showuserdata/deleteuser/<userId>',deleteuser,name='deleteuser'),
path('admindashboard/showuserdata/activeUser/<userId>', activeUser, name='activeUser'),
path('admindashboard/showuserdata/deactiveUser/<userId>', deactiveUser, name='deactiveUser'),
path('uploadimage/',uploadImage,name="uploadImage"),
path('editUserDetail/',UserDetailEdit.as_view(),name='userEditDetail'),
path('logout',logout,name='logout')
]
| 1.84375 | 2 |
sickbeard/lib/hachoir_parser/container/riff.py | Branlala/docker-sickbeardfr | 0 | 5130 | <reponame>Branlala/docker-sickbeardfr
# -*- coding: UTF-8 -*-
"""
RIFF parser, able to parse:
* AVI video container
* WAV audio container
* CDA file
Documents:
- libavformat source code from ffmpeg library
http://ffmpeg.mplayerhq.hu/
- Video for Windows Programmer's Guide
http://www.opennet.ru/docs/formats/avi.txt
- What is an animated cursor?
http://www.gdgsoft.com/anituner/help/aniformat.htm
Authors:
* <NAME>
* <NAME>
* <NAME>
Changelog:
* 2007-03-30: support ACON (animated icons)
* 2006-08-08: merge AVI, WAV and CDA parsers into RIFF parser
* 2006-08-03: creation of CDA parser by <NAME>
* 2005-06-21: creation of WAV parser by <NAME>
* 2005-06-08: creation of AVI parser by <NAME> and <NAME>
Thanks to:
* <NAME> (wojtekka AT logonet.com.pl) for its CDA file
format information
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
UInt8, UInt16, UInt32, Enum,
Bit, NullBits, NullBytes,
RawBytes, String, PaddingBytes,
SubFile)
from lib.hachoir_core.tools import alignValue, humanDuration
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.text_handler import filesizeHandler, textHandler
from lib.hachoir_parser.video.fourcc import audio_codec_name, video_fourcc_name
from lib.hachoir_parser.image.ico import IcoFile
from datetime import timedelta
def parseText(self):
yield String(self, "text", self["size"].value,
strip=" \0", truncate="\0",
charset="ISO-8859-1")
def parseRawFormat(self, size):
yield RawBytes(self, "raw_format", size)
def parseVideoFormat(self, size):
yield UInt32(self, "video_size", "Video format: Size")
yield UInt32(self, "width", "Video format: Width")
yield UInt32(self, "height", "Video format: Height")
yield UInt16(self, "panes", "Video format: Panes")
yield UInt16(self, "depth", "Video format: Depth")
yield UInt32(self, "tag1", "Video format: Tag1")
yield UInt32(self, "img_size", "Video format: Image size")
yield UInt32(self, "xpels_meter", "Video format: XPelsPerMeter")
yield UInt32(self, "ypels_meter", "Video format: YPelsPerMeter")
yield UInt32(self, "clr_used", "Video format: ClrUsed")
yield UInt32(self, "clr_important", "Video format: ClrImportant")
def parseAudioFormat(self, size):
yield Enum(UInt16(self, "codec", "Audio format: Codec id"), audio_codec_name)
yield UInt16(self, "channel", "Audio format: Channels")
yield UInt32(self, "sample_rate", "Audio format: Sample rate")
yield UInt32(self, "bit_rate", "Audio format: Bit rate")
yield UInt16(self, "block_align", "Audio format: Block align")
if size >= 16:
yield UInt16(self, "bits_per_sample", "Audio format: Bits per sample")
if size >= 18:
yield UInt16(self, "ext_size", "Audio format: Size of extra information")
if size >= 28: # and self["a_channel"].value > 2
yield UInt16(self, "reserved", "Audio format: ")
yield UInt32(self, "channel_mask", "Audio format: channels placement bitmask")
yield UInt32(self, "subformat", "Audio format: Subformat id")
def parseAVIStreamFormat(self):
size = self["size"].value
strtype = self["../stream_hdr/stream_type"].value
TYPE_HANDLER = {
"vids": (parseVideoFormat, 40),
"auds": (parseAudioFormat, 16)
}
handler = parseRawFormat
if strtype in TYPE_HANDLER:
info = TYPE_HANDLER[strtype]
if info[1] <= size:
handler = info[0]
for field in handler(self, size):
yield field
def parseAVIStreamHeader(self):
if self["size"].value != 56:
raise ParserError("Invalid stream header size")
yield String(self, "stream_type", 4, "Stream type four character code", charset="ASCII")
field = String(self, "fourcc", 4, "Stream four character code", strip=" \0", charset="ASCII")
if self["stream_type"].value == "vids":
yield Enum(field, video_fourcc_name, lambda text: text.upper())
else:
yield field
yield UInt32(self, "flags", "Stream flags")
yield UInt16(self, "priority", "Stream priority")
yield String(self, "language", 2, "Stream language", charset="ASCII", strip="\0")
yield UInt32(self, "init_frames", "InitialFrames")
yield UInt32(self, "scale", "Time scale")
yield UInt32(self, "rate", "Divide by scale to give frame rate")
yield UInt32(self, "start", "Stream start time (unit: rate/scale)")
yield UInt32(self, "length", "Stream length (unit: rate/scale)")
yield UInt32(self, "buf_size", "Suggested buffer size")
yield UInt32(self, "quality", "Stream quality")
yield UInt32(self, "sample_size", "Size of samples")
yield UInt16(self, "left", "Destination rectangle (left)")
yield UInt16(self, "top", "Destination rectangle (top)")
yield UInt16(self, "right", "Destination rectangle (right)")
yield UInt16(self, "bottom", "Destination rectangle (bottom)")
class RedBook(FieldSet):
"""
RedBook offset parser, used in CD audio (.cda) file
"""
def createFields(self):
yield UInt8(self, "frame")
yield UInt8(self, "second")
yield UInt8(self, "minute")
yield PaddingBytes(self, "notused", 1)
def formatSerialNumber(field):
"""
Format an disc serial number.
Eg. 0x00085C48 => "0008-5C48"
"""
sn = field.value
return "%04X-%04X" % (sn >> 16, sn & 0xFFFF)
def parseCDDA(self):
"""
HSG address format: number of 1/75 second
HSG offset = (minute*60 + second)*75 + frame + 150 (from RB offset)
HSG length = (minute*60 + second)*75 + frame (from RB length)
"""
yield UInt16(self, "cda_version", "CD file version (currently 1)")
yield UInt16(self, "track_no", "Number of track")
yield textHandler(UInt32(self, "disc_serial", "Disc serial number"),
formatSerialNumber)
yield UInt32(self, "hsg_offset", "Track offset (HSG format)")
yield UInt32(self, "hsg_length", "Track length (HSG format)")
yield RedBook(self, "rb_offset", "Track offset (Red-book format)")
yield RedBook(self, "rb_length", "Track length (Red-book format)")
def parseWAVFormat(self):
size = self["size"].value
if size not in (16, 18):
self.warning("Format with size of %s bytes is not supported!" % size)
yield Enum(UInt16(self, "codec", "Audio codec"), audio_codec_name)
yield UInt16(self, "nb_channel", "Number of audio channel")
yield UInt32(self, "sample_per_sec", "Sample per second")
yield UInt32(self, "byte_per_sec", "Average byte per second")
yield UInt16(self, "block_align", "Block align")
yield UInt16(self, "bit_per_sample", "Bits per sample")
def parseWAVFact(self):
yield UInt32(self, "nb_sample", "Number of samples in audio stream")
def parseAviHeader(self):
yield UInt32(self, "microsec_per_frame", "Microsecond per frame")
yield UInt32(self, "max_byte_per_sec", "Maximum byte per second")
yield NullBytes(self, "reserved", 4)
# Flags
yield NullBits(self, "reserved[]", 4)
yield Bit(self, "has_index")
yield Bit(self, "must_use_index")
yield NullBits(self, "reserved[]", 2)
yield Bit(self, "is_interleaved")
yield NullBits(self, "reserved[]", 2)
yield Bit(self, "trust_cktype")
yield NullBits(self, "reserved[]", 4)
yield Bit(self, "was_capture_file")
yield Bit(self, "is_copyrighted")
yield NullBits(self, "reserved[]", 14)
yield UInt32(self, "total_frame", "Total number of frames in the video")
yield UInt32(self, "init_frame", "Initial frame (used in interleaved video)")
yield UInt32(self, "nb_stream", "Number of streams")
yield UInt32(self, "sug_buf_size", "Suggested buffer size")
yield UInt32(self, "width", "Width in pixel")
yield UInt32(self, "height", "Height in pixel")
yield UInt32(self, "scale")
yield UInt32(self, "rate")
yield UInt32(self, "start")
yield UInt32(self, "length")
def parseODML(self):
yield UInt32(self, "total_frame", "Real number of frame of OpenDML video")
padding = self["size"].value - 4
if 0 < padding:
yield NullBytes(self, "padding[]", padding)
class AVIIndexEntry(FieldSet):
size = 16*8
def createFields(self):
yield String(self, "tag", 4, "Tag", charset="ASCII")
yield UInt32(self, "flags")
yield UInt32(self, "start", "Offset from start of movie data")
yield UInt32(self, "length")
def parseIndex(self):
while not self.eof:
yield AVIIndexEntry(self, "index[]")
class Chunk(FieldSet):
TAG_INFO = {
        # This dictionary is edited by RiffFile.validate()
"LIST": ("list[]", None, "Sub-field list"),
"JUNK": ("junk[]", None, "Junk (padding)"),
# Metadata
"INAM": ("title", parseText, "Document title"),
"IART": ("artist", parseText, "Artist"),
"ICMT": ("comment", parseText, "Comment"),
"ICOP": ("copyright", parseText, "Copyright"),
"IENG": ("author", parseText, "Author"),
"ICRD": ("creation_date", parseText, "Creation date"),
"ISFT": ("producer", parseText, "Producer"),
"IDIT": ("datetime", parseText, "Date time"),
        # TODO: see below
# "strn": Stream description
# TWOCC code, movie/field[]/tag.value[2:4]:
# "db": "Uncompressed video frame",
# "dc": "Compressed video frame",
# "wb": "Audio data",
# "pc": "Palette change"
}
subtag_info = {
"INFO": ("info", "File informations"),
"hdrl": ("headers", "Headers"),
"strl": ("stream[]", "Stream header list"),
"movi": ("movie", "Movie stream"),
"odml": ("odml", "ODML"),
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = (8 + alignValue(self["size"].value, 2)) * 8
tag = self["tag"].value
if tag in self.TAG_INFO:
self.tag_info = self.TAG_INFO[tag]
if tag == "LIST":
subtag = self["subtag"].value
if subtag in self.subtag_info:
info = self.subtag_info[subtag]
self.tag_info = (info[0], None, info[1])
self._name = self.tag_info[0]
self._description = self.tag_info[2]
else:
self.tag_info = ("field[]", None, None)
def createFields(self):
yield String(self, "tag", 4, "Tag", charset="ASCII")
yield filesizeHandler(UInt32(self, "size", "Size"))
if not self["size"].value:
return
if self["tag"].value == "LIST":
yield String(self, "subtag", 4, "Sub-tag", charset="ASCII")
handler = self.tag_info[1]
while 8 < (self.size - self.current_size)/8:
field = self.__class__(self, "field[]")
yield field
if (field.size/8) % 2 != 0:
yield UInt8(self, "padding[]", "Padding")
else:
handler = self.tag_info[1]
if handler:
for field in handler(self):
yield field
else:
yield RawBytes(self, "raw_content", self["size"].value)
padding = self.seekBit(self._size)
if padding:
yield padding
def createDescription(self):
tag = self["tag"].display
return u"Chunk (tag %s)" % tag
class ChunkAVI(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
"strh": ("stream_hdr", parseAVIStreamHeader, "Stream header"),
"strf": ("stream_fmt", parseAVIStreamFormat, "Stream format"),
"avih": ("avi_hdr", parseAviHeader, "AVI header"),
"idx1": ("index", parseIndex, "Stream index"),
"dmlh": ("odml_hdr", parseODML, "ODML header"),
})
class ChunkCDDA(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
'fmt ': ("cdda", parseCDDA, "CD audio informations"),
})
class ChunkWAVE(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
'fmt ': ("format", parseWAVFormat, "Audio format"),
'fact': ("nb_sample", parseWAVFact, "Number of samples"),
'data': ("audio_data", None, "Audio stream data"),
})
def parseAnimationHeader(self):
yield UInt32(self, "hdr_size", "Size of header (36 bytes)")
if self["hdr_size"].value != 36:
self.warning("Animation header with unknown size (%s)" % self["size"].value)
yield UInt32(self, "nb_frame", "Number of unique Icons in this cursor")
yield UInt32(self, "nb_step", "Number of Blits before the animation cycles")
yield UInt32(self, "cx")
yield UInt32(self, "cy")
yield UInt32(self, "bit_count")
yield UInt32(self, "planes")
yield UInt32(self, "jiffie_rate", "Default Jiffies (1/60th of a second) if rate chunk not present")
yield Bit(self, "is_icon")
yield NullBits(self, "padding", 31)
def parseAnimationSequence(self):
while not self.eof:
yield UInt32(self, "icon[]")
def formatJiffie(field):
sec = float(field.value) / 60
return humanDuration(timedelta(seconds=sec))
def parseAnimationRate(self):
while not self.eof:
yield textHandler(UInt32(self, "rate[]"), formatJiffie)
def parseIcon(self):
yield SubFile(self, "icon_file", self["size"].value, parser_class=IcoFile)
class ChunkACON(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
'anih': ("anim_hdr", parseAnimationHeader, "Animation header"),
'seq ': ("anim_seq", parseAnimationSequence, "Animation sequence"),
        'rate': ("anim_rate", parseAnimationRate, "Animation rate"),
'icon': ("icon[]", parseIcon, "Icon"),
})
class RiffFile(Parser):
PARSER_TAGS = {
"id": "riff",
"category": "container",
"file_ext": ("avi", "cda", "wav", "ani"),
"min_size": 16*8,
"mime": (u"video/x-msvideo", u"audio/x-wav", u"audio/x-cda"),
# FIXME: Use regex "RIFF.{4}(WAVE|CDDA|AVI )"
"magic": (
("AVI LIST", 8*8),
("WAVEfmt ", 8*8),
("CDDAfmt ", 8*8),
("ACONanih", 8*8),
),
"description": "Microsoft RIFF container"
}
VALID_TYPES = {
"WAVE": (ChunkWAVE, u"audio/x-wav", u"Microsoft WAVE audio", ".wav"),
"CDDA": (ChunkCDDA, u"audio/x-cda", u"Microsoft Windows audio CD file (cda)", ".cda"),
"AVI ": (ChunkAVI, u"video/x-msvideo", u"Microsoft AVI video", ".avi"),
"ACON": (ChunkACON, u"image/x-ani", u"Microsoft Windows animated cursor", ".ani"),
}
endian = LITTLE_ENDIAN
def validate(self):
if self.stream.readBytes(0, 4) != "RIFF":
return "Wrong signature"
if self["type"].value not in self.VALID_TYPES:
return "Unknown RIFF content type"
return True
def createFields(self):
yield String(self, "signature", 4, "AVI header (RIFF)", charset="ASCII")
yield filesizeHandler(UInt32(self, "filesize", "File size"))
yield String(self, "type", 4, "Content type (\"AVI \", \"WAVE\", ...)", charset="ASCII")
# Choose chunk type depending on file type
try:
chunk_cls = self.VALID_TYPES[self["type"].value][0]
except KeyError:
chunk_cls = Chunk
# Parse all chunks up to filesize
while self.current_size < self["filesize"].value*8+8:
yield chunk_cls(self, "chunk[]")
if not self.eof:
yield RawBytes(self, "padding[]", (self.size-self.current_size)/8)
def createMimeType(self):
try:
return self.VALID_TYPES[self["type"].value][1]
except KeyError:
return None
def createDescription(self):
tag = self["type"].value
if tag == "AVI ":
desc = u"Microsoft AVI video"
if "headers/avi_hdr" in self:
header = self["headers/avi_hdr"]
desc += ": %ux%u pixels" % (header["width"].value, header["height"].value)
microsec = header["microsec_per_frame"].value
if microsec:
desc += ", %.1f fps" % (1000000.0 / microsec)
if "total_frame" in header and header["total_frame"].value:
delta = timedelta(seconds=float(header["total_frame"].value) * microsec)
desc += ", " + humanDuration(delta)
return desc
else:
try:
return self.VALID_TYPES[tag][2]
except KeyError:
return u"Microsoft RIFF container"
def createContentSize(self):
size = (self["filesize"].value + 8) * 8
return min(size, self.stream.size)
def createFilenameSuffix(self):
try:
return self.VALID_TYPES[self["type"].value][3]
except KeyError:
return ".riff"
| 2.03125 | 2 |
Utils.py | MartinEngen/NaiveBayesianClassifier | 0 | 5131 | <filename>Utils.py
import os
import re
def get_subfolder_paths(folder_relative_path: str) -> list:
"""
Gets all subfolders of a given path
:param folder_relative_path: Relative path of folder to find subfolders of
:return: list of relative paths to any subfolders
"""
return [f.path for f in os.scandir(folder_relative_path) if f.is_dir()]
def get_group_name(group_path: str) -> str:
return group_path.split("\\")[-1]
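# Note: the backslash split above assumes Windows-style relative paths.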
def replace_unwanted_characters(line: str) -> str:
return re.sub(
r'([^\s\w]|_)+',
u' ',
line.replace('\n', ' ').replace('\t', ' '),
flags=re.UNICODE
)
def clean_document(document_file) -> list:
document = document_file.read().lower().split("\n\n")
cleaned_lines = list(map(replace_unwanted_characters, document[1:]))
    list_of_lines = map(lambda x: x.split(" "), cleaned_lines)
    flattened_list_of_lines = [val for sublist in list_of_lines for val in sublist]
    return [word for word in flattened_list_of_lines if word != '']
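# Illustrative usage (the corpus layout is an assumption: a header block, a blank line, then the
# body text; path_to_document is a placeholder):
#   with open(path_to_document) as document_file:
#       words = clean_document(document_file)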
| 3.28125 | 3 |
tools/ldbc_benchmark/neo4j/load_scripts/time_index.py | carlboudreau007/ecosys | 245 | 5132 | from datetime import datetime
with open('/home/neo4j/neo4j-community-3.5.1/logs/debug.log', 'r') as log:
begin = []
end = []
for line in log:
if 'Index population started' in line:
begin.append(line[:23])
elif 'Index creation finished' in line:
end.append(line[:23])
if len(begin) == 0 or len(begin) > 9:
print("Something went wrong. Please check debug.log")
elif len(begin) != len(end):
print("{}/{} Done. Please come back later.".format(len(end), len(begin)))
else:
elapsed_time = 0
    for i in range(len(begin)):
begin_tmp = datetime.strptime(begin[i], '%Y-%m-%d %H:%M:%S.%f')
end_tmp = datetime.strptime(end[i],'%Y-%m-%d %H:%M:%S.%f')
elapsed_time += (end_tmp-begin_tmp).total_seconds()
print("Done in {} s".format(elapsed_time))
| 2.671875 | 3 |
zf-setup.py | Ziki2001/new-school-sdk | 0 | 5133 | <filename>zf-setup.py<gh_stars>0
# -*- coding: utf-8 -*-
'''
:file: setup.py
:author: -Farmer
:url: https://blog.farmer233.top
:date: 2021/09/20 11:11:54
'''
from os import path
from setuptools import setup, find_packages
basedir = path.abspath(path.dirname(__file__))
with open(path.join(basedir, "README.md"), encoding='utf-8') as f:
long_description = f.read()
setup(
name="zf-school-sdk",
author="farmer.chillax",
version="1.3.2",
license='MIT',
author_email="<EMAIL>",
description="zf School SDK for Python",
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/Farmer-chong/new-school-sdk',
packages=find_packages(),
# package_data={},
package_data={"school_sdk": ['check_code/model.pkl']},
include_package_data=True,
platforms='any',
zip_safe=False,
install_requires=[
'requests',
'pyquery',
'bs4',
'Pillow',
'fake-headers',
'torch',
'torchvision',
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.8',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
# python zf-setup.py bdist_wheel sdist
# twine upload dist/* | 1.578125 | 2 |
RunIt/airt/poker_cards.py | antx-code/funcode | 3 | 5134 | <gh_stars>1-10
# Square (diamonds) => sq => RGB blue
# Plum (clubs) => pl => RGB green
# Spade (spades) => sp => RGB black
# Heart (hearts) => he => RGB red
init_poker = {
'local': {
'head': [-1, -1, -1],
'mid': [-1, -1, -1, -1, -1],
'tail': [-1, -1, -1, -1, -1],
'drop': [-1, -1, -1, -1],
'hand': [-1, -1, -1]
},
'player1': {
'head': [-1, -1, -1],
'mid': [-1, -1, -1, -1, -1],
'tail': [-1, -1, -1, -1, -1],
'drop': [-1, -1, -1, -1],
'hand': [-1, -1, -1]
},
'player2': {
'head': [-1, -1, -1],
'mid': [-1, -1, -1, -1, -1],
'tail': [-1, -1, -1, -1, -1],
'drop': [-1, -1, -1, -1],
'hand': [-1, -1, -1]
}
}
# Square
Blue = {
'2': 0,
'3': 1,
'4': 2,
'5': 3,
'6': 4,
'7': 5,
'8': 6,
'9': 7,
'10': 8,
'J': 9,
'Q': 10,
'K': 11,
'A': 12
}
# Plum
Green = {
'2': 13,
'3': 14,
'4': 15,
'5': 16,
'6': 17,
'7': 18,
'8': 19,
'9': 20,
'10': 21,
'J': 22,
'Q': 23,
'K': 24,
'A': 25
}
# Heart
Red = {
'2': 26,
'3': 27,
'4': 28,
'5': 29,
'6': 30,
'7': 31,
'8': 32,
'9': 33,
'10': 34,
'J': 35,
'Q': 36,
'K': 37,
'A': 38
}
# Spade
Black = {
'2': 39,
'3': 40,
'4': 41,
'5': 42,
'6': 43,
'7': 44,
'8': 45,
'9': 46,
'10': 47,
'J': 48,
'Q': 49,
'K': 50,
'A': 51
}
POKER_SCOPE = [
'2',
'3',
'4',
'5',
'6',
'7',
'8',
'9',
'10',
'J',
'Q',
'K',
'A'
]
| 1.9375 | 2 |
main.py | reflective21/iportfolio | 0 | 5135 | name = "<NAME>"
print(name) | 1.484375 | 1 |
tcex/services/api_service.py | kdeltared/tcex | 0 | 5136 | <filename>tcex/services/api_service.py
"""TcEx Framework API Service module."""
# standard library
import json
import sys
import threading
import traceback
from io import BytesIO
from typing import Any
from .common_service import CommonService
class ApiService(CommonService):
"""TcEx Framework API Service module."""
def __init__(self, tcex: object):
"""Initialize the Class properties.
Args:
tcex: Instance of TcEx.
"""
super().__init__(tcex)
# properties
self._metrics = {'Errors': 0, 'Requests': 0, 'Responses': 0}
# config callbacks
self.api_event_callback = None
@property
def command_map(self) -> dict:
"""Return the command map for the current Service type."""
command_map = super().command_map
command_map.update({'runservice': self.process_run_service_command})
return command_map
def format_query_string(self, params: dict) -> str:
"""Convert name/value array to a query string.
Args:
params: The query params for the request.
Returns:
str: The query params reformatted as a string.
"""
query_string = []
try:
for q in params:
query_string.append(f'''{q.get('name')}={q.get('value')}''')
except AttributeError as e:
self.log.error(
                f'feature=api-service, event=bad-params-provided, params={params}, error="""{e}"""'
)
self.log.trace(traceback.format_exc())
return '&'.join(query_string)
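    # Example (illustrative): params [{'name': 'q', 'value': 'abc'}, {'name': 'limit', 'value': 5}]
    # are joined into the query string 'q=abc&limit=5'.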
def format_request_headers(self, headers: dict) -> dict:
"""Convert name/value array to a headers dict.
Args:
headers: The dict of key/value header data.
Returns:
dict: The restructured header data.
"""
headers_ = {}
try:
for h in headers:
# TODO: either support tuple or csv list of values
# headers_.setdefault(h.get('name').lower(), []).append(h.get('value'))
headers_.setdefault(h.get('name').lower(), str(h.get('value')))
except AttributeError as e:
self.log.error(
f'feature=api-service, event=bad-headers-provided, '
                f'headers={headers}, error="""{e}"""'
)
self.log.trace(traceback.format_exc())
return headers_
def format_response_headers(self, headers: dict) -> dict:
"""Convert name/value array to a query string.
Args:
headers: The dict header data to be converted to key/value pairs.
Returns:
dict: The restructured header data.
"""
headers_ = []
try:
for h in headers:
headers_.append({'name': h[0], 'value': h[1]})
except AttributeError as e:
self.log.error(
f'feature=api-service, event=bad-headers-provided, '
f'headers={headers}, error="""{e})"""'
)
self.log.trace(traceback.format_exc())
return headers_
def process_run_service_response(self, *args, **kwargs) -> None:
"""Handle service event responses.
('200 OK', [('content-type', 'application/json'), ('content-length', '103')])
"""
self.log.info('feature=api-service, event=response-received, status=waiting-for-body')
kwargs.get('event').wait(30) # wait for thread event - (set on body write)
self.log.trace(f'feature=api-service, event=response, args={args}')
try:
status_code, status = args[0].split(' ', 1)
response = {
'bodyVariable': 'response.body',
'command': 'Acknowledged',
'headers': self.format_response_headers(args[1]),
'requestKey': kwargs.get('request_key'), # pylint: disable=cell-var-from-loop
'status': status,
'statusCode': status_code,
'type': 'RunService',
}
self.log.info('feature=api-service, event=response-sent')
self.message_broker.publish(json.dumps(response), self.args.tc_svc_client_topic)
self.increment_metric('Responses')
except Exception as e:
self.log.error(
f'feature=api-service, event=failed-creating-response-body, error="""{e}"""'
)
self.log.trace(traceback.format_exc())
self.increment_metric('Errors')
def process_run_service_command(self, message: dict) -> None:
"""Process the RunService command.
.. code-block:: python
:linenos:
:lineno-start: 1
{
"command": "RunService",
"apiToken": "abc123",
"bodyVariable": "request.body",
"headers": [ { key/value pairs } ],
"method": "GET",
"queryParams": [ { key/value pairs } ],
"requestKey": "123abc",
"userConfig": [{
"name": "tlpExportSetting",
"value": "TLP:RED"
}],
}
Args:
message: The message payload from the server topic.
"""
# register config apiToken (before any logging)
self.token.register_token(
self.thread_name, message.get('apiToken'), message.get('expireSeconds')
)
self.log.info(f'feature=api-service, event=runservice-command, message="{message}"')
# thread event used to block response until body is written
event = threading.Event()
# process message
request_key: str = message.get('requestKey')
body = None
try:
# read body from redis
body_variable: str = message.pop('bodyVariable', None)
if body_variable is not None:
body: Any = self.key_value_store.read(request_key, body_variable)
if body is not None:
# for API service the data in Redis is not b64 encoded
body = BytesIO(body)
except Exception as e:
self.log.error(f'feature=api-service, event=failed-reading-body, error="""{e}"""')
self.log.trace(traceback.format_exc())
headers: dict = self.format_request_headers(message.pop('headers'))
method: str = message.pop('method')
params: dict = message.pop('queryParams')
path: str = message.pop('path')
try:
environ = {
'wsgi.errors': sys.stderr,
'wsgi.input': body,
'wsgi.multithread': True,
'wsgi.multiprocess': False,
'wsgi.run_once': True,
'wsgi.url_scheme': 'https',
'wsgi.version': (1, 0),
'PATH_INFO': path,
'QUERY_STRING': self.format_query_string(params),
'REMOTE_ADDR': message.get('remoteAddress', ''),
# 'REMOTE_HOST': message.get('remoteAddress', ''),
'REQUEST_METHOD': method.upper(),
'SCRIPT_NAME': '/',
'SERVER_NAME': '',
'SERVER_PORT': '',
'SERVER_PROTOCOL': 'HTTP/1.1',
}
# Add user config for TAXII or other service that supports the data type
environ['user_config'] = message.get('userConfig', [])
# add headers
if headers.get('content-type') is not None:
environ['CONTENT_TYPE'] = headers.pop('content-type')
# add content length
if headers.get('content-length') is not None:
environ['CONTENT_LENGTH'] = headers.pop('content-length')
for header, value in headers.items():
environ[f'HTTP_{header}'.upper()] = value
# make values from message available in env in camel
# case (e.g., falcon -> req.env.get('request_url))
for key, value in message.items():
if key not in environ and self.tcex.utils.camel_to_snake(key) not in environ:
environ[self.tcex.utils.camel_to_snake(key)] = value
self.log.trace(f'feature=api-service, environ={environ}')
self.increment_metric('Requests')
except Exception as e:
self.log.error(f'feature=api-service, event=failed-building-environ, error="""{e}"""')
self.log.trace(traceback.format_exc())
self.increment_metric('Errors')
return # stop processing
def response_handler(*args, **kwargs): # pylint: disable=unused-argument
"""Handle WSGI Response"""
kwargs['event'] = event # add event to kwargs for blocking
kwargs['request_key'] = request_key
self.service_thread(
name='response-handler',
target=self.process_run_service_response,
args=args,
kwargs=kwargs,
)
if callable(self.api_event_callback):
try:
body_data: Any = self.api_event_callback( # pylint: disable=not-callable
environ, response_handler
)
# process body
body = ''
if hasattr(body_data, 'read'):
body = body_data.read()
elif isinstance(body_data, list):
for bd in body_data:
if hasattr(bd, 'read'):
body += bd.read()
elif isinstance(bd, bytes):
body += bd.decode()
elif isinstance(bd, list):
for b in bd:
self.log.error(f'unhandled type - {type(b)}')
else:
self.log.error(f'unhandled type - {type(body)}')
self.log.error(f'unhandled type dir - {dir(body)}')
# write body to Redis
self.key_value_store.create(request_key, 'response.body', body)
# set thread event to True to trigger response
self.log.info('feature=api-service, event=response-body-written')
event.set()
except Exception as e:
self.log.error(
f'feature=api-service, event=api-event-callback-failed, error="""{e}""".'
)
self.log.trace(traceback.format_exc())
self.increment_metric('Errors')
# unregister config apiToken
self.token.unregister_token(self.thread_name)
| 2.296875 | 2 |
mmpose/core/optimizer/builder.py | vsatyakumar/mmpose | 1 | 5137 | <reponame>vsatyakumar/mmpose
from mmcv.runner import build_optimizer
def build_optimizers(model, cfgs):
"""Build multiple optimizers from configs.
If `cfgs` contains several dicts for optimizers, then a dict for each
constructed optimizers will be returned.
If `cfgs` only contains one optimizer config, the constructed optimizer
itself will be returned.
For example,
1) Multiple optimizer configs:
.. code-block:: python
optimizer_cfg = dict(
model1=dict(type='SGD', lr=lr),
model2=dict(type='SGD', lr=lr))
The return dict is
``dict('model1': torch.optim.Optimizer, 'model2': torch.optim.Optimizer)``
2) Single optimizer config:
.. code-block:: python
optimizer_cfg = dict(type='SGD', lr=lr)
The return is ``torch.optim.Optimizer``.
Args:
model (:obj:`nn.Module`): The model with parameters to be optimized.
cfgs (dict): The config dict of the optimizer.
Returns:
dict[:obj:`torch.optim.Optimizer`] | :obj:`torch.optim.Optimizer`:
The initialized optimizers.
"""
optimizers = {}
if hasattr(model, 'module'):
model = model.module
# determine whether 'cfgs' has several dicts for optimizers
if all(isinstance(v, dict) for v in cfgs.values()):
for key, cfg in cfgs.items():
cfg_ = cfg.copy()
module = getattr(model, key)
optimizers[key] = build_optimizer(module, cfg_)
return optimizers
else:
return build_optimizer(model, cfgs)
| 2.9375 | 3 |
register/views.py | angel-vazquez25/My-Backlog-Handler | 3 | 5138 | <reponame>angel-vazquez25/My-Backlog-Handler
import datetime
from django.contrib.auth import logout
from django.shortcuts import render, redirect
from .forms import RegisterForm
from django.http import HttpResponse
from django.contrib.auth.forms import AuthenticationForm
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.http import HttpResponseRedirect
from django.contrib import messages
# Create your views here.
def register(response):
if response.user.is_authenticated:
return redirect("homepage")
else:
if response.method == "POST":
form = RegisterForm(response.POST)
if form.is_valid():
new_user = form.save()
# messages.info(response, "Thanks for registering. You are now logged in.")
new_user = authenticate(username=form.cleaned_data['username'],
                                        password=form.cleaned_data['<PASSWORD>'],
)
login(response, new_user)
return redirect("homepage")
else:
form = RegisterForm()
return render(response, "register/register.html", {"form": form})
| 2.15625 | 2 |
forum/migrations/0001_initial.py | Aerodlyn/mu | 1 | 5139 | <filename>forum/migrations/0001_initial.py
# Generated by Django 3.1.7 on 2021-03-26 01:27
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Community',
fields=[
('name', models.CharField(max_length=64, primary_key=True, serialize=False)),
('description', models.TextField()),
('private', models.BooleanField(default=False)),
('slug', models.SlugField()),
],
),
]
| 1.882813 | 2 |
custom_train.py | shirley-wu/text_to_table | 3 | 5140 | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
import collections
import logging
import math
import os
import sys
import numpy as np
import torch
from fairseq import (
checkpoint_utils,
distributed_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq import meters
from fairseq.checkpoint_utils import checkpoint_paths
from fairseq.data import iterators
from fairseq.file_io import PathManager
from fairseq.logging import metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from fairseq.trainer import Trainer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.train")
class Saver:
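    """Checkpoint helper that tracks the best validation metric and keeps the top args.keep_best_checkpoints checkpoints."""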
def __init__(self):
self.best = None
self.keep_best = []
def save_checkpoint(self, args, trainer, epoch_itr, val_loss):
# only one worker should attempt to create the required dir
if args.distributed_rank == 0:
os.makedirs(args.save_dir, exist_ok=True)
prev_best = val_loss if self.best is None else self.best
if val_loss is not None:
best_function = max if args.maximize_best_checkpoint_metric else min
self.best = best_function(val_loss, prev_best)
if args.no_save:
return
trainer.consolidate_optimizer()
if not trainer.is_data_parallel_master:
return
def is_better(a, b):
return a >= b if args.maximize_best_checkpoint_metric else a <= b
write_timer = meters.StopwatchMeter()
write_timer.start()
epoch = epoch_itr.epoch
end_of_epoch = epoch_itr.end_of_epoch()
updates = trainer.get_num_updates()
suffix = getattr(args, "checkpoint_suffix", "")
checkpoint_conds = collections.OrderedDict()
save_epoch_checkpoint = (
end_of_epoch
and not args.no_epoch_checkpoints
and epoch % args.save_interval == 0
)
checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = save_epoch_checkpoint
checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = (
not save_epoch_checkpoint
and args.save_interval_updates > 0
and updates % args.save_interval_updates == 0
)
checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and (
self.best is None
or is_better(val_loss, self.best)
)
checkpoint_conds[
"checkpoint_last{}.pt".format(suffix)
] = not args.no_last_checkpoints
extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss}
if self.best is not None:
extra_state.update({"best": self.best})
if args.keep_best_checkpoints > 0 and (len(self.keep_best) < args.keep_best_checkpoints or (
val_loss is not None and not is_better(self.keep_best[-1][0], val_loss))):
ckpt_name = "checkpoint{}{}.best_{:.4f}.pt".format(epoch, suffix, val_loss) if save_epoch_checkpoint \
else "checkpoint_{}_{}{}.best_{:.4f}.pt".format(epoch, updates, suffix, val_loss)
checkpoint_conds[ckpt_name] = True
self.keep_best.append((val_loss, ckpt_name))
self.keep_best = sorted(self.keep_best)
checkpoints = [
os.path.join(args.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond
]
if len(checkpoints) > 0:
trainer.save_checkpoint(checkpoints[0], extra_state)
for cp in checkpoints[1:]:
PathManager.copy(checkpoints[0], cp, overwrite=True)
write_timer.stop()
logger.info(
"saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format(
checkpoints[0], epoch, updates, val_loss, write_timer.sum
)
)
if not end_of_epoch and args.keep_interval_updates > 0:
# remove old checkpoints; checkpoints are sorted in descending order
checkpoints = checkpoint_paths(
args.save_dir, pattern=r"checkpoint_\d+_(\d+)\.pt"
)
for old_chk in checkpoints[args.keep_interval_updates:]:
if os.path.lexists(old_chk):
os.remove(old_chk)
if args.keep_last_epochs > 0:
# remove old epoch checkpoints; checkpoints are sorted in descending order
checkpoints = checkpoint_paths(args.save_dir, pattern=r"checkpoint(\d+)\.pt")
for old_chk in checkpoints[args.keep_last_epochs:]:
if os.path.lexists(old_chk):
os.remove(old_chk)
if len(self.keep_best) > args.keep_best_checkpoints:
for _, x in self.keep_best[args.keep_best_checkpoints:]:
x = os.path.join(args.save_dir, x)
if os.path.lexists(x):
os.remove(x)
self.keep_best = self.keep_best[:args.keep_best_checkpoints]
def main(args):
saver = Saver()
utils.import_user_module(args)
assert (
args.max_tokens is not None or args.batch_size is not None
), "Must specify batch size either with --max-tokens or --batch-size"
metrics.reset()
np.random.seed(args.seed)
utils.set_torch_seed(args.seed)
if distributed_utils.is_master(args):
checkpoint_utils.verify_checkpoint_directory(args.save_dir)
# Print args
logger.info(args)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(args)
# Load valid dataset (we load training data below, based on the latest checkpoint)
for valid_sub_split in args.valid_subset.split(","):
task.load_dataset(valid_sub_split, combine=False, epoch=1)
# Build model and criterion
model = task.build_model(args)
criterion = task.build_criterion(args)
logger.info(model)
logger.info("task: {} ({})".format(args.task, task.__class__.__name__))
logger.info("model: {} ({})".format(args.arch, model.__class__.__name__))
logger.info(
"criterion: {} ({})".format(args.criterion, criterion.__class__.__name__)
)
logger.info(
"num. model params: {} (num. trained: {})".format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
)
)
# (optionally) Configure quantization
if args.quantization_config_path is not None:
quantizer = quantization_utils.Quantizer(
config_path=args.quantization_config_path,
max_epoch=args.max_epoch,
max_update=args.max_update,
)
else:
quantizer = None
# Build trainer
if args.model_parallel_size == 1:
trainer = Trainer(args, task, model, criterion, quantizer)
else:
trainer = MegatronTrainer(args, task, model, criterion)
logger.info(
"training on {} devices (GPUs/TPUs)".format(args.distributed_world_size)
)
logger.info(
"max tokens per GPU = {} and max sentences per GPU = {}".format(
args.max_tokens, args.batch_size
)
)
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(
args,
trainer,
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
# Train until the learning rate gets too small
max_epoch = args.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
while lr > args.min_lr and epoch_itr.next_epoch_idx <= max_epoch:
# train for one epoch
valid_losses, should_stop = train(args, trainer, task, epoch_itr, saver)
if should_stop:
break
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
# sharded data: get train iterator for next epoch
load_dataset=task.has_sharded_data("train"),
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
train_meter.stop()
logger.info("done training in {:.1f} seconds".format(train_meter.sum))
def should_stop_early(args, valid_loss):
# skip check if no validation was done in the current epoch
if valid_loss is None:
return False
if args.patience <= 0:
return False
def is_better(a, b):
return a > b if args.maximize_best_checkpoint_metric else a < b
prev_best = getattr(should_stop_early, "best", None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= args.patience:
logger.info(
"early stop since valid performance hasn't improved for last {} runs".format(
args.patience
)
)
return True
else:
return False
@metrics.aggregate("train")
def train(args, trainer, task, epoch_itr, saver):
"""Train the model for one epoch and return validation losses."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=args.fix_batches_to_gpus,
shuffle=(epoch_itr.next_epoch_idx > args.curriculum),
)
update_freq = (
args.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(args.update_freq)
else args.update_freq[-1]
)
itr = iterators.GroupedIterator(itr, update_freq)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
trainer.begin_epoch(epoch_itr.epoch)
valid_losses = [None]
valid_subsets = args.valid_subset.split(",")
should_stop = False
num_updates = trainer.get_num_updates()
for i, samples in enumerate(progress):
with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
"train_step-%d" % i
):
log_output = trainer.train_step(samples)
if log_output is not None: # not OOM, overflow, ...
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % args.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
progress.log(stats, tag="train_inner", step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters("train_inner")
end_of_epoch = not itr.has_next()
valid_losses, should_stop = validate_and_save(
args, trainer, task, epoch_itr, valid_subsets, end_of_epoch, saver
)
if should_stop:
break
# log end-of-epoch stats
logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
stats = get_training_stats(metrics.get_smoothed_values("train"))
progress.print(stats, tag="train", step=num_updates)
# reset epoch-level meters
metrics.reset_meters("train")
return valid_losses, should_stop
def validate_and_save(args, trainer, task, epoch_itr, valid_subsets, end_of_epoch, saver):
num_updates = trainer.get_num_updates()
max_update = args.max_update or math.inf
do_save = (
(end_of_epoch and epoch_itr.epoch % args.save_interval == 0)
or num_updates >= max_update
or (
args.save_interval_updates > 0
and num_updates > 0
and num_updates % args.save_interval_updates == 0
and num_updates >= args.validate_after_updates
)
)
do_validate = (
(not end_of_epoch and do_save) # validate during mid-epoch saves
or (end_of_epoch and epoch_itr.epoch % args.validate_interval == 0)
or num_updates >= max_update
or (
args.validate_interval_updates > 0
and num_updates > 0
and num_updates % args.validate_interval_updates == 0
)
) and not args.disable_validation
# Validate
valid_losses = [None]
if do_validate:
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets, saver)
# Stopping conditions
should_stop = (
should_stop_early(args, valid_losses[0])
or num_updates >= max_update
or (
args.stop_time_hours > 0
and trainer.cumulative_training_time() / (60 * 60) > args.stop_time_hours
)
)
# Save checkpoint
if do_save or should_stop:
logger.info("begin save checkpoint")
saver.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
return valid_losses, should_stop
def get_training_stats(stats):
stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0)
return stats
def validate(args, trainer, task, epoch_itr, subsets, saver):
"""Evaluate the model on the validation set(s) and return the losses."""
if args.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(args.fixed_validation_seed)
trainer.begin_valid_epoch(epoch_itr.epoch)
valid_losses = []
for subset in subsets:
logger.info('begin validation on "{}" subset'.format(subset))
# Initialize data iterator
itr = trainer.get_valid_iterator(subset).next_epoch_itr(shuffle=False)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
for sample in progress:
trainer.valid_step(sample)
# log validation stats
stats = get_valid_stats(args, trainer, agg.get_smoothed_values(), saver)
progress.print(stats, tag=subset, step=trainer.get_num_updates())
valid_losses.append(stats[args.best_checkpoint_metric])
return valid_losses
def get_valid_stats(args, trainer, stats, saver):
stats["num_updates"] = trainer.get_num_updates()
if hasattr(saver.save_checkpoint, "best"):
key = "best_{0}".format(args.best_checkpoint_metric)
best_function = max if args.maximize_best_checkpoint_metric else min
stats[key] = best_function(
saver.save_checkpoint.best, stats[args.best_checkpoint_metric]
)
return stats
def cli_main(modify_parser=None):
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
if args.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(args, main)
else:
distributed_utils.call_main(args, main)
if __name__ == "__main__":
cli_main()
| 1.835938 | 2 |
src/ucar/unidata/idv/resources/python/griddiag.py | JessicaWiedemeier/IDV | 1 | 5141 | """
This is the doc for the Grid Diagnostics module. These functions
are based on the grid diagnostics from the GEneral Meteorological
PAcKage (GEMPAK). Note that the names are case sensitive and some
are named slightly different from GEMPAK functions to avoid conflicts
with Jython built-ins (e.g. str).
<P>
In the following operators, scalar operands are named S<sub>n</sub> and
vector operands are named V<sub>n</sub>. Lowercase u and v refer to the
grid relative components of a vector.
"""
def GRAVITY():
""" Gravity constant """
return DerivedGridFactory.GRAVITY;
# Math functions
def atn2(S1,S2,WA=0):
""" Wrapper for atan2 built-in
<div class=jython>
ATN2 (S1, S2) = ATAN ( S1 / S2 )<br>
WA = use WEIGHTED_AVERAGE (default NEAREST_NEIGHBOR)
</div>
"""
return GridMath.atan2(S1,S2,WA)
def add(S1,S2,WA=0):
""" Addition
<div class=jython>
ADD (S1, S2) = S1 + S2<br>
WA = use WEIGHTED_AVERAGE (default NEAREST_NEIGHBOR)
</div>
"""
return GridMath.add(S1,S2,WA)
def mul(S1,S2,WA=0):
""" Multiply
<div class=jython>
MUL (S1, S2) = S1 * S2<br>
WA = use WEIGHTED_AVERAGE (default NEAREST_NEIGHBOR)
</div>
"""
return GridMath.multiply(S1,S2,WA)
def quo(S1,S2,WA=0):
""" Divide
<div class=jython>
QUO (S1, S2) = S1 / S2<br>
WA = use WEIGHTED_AVERAGE (default NEAREST_NEIGHBOR)
</div>
"""
return GridMath.divide(S1,S2,WA)
def sub(S1,S2,WA=0):
""" Subtract
<div class=jython>
SUB (S1, S2) = S1 - S2<br>
WA = use WEIGHTED_AVERAGE (default NEAREST_NEIGHBOR)
</div>
"""
return GridMath.subtract(S1,S2,WA)
# Scalar quantities
def adv(S,V):
""" Horizontal Advection, negative by convention
<div class=jython>
ADV ( S, V ) = - ( u * DDX (S) + v * DDY (S) )
</div>
"""
return -add(mul(ur(V),ddx(S)),mul(vr(V),ddy(S)))
def avg(S1,S2):
""" Average of 2 scalars
<div class=jython>
AVG (S1, S2) = ( S1 + S2 ) / 2
</div>
"""
return add(S1,S2)/2
def avor(V):
""" Absolute Vorticity
<div class=jython>
AVOR ( V ) = VOR ( V ) + CORL(V)
</div>
"""
relv = vor(V)
return add(relv,corl(relv))
def circs(S, D=2):
"""
<div class=jython>
    Apply a circular aperture smoothing to the grid points. The weighting
    function is the circular aperture diffraction function. D is
the radius of influence in grid increments, increasing D increases
the smoothing. (default D=2)
</div>
"""
return GridUtil.smooth(S, "CIRC", int(D))
def corl(S):
""" Coriolis Parameter for all points in a grid
<div class=jython>
CORL = TWO_OMEGA*sin(latr)
</div>
"""
return DerivedGridFactory.createCoriolisGrid(S)
def cress(S, D=2):
"""
<div class=jython>
Apply a Cressman smoothing to the grid points. The smoothed value
is given by a weighted average of surrounding grid points. D is
the radius of influence in grid increments,
increasing D increases the smoothing. (default D=2)
</div>
"""
return GridUtil.smooth(S, "CRES", int(D))
def cros(V1,V2):
""" Vector cross product magnitude
<div class=jython>
CROS ( V1, V2 ) = u1 * v2 - u2 * v1
</div>
"""
return sub(mul(ur(V1),vr(V2)),mul(ur(V2),vr(V1)))
def ddx(S):
""" Take the derivative with respect to the domain's X coordinate
"""
return GridMath.ddx(S);
def ddy(S):
""" Take the derivative with respect to the domain's Y coordinate
"""
return GridMath.ddy(S);
def defr(V):
""" Total deformation
<div class=jython>
DEF ( V ) = ( STRD (V) ** 2 + SHR (V) ** 2 ) ** .5
</div>
"""
return mag(strd(V),shr(V))
def div(V):
""" Horizontal Divergence
<div class=jython>
DIV ( V ) = DDX ( u ) + DDY ( v )
</div>
"""
return add(ddx(ur(V)),ddy(vr(V)))
def dirn(V):
""" North relative direction of a vector
<div class=jython>
DIRN ( V ) = DIRR ( un(v), vn(v) )
</div>
"""
return dirr(DerivedGridFactory.createTrueFlowVector(V))
def dirr(V):
""" Grid relative direction of a vector
"""
return DerivedGridFactory.createVectorDirection(V)
def dot(V1,V2):
""" Vector dot product
<div class=jython>
DOT ( V1, V2 ) = u1 * u2 + v1 * v2
</div>
"""
product = mul(V1,V2)
return add(ur(product),vr(product))
def gwfs(S, N=6):
"""
<div class=jython>
Horizontal smoothing using normally distributed weights
with theoretical response of 1/e for N * delta-x wave.
Increasing N increases the smoothing. (default N=6)
</div>
"""
return GridUtil.smooth(S, "GWFS", int(N))
def jcbn(S1,S2):
""" Jacobian Determinant
<div class=jython>
JCBN ( S1, S2 ) = DDX (S1) * DDY (S2) - DDY (S1) * DDX (S2)
</div>
"""
return sub(mul(ddx(S1),ddy(S2)),mul(ddy(S1),ddx(S2)))
def latr(S):
""" Latitudue all points in a grid
"""
return DerivedGridFactory.createLatitudeGrid(S)
def lap(S):
""" Laplacian operator
<div class=jython>
LAP ( S ) = DIV ( GRAD (S) )
</div>
"""
grads = grad(S)
return div(grads)
def lav(S,level1=None,level2=None, unit=None):
""" Layer Average of a multi layer grid
<div class=jython>
LAV ( S ) = ( S (level1) + S (level2) ) / 2.
</div>
"""
if level1 == None:
return GridMath.applyFunctionOverLevels(S, GridMath.FUNC_AVERAGE)
else:
return layerAverage(S,level1,level2, unit)
def ldf(S,level1,level2, unit=None):
""" Layer Difference
<div class=jython>
LDF ( S ) = S (level1) - S (level2)
</div>
"""
return layerDiff(S,level1,level2, unit);
def mag(*a):
""" Magnitude of a vector
"""
if (len(a) == 1):
return DerivedGridFactory.createVectorMagnitude(a[0]);
else:
return DerivedGridFactory.createVectorMagnitude(a[0],a[1]);
def mixr(temp,rh):
""" Mixing Ratio from Temperature, RH (requires pressure domain)
"""
return DerivedGridFactory.createMixingRatio(temp,rh)
def relh(temp,mixr):
""" Create Relative Humidity from Temperature, mixing ratio (requires pressure domain)
"""
return DerivedGridFactory.createRelativeHumidity(temp,mixr)
def pvor(S,V):
""" Potetial Vorticity (usually from theta and wind)
"""
return DerivedGridFactory.createPotentialVorticity(S,V)
def rects(S, D=2):
"""
<div class=jython>
    Apply a rectangular aperture smoothing to the grid points. The weighting
    function is the product of the rectangular aperture diffraction function
in the x and y directions. D is the radius of influence in grid
increments, increasing D increases the smoothing. (default D=2)
</div>
"""
return GridUtil.smooth(S, "RECT", int(D))
def savg(S):
""" Average over whole grid
<div class=jython>
SAVG ( S ) = average of all non-missing grid point values
</div>
"""
return GridMath.applyFunctionToLevels(S, GridMath.FUNC_AVERAGE)
def savs(S):
""" Average over grid subset
<div class=jython>
SAVS ( S ) = average of all non-missing grid point values in the subset
area
</div>
"""
return savg(S)
def sdiv(S,V):
""" Horizontal Flux Divergence
<div class=jython>
SDIV ( S, V ) = S * DIV ( V ) + DOT ( V, GRAD ( S ) )
</div>
"""
return add(mul(S,(div(V))) , dot(V,grad(S)))
def shr(V):
""" Shear Deformation
<div class=jython>
SHR ( V ) = DDX ( v ) + DDY ( u )
</div>
"""
return add(ddx(vr(V)),ddy(ur(V)))
def sm5s(S):
""" Smooth a scalar grid using a 5-point smoother
<div class=jython>
SM5S ( S ) = .5 * S (i,j) + .125 * ( S (i+1,j) + S (i,j+1) +
S (i-1,j) + S (i,j-1) )
</div>
"""
return GridUtil.smooth(S, "SM5S")
def sm9s(S):
""" Smooth a scalar grid using a 9-point smoother
<div class=jython>
SM9S ( S ) = .25 * S (i,j) + .125 * ( S (i+1,j) + S (i,j+1) +
S (i-1,j) + S (i,j-1) )
+ .0625 * ( S (i+1,j+1) +
S (i+1,j-1) +
S (i-1,j+1) +
S (i-1,j-1) )
</div>
"""
return GridUtil.smooth(S, "SM9S")
def strd(V):
""" Stretching Deformation
<div class=jython>
STRD ( V ) = DDX ( u ) - DDY ( v )
</div>
"""
return sub(ddx(ur(V)),ddy(vr(V)))
def thta(temp):
""" Potential Temperature from Temperature (requires pressure domain)
"""
return DerivedGridFactory.createPotentialTemperature(temp)
def thte(temp,rh):
""" Equivalent Potential Temperature from Temperature and Relative
humidity (requires pressure domain)
"""
return DerivedGridFactory.createEquivalentPotentialTemperature(temp,rh)
def un(V):
""" North relative u component
"""
return ur(DerivedGridFactory.createTrueFlowVector(V))
def ur(V):
""" Grid relative u component
"""
return DerivedGridFactory.getUComponent(V)
def vn(V):
""" North relative v component
"""
return vr(DerivedGridFactory.createTrueFlowVector(V))
def vor(V):
""" Relative Vorticity
<div class=jython>
VOR ( V ) = DDX ( v ) - DDY ( u )
</div>
"""
return sub(ddx(vr(V)),ddy(ur(V)))
def vr(V):
""" Grid relative v component
"""
return DerivedGridFactory.getVComponent(V)
def xav(S):
""" Average along a grid row
<div class=jython>
XAV (S) = ( S (X1) + S (X2) + ... + S (KXD) ) / KNT
KXD = number of points in row
KNT = number of non-missing points in row
XAV for a row is stored at every point in that row.
</div>
"""
return GridMath.applyFunctionToAxis(S, GridMath.FUNC_AVERAGE, GridMath.AXIS_X)
def xsum(S):
""" Sum along a grid row
<div class=jython>
XSUM (S) = ( S (X1) + S (X2) + ... + S (KXD) )
KXD = number of points in row
XSUM for a row is stored at every point in that row.
</div>
"""
return GridMath.applyFunctionToAxis(S, GridMath.FUNC_SUM, GridMath.AXIS_X)
def yav(S):
""" Average along a grid column
<div class=jython>
YAV (S) = ( S (Y1) + S (Y2) + ... + S (KYD) ) / KNT
KYD = number of points in column
KNT = number of non-missing points in column
</div>
"""
return GridMath.applyFunctionToAxis(S, GridMath.FUNC_AVERAGE, GridMath.AXIS_Y)
def ysum(S):
""" Sum along a grid column
<div class=jython>
YSUM (S) = ( S (Y1) + S (Y2) + ... + S (KYD) )
    KYD = number of points in column
YSUM for a column is stored at every point in that column.
</div>
"""
return GridMath.applyFunctionToAxis(S, GridMath.FUNC_SUM, GridMath.AXIS_Y)
def zav(S):
""" Average across the levels of a grid at all points
<div class=jython>
ZAV (S) = ( S (Z1) + S (Z2) + ... + S (KZD) ) / KNT
KZD = number of levels
KNT = number of non-missing points in column
</div>
"""
return GridMath.applyFunctionToLevels(S, GridMath.FUNC_AVERAGE)
def zsum(S):
""" Sum across the levels of a grid at all points
<div class=jython>
ZSUM (S) = ( S (Z1) + S (Z2) + ... + S (KZD) )
KZD = number of levels
ZSUM for a vertical column is stored at every point
</div>
"""
return GridMath.applyFunctionOverLevels(S, GridMath.FUNC_SUM)
def wshr(V, Z, top, bottom):
""" Magnitude of the vertical wind shear in a layer
<div class=jython>
WSHR ( V ) = MAG [ VLDF (V) ] / LDF (Z)
</div>
"""
dv = mag(vldf(V,top,bottom))
dz = ldf(Z,top,bottom)
return quo(dv,dz)
# Vector output
def age(obs,geo):
""" Ageostrophic wind
<div class=jython>
AGE ( S ) = [ u (OBS) - u (GEO(S)), v (OBS) - v (GEO(S)) ]
</div>
"""
return sub(obs,geo)
def circv(S, D=2):
"""
<div class=jython>
    Apply a circular aperture smoothing to the grid points. The weighting
    function is the circular aperture diffraction function. D is
the radius of influence in grid increments, increasing D increases
the smoothing. (default D=2)
</div>
"""
return GridUtil.smooth(S, "CIRC", int(D))
def cresv(S, D=2):
"""
<div class=jython>
Apply a Cressman smoothing to the grid points. The smoothed value
is given by a weighted average of surrounding grid points. D is
the radius of influence in grid increments,
increasing D increases the smoothing. (default D=2)
</div>
"""
return GridUtil.smooth(S, "CRES", int(D))
def dvdx(V):
""" Partial x derivative of a vector
<div class=jython>
DVDX ( V ) = [ DDX (u), DDX (v) ]
</div>
"""
return vecr(ddx(ur(V)), ddx(vr(V)))
def dvdy(V):
""" Partial x derivative of a vector
<div class=jython>
DVDY ( V ) = [ DDY (u), DDY (v) ]
</div>
"""
return vecr(ddy(ur(V)), ddy(vr(V)))
def frnt(S,V):
""" Frontogenesis function from theta and the wind
<div class=jython>
FRNT ( THTA, V ) = 1/2 * MAG ( GRAD (THTA) ) *
( DEF * COS (2 * BETA) - DIV ) <p>
Where: BETA = ASIN ( (-DDX (THTA) * COS (PSI) <br>
- DDY (THTA) * SIN (PSI))/ <br>
MAG ( GRAD (THTA) ) ) <br>
PSI = 1/2 ATAN2 ( SHR / STR ) <br>
</div>
"""
shear = shr(V)
strch = strd(V)
psi = .5*atn2(shear,strch)
dxt = ddx(S)
dyt = ddy(S)
cosd = cos(psi)
sind = sin(psi)
gradt = grad(S)
mgradt = mag(gradt)
a = -cosd*dxt-sind*dyt
beta = asin(a/mgradt)
frnto = .5*mgradt*(defr(V)*cos(2*beta)-div(V))
return frnto
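# Illustrative use of the frontogenesis function (grid names are hypothetical
# placeholders for IDV grids on a pressure domain):
#   theta = thta(temperatureGrid)
#   fgen  = frnt(theta, windVector)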
def geo(z):
""" geostrophic wind from height
<div class=jython>
GEO ( S ) = [ - DDY (S) * const / CORL, DDX (S) * const / CORL ]
</div>
"""
return DerivedGridFactory.createGeostrophicWindVector(z)
def grad(S):
""" Gradient of a scalar
<div class=jython>
GRAD ( S ) = [ DDX ( S ), DDY ( S ) ]
</div>
"""
return vecr(ddx(S),ddy(S))
def gwfv(V, N=6):
"""
<div class=jython>
Horizontal smoothing using normally distributed weights
with theoretical response of 1/e for N * delta-x wave.
Increasing N increases the smoothing. (default N=6)
</div>
"""
return gwfs(V, N)
def inad(V1,V2):
""" Inertial advective wind
<div class=jython>
INAD ( V1, V2 ) = [ DOT ( V1, GRAD (u2) ),
DOT ( V1, GRAD (v2) ) ]
</div>
"""
return vecr(dot(V1,grad(ur(V2))),dot(V1,grad(vr(V2))))
def qvec(S,V):
""" Q-vector at a level ( K / m / s )
<div class=jython>
QVEC ( S, V ) = [ - ( DOT ( DVDX (V), GRAD (S) ) ),
- ( DOT ( DVDY (V), GRAD (S) ) ) ]
    where S can be any thermal parameter, usually THTA.
</div>
"""
grads = grad(S)
qvecu = newName(-dot(dvdx(V),grads),"qvecu")
qvecv = newName(-dot(dvdy(V),grads),"qvecv")
return vecr(qvecu,qvecv)
def qvcl(THTA,V):
""" Q-vector ( K / m / s )
<div class=jython>
QVCL ( THTA, V ) = ( 1/( D (THTA) / DP ) ) *
[ ( DOT ( DVDX (V), GRAD (THTA) ) ),
( DOT ( DVDY (V), GRAD (THTA) ) ) ]
</div>
"""
dtdp = GridMath.partial(THTA,2)
gradt = grad(THTA)
qvecudp = newName(quo(dot(dvdx(V),gradt),dtdp),"qvecudp")
qvecvdp = newName(quo(dot(dvdy(V),gradt),dtdp),"qvecvdp")
return vecr(qvecudp,qvecvdp)
def rectv(S, D=2):
"""
<div class=jython>
    Apply a rectangular aperture smoothing to the grid points. The weighting
    function is the product of the rectangular aperture diffraction function
in the x and y directions. D is the radius of influence in grid
increments, increasing D increases the smoothing. (default D=2)
</div>
"""
return GridUtil.smooth(S, "RECT", int(D))
def sm5v(V):
""" Smooth a scalar grid using a 5-point smoother (see sm5s)
"""
return sm5s(V)
def sm9v(V):
""" Smooth a scalar grid using a 9-point smoother (see sm9s)
"""
return sm9s(V)
def thrm(S, level1, level2, unit=None):
""" Thermal wind
<div class=jython>
THRM ( S ) = [ u (GEO(S)) (level1) - u (GEO(S)) (level2),
v (GEO(S)) (level1) - v (GEO(S)) (level2) ]
</div>
"""
return vldf(geo(S),level1,level2, unit)
def vadd(V1,V2):
""" add the components of 2 vectors
<div class=jython>
VADD (V1, V2) = [ u1+u2, v1+v2 ]
</div>
"""
return add(V1,V2)
def vecn(S1,S2):
""" Make a true north vector from two components
<div class=jython>
VECN ( S1, S2 ) = [ S1, S2 ]
</div>
"""
return makeTrueVector(S1,S2)
def vecr(S1,S2):
""" Make a vector from two components
<div class=jython>
VECR ( S1, S2 ) = [ S1, S2 ]
</div>
"""
return makeVector(S1,S2)
def vlav(V,level1,level2, unit=None):
""" calculate the vector layer average
<div class=jython>
VLDF(V) = [(u(level1) - u(level2))/2,
(v(level1) - v(level2))/2]
</div>
"""
return layerAverage(V, level1, level2, unit)
def vldf(V,level1,level2, unit=None):
""" calculate the vector layer difference
<div class=jython>
VLDF(V) = [u(level1) - u(level2),
v(level1) - v(level2)]
</div>
"""
return layerDiff(V,level1,level2, unit)
def vmul(V1,V2):
""" Multiply the components of 2 vectors
<div class=jython>
VMUL (V1, V2) = [ u1*u2, v1*v2 ]
</div>
"""
return mul(V1,V2)
def vquo(V1,V2):
""" Divide the components of 2 vectors
<div class=jython>
VQUO (V1, V2) = [ u1/u2, v1/v2 ]
</div>
"""
return quo(V1,V2)
def vsub(V1,V2):
""" subtract the components of 2 vectors
<div class=jython>
VSUB (V1, V2) = [ u1-u2, v1-v2 ]
</div>
"""
return sub(V1,V2)
def LPIndex(u, v, z, t, top, bottom, unit):
""" calculate the wind shear between discrete layers
<div class=jython>
LP = 7.268DUDZ + 0.718DTDN + 0.318DUDN - 2.52
</div>
"""
Z = windShear(u, v, z, top, bottom, unit)*7.268
uwind = getSliceAtLevel(u, top)
vwind = getSliceAtLevel(v, top)
temp = newUnit(getSliceAtLevel(t, top), "temperature", "celsius")
HT = sqrt(ddx(temp)*ddx(temp) + ddy(temp)*ddy(temp))*0.718
HU = (ddx(vwind) + ddy(uwind))*0.318
L = add(noUnit(Z), add(noUnit(HU), noUnit(HT)))
L = (L - 2.520)*(-0.59)
P= 1.0/(1.0 + GridMath.applyFunctionOverGridsExt(L,"exp"))
LP = setLevel(P ,top, unit)
return LP
def EllrodIndex(u, v, z, top, bottom, unit):
""" calculate the wind shear between discrete layers
<div class=jython>
EI = VWS X ( DEF + DIV)
</div>
"""
VWS = windShear(u, v, z, top, bottom, unit)*100.0
#
uwind = getSliceAtLevel(u, top)
vwind = getSliceAtLevel(v, top)
DIV = (ddx(uwind) + ddy(vwind))* (-1.0)
#
DSH = ddx(vwind) + ddy(uwind)
DST = ddx(uwind) - ddy(vwind)
DEF = sqrt(DSH * DSH + DST * DST)
EI = mul(noUnit(VWS), add(noUnit(DEF), noUnit(DIV)))
return setLevel(EI, top, unit)
| 3.140625 | 3 |
app.py | DevilBit/Twitter-Bot | 0 | 5142 | from selenium import webdriver #to get the browser
from selenium.webdriver.common.keys import Keys #to send key to browser
import getpass #to get password safely
import time #to pause the program
# a class to store all twitter related objects and functions
class twitter_bot:
def __init__(self, username, password):
self.username = username
self.password = password
self.bot = webdriver.Firefox()
#login function
def login(self):
bot = self.bot
bot.get('https://twitter.com/login')
#sleep to wait for the browser to get the website
time.sleep(3)
email = bot.find_element_by_class_name('js-username-field') #get the email field
password = bot.find_element_by_class_name('js-password-field') #get the password field
#clear the email and password field just in case of autofill
email.clear()
password.clear()
#fill in email field
email.send_keys(self.username)
time.sleep(2)
#fill in password field
        password.send_keys(self.password)
time.sleep(2)
#click the login button
bot.find_element_by_class_name("EdgeButtom--medium").click()
time.sleep(3)
def like_tweet(self, search):
bot = self.bot
#use keyword to search
bot.get('https://twitter.com/search?q=' + search + '&src=typd')
bot.implicitly_wait(3)
#get posts
for i in range(0, 30):
bot.execute_script('window.scrollTo(0, document.body.scrollHeight)')
time.sleep(10)
tweets = bot.find_elements_by_class_name('tweet')
links = [element.get_attribute('data-permalink-path') for element in tweets]
#like posts
for link in links:
bot.get('https://twitter.com/' + link)
try:
bot.find_element_by_class_name('HeartAnimation').click()
time.sleep(10)
except Exception as ex:
time.sleep(60)
if __name__ == '__main__':
username = input('Email: ')
    password = getpass.getpass('Password: ')
search = input('Please enter keyword: ')
user = twitter_bot(username, password)
user.login()
time.sleep(10)
user.like_tweet(search)
| 3.1875 | 3 |
visualization.py | shyhyawJou/GradCAM-pytorch | 0 | 5143 | import torch
import torch.nn as nn
from torch.nn import functional as F
from PIL import Image
import cv2 as cv
from matplotlib import cm
import numpy as np
class GradCAM:
"""
#### Args:
layer_name: module name (not child name), if None,
will use the last layer before average pooling
, default is None
"""
def __init__(self, model, device, layer_name=None, close_some_grad=True):
if layer_name is None:
layer_name = self.get_layer_name(model)
if layer_name is None:
raise ValueError(
"There is no global average pooling layer, plz specify 'layer_name'"
)
for n, m in model.named_children():
if close_some_grad:
m.requires_grad_(False)
for sub_n, sub_m in m.named_modules():
if '.'.join((n, sub_n)) == layer_name:
sub_m.register_forward_hook(self.forward_hook)
sub_m.register_full_backward_hook(self.backward_hook)
m.requires_grad_(True)
break
model = model.to(device)
self.model = model
self.device = device
self.feature_maps = {}
self.gradients = {}
def get_heatmap(self, img, img_tensor):
self.model.zero_grad()
img_tensor = img_tensor.to(self.device)
outputs = self.model(img_tensor)
_, pred_label = outputs.max(1)
# outputs shape = 1x2
outputs[0][pred_label].backward()
with torch.no_grad():
feature_maps = self.feature_maps["output"]
# "gradients" is a tuple with one item
grad_weights = self.gradients["output"][0]
h, w = grad_weights.size()[-2:]
grad_weights = grad_weights.sum((2,3), True) / (h * w)
cam = (grad_weights * feature_maps).sum(1)
F.relu(cam, True)
cam = cam / cam.max() * 255
cam = cam.to(dtype=torch.uint8, device="cpu")
cam = cam.numpy().transpose(1,2,0)
cam = cv.resize(cam, img.size[:2], interpolation=4)
cam = np.uint8(255 * cm.get_cmap("jet")(cam.squeeze()))
if not isinstance(img, np.ndarray):
img = np.asarray(img)
img_size = img.shape[:2][::-1] # w, h
overlay = np.uint8(0.6*img + 0.4 * cam[:,:,:3])
overlay = Image.fromarray(overlay)
if overlay.size != img_size:
overlay = overlay.resize(img_size, Image.BILINEAR)
return outputs.detach(), overlay
    def get_layer_name(self, model):
        # "tmp" always holds the dotted name of the previously visited submodule,
        # so when the first pooling layer is reached, layer_name ends up being the
        # module right before the global average pooling.
        layer_name = None
        tmp = None  # initialize so the loop cannot hit an unbound name if pooling comes first
        for n, m in model.named_children():
            for sub_n, sub_m in m.named_modules():
                if isinstance(sub_m, (nn.AdaptiveAvgPool2d, nn.AvgPool2d)):
                    layer_name = tmp
                tmp = '.'.join((n, sub_n))
        return layer_name
def forward_hook(self, module, x, y):
#self.feature_maps["input"] = x
self.feature_maps["output"] = y
def backward_hook(self, module, x, y):
#self.gradients["input"] = x
self.gradients["output"] = y
self.gradients["output"] = y
| 2.609375 | 3 |
frame_2D_alg/alternative versions/intra_blob_xy.py | Mechachleopteryx/CogAlg | 0 | 5144 | <filename>frame_2D_alg/alternative versions/intra_blob_xy.py<gh_stars>0
'''
2D version of 1st-level algorithm is a combination of frame_blobs, intra_blob, and comp_P: optional raster-to-vector conversion.
intra_blob recursively evaluates each blob for two forks of extended internal cross-comparison and sub-clustering:
der+: incremental derivation cross-comp in high-variation edge areas of +vg: positive deviation of gradient triggers comp_g,
rng+: incremental range cross-comp in low-variation flat areas of +v--vg: positive deviation of negated -vg triggers comp_r.
Each adds a layer of sub_blobs per blob.
Please see diagram: https://github.com/boris-kz/CogAlg/blob/master/frame_2D_alg/Illustrations/intra_blob_2_fork_scheme.png
Blob structure, for all layers of blob hierarchy:
root_dert__,
Dert = I, iDy, iDx, G, Dy, Dx, M, S (area), Ly (vertical dimension)
# I: input, (iDy, iDx): angle of input gradient, G: gradient, (Dy, Dx): vertical and lateral Ds, M: match
sign,
box, # y0, yn, x0, xn
dert__, # box of derts, each = i, idy, idx, g, dy, dx, m
stack_[ stack_params, Py_ [(P_params, dert_)]]: refs down blob formation tree, in vertical (horizontal) order
# next fork:
fcr, # flag comp rng, also clustering criterion in dert and Dert: g in der+ fork, i+m in rng+ fork?
fig, # flag input is gradient
rdn, # redundancy to higher layers
rng, # comp range
sub_layers # [sub_blobs ]: list of layers across sub_blob derivation tree
# deeper layers are nested, multiple forks: no single set of fork params?
'''
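# Hypothetical top-level call (frame_blobs supplies the blob; the argument values are
# illustrative defaults, not taken from this file):
#   spliced_layers = intra_blob(blob, rdn=1, rng=1, fig=0, fcr=0, render=False)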
from collections import deque, defaultdict
from class_cluster import ClusterStructure, NoneType
from class_bind import AdjBinder
from frame_blobs_yx import assign_adjacents
from intra_comp_g import comp_g, comp_r
from itertools import zip_longest
from class_stream import BlobStreamer
from utils import pairwise
import numpy as np
# from comp_P_draft import comp_P_blob
# filters, All *= rdn:
ave = 50 # fixed cost per dert, from average m, reflects blob definition cost, may be different for comp_a?
aveB = 50 # fixed cost per intra_blob comp and clustering
class CDeepP(ClusterStructure):
I = int
G = int
Dy = int
Dx = int
M = int
iDy = int
iDx = int
L = int
x0 = int
sign = NoneType
class CDeepStack(ClusterStructure):
I = int
G = int
Dy = int
Dx = int
M = int
iDy = int
iDx = int
S = int
Ly = int
y0 = int
Py_ = list
blob = object
down_connect_cnt = int
sign = NoneType
class CDeepBlob(ClusterStructure):
Dert = dict
box = list
stack_ = list
sign = NoneType
open_stacks = int
root_dert__ = object
dert__ = object
mask = object
adj_blobs = list
fopen = bool
margin = list
fcr = bool
fig = bool
rdn = float
rng = int
Ls = int # for visibility and next-fork rdn
sub_layers = list
# --------------------------------------------------------------------------------------------------------------
# functions, ALL WORK-IN-PROGRESS:
def intra_blob(blob, rdn, rng, fig, fcr, **kwargs): # recursive input rng+ | der+ cross-comp within blob
# fig: flag input is g | p, fcr: flag comp over rng+ | der+
if kwargs.get('render', None) is not None: # stop rendering sub-blobs when blob is too small
if blob.Dert['S'] < 100:
kwargs['render'] = False
spliced_layers = [] # to extend root_blob sub_layers
ext_dert__, ext_mask = extend_dert(blob)
if fcr:
dert__, mask = comp_r(ext_dert__, fig, fcr, ext_mask) # -> m sub_blobs
else:
dert__, mask = comp_g(ext_dert__, ext_mask) # -> g sub_blobs:
if dert__[0].shape[0] > 2 and dert__[0].shape[1] > 2 and False in mask: # min size in y and x, least one dert in dert__
sub_blobs = cluster_derts(dert__, mask, ave * rdn, fcr, fig, **kwargs)
# fork params:
blob.fcr = fcr
blob.fig = fig
blob.rdn = rdn
blob.rng = rng
blob.Ls = len(sub_blobs) # for visibility and next-fork rdn
blob.sub_layers = [sub_blobs] # 1st layer of sub_blobs
for sub_blob in sub_blobs: # evaluate for intra_blob comp_g | comp_r:
G = blob.Dert['G']; adj_G = blob.adj_blobs[2]
borrow = min(abs(G), abs(adj_G) / 2) # or adjacent M if negative sign?
if sub_blob.sign:
if sub_blob.Dert['M'] - borrow > aveB * rdn: # M - (intra_comp value lend to edge blob)
# comp_r fork:
blob.sub_layers += intra_blob(sub_blob, rdn + 1 + 1 / blob.Ls, rng * 2, fig=fig, fcr=1, **kwargs)
# else: comp_P_
elif sub_blob.Dert['G'] + borrow > aveB * rdn: # G + (intra_comp value borrow from flat blob)
# comp_g fork:
blob.sub_layers += intra_blob(sub_blob, rdn + 1 + 1 / blob.Ls, rng=rng, fig=1, fcr=0, **kwargs)
# else: comp_P_
spliced_layers = [spliced_layers + sub_layers for spliced_layers, sub_layers in
zip_longest(spliced_layers, blob.sub_layers, fillvalue=[])]
return spliced_layers
def cluster_derts(dert__, mask, Ave, fcr, fig, render=False): # similar to frame_to_blobs
if fcr: # comp_r output; form clustering criterion:
if fig:
crit__ = dert__[0] + dert__[6] - Ave # eval by i + m, accum in rng; dert__[:,:,0] if not transposed
else:
crit__ = Ave - dert__[3] # eval by -g, accum in rng
else: # comp_g output
crit__ = dert__[6] - Ave # comp_g output eval by m, or clustering is always by m?
root_dert__ = dert__ # derts after the comps operation, which is the root_dert__
dert__ = [*zip(*dert__)] # transpose dert__ into shape [y, params, x]
sub_blobs = [] # from form_blob:
stack_ = deque() # buffer of running vertical stacks of Ps
stack_binder = AdjBinder(CDeepStack)
if render:
streamer = BlobStreamer(CDeepBlob, crit__, mask)
for y, dert_ in enumerate(dert__): # in height, first and last row are discarded; print(f'Processing intra line {y}...')
# if False in mask[i]: # [y,x,params], there is at least one dert in line
P_binder = AdjBinder(CDeepP) # binder needs data about clusters of the same level
P_ = form_P_(zip(*dert_), crit__[y], mask[y], P_binder) # horizontal clustering, adds a row of Ps
if render:
render = streamer.update_blob_conversion(y, P_) # if return False, stop rendering
P_ = scan_P_(P_, stack_, root_dert__, sub_blobs, P_binder) # vertical clustering, adds up_connects per P and down_connect_cnt per stack
stack_ = form_stack_(P_, root_dert__, sub_blobs, y)
stack_binder.bind_from_lower(P_binder)
while stack_: # frame ends, last-line stacks are merged into their blobs:
form_blob(stack_.popleft(), root_dert__, sub_blobs)
blob_binder = AdjBinder(CDeepBlob)
blob_binder.bind_from_lower(stack_binder)
assign_adjacents(blob_binder) # add adj_blobs to each blob
# sub_blobs = find_adjacent(sub_blobs)
if render: # rendering mode after blob conversion
streamer.end_blob_conversion(y)
return sub_blobs
# clustering functions:
# -------------------------------------------------------------------------------------------------------------------
def form_P_(dert_, crit_, mask_, binder): # segment dert__ into P__, in horizontal ) vertical order
P_ = deque() # row of Ps
sign_ = crit_ > 0
x0 = 0
try:
while mask_[x0]: # skip until not masked
next(dert_)
x0 += 1
except IndexError:
return P_ # the whole line is masked, return an empty P
I, iDy, iDx, G, Dy, Dx, M, L = *next(dert_), 1 # initialize P params
_sign = sign_[x0]
_mask = mask_[x0] # mask bit per dert
for x, (i, idy, idx, g, dy, dx, m) in enumerate(dert_, start=x0+1): # loop left to right in each row of derts
mask = mask_[x]
if ~mask: # current dert is not masked
sign = sign_[x]
if ~_mask and sign != _sign: # prior dert is not masked and sign changed
# pack P
P = CDeepP(I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, L=L,x0=x0, sign=_sign)
P_.append(P)
# initialize P params:
I, iDy, iDx, G, Dy, Dx, M, L, x0 = 0, 0, 0, 0, 0, 0, 0, 0, x
elif _mask:
I, iDy, iDx, G, Dy, Dx, M, L, x0 = 0, 0, 0, 0, 0, 0, 0, 0, x
# current dert is masked
elif ~_mask: # prior dert is not masked
# pack P
P = CDeepP(I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, L=L, x0=x0, sign=_sign)
P_.append(P)
# initialize P params: (redundant)
# I, iDy, iDx, G, Dy, Dx, M, L, x0 = 0, 0, 0, 0, 0, 0, 0, 0, x + 1
if ~mask: # accumulate P params:
I += i
iDy += idy
iDx += idx
G += g
Dy += dy
Dx += dx
M += m
L += 1
_sign = sign # prior sign
_mask = mask
if ~_mask: # terminate and pack last P in a row if prior dert is unmasked
P = CDeepP(I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, L=L, x0=x0, sign=_sign)
P_.append(P)
for _P, P in pairwise(P_):
if _P.x0 + _P.L == P.x0: # check if Ps are adjacents
binder.bind(_P, P)
return P_
def scan_P_(P_, stack_, root_dert__, sub_blobs, binder): # merge P into higher-row stack of Ps with same sign and x_coord overlap
next_P_ = deque() # to recycle P + up_connect_ that finished scanning _P, will be converted into next_stack_
if P_ and stack_: # if both input row and higher row have any Ps / _Ps left
P = P_.popleft() # load left-most (lowest-x) input-row P
stack = stack_.popleft() # higher-row stacks
_P = stack.Py_[-1] # last element of each stack is higher-row P
up_connect_ = [] # list of same-sign x-overlapping _Ps per P
while True: # while both P_ and stack_ are not empty
x0 = P.x0 # first x in P
xn = x0 + P.L # first x beyond P
_x0 = _P.x0 # first x in _P
_xn = _x0 + _P.L # first x beyond _P
if stack.G > 0: # check for overlaps in 8 directions, else a blob may leak through its external blob
if _x0 - 1 < xn and x0 < _xn + 1: # x overlap between loaded P and _P
if P.sign == stack.sign: # sign match
stack.down_connect_cnt += 1
up_connect_.append(stack) # buffer P-connected higher-row stacks into P' up_connect_
else:
binder.bind(_P, P)
else: # -G, check for orthogonal overlaps only: 4 directions, edge blobs are more selective
if _x0 < xn and x0 < _xn: # x overlap between loaded P and _P
if P.sign == stack.sign: # sign match
stack.down_connect_cnt += 1
up_connect_.append(stack) # buffer P-connected higher-row stacks into P' up_connect_
else:
binder.bind(_P, P)
if (xn < _xn or # _P overlaps next P in P_
xn == _xn and stack.sign): # sign taken accounted
next_P_.append((P, up_connect_)) # recycle _P for the next run of scan_P_
up_connect_ = []
if P_:
P = P_.popleft() # load next P
else: # terminate loop
if stack.down_connect_cnt != 1: # terminate stack, merge it into up_connects' blobs
form_blob(stack, root_dert__, sub_blobs)
break
else: # no next-P overlap
if stack.down_connect_cnt != 1: # terminate stack, merge it into up_connects' blobs
form_blob(stack, root_dert__, sub_blobs)
if stack_: # load stack with next _P
stack = stack_.popleft()
_P = stack.Py_[-1]
else: # no stack left: terminate loop
next_P_.append((P, up_connect_))
break
while P_: # terminate Ps and stacks that continue at row's end
next_P_.append((P_.popleft(), [])) # no up_connect
while stack_:
form_blob(stack_.popleft(), root_dert__, sub_blobs) # down_connect_cnt always == 0
return next_P_ # each element is P + up_connect_ refs
def form_stack_(P_, root_dert__, sub_blobs, y): # Convert or merge every P into its stack of Ps, merge blobs
next_stack_ = deque() # converted to stack_ in the next run of scan_P_
while P_:
P, up_connect_ = P_.popleft()
I, G, Dy, Dx, M, iDy, iDx, L, x0, s = P.unpack()
xn = x0 + L # next-P x0
if not up_connect_:
# initialize new stack for each input-row P that has no connections in higher row:
blob = CDeepBlob(Dert=dict(I=0, G=0, Dy=0, Dx=0, M=0, iDy=0, iDx=0, S=0, Ly=0),
box=[y, x0, xn], stack_=[], sign=s, open_stacks=1)
new_stack = CDeepStack(I=I, G=G, Dy=0, Dx=Dx, M=M, iDy=iDy, iDx=iDx, S=L, Ly=1,
y0=y, Py_=[P], blob=blob, down_connect_cnt=0, sign=s)
new_stack.hid = blob.id
blob.stack_.append(new_stack)
else:
if len(up_connect_) == 1 and up_connect_[0].down_connect_cnt == 1:
# P has one up_connect and that up_connect has one down_connect=P: merge P into up_connect stack:
new_stack = up_connect_[0]
new_stack.accumulate(I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, S=L, Ly=1)
new_stack.Py_.append(P) # Py_: vertical buffer of Ps
new_stack.down_connect_cnt = 0 # reset down_connect_cnt
blob = new_stack.blob
else: # if > 1 up_connects, or 1 up_connect that has > 1 down_connect_cnt:
blob = up_connect_[0].blob
# initialize new_stack with up_connect blob:
new_stack = CDeepStack(I=I, G=G, Dy=0, Dx=Dx, M=M, iDy=iDy, iDx=iDx, S=L, Ly=1,
y0=y, Py_=[P], blob=blob, down_connect_cnt=0, sign=s)
new_stack.hid = blob.id
blob.stack_.append(new_stack)
if len(up_connect_) > 1: # merge blobs of all up_connects
if up_connect_[0].down_connect_cnt == 1: # up_connect is not terminated
form_blob(up_connect_[0], root_dert__, sub_blobs) # merge stack of 1st up_connect into its blob
for up_connect in up_connect_[1:len(up_connect_)]: # merge blobs of other up_connects into blob of 1st up_connect
if up_connect.down_connect_cnt == 1:
form_blob(up_connect, root_dert__, sub_blobs)
if not up_connect.blob is blob:
merged_blob = up_connect.blob
I, G, Dy, Dx, M, iDy, iDx, S, Ly = merged_blob.Dert.values()
accum_Dert(blob.Dert, I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, S=S, Ly=Ly)
blob.open_stacks += merged_blob.open_stacks
blob.box[0] = min(blob.box[0], merged_blob.box[0]) # extend box y0
blob.box[1] = min(blob.box[1], merged_blob.box[1]) # extend box x0
blob.box[2] = max(blob.box[2], merged_blob.box[2]) # extend box xn
for stack in merged_blob.stack_:
if not stack is up_connect:
stack.blob = blob # blobs in other up_connects are references to blob in the first up_connect.
stack.hid = blob.id
blob.stack_.append(stack) # buffer of merged root stacks.
up_connect.blob = blob
up_connect.hid = blob.id
blob.stack_.append(up_connect)
blob.open_stacks -= 1 # overlap with merged blob.
blob.box[1] = min(blob.box[1], x0) # extend box x0
blob.box[2] = max(blob.box[2], xn) # extend box xn
P.hid = new_stack.id # assign higher cluster id for P
next_stack_.append(new_stack)
return next_stack_
def form_blob(stack, root_dert__, sub_blobs): # increment blob with terminated stack, check for blob termination
I, G, Dy, Dx, M, iDy, iDx, S, Ly, y0, Py_, blob, down_connect_cnt, sign = stack.unpack()
accum_Dert(blob.Dert, I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, S=S, Ly=Ly)
# terminated stack is merged into continued or initialized blob (all connected stacks):
blob.open_stacks += down_connect_cnt - 1 # incomplete stack cnt + terminated stack down_connect_cnt - 1: stack itself
# open stacks contain Ps of a current row and may be extended with new x-overlapping Ps in next run of scan_P_
if blob.open_stacks == 0: # if number of incomplete stacks == 0
# blob is terminated and packed in blob root:
last_stack = stack
y0, x0, xn = blob.box
yn = last_stack.y0 + last_stack.Ly
mask = np.ones((yn - y0, xn - x0), dtype=bool) # mask box, then unmask Ps:
for stack in blob.stack_:
for y, P in enumerate(stack.Py_, start=stack.y0 - y0):
x_start = P.x0 - x0
x_stop = x_start + P.L
mask[y, x_start:x_stop] = False
fopen = 0 # flag: blob on frame boundary
if x0 == 0 or xn == root_dert__[0].shape[1] or y0 == 0 or yn == root_dert__[0].shape[0]:
fopen = 1
blob.root_dert__ = root_dert__
blob.box = (y0, yn, x0, xn)
blob.dert__ = [derts[y0:yn, x0:xn] for derts in root_dert__]
blob.mask = mask
blob.adj_blobs = [[], 0, 0]
blob.fopen = fopen
sub_blobs.append(blob)
def extend_dert(blob): # extend dert borders (+1 dert to boundaries)
y0, yn, x0, xn = blob.box # extend dert box:
rY, rX = blob.root_dert__[0].shape # higher dert size
# determine pad size
y0e = max(0, y0 - 1)
yne = min(rY, yn + 1)
x0e = max(0, x0 - 1)
xne = min(rX, xn + 1) # e is for extended
# take ext_dert__ from part of root_dert__
ext_dert__ = [derts[y0e:yne, x0e:xne] if derts is not None else None
for derts in blob.root_dert__]
# pad mask: top, btm, left, right. 1 or 0 at boundaries
mask = np.pad(blob.mask, ((y0 - y0e, yne - yn), (x0 - x0e, xne - xn)),
mode='constant', constant_values=True)
return ext_dert__, mask
def accum_Dert(Dert: dict, **params) -> None:
Dert.update({param: Dert[param] + value for param, value in params.items()}) | 2.1875 | 2 |
examples/pybullet/gym/pybullet_envs/minitaur/envs/env_randomizers/minitaur_terrain_randomizer.py | felipeek/bullet3 | 9,136 | 5145 | """Generates a random terrain at Minitaur gym environment reset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
parentdir = os.path.dirname(os.path.dirname(parentdir))
os.sys.path.insert(0, parentdir)
import itertools
import math
import enum
import numpy as np
from pybullet_envs.minitaur.envs import env_randomizer_base
_GRID_LENGTH = 15
_GRID_WIDTH = 10
_MAX_SAMPLE_SIZE = 30
_MIN_BLOCK_DISTANCE = 0.7
_MAX_BLOCK_LENGTH = _MIN_BLOCK_DISTANCE
_MIN_BLOCK_LENGTH = _MAX_BLOCK_LENGTH / 2
_MAX_BLOCK_HEIGHT = 0.05
_MIN_BLOCK_HEIGHT = _MAX_BLOCK_HEIGHT / 2
class PoissonDisc2D(object):
"""Generates 2D points using Poisson disk sampling method.
Implements the algorithm described in:
http://www.cs.ubc.ca/~rbridson/docs/bridson-siggraph07-poissondisk.pdf
Unlike the uniform sampling method that creates small clusters of points,
Poisson disk method enforces the minimum distance between points and is more
suitable for generating a spatial distribution of non-overlapping objects.
"""
def __init__(self, grid_length, grid_width, min_radius, max_sample_size):
"""Initializes the algorithm.
Args:
grid_length: The length of the bounding square in which points are
sampled.
grid_width: The width of the bounding square in which points are
sampled.
min_radius: The minimum distance between any pair of points.
      max_sample_size: The maximum number of sample points around an active site.
See details in the algorithm description.
"""
self._cell_length = min_radius / math.sqrt(2)
self._grid_length = grid_length
self._grid_width = grid_width
self._grid_size_x = int(grid_length / self._cell_length) + 1
self._grid_size_y = int(grid_width / self._cell_length) + 1
self._min_radius = min_radius
self._max_sample_size = max_sample_size
    # Flatten the 2D grid into a 1D array. The grid is used for fast nearest
# point searching.
self._grid = [None] * self._grid_size_x * self._grid_size_y
# Generate the first sample point and set it as an active site.
first_sample = np.array(np.random.random_sample(2)) * [grid_length, grid_width]
self._active_list = [first_sample]
# Also store the sample point in the grid.
self._grid[self._point_to_index_1d(first_sample)] = first_sample
def _point_to_index_1d(self, point):
"""Computes the index of a point in the grid array.
Args:
point: A 2D point described by its coordinates (x, y).
Returns:
The index of the point within the self._grid array.
"""
return self._index_2d_to_1d(self._point_to_index_2d(point))
def _point_to_index_2d(self, point):
"""Computes the 2D index (aka cell ID) of a point in the grid.
Args:
point: A 2D point (list) described by its coordinates (x, y).
Returns:
x_index: The x index of the cell the point belongs to.
y_index: The y index of the cell the point belongs to.
"""
x_index = int(point[0] / self._cell_length)
y_index = int(point[1] / self._cell_length)
return x_index, y_index
def _index_2d_to_1d(self, index2d):
"""Converts the 2D index to the 1D position in the grid array.
Args:
index2d: The 2D index of a point (aka the cell ID) in the grid.
Returns:
The 1D position of the cell within the self._grid array.
"""
return index2d[0] + index2d[1] * self._grid_size_x
def _is_in_grid(self, point):
"""Checks if the point is inside the grid boundary.
Args:
point: A 2D point (list) described by its coordinates (x, y).
Returns:
Whether the point is inside the grid.
"""
return (0 <= point[0] < self._grid_length) and (0 <= point[1] < self._grid_width)
def _is_in_range(self, index2d):
"""Checks if the cell ID is within the grid.
Args:
index2d: The 2D index of a point (aka the cell ID) in the grid.
Returns:
Whether the cell (2D index) is inside the grid.
"""
return (0 <= index2d[0] < self._grid_size_x) and (0 <= index2d[1] < self._grid_size_y)
def _is_close_to_existing_points(self, point):
"""Checks if the point is close to any already sampled (and stored) points.
Args:
point: A 2D point (list) described by its coordinates (x, y).
Returns:
True iff the distance of the point to any existing points is smaller than
the min_radius
"""
px, py = self._point_to_index_2d(point)
# Now we can check nearby cells for existing points
for neighbor_cell in itertools.product(xrange(px - 1, px + 2), xrange(py - 1, py + 2)):
if not self._is_in_range(neighbor_cell):
continue
maybe_a_point = self._grid[self._index_2d_to_1d(neighbor_cell)]
if maybe_a_point is not None and np.linalg.norm(maybe_a_point - point) < self._min_radius:
return True
return False
def sample(self):
"""Samples new points around some existing point.
    Removes the sampling base point and also stores the newly sampled points if
they are far enough from all existing points.
"""
active_point = self._active_list.pop()
for _ in xrange(self._max_sample_size):
# Generate random points near the current active_point between the radius
random_radius = np.random.uniform(self._min_radius, 2 * self._min_radius)
random_angle = np.random.uniform(0, 2 * math.pi)
# The sampled 2D points near the active point
sample = random_radius * np.array([np.cos(random_angle),
np.sin(random_angle)]) + active_point
if not self._is_in_grid(sample):
continue
if self._is_close_to_existing_points(sample):
continue
self._active_list.append(sample)
self._grid[self._point_to_index_1d(sample)] = sample
def generate(self):
"""Generates the Poisson disc distribution of 2D points.
Although the while loop looks scary, the algorithm is in fact O(N), where N
is the number of cells within the grid. When we sample around a base point
(in some base cell), new points will not be pushed into the base cell
because of the minimum distance constraint. Once the current base point is
removed, all future searches cannot start from within the same base cell.
Returns:
      All sampled points. The points are inside the square [0, grid_length] x [0,
grid_width]
"""
while self._active_list:
self.sample()
all_sites = []
for p in self._grid:
if p is not None:
all_sites.append(p)
return all_sites
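# Illustrative standalone use of the sampler with the module-level constants above
# (the number of returned points varies with the random seed):
#   sampler = PoissonDisc2D(_GRID_LENGTH, _GRID_WIDTH, _MIN_BLOCK_DISTANCE, _MAX_SAMPLE_SIZE)
#   block_centers = sampler.generate()   # list of 2D numpy points, pairwise >= 0.7 apart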
class TerrainType(enum.Enum):
"""The randomzied terrain types we can use in the gym env."""
RANDOM_BLOCKS = 1
TRIANGLE_MESH = 2
class MinitaurTerrainRandomizer(env_randomizer_base.EnvRandomizerBase):
"""Generates an uneven terrain in the gym env."""
def __init__(self,
terrain_type=TerrainType.TRIANGLE_MESH,
mesh_filename="robotics/reinforcement_learning/minitaur/envs/testdata/"
"triangle_mesh_terrain/terrain9735.obj",
mesh_scale=None):
"""Initializes the randomizer.
Args:
terrain_type: Whether to generate random blocks or load a triangle mesh.
mesh_filename: The mesh file to be used. The mesh will only be loaded if
terrain_type is set to TerrainType.TRIANGLE_MESH.
mesh_scale: the scaling factor for the triangles in the mesh file.
"""
self._terrain_type = terrain_type
self._mesh_filename = mesh_filename
self._mesh_scale = mesh_scale if mesh_scale else [1.0, 1.0, 0.3]
def randomize_env(self, env):
"""Generate a random terrain for the current env.
Args:
env: A minitaur gym environment.
"""
if self._terrain_type is TerrainType.TRIANGLE_MESH:
self._load_triangle_mesh(env)
if self._terrain_type is TerrainType.RANDOM_BLOCKS:
self._generate_convex_blocks(env)
def _load_triangle_mesh(self, env):
"""Represents the random terrain using a triangle mesh.
It is possible for Minitaur leg to stuck at the common edge of two triangle
pieces. To prevent this from happening, we recommend using hard contacts
(or high stiffness values) for Minitaur foot in sim.
Args:
env: A minitaur gym environment.
"""
env.pybullet_client.removeBody(env.ground_id)
terrain_collision_shape_id = env.pybullet_client.createCollisionShape(
shapeType=env.pybullet_client.GEOM_MESH,
fileName=self._mesh_filename,
flags=1,
meshScale=self._mesh_scale)
env.ground_id = env.pybullet_client.createMultiBody(
baseMass=0, baseCollisionShapeIndex=terrain_collision_shape_id, basePosition=[0, 0, 0])
def _generate_convex_blocks(self, env):
"""Adds random convex blocks to the flat ground.
    We use the Poisson disk algorithm to add some random blocks on the ground.
    The Poisson disk algorithm sets the minimum distance between two sampling
    points, thus avoiding the clustering effect of a uniform N-D distribution.
Args:
env: A minitaur gym environment.
"""
poisson_disc = PoissonDisc2D(_GRID_LENGTH, _GRID_WIDTH, _MIN_BLOCK_DISTANCE, _MAX_SAMPLE_SIZE)
block_centers = poisson_disc.generate()
for center in block_centers:
# We want the blocks to be in front of the robot.
shifted_center = np.array(center) - [2, _GRID_WIDTH / 2]
# Do not place blocks near the point [0, 0], where the robot will start.
if abs(shifted_center[0]) < 1.0 and abs(shifted_center[1]) < 1.0:
continue
half_length = np.random.uniform(_MIN_BLOCK_LENGTH, _MAX_BLOCK_LENGTH) / (2 * math.sqrt(2))
half_height = np.random.uniform(_MIN_BLOCK_HEIGHT, _MAX_BLOCK_HEIGHT) / 2
box_id = env.pybullet_client.createCollisionShape(
env.pybullet_client.GEOM_BOX, halfExtents=[half_length, half_length, half_height])
env.pybullet_client.createMultiBody(
baseMass=0,
baseCollisionShapeIndex=box_id,
basePosition=[shifted_center[0], shifted_center[1], half_height])
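# Hypothetical wiring into a Minitaur gym env (the env class name and constructor
# argument below are assumptions, not defined in this file):
#   randomizer = MinitaurTerrainRandomizer(terrain_type=TerrainType.RANDOM_BLOCKS)
#   env = minitaur_gym_env.MinitaurGymEnv(env_randomizer=randomizer)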
| 3.15625 | 3 |
polecat/db/sql/expression/values.py | furious-luke/polecat | 4 | 5146 | <reponame>furious-luke/polecat
from functools import partial
from polecat.db.query import query as query_module
from psycopg2.sql import SQL, Placeholder
from .expression import Expression
class Values(Expression):
def __init__(self, values, relation=None):
self.values = values
self.relation = relation
self.keyword = 'VALUES'
def to_sql(self):
if isinstance(self.values, query_module.Values):
get_values_sql = partial(
self.get_values_sql_from_values, self.values
)
else:
get_values_sql = partial(
self.get_values_sql_from_dict, self.values
)
return self.get_values_sql(get_values_sql)
def get_values_sql(self, get_values_sql):
values_sql, values_args = get_values_sql()
joined_sql = SQL(', ').join(
SQL('({})').format(
SQL(', ').join(row_sql)
)
for row_sql in values_sql
)
return SQL('%s {}' % self.keyword).format(joined_sql), values_args
def get_values_sql_from_values(self, values):
column_values_sql = []
column_values = ()
for row in values.iter_rows():
row_values_sql = []
for column_name, column_value in row:
value_sql, value = self.value_to_sql(column_value, column_name)
row_values_sql.append(value_sql)
column_values += value
column_values_sql.append(row_values_sql)
return column_values_sql, column_values
def get_values_sql_from_dict(self, values_dict):
column_values_sql = []
column_values = ()
for column_name, column_value in values_dict.items():
value_sql, value = self.value_to_sql(column_value, column_name)
column_values_sql.append(value_sql)
column_values += value
return (column_values_sql,), column_values
def value_to_sql(self, value, column_name=None):
if isinstance(value, Expression):
sql, args = value.to_sql()
return SQL('{}').format(sql), args
else:
if self.relation and column_name:
column = self.relation.get_column(column_name)
value = column.to_db_value(value)
return Placeholder(), (value,)
def iter_column_names(self):
if isinstance(self.values, dict):
return self.values.keys()
else:
return self.values.iter_column_names()
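# Illustrative sketch (relation omitted, so values are passed through unconverted):
#   sql, args = Values({'name': 'polecat', 'stars': 4}).to_sql()
#   # sql renders roughly as  VALUES (%s, %s)  and args == ('polecat', 4)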
| 2.75 | 3 |
python/day3p1.py | swilcox/2019adventofcode | 1 | 5147 | # 2019 advent day 3
MOVES = {
'R': (lambda x: (x[0], x[1] + 1)),
'L': (lambda x: (x[0], x[1] - 1)),
'U': (lambda x: (x[0] + 1, x[1])),
'D': (lambda x: (x[0] - 1, x[1])),
}
def build_route(directions: list) -> list:
current_location = (0, 0)
route = []
for d in directions:
direction, amount = d[0], int(d[1:])
for _ in range(amount):
current_location = MOVES[direction](current_location)
route.append(current_location)
return route
def find_intersections(r1: list, r2: list) -> set:
return set(r1).intersection(set(r2))
def find_shortest_manhattan_distance(points: set) -> int:
return min((abs(p[0]) + abs(p[1])) for p in points)
#R1 = 'R75,D30,R83,U83,L12,D49,R71,U7,L72'
#R2 = 'U62,R66,U55,R34,D71,R55,D58,R83'
#R1 = 'R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51'
#R2 = 'U98,R91,D20,R16,D67,R40,U7,R15,U6,R7'
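# Quick sanity check with the small wires from the puzzle statement (expected
# closest-intersection distance is 6):
#   r1 = build_route('R8,U5,L5,D3'.split(','))
#   r2 = build_route('U7,R6,D4,L4'.split(','))
#   find_shortest_manhattan_distance(find_intersections(r1, r2))  # -> 6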
def main():
#route1 = build_route(R1.split(','))
#route2 = build_route(R2.split(','))
with open('day3input.txt') as f:
line1, line2 = f.readlines()
route1 = build_route(line1.strip().split(','))
route2 = build_route(line2.strip().split(','))
print(find_shortest_manhattan_distance(find_intersections(route1, route2)))
if __name__ == "__main__":
main()
| 3.671875 | 4 |
examples/demo/python/catalog.py | JavDomGom/mist | 1 | 5148 | import asyncio
async def searchDomains(domain, q):
domains = []
proc = await asyncio.create_subprocess_shell(f"dnsrecon -d {domain} -t crt", stdout=asyncio.subprocess.PIPE)
line = True
while line:
line = (await proc.stdout.readline()).decode('utf-8')
fields = line.split()
if len(fields)>1 and fields[1]=="A":
if q:
await q.put(fields[2])
domains.append(fields[2])
return domains
async def findOpenPorts(ip, ports, q):
openPorts = []
proc = await asyncio.create_subprocess_shell(f"nmap -p {ports} --open {ip}",stdout=asyncio.subprocess.PIPE)
line = True
while line:
line = (await proc.stdout.readline()).decode('utf-8')
fields = line.split()
if len(fields)>1 and fields[1]=="open":
openPort = fields[0].split("/")
if q:
await q.put({"ip": ip, "port": openPort[0], "protocol": openPort[1]})
openPorts.append({"port": openPort[0], "protocol": openPort[1]})
return openPorts
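# Hypothetical driver (not part of the original catalog; requires dnsrecon and nmap
# on PATH, and passes q=None so results are only returned, not queued):
#   async def demo():
#       for host in await searchDomains("example.com", None):
#           print(host, await findOpenPorts(host, "80,443", None))
#   asyncio.run(demo())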
| 2.8125 | 3 |
tests/api/v3_1_0/test_security_groups_acls.py | CiscoISE/ciscoisesdk | 36 | 5149 | <reponame>CiscoISE/ciscoisesdk
# -*- coding: utf-8 -*-
"""IdentityServicesEngineAPI security_groups_acls API fixtures and tests.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fastjsonschema.exceptions import JsonSchemaException
from ciscoisesdk.exceptions import MalformedRequest
from ciscoisesdk.exceptions import ciscoisesdkException
from tests.environment import IDENTITY_SERVICES_ENGINE_VERSION
pytestmark = pytest.mark.skipif(IDENTITY_SERVICES_ENGINE_VERSION != '3.1.0', reason='version does not match')
def is_valid_get_security_groups_acl_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_a50d1bd34d5f593aadf8eb02083c67b0_v3_1_0').validate(obj.response)
return True
def get_security_groups_acl_by_id(api):
endpoint_result = api.security_groups_acls.get_security_groups_acl_by_id(
id='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_get_security_groups_acl_by_id(api, validator):
try:
assert is_valid_get_security_groups_acl_by_id(
validator,
get_security_groups_acl_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_security_groups_acl_by_id_default(api):
endpoint_result = api.security_groups_acls.get_security_groups_acl_by_id(
id='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_get_security_groups_acl_by_id_default(api, validator):
try:
assert is_valid_get_security_groups_acl_by_id(
validator,
get_security_groups_acl_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_update_security_groups_acl_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_afc81cd1e25c50319f75606b97c23b3d_v3_1_0').validate(obj.response)
return True
def update_security_groups_acl_by_id(api):
endpoint_result = api.security_groups_acls.update_security_groups_acl_by_id(
aclcontent='string',
active_validation=False,
description='string',
generation_id='string',
id='string',
ip_version='string',
is_read_only=True,
modelled_content={},
name='string',
payload=None
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_update_security_groups_acl_by_id(api, validator):
try:
assert is_valid_update_security_groups_acl_by_id(
validator,
update_security_groups_acl_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def update_security_groups_acl_by_id_default(api):
endpoint_result = api.security_groups_acls.update_security_groups_acl_by_id(
active_validation=False,
id='string',
aclcontent=None,
description=None,
generation_id=None,
ip_version=None,
is_read_only=None,
modelled_content=None,
name=None,
payload=None
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_update_security_groups_acl_by_id_default(api, validator):
try:
assert is_valid_update_security_groups_acl_by_id(
validator,
update_security_groups_acl_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_delete_security_groups_acl_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_b0a2bea8bfec52b68663ef3f7ac6d7a7_v3_1_0').validate(obj.response)
return True
def delete_security_groups_acl_by_id(api):
endpoint_result = api.security_groups_acls.delete_security_groups_acl_by_id(
id='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_delete_security_groups_acl_by_id(api, validator):
try:
assert is_valid_delete_security_groups_acl_by_id(
validator,
delete_security_groups_acl_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def delete_security_groups_acl_by_id_default(api):
endpoint_result = api.security_groups_acls.delete_security_groups_acl_by_id(
id='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_delete_security_groups_acl_by_id_default(api, validator):
try:
assert is_valid_delete_security_groups_acl_by_id(
validator,
delete_security_groups_acl_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_security_groups_acl(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_999b22d6ad9f595ab7e3eee5cf44de8a_v3_1_0').validate(obj.response)
return True
def get_security_groups_acl(api):
endpoint_result = api.security_groups_acls.get_security_groups_acl(
filter='value1,value2',
filter_type='string',
page=0,
size=0,
sortasc='string',
sortdsc='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_get_security_groups_acl(api, validator):
try:
assert is_valid_get_security_groups_acl(
validator,
get_security_groups_acl(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_security_groups_acl_default(api):
endpoint_result = api.security_groups_acls.get_security_groups_acl(
filter=None,
filter_type=None,
page=None,
size=None,
sortasc=None,
sortdsc=None
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_get_security_groups_acl_default(api, validator):
try:
assert is_valid_get_security_groups_acl(
validator,
get_security_groups_acl_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_create_security_groups_acl(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_9ab61f24bdaf508590f7686e1130913f_v3_1_0').validate(obj.response)
return True
def create_security_groups_acl(api):
endpoint_result = api.security_groups_acls.create_security_groups_acl(
aclcontent='string',
active_validation=False,
description='string',
generation_id='string',
ip_version='string',
is_read_only=True,
modelled_content={},
name='string',
payload=None
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_create_security_groups_acl(api, validator):
try:
assert is_valid_create_security_groups_acl(
validator,
create_security_groups_acl(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def create_security_groups_acl_default(api):
endpoint_result = api.security_groups_acls.create_security_groups_acl(
active_validation=False,
aclcontent=None,
description=None,
generation_id=None,
ip_version=None,
is_read_only=None,
modelled_content=None,
name=None,
payload=None
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_create_security_groups_acl_default(api, validator):
try:
assert is_valid_create_security_groups_acl(
validator,
create_security_groups_acl_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_version(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_6704e67a1131578aa794d8377da9a1de_v3_1_0').validate(obj.response)
return True
def get_version(api):
endpoint_result = api.security_groups_acls.get_version(
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_get_version(api, validator):
try:
assert is_valid_get_version(
validator,
get_version(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_version_default(api):
endpoint_result = api.security_groups_acls.get_version(
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_get_version_default(api, validator):
try:
assert is_valid_get_version(
validator,
get_version_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_bulk_request_for_security_groups_acl(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_7da250e23ac05e6a8dcf32a81effcee9_v3_1_0').validate(obj.response)
return True
def bulk_request_for_security_groups_acl(api):
endpoint_result = api.security_groups_acls.bulk_request_for_security_groups_acl(
active_validation=False,
operation_type='string',
payload=None,
resource_media_type='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_bulk_request_for_security_groups_acl(api, validator):
try:
assert is_valid_bulk_request_for_security_groups_acl(
validator,
bulk_request_for_security_groups_acl(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def bulk_request_for_security_groups_acl_default(api):
endpoint_result = api.security_groups_acls.bulk_request_for_security_groups_acl(
active_validation=False,
operation_type=None,
payload=None,
resource_media_type=None
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_bulk_request_for_security_groups_acl_default(api, validator):
try:
assert is_valid_bulk_request_for_security_groups_acl(
validator,
bulk_request_for_security_groups_acl_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_monitor_bulk_status_security_groups_acl(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_07af5ee576605a5a915d888924c1e804_v3_1_0').validate(obj.response)
return True
def monitor_bulk_status_security_groups_acl(api):
endpoint_result = api.security_groups_acls.monitor_bulk_status_security_groups_acl(
bulkid='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_monitor_bulk_status_security_groups_acl(api, validator):
try:
assert is_valid_monitor_bulk_status_security_groups_acl(
validator,
monitor_bulk_status_security_groups_acl(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def monitor_bulk_status_security_groups_acl_default(api):
endpoint_result = api.security_groups_acls.monitor_bulk_status_security_groups_acl(
bulkid='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_monitor_bulk_status_security_groups_acl_default(api, validator):
try:
assert is_valid_monitor_bulk_status_security_groups_acl(
validator,
monitor_bulk_status_security_groups_acl_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
| 1.625 | 2 |
geomstats/geometry/riemannian_metric.py | stefanheyder/geomstats | 0 | 5150 | """Riemannian and pseudo-Riemannian metrics."""
import math
import warnings
import autograd
import geomstats.backend as gs
from geomstats.geometry.connection import Connection
EPSILON = 1e-4
N_CENTERS = 10
TOLERANCE = 1e-5
N_REPETITIONS = 20
N_MAX_ITERATIONS = 50000
N_STEPS = 10
def loss(y_pred, y_true, metric):
"""Compute loss function between prediction and ground truth.
Loss function given by a Riemannian metric,
expressed as the squared geodesic distance between the prediction
and the ground truth.
Parameters
----------
y_pred
y_true
metric
Returns
-------
loss
"""
loss = metric.squared_dist(y_pred, y_true)
return loss
def grad(y_pred, y_true, metric):
"""Closed-form for the gradient of the loss function."""
tangent_vec = metric.log(base_point=y_pred, point=y_true)
grad_vec = - 2. * tangent_vec
inner_prod_mat = metric.inner_product_matrix(base_point=y_pred)
grad = gs.einsum('ni,nij->ni',
grad_vec,
gs.transpose(inner_prod_mat, axes=(0, 2, 1)))
return grad
class RiemannianMetric(Connection):
"""Class for Riemannian and pseudo-Riemannian metrics."""
def __init__(self, dimension, signature=None):
assert isinstance(dimension, int) or dimension == math.inf
assert dimension > 0
super().__init__(dimension=dimension)
self.signature = signature
def inner_product_matrix(self, base_point=None):
"""Inner product matrix at the tangent space at a base point.
Parameters
----------
base_point : array-like, shape=[n_samples, dimension], optional
"""
raise NotImplementedError(
'The computation of the inner product matrix'
' is not implemented.')
def inner_product_inverse_matrix(self, base_point=None):
"""Inner product matrix at the tangent space at a base point.
Parameters
----------
base_point : array-like, shape=[n_samples, dimension], optional
"""
metric_matrix = self.inner_product_matrix(base_point)
cometric_matrix = gs.linalg.inv(metric_matrix)
return cometric_matrix
def inner_product_derivative_matrix(self, base_point=None):
"""Compute derivative of the inner prod matrix at base point.
Parameters
----------
base_point : array-like, shape=[n_samples, dimension], optional
"""
metric_derivative = autograd.jacobian(self.inner_product_matrix)
return metric_derivative(base_point)
def christoffels(self, base_point):
"""Compute Christoffel symbols associated with the connection.
Parameters
----------
base_point: array-like, shape=[n_samples, dimension]
Returns
-------
christoffels: array-like,
shape=[n_samples, dimension, dimension, dimension]
"""
cometric_mat_at_point = self.inner_product_inverse_matrix(base_point)
metric_derivative_at_point = self.inner_product_derivative_matrix(
base_point)
term_1 = gs.einsum('nim,nmkl->nikl',
cometric_mat_at_point,
metric_derivative_at_point)
term_2 = gs.einsum('nim,nmlk->nilk',
cometric_mat_at_point,
metric_derivative_at_point)
term_3 = - gs.einsum('nim,nklm->nikl',
cometric_mat_at_point,
metric_derivative_at_point)
christoffels = 0.5 * (term_1 + term_2 + term_3)
return christoffels
def inner_product(self, tangent_vec_a, tangent_vec_b, base_point=None):
"""Inner product between two tangent vectors at a base point.
Parameters
----------
tangent_vec_a: array-like, shape=[n_samples, dimension]
or shape=[1, dimension]
tangent_vec_b: array-like, shape=[n_samples, dimension]
or shape=[1, dimension]
base_point: array-like, shape=[n_samples, dimension]
or shape=[1, dimension]
Returns
-------
inner_product : array-like, shape=[n_samples,]
"""
tangent_vec_a = gs.to_ndarray(tangent_vec_a, to_ndim=2)
tangent_vec_b = gs.to_ndarray(tangent_vec_b, to_ndim=2)
n_tangent_vec_a = gs.shape(tangent_vec_a)[0]
n_tangent_vec_b = gs.shape(tangent_vec_b)[0]
inner_prod_mat = self.inner_product_matrix(base_point)
inner_prod_mat = gs.to_ndarray(inner_prod_mat, to_ndim=3)
n_mats = gs.shape(inner_prod_mat)[0]
if n_tangent_vec_a != n_mats:
if n_tangent_vec_a == 1:
tangent_vec_a = gs.squeeze(tangent_vec_a, axis=0)
einsum_str_a = 'j,njk->nk'
elif n_mats == 1:
inner_prod_mat = gs.squeeze(inner_prod_mat, axis=0)
einsum_str_a = 'nj,jk->nk'
else:
raise ValueError('Shape mismatch for einsum.')
else:
einsum_str_a = 'nj,njk->nk'
aux = gs.einsum(einsum_str_a, tangent_vec_a, inner_prod_mat)
n_auxs, _ = gs.shape(aux)
if n_tangent_vec_b != n_auxs:
if n_auxs == 1:
aux = gs.squeeze(aux, axis=0)
einsum_str_b = 'k,nk->n'
elif n_tangent_vec_b == 1:
tangent_vec_b = gs.squeeze(tangent_vec_b, axis=0)
einsum_str_b = 'nk,k->n'
else:
raise ValueError('Shape mismatch for einsum.')
else:
einsum_str_b = 'nk,nk->n'
inner_prod = gs.einsum(einsum_str_b, aux, tangent_vec_b)
inner_prod = gs.to_ndarray(inner_prod, to_ndim=2, axis=1)
assert gs.ndim(inner_prod) == 2, inner_prod.shape
return inner_prod
def squared_norm(self, vector, base_point=None):
"""Compute the square of the norm of a vector.
Squared norm of a vector associated to the inner product
at the tangent space at a base point.
Parameters
----------
vector : array-like, shape=[n_samples, dimension]
base_point : array-like, shape=[n_samples, dimension]
Returns
-------
sq_norm : array-like, shape=[n_samples,]
"""
sq_norm = self.inner_product(vector, vector, base_point)
return sq_norm
def norm(self, vector, base_point=None):
"""Compute norm of a vector.
Norm of a vector associated to the inner product
at the tangent space at a base point.
Note: This only works for positive-definite
Riemannian metrics and inner products.
Parameters
----------
vector : array-like, shape=[n_samples, dimension]
base_point : array-like, shape=[n_samples, dimension]
Returns
-------
norm : array-like, shape=[n_samples,]
"""
sq_norm = self.squared_norm(vector, base_point)
norm = gs.sqrt(sq_norm)
return norm
def geodesic(self, initial_point,
end_point=None, initial_tangent_vec=None,
point_type='vector'):
"""Return the geodesic as function of t.
Geodesic curve defined by either:
- an initial point and an initial tangent vector, or
- an initial point and an end point.
The geodesic is returned as a function parameterized by t.
Parameters
----------
initial_point : array-like, shape=[n_samples, dimension]
end_point : array-like, shape=[n_samples, dimension], optional
initial_tangent_vec : array-like, shape=[n_samples, dimension],
optional
point_type : str, optional
Returns
-------
path : callable
"""
point_ndim = 1
if point_type == 'matrix':
point_ndim = 2
initial_point = gs.to_ndarray(initial_point,
to_ndim=point_ndim + 1)
if end_point is None and initial_tangent_vec is None:
raise ValueError('Specify an end point or an initial tangent '
'vector to define the geodesic.')
if end_point is not None:
end_point = gs.to_ndarray(end_point,
to_ndim=point_ndim + 1)
shooting_tangent_vec = self.log(point=end_point,
base_point=initial_point)
if initial_tangent_vec is not None:
assert gs.allclose(shooting_tangent_vec, initial_tangent_vec)
initial_tangent_vec = shooting_tangent_vec
initial_tangent_vec = gs.array(initial_tangent_vec)
initial_tangent_vec = gs.to_ndarray(initial_tangent_vec,
to_ndim=point_ndim + 1)
def path(t):
"""Generate a function parameterizing the geodesic.
Parameters
----------
t : parameter value of the geodesic
Returns
-------
point_at_time_t : callable
"""
t = gs.cast(t, gs.float32)
t = gs.to_ndarray(t, to_ndim=1)
t = gs.to_ndarray(t, to_ndim=2, axis=1)
new_initial_point = gs.to_ndarray(
initial_point,
to_ndim=point_ndim + 1)
new_initial_tangent_vec = gs.to_ndarray(
initial_tangent_vec,
to_ndim=point_ndim + 1)
if point_type == 'vector':
tangent_vecs = gs.einsum('il,nk->ik',
t,
new_initial_tangent_vec)
elif point_type == 'matrix':
tangent_vecs = gs.einsum('il,nkm->ikm',
t,
new_initial_tangent_vec)
point_at_time_t = self.exp(tangent_vec=tangent_vecs,
base_point=new_initial_point)
return point_at_time_t
return path
def squared_dist(self, point_a, point_b):
"""Squared geodesic distance between two points.
Parameters
----------
point_a : array-like, shape=[n_samples, dimension]
point_b : array-like, shape=[n_samples, dimension]
Returns
-------
sq_dist : array-like, shape=[n_samples,]
"""
log = self.log(point=point_b, base_point=point_a)
sq_dist = self.squared_norm(vector=log, base_point=point_a)
return sq_dist
def dist(self, point_a, point_b):
"""Geodesic distance between two points.
Note: It only works for positive definite
Riemannian metrics.
Parameters
----------
point_a : array-like, shape=[n_samples, dimension]
point_b : array-like, shape=[n_samples, dimension]
Returns
-------
dist : array-like, shape=[n_samples,]
"""
sq_dist = self.squared_dist(point_a, point_b)
dist = gs.sqrt(sq_dist)
return dist
def variance(self,
points,
weights=None,
base_point=None,
point_type='vector'):
"""Variance of (weighted) points wrt a base point.
Parameters
----------
points: array-like, shape=[n_samples, dimension]
weights: array-like, shape=[n_samples, 1], optional
"""
if point_type == 'vector':
points = gs.to_ndarray(points, to_ndim=2)
if point_type == 'matrix':
points = gs.to_ndarray(points, to_ndim=3)
n_points = gs.shape(points)[0]
if weights is None:
weights = gs.ones((n_points, 1))
weights = gs.array(weights)
weights = gs.to_ndarray(weights, to_ndim=2, axis=1)
sum_weights = gs.sum(weights)
if base_point is None:
base_point = self.mean(points, weights)
variance = 0.
sq_dists = self.squared_dist(base_point, points)
variance += gs.einsum('nk,nj->j', weights, sq_dists)
variance = gs.array(variance)
variance /= sum_weights
variance = gs.to_ndarray(variance, to_ndim=1)
variance = gs.to_ndarray(variance, to_ndim=2, axis=1)
return variance
def mean(self, points,
weights=None,
n_max_iterations=32,
epsilon=EPSILON,
point_type='vector',
mean_method='default',
verbose=False):
"""Frechet mean of (weighted) points.
Parameters
----------
points : array-like, shape=[n_samples, dimension]
weights : array-like, shape=[n_samples, 1], optional
verbose : bool, optional
Returns
-------
mean : array-like
the Frechet mean of points, a point on the manifold
"""
if mean_method == 'default':
# TODO(nina): Profile this code to study performance,
# i.e. what to do with sq_dists_between_iterates.
def while_loop_cond(iteration, mean, variance, sq_dist):
result = ~gs.logical_or(
gs.isclose(variance, 0.),
gs.less_equal(sq_dist, epsilon * variance))
return result[0, 0] or iteration == 0
def while_loop_body(iteration, mean, variance, sq_dist):
logs = self.log(point=points, base_point=mean)
tangent_mean = gs.einsum('nk,nj->j', weights, logs)
tangent_mean /= sum_weights
mean_next = self.exp(
tangent_vec=tangent_mean,
base_point=mean)
sq_dist = self.squared_dist(mean_next, mean)
sq_dists_between_iterates.append(sq_dist)
variance = self.variance(points=points,
weights=weights,
base_point=mean_next)
mean = mean_next
iteration += 1
return [iteration, mean, variance, sq_dist]
if point_type == 'vector':
points = gs.to_ndarray(points, to_ndim=2)
if point_type == 'matrix':
points = gs.to_ndarray(points, to_ndim=3)
n_points = gs.shape(points)[0]
if weights is None:
weights = gs.ones((n_points, 1))
weights = gs.array(weights)
weights = gs.to_ndarray(weights, to_ndim=2, axis=1)
sum_weights = gs.sum(weights)
mean = points[0]
if point_type == 'vector':
mean = gs.to_ndarray(mean, to_ndim=2)
if point_type == 'matrix':
mean = gs.to_ndarray(mean, to_ndim=3)
if n_points == 1:
return mean
sq_dists_between_iterates = []
iteration = 0
sq_dist = gs.array([[0.]])
variance = gs.array([[0.]])
last_iteration, mean, variance, sq_dist = gs.while_loop(
lambda i, m, v, sq: while_loop_cond(i, m, v, sq),
lambda i, m, v, sq: while_loop_body(i, m, v, sq),
loop_vars=[iteration, mean, variance, sq_dist],
maximum_iterations=n_max_iterations)
if last_iteration == n_max_iterations:
            print('Maximum number of iterations {} reached. '
'The mean may be inaccurate'.format(n_max_iterations))
if verbose:
print('n_iter: {}, final variance: {}, final dist: {}'.format(
last_iteration, variance, sq_dist))
mean = gs.to_ndarray(mean, to_ndim=2)
return mean
if mean_method == 'frechet-poincare-ball':
lr = 1e-3
tau = 5e-3
if len(points) == 1:
return points
iteration = 0
convergence = math.inf
barycenter = points.mean(0, keepdims=True) * 0
while convergence > tau and n_max_iterations > iteration:
iteration += 1
expand_barycenter = gs.repeat(barycenter, points.shape[0], 0)
grad_tangent = 2 * self.log(points, expand_barycenter)
cc_barycenter = self.exp(lr * grad_tangent.sum(0,
keepdims=True),
barycenter)
convergence = self.dist(cc_barycenter, barycenter).max().item()
barycenter = cc_barycenter
if iteration == n_max_iterations:
warnings.warn(
'Maximum number of iterations {} reached. The '
'mean may be inaccurate'.format(n_max_iterations))
return barycenter
def adaptive_gradientdescent_mean(self, points,
weights=None,
n_max_iterations=40,
epsilon=1e-12,
init_points=[],
verbose=False):
"""Compute Frechet mean of (weighted) points using adaptive time-steps.
Frechet mean of (weighted) points using adaptive time-steps
The loss function optimized is ||M_1(x)||_x (where M_1(x) is
the tangent mean at x) rather than the mean-square-distance (MSD)
because this saves computation time.
Parameters
----------
points: array-like, shape=[n_samples, dimension]
weights: array-like, shape=[n_samples, 1], optional
init_points: array-like, shape=[n_init, dimension]
epsilon: tolerance for stopping the gradient descent
verbose: verbose mode printing the surrogate value
"""
# TODO(Xavier): This function assumes that all points are lists
# of vectors and not of matrices
n_points = gs.shape(points)[0]
if n_points == 1:
return gs.to_ndarray(points[0], to_ndim=2)
if weights is None:
weights = gs.ones((n_points, 1))
weights = gs.array(weights)
weights = gs.to_ndarray(weights, to_ndim=2, axis=1)
sum_weights = gs.sum(weights)
n_init = len(init_points)
if n_init == 0:
current_mean = points[0]
else:
current_mean = init_points[0]
tau = 1.0
iteration = 0
logs = self.log(point=points, base_point=current_mean)
current_tangent_mean = gs.einsum('nk,nj->j', weights, logs)
current_tangent_mean /= sum_weights
norm_current_tangent_mean = gs.linalg.norm(current_tangent_mean)
while (norm_current_tangent_mean > epsilon
and iteration < n_max_iterations):
iteration = iteration + 1
shooting_vector = gs.to_ndarray(
tau * current_tangent_mean,
to_ndim=2)
next_mean = self.exp(
tangent_vec=shooting_vector,
base_point=current_mean)
logs = self.log(point=points, base_point=next_mean)
next_tangent_mean = gs.einsum('nk,nj->j', weights, logs)
next_tangent_mean /= sum_weights
norm_next_tangent_mean = gs.linalg.norm(next_tangent_mean)
if verbose:
print(
"Iter {0}: tau= {1}, "
"norm_current_tangent_mean = {2}".format(
                        iteration, tau, norm_current_tangent_mean))
if norm_next_tangent_mean < norm_current_tangent_mean:
current_mean = next_mean
current_tangent_mean = next_tangent_mean
norm_current_tangent_mean = norm_next_tangent_mean
tau = max(1.0, 1.0511111 * tau)
else:
tau = tau * 0.8
if iteration == n_max_iterations:
warnings.warn(
                'Maximum number of iterations {} reached. '
'The mean may be inaccurate'.format(n_max_iterations))
return gs.to_ndarray(current_mean, to_ndim=2)
def diameter(self, points):
"""Give the distance between two farthest points.
Distance between the two points that are farthest away from each other
in points.
Parameters
----------
points
Returns
-------
diameter
"""
diameter = 0.0
n_points = points.shape[0]
for i in range(n_points - 1):
dist_to_neighbors = self.dist(points[i, :], points[i + 1:, :])
dist_to_farthest_neighbor = gs.amax(dist_to_neighbors)
diameter = gs.maximum(diameter, dist_to_farthest_neighbor)
return diameter
def closest_neighbor_index(self, point, neighbors):
"""Closest neighbor of point among neighbors.
Parameters
----------
point
neighbors
Returns
-------
closest_neighbor_index
"""
dist = self.dist(point, neighbors)
closest_neighbor_index = gs.argmin(dist)
return closest_neighbor_index
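

if __name__ == "__main__":
    # A minimal sketch, not part of geomstats itself: a toy metric whose
    # inner-product matrix is the identity, used only to exercise
    # inner_product/norm above. exp/log are inherited from Connection and left
    # unimplemented here, so dist, mean and geodesic are not covered.
    class EuclideanToyMetric(RiemannianMetric):
        def __init__(self, dimension):
            super().__init__(dimension=dimension)
            self.dim = dimension

        def inner_product_matrix(self, base_point=None):
            # Identity matrix: the flat Euclidean inner product.
            return gs.eye(self.dim)

    toy_metric = EuclideanToyMetric(dimension=3)
    vector = gs.array([[1., 2., 2.]])
    # Expected output with the default numpy backend: [[3.]]
    print(toy_metric.norm(vector))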
| 3.0625 | 3 |
app/main/pages/instrument/hrs/red/order/plots.py | hettlage/salt-data-quality-site | 0 | 5151 | import pandas as pd
from bokeh.models import HoverTool
from bokeh.models.formatters import DatetimeTickFormatter
from bokeh.palettes import Plasma256
from bokeh.plotting import figure, ColumnDataSource
from app import db
from app.decorators import data_quality
# creates your plot
date_formatter = DatetimeTickFormatter(microseconds=['%f'],
milliseconds=['%S.%2Ns'],
seconds=[':%Ss'],
minsec=[':%Mm:%Ss'],
minutes=['%H:%M:%S'],
hourmin=['%H:%M:'],
hours=["%H:%M"],
days=["%d %b"],
months=["%d %b %Y"],
years=["%b %Y"])
def get_position_source(start_date, end_date, obsmode):
logic = " and HrsMode_Id = {obsmode} " \
" and FileName like 'RORDER%%' " \
.format(obsmode=obsmode)
sql = "select Date, y_upper, HrsOrder, CONVERT(Date,char) AS Time " \
" from DQ_HrsOrder join NightInfo using (NightInfo_Id) " \
" where Date > '{start_date}' and Date <'{end_date}' {logic}" \
.format(start_date=start_date, end_date=end_date, logic=logic)
df = pd.read_sql(sql, db.engine)
colors = []
if len(df) > 0:
ord_min = df['HrsOrder'].min()
ord_max = df['HrsOrder'].max()
colors = [Plasma256[int((y - ord_min) * (len(Plasma256) - 1) / float(ord_max - ord_min))] for y in
df["HrsOrder"]]
df['colors'] = colors
source = ColumnDataSource(df)
return source
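
# Quick sanity check of the colour index computed in get_position_source above
# (the numbers are illustrative only): if HrsOrder spans 60..100, an order of
# 80 maps to int((80 - 60) * 255 / 40) = 127, roughly the middle of Plasma256.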
@data_quality(name='hrs_order', caption='HRS Order')
def hrs_order_plot(start_date, end_date):
"""Return a <div> element with the Order plot.
The plot shows the HRS order for obsmode High, low and medium over time
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the Order plot.
"""
def get_source(obsmode):
logic = " and HrsMode_Id = {obsmode} " \
" and FileName like 'RORDER%%' " \
" group by Date " \
.format(obsmode=obsmode)
sql = "select Date, (Max(HrsOrder) - Min(HrsOrder)) as ord, CONVERT(Date, char) AS Time " \
" from DQ_HrsOrder join NightInfo using (NightInfo_Id) " \
" where Date > '{start_date}' and Date <'{end_date}' {logic}" \
.format(start_date=start_date, end_date=end_date, logic=logic)
df = pd.read_sql(sql, db.engine)
source = ColumnDataSource(df)
return source
low_source = get_source(1) # HrsMode_Id = 1 low
med_source = get_source(2) # HrsMode_Id = 2 med
high_source = get_source(3) # HrsMode_Id = 3 high
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">HrsOrder(Max - Min): </span>
<span style="font-size: 15px;"> @ord</span>
</div>
</div>
"""
)
p = figure(title="HRS Order",
x_axis_label='Date',
y_axis_label='Max(HrsOrder) - Min(HrsOrder)',
x_axis_type='datetime',
tools=[tool_list, _hover])
p.scatter(source=low_source, x='Date', y='ord', color='red', fill_alpha=0.2, legend='Low', size=10)
p.scatter(source=med_source, x='Date', y='ord', color='orange', fill_alpha=0.2, legend='Medium', size=10)
p.scatter(source=high_source, x='Date', y='ord', color='green', fill_alpha=0.2, legend='High', size=10)
p.legend.location = "top_right"
p.legend.click_policy = "hide"
p.legend.background_fill_alpha = 0.3
p.legend.inactive_fill_alpha = 0.8
p.xaxis[0].formatter = date_formatter
return p
@data_quality(name='hrs_order_position_high', caption=' ')
def hrs_order_position_plot(start_date, end_date):
"""
Return a <div> element with the Order Position plot.
The plot shows the HRS order for obsmode High resolution over time
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the Order Position plot.
"""
high_source = get_position_source(start_date, end_date, 3) # HrsMode_Id = 3 high
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Y Upper: </span>
<span style="font-size: 15px;"> @y_upper</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">HRS Order: </span>
<span style="font-size: 15px;"> @HrsOrder</span>
</div>
</div>
"""
)
p = figure(title="HRS Order Position High Resolution",
x_axis_label='Date',
y_axis_label='y_upper',
x_axis_type='datetime',
tools=[tool_list, _hover])
p.scatter(source=high_source, x='Date', y='y_upper', color='colors', fill_alpha=0.2, size=10)
p.xaxis[0].formatter = date_formatter
return p
@data_quality(name='hrs_order_position_medium', caption=' ')
def hrs_order_position_medium_plot(start_date, end_date):
"""
Return a <div> element with the Order Position plot.
    The plot shows the HRS order for obsmode medium resolution over time
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the Order Position plot.
"""
    med_source = get_position_source(start_date, end_date, 2)  # HrsMode_Id = 2 medium
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Y Upper: </span>
<span style="font-size: 15px;"> @y_upper</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">HRS Order: </span>
<span style="font-size: 15px;"> @HrsOrder</span>
</div>
</div>
"""
)
p = figure(title="HRS Order Position Medium Resolution",
x_axis_label='Date',
y_axis_label='y_upper',
x_axis_type='datetime',
tools=[tool_list, _hover])
    p.scatter(source=med_source, x='Date', y='y_upper', color='colors', fill_alpha=0.2, size=10)
p.xaxis[0].formatter = date_formatter
return p
@data_quality(name='hrs_order_position_low', caption=' ')
def hrs_order_position_low_plot(start_date, end_date):
"""
Return a <div> element with the Order Position plot.
    The plot shows the HRS order for obsmode low resolution over time
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the Order Position plot.
"""
    low_source = get_position_source(start_date, end_date, 1)  # HrsMode_Id = 1 low
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Y Upper: </span>
<span style="font-size: 15px;"> @y_upper</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">HRS Order: </span>
<span style="font-size: 15px;"> @HrsOrder</span>
</div>
</div>
"""
)
p = figure(title="HRS Order Position Low Resolution",
x_axis_label='Date',
y_axis_label='y_upper',
x_axis_type='datetime',
tools=[tool_list, _hover])
    p.scatter(source=low_source, x='Date', y='y_upper', color='colors', fill_alpha=0.2, size=10)
p.xaxis[0].formatter = date_formatter
return p | 2.625 | 3 |
p6e8.py | yannickbf-prog/python | 0 | 5152 | <reponame>yannickbf-prog/python
#<NAME>6e8 Write a program that first asks for a number and then keeps asking for numbers until the sum of the numbers entered matches the initial number. The program ends by printing the list of numbers.
limite = int(input("Enter the limit:"))
valores = int(input("Enter a value:"))
listavalores = []
listavalores.append(valores)
while limite > sum(listavalores):
    valores = int(input("Enter another value"))
    listavalores.append(valores)
print(f"The limit to reach is {limite}. The list created is ", end="")
for i in range(len(listavalores)):
    print(listavalores[i], end=" ")
print(f"since the sum of these numbers is {sum(listavalores)}")
| 3.953125 | 4 |
.venv/lib/python3.8/site-packages/cleo/application.py | RivtLib/replit01 | 1 | 5153 | <gh_stars>1-10
from typing import Optional
from typing import Tuple
from clikit.console_application import ConsoleApplication
from .commands import BaseCommand
from .commands.completions_command import CompletionsCommand
from .config import ApplicationConfig
class Application(ConsoleApplication, object):
"""
An Application is the container for a collection of commands.
This class is optimized for a standard CLI environment.
Usage:
>>> app = Application('myapp', '1.0 (stable)')
>>> app.add(HelpCommand())
>>> app.run()
"""
def __init__(
self, name=None, version=None, complete=True, config=None
): # type: (str, str, bool, Optional[ApplicationConfig]) -> None
if config is None:
config = ApplicationConfig(name, version)
super(Application, self).__init__(config)
if complete:
self.add(CompletionsCommand())
def add_commands(self, *commands): # type: (Tuple[BaseCommand]) -> None
for command in commands:
self.add(command)
def add(self, command): # type: (BaseCommand) -> Application
"""
Adds a command object.
"""
self.add_command(command.config)
command.set_application(self)
return self
def find(self, name): # type: (str) -> BaseCommand
names = name.split(" ")
command = self.get_command(names[0])
for name in names[1:]:
command = command.get_sub_command(name)
return command.config.handler
| 2.796875 | 3 |
simone/person/management.py | zuhalcakir/simone | 16 | 5154 | #from django.dispatch import dispatcher
#def UserProfilePostInsert(sender, instance, signal, *args, **kwargs):
#"""
#Inserts a blank imap server entry (if necessary) and associates it with the user
#"""
#user = instance
#i = user.get_profile().imap_servers.create()
#user.get_profile().about = 'test'
#i.save()
#user.save_profile()
## we want this called after every user is inserted
#dispatcher.connect(UserProfilePostInsert, signal=signals.pre_save, sender=User)
| 2.328125 | 2 |
watcher/fly.py | cog-isa/htm-rl | 1 | 5155 | <filename>watcher/fly.py
from utils.drawer import Drawer
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("name", help="the name of the datafile")
parser.add_argument("--size", help="width,height")
args = parser.parse_args()
if args.size is None:
width, height = 1280, 720
else:
width, height = args.size.split(',')
drawer = Drawer('data/'+args.name, [int(width), int(height)])
while not drawer.window.should_close():
drawer.update()
        # inner loop: process the current frame until the window closes or the user steps forward/back
while not drawer.window.should_close() and not drawer.window.next and not drawer.window.previous:
drawer.process()
if drawer.window.next and drawer.current + 2 < len(drawer.data_base.keys()): drawer.current = drawer.current + 1
if drawer.window.previous and drawer.current > 0: drawer.current = drawer.current - 1
drawer.window.next = False
drawer.window.previous = False
drawer.window.terminate()
| 3 | 3 |
test/unit/test_structures.py | ourobouros/aws-encryption-sdk-python | 0 | 5156 | <filename>test/unit/test_structures.py
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Unit test suite for aws_encryption_sdk.structures"""
import pytest
from aws_encryption_sdk.identifiers import Algorithm, ContentType, ObjectType, SerializationVersion
from aws_encryption_sdk.structures import DataKey, EncryptedDataKey, MasterKeyInfo, MessageHeader, RawDataKey
from .unit_test_utils import all_invalid_kwargs, all_valid_kwargs
pytestmark = [pytest.mark.unit, pytest.mark.local]
VALID_KWARGS = {
MessageHeader: [
dict(
version=SerializationVersion.V1,
type=ObjectType.CUSTOMER_AE_DATA,
algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,
message_id=b"aosiejfoaiwej",
encryption_context={},
encrypted_data_keys=set([]),
content_type=ContentType.FRAMED_DATA,
content_aad_length=32456,
header_iv_length=32456,
frame_length=234567,
)
],
MasterKeyInfo: [
dict(provider_id="fawnofijawef", key_info="ajsnoiajerofi"),
dict(provider_id=b"fawnofijawef", key_info="ajsnoiajerofi"),
dict(provider_id="fawnofijawef", key_info=b"ajsnoiajerofi"),
dict(provider_id=b"fawnofijawef", key_info=b"ajsnoiajerofi"),
],
RawDataKey: [
dict(key_provider=MasterKeyInfo(provider_id="asjnoa", key_info=b"<KEY>"), data_key=b"<KEY>")
],
DataKey: [
dict(
key_provider=MasterKeyInfo(provider_id="asjnoa", key_info=b"<KEY>"),
data_key=b"<KEY>",
encrypted_data_key=b"aisofiawjef",
)
],
EncryptedDataKey: [
dict(
key_provider=MasterKeyInfo(provider_id="asjnoa", key_info=b"<KEY>"), encrypted_data_key=b"aisofiawjef"
)
],
}
@pytest.mark.parametrize("cls, kwargs", all_valid_kwargs(VALID_KWARGS))
def test_attributes_valid_kwargs(cls, kwargs):
cls(**kwargs)
@pytest.mark.parametrize("cls, kwargs", all_invalid_kwargs(VALID_KWARGS))
def test_attributes_invalid_kwargs(cls, kwargs):
with pytest.raises(TypeError):
cls(**kwargs)
@pytest.mark.parametrize(
"kwargs, attribute, expected_value",
(
(dict(provider_id="asfoijwae", key_info=b"<KEY>"), "provider_id", "asfoijwae"),
(dict(provider_id=b"asfoijwae", key_info=b"<KEY>"), "provider_id", "asfoijwae"),
(dict(provider_id="asfoijwae", key_info="<KEY>"), "key_info", b"<KEY>"),
(dict(provider_id="asfoijwae", key_info=b"<KEY>"), "key_info", b"<KEY>"),
),
)
def test_master_key_info_convert(kwargs, attribute, expected_value):
test = MasterKeyInfo(**kwargs)
assert getattr(test, attribute) == expected_value
| 1.703125 | 2 |
codes/utils/mygraph.py | CristianLazoQuispe/Datathon-Interbank-2020 | 0 | 5157 | import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
path_results = '../results/images/'
# Plots the distribution of a binary target column and annotates each bar with its count and percentage.
def graph_target(data,name="target",figsize=(6,4),title_name=None,color_text="white",save=False,name_file='target_distribution'):
plt.figure(figsize=figsize)
    total = float(len(data))  # total number of rows
title_name = "Target distribution"+" of "+str(int(total))+" users" if title_name is None else title_name+" of "+str(int(total))+" users"
ax = sns.countplot(x=name, data=data) # for Seaborn version 0.7 and more
for p in ax.patches:
height = p.get_height()
ax.text(p.get_x()+p.get_width()/2.,
height/3,
'{:.2f}%\n{:d}'.format(100*height/total,height),
ha="center",color=color_text,fontweight='bold')#fontsize=10
plt.title(title_name)
plt.show()
if save:
figure = ax.get_figure()
figure.savefig(path_results+name_file+'.png',dpi=400, bbox_inches = 'tight')
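
# A hedged usage sketch for graph_target (the DataFrame below is made up; any
# frame with a 0/1 column works):
#
#     df = pd.DataFrame({"target": [0, 1, 0, 0, 1, 0, 1, 0]})
#     graph_target(df, name="target", title_name="Toy example")
#
# This shows a bar chart with the count and percentage written on each bar.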
# plot histograms of train and test to understand the differences between them
def plot_comp_hist(data1,data2,l_range=[-np.inf,np.inf],labels=['x','y'],title='histogram',bins=20,alpha=0.5):
x = data1[(data1>=l_range[0])&(data1<l_range[1])]
y = data2[(data2>=l_range[0])&(data2<l_range[1])]
plt.hist([x, y],label=labels, bins = bins, alpha=alpha)
plt.legend(loc='upper right')
plt.title(title)
#rcc_train[(rcc_train.saldo>=0.2)&(rcc_train.saldo<3)].saldo.plot.hist(title="Fraud Tranascation <3", alpha=0.5)
#rcc_train[(rcc_test.saldo>=0.2)&(rcc_test.saldo<3)].saldo.plot.hist(title="Fraud Tranascation <3", alpha=0.5) | 3.09375 | 3 |
src/pretix/base/validators.py | td00/pretix | 0 | 5158 | from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from django.utils.translation import ugettext_lazy as _
class BlacklistValidator:
blacklist = []
def __call__(self, value):
# Validation logic
if value in self.blacklist:
raise ValidationError(
_('This slug has an invalid value: %(value)s.'),
code='invalid',
params={'value': value},
)
@deconstructible
class EventSlugBlacklistValidator(BlacklistValidator):
blacklist = [
'download',
'healthcheck',
'locale',
'control',
'redirect',
'jsi18n',
'metrics',
'_global',
'__debug__',
'api',
'events',
]
@deconstructible
class OrganizerSlugBlacklistValidator(BlacklistValidator):
blacklist = [
'download',
'healthcheck',
'locale',
'control',
'pretixdroid',
'redirect',
'jsi18n',
'metrics',
'_global',
'__debug__',
'about',
'api',
]
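

if __name__ == "__main__":
    # A small illustrative sketch (not part of pretix): the validators are
    # normally attached to slug form/model fields, but they can also be
    # called directly because they only implement __call__.
    validator = EventSlugBlacklistValidator()
    validator("my-event")  # passes silently
    try:
        validator("download")
    except ValidationError:
        print("'download' is a reserved slug")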
| 2.21875 | 2 |
fortnitepy/ext/commands/bot.py | gfdb/fortnitepy | 127 | 5159 | <filename>fortnitepy/ext/commands/bot.py<gh_stars>100-1000
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import logging
import inspect
import asyncio
import types
import sys
import importlib
import collections
import traceback
from typing import Any, List, Optional, Mapping, Set
from fortnitepy.client import Client
from fortnitepy.auth import Auth
from fortnitepy.typedefs import MaybeCoro, ListOrTuple
from ._types import _BaseCommand
from .errors import (ExtensionFailed, ExtensionMissingEntryPoint,
ExtensionNotLoaded, ExtensionAlreadyLoaded,
ExtensionNotFound, CheckFailure, CommandError,
CommandNotFound)
from .core import GroupMixin
from .cog import Cog
from .view import StringView
from .context import Context
from .help import HelpCommand, FortniteHelpCommand
from .typedefs import Message
log = logging.getLogger(__name__)
def _is_submodule(parent: str, child: str) -> bool:
return parent == child or child.startswith(parent + ".")
class _DefaultRepr:
def __repr__(self) -> str:
return '<default-help-command>'
_default = _DefaultRepr()
class Bot(GroupMixin, Client):
"""Represents a fortnite bot.
This class is a subclass of :class:`fortnitepy.Client` and as a result
anything that you can do with a :class:`fortnitepy.Client` you can do with
this bot.
This class also subclasses :class:`.GroupMixin` to provide the
functionality to manage commands.
Attributes
-----------
command_prefix
The command prefix is what the message content must contain initially
to have a command invoked. This prefix could either be a string to
indicate what the prefix should be, or a callable that takes in the bot
as its first parameter and :class:`fortnitepy.FriendMessage` or
:class:`fortnitepy.PartyMessage` as its second parameter and returns
the prefix. This is to facilitate "dynamic" command prefixes. This
callable can be either a regular function or a coroutine.
An empty string as the prefix always matches, enabling prefix-less
command invocation.
The command prefix could also be an iterable of strings indicating that
multiple checks for the prefix should be used and the first one to
match will be the invocation prefix. You can get this prefix via
:attr:`.Context.prefix`. To avoid confusion empty iterables are not
allowed.
.. note::
When passing multiple prefixes be careful to not pass a prefix
that matches a longer prefix occurring later in the sequence. For
example, if the command prefix is ``('!', '!?')`` the ``'!?'``
prefix will never be matched to any message as the previous one
matches messages starting with ``!?``. This is especially important
when passing an empty string, it should always be last as no prefix
after it will be matched.
case_insensitive: :class:`bool`
Whether the commands should be case insensitive. Defaults to ``False``.
This attribute does not carry over to groups. You must set it to every
group if you require group commands to be case insensitive as well.
description: :class:`str`
The content prefixed into the default help message.
help_command: Optional[:class:`.HelpCommand`]
The help command implementation to use. This can be dynamically
set at runtime. To remove the help command pass ``None``. For more
information on implementing a help command, see
:ref:`ext_commands_help_command`.
owner_id: Optional[:class:`int`]
The user ID that owns the bot. This is used by :meth:`.is_owner()`
and checks that call this method.
owner_ids: Optional[Collection[:class:`int`]]
The user IDs that owns the bot. This is similar to `owner_id`.
For performance reasons it is recommended to use a :class:`set`
for the collection. You cannot set both `owner_id` and `owner_ids`.
This is used by :meth:`.is_owner()` and checks that call this method.
"""
def __init__(self, command_prefix: Any, auth: Auth, *,
help_command: Optional[HelpCommand] = _default,
description: Optional[str] = None,
**kwargs: Any) -> None:
kwargs['case_insensitive'] = kwargs.get('case_insensitive', False)
super().__init__(auth, **kwargs)
self.command_prefix = command_prefix
self.description = inspect.cleandoc(description) if description else ''
self.owner_id = kwargs.get('owner_id')
self.owner_ids = kwargs.get('owner_ids', set())
if self.owner_id and self.owner_ids:
raise TypeError('Both owner_id and owner_ids are set.')
if (self.owner_ids and not isinstance(self.owner_ids,
collections.abc.Collection)):
raise TypeError(
'owner_ids must be a collection not '
'{0.__class__!r}'.format(self.owner_ids)
)
self.__cogs = {}
self.__extensions = {}
self._checks = []
self._check_once = []
self._help_command = None
self._before_invoke = None
self._after_invoke = None
if help_command is _default:
self.help_command = FortniteHelpCommand()
else:
self.help_command = help_command
self.add_event_handler('friend_message', self.process_commands)
self.add_event_handler('party_message', self.process_commands)
def register_methods(self) -> None:
for _, obj in inspect.getmembers(self):
if isinstance(obj, _BaseCommand):
obj.instance = self
if obj.parent is None:
try:
self.add_command(obj)
except CommandError:
traceback.print_exc()
continue
super().register_methods()
async def close(self, *,
close_http: bool = True,
dispatch_close: bool = True) -> None:
if dispatch_close:
await asyncio.gather(
self.dispatch_and_wait_event('before_close'),
self.dispatch_and_wait_event('close'),
)
for extension in tuple(self.__extensions):
try:
self.unload_extension(extension)
except Exception:
pass
for cog in tuple(self.__cogs):
try:
self.remove_cog(cog)
except Exception:
pass
await self._close(
close_http=close_http,
dispatch_close=dispatch_close
)
def check(self, func: MaybeCoro) -> MaybeCoro:
r"""A decorator that adds a check globally to every command.
.. note::
This function can either be a regular function or a coroutine.
This function takes a single parameter, :class:`.Context`, and can
only raise exceptions inherited from :exc:`.CommandError`.
Example
-------
.. code-block:: python3
@bot.check
def global_check(ctx):
# Allows only party commands.
return ctx.party is not None
"""
self.add_check(func)
return func
def add_check(self, func: MaybeCoro, *,
call_once: bool = False) -> None:
"""Adds a global check to the bot.
This is the non-decorator interface to :meth:`.check`
and :meth:`.check_once`.
Parameters
----------
func
The function that was used as a global check.
call_once: :class:`bool`
If the function should only be called once per
:meth:`Command.invoke` call.
"""
if call_once:
self._check_once.append(func)
else:
self._checks.append(func)
def remove_check(self, func: MaybeCoro, *,
call_once: bool = False) -> None:
"""Removes a global check from the bot.
Parameters
----------
func
The function to remove from the global checks.
call_once: :class:`bool`
If the function was added with ``call_once=True`` in
the :meth:`.Bot.add_check` call or using :meth:`.check_once`.
"""
list_ = self._check_once if call_once else self._checks
try:
list_.remove(func)
except ValueError:
pass
def check_once(self, func: MaybeCoro) -> MaybeCoro:
r"""A decorator that adds a "call once" global check to the bot.
Unlike regular global checks, this one is called only once
per :meth:`Command.invoke` call.
Regular global checks are called whenever a command is called
or :meth:`.Command.can_run` is called. This type of check
bypasses that and ensures that it's called only once, even inside
the default help command.
.. note::
This function can either be a regular function or a coroutine.
This function takes a single parameter, :class:`.Context`, and can
only raise exceptions inherited from :exc:`.CommandError`.
Example
-------
.. code-block:: python3
@bot.check_once
def whitelist(ctx):
return ctx.message.author.id in my_whitelist
"""
self.add_check(func, call_once=True)
return func
async def can_run(self, ctx: Context, *,
call_once: bool = False) -> bool:
data = self._check_once if call_once else self._checks
if len(data) == 0:
return True
for func in data:
if asyncio.iscoroutinefunction(func):
res = await func(ctx)
else:
res = func(ctx)
if not res:
return False
return True
async def is_owner(self, user_id: str) -> bool:
"""|coro|
Checks if a user id is the owner of the bot.
Parameters
----------
user_id: :class:`str`
The user id to check for.
Returns
-------
:class:`bool`
Whether the user is the owner.
"""
if self.owner_id:
return user_id == self.owner_id
else:
return user_id in self.owner_ids
def before_invoke(self, coro: MaybeCoro) -> MaybeCoro:
"""A decorator that registers a coroutine as a pre-invoke hook.
A pre-invoke hook is called directly before the command is
called. This makes it a useful function to set up database
connections or any type of set up required.
This pre-invoke hook takes a sole parameter, a :class:`.Context`.
.. note::
The :meth:`~.Bot.before_invoke` and :meth:`~.Bot.after_invoke`
hooks are only called if all checks and argument parsing
procedures pass without error. If any check or argument parsing
procedures fail then the hooks are not called.
Parameters
----------
coro
The coroutine to register as the pre-invoke hook.
Raises
------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError('The pre-invoke hook must be a coroutine.')
self._before_invoke = coro
return coro
def after_invoke(self, coro: MaybeCoro) -> MaybeCoro:
r"""A decorator that registers a coroutine as a post-invoke hook.
A post-invoke hook is called directly after the command is
called. This makes it a useful function to clean-up database
connections or any type of clean up required.
This post-invoke hook takes a sole parameter, a :class:`.Context`.
.. note::
Similar to :meth:`~.Bot.before_invoke`\, this is not called unless
checks and argument parsing procedures succeed. This hook is,
however, **always** called regardless of the internal command
callback raising an error (i.e. :exc:`.CommandInvokeError`\).
This makes it ideal for clean-up scenarios.
Parameters
----------
coro:
The coroutine to register as the post-invoke hook.
Raises
------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError('The post-invoke hook must be a coroutine.')
self._after_invoke = coro
return coro
def add_cog(self, cog: Cog) -> None:
"""Adds a "cog" to the bot.
A cog is a class that has its own event listeners and commands.
Parameters
----------
cog: :class:`.Cog`
The cog to register to the bot.
Raises
------
TypeError
The cog does not inherit from :class:`.Cog`.
CommandError
An error happened during loading.
"""
if not isinstance(cog, Cog):
raise TypeError('Cogs must derive from Cog.')
cog = cog._inject(self)
self.__cogs[cog.__cog_name__] = cog
def remove_cog(self, name: str) -> None:
"""Removes a cog from the bot.
All registered commands and event listeners that the
cog has registered will be removed as well.
If no cog is found then this method has no effect.
Parameters
----------
name: :class:`str`
The name of the cog to remove.
"""
cog = self.__cogs.pop(name, None)
if cog is None:
return
help_command = self.help_command
if help_command and help_command.cog is cog:
help_command.cog = None
cog._eject(self)
def get_cog(self, name: str) -> Optional[Cog]:
"""Gets the cog instance requested.
If the cog is not found, ``None`` is returned instead.
Parameters
-----------
name: :class:`str`
The name of the cog you are requesting.
This is equivalent to the name passed via keyword
argument in class creation or the class name if unspecified.
"""
return self.__cogs.get(name)
@property
def cogs(self) -> Mapping[str, Cog]:
"""Mapping[:class:`str`, :class:`Cog`]: A read-only mapping of cog
name to cog.
"""
return types.MappingProxyType(self.__cogs)
def _remove_module_references(self, name: str) -> None:
# find all references to the module
# remove the cogs registered from the module
for cogname, cog in self.__cogs.copy().items():
if _is_submodule(name, cog.__module__):
self.remove_cog(cogname)
# remove all the commands from the module
for cmd in self.all_commands.copy().values():
if cmd.module is not None and _is_submodule(name, cmd.module):
if isinstance(cmd, GroupMixin):
cmd.recursively_remove_all_commands()
self.remove_command(cmd.name)
# remove all the listeners from the module
for event_list in self._events.copy().values():
remove = []
for index, event in enumerate(event_list):
if (event.__module__ is not None
and _is_submodule(name, event.__module__)):
remove.append(index)
for index in reversed(remove):
del event_list[index]
def _call_module_finalizers(self, lib: object, key: str) -> None:
try:
func = getattr(lib, 'cog_teardown')
except AttributeError:
pass
else:
try:
func(self)
except Exception:
pass
finally:
self.__extensions.pop(key, None)
sys.modules.pop(key, None)
name = lib.__name__
for module in list(sys.modules.keys()):
if _is_submodule(name, module):
del sys.modules[module]
def _load_from_module_spec(self, spec: types.ModuleType,
key: str) -> None:
# precondition: key not in self.__extensions
lib = importlib.util.module_from_spec(spec)
sys.modules[key] = lib
try:
spec.loader.exec_module(lib)
except Exception as e:
del sys.modules[key]
raise ExtensionFailed(key, e) from e
try:
setup = getattr(lib, 'extension_setup')
except AttributeError:
del sys.modules[key]
raise ExtensionMissingEntryPoint(key)
try:
setup(self)
except Exception as e:
del sys.modules[key]
self._remove_module_references(lib.__name__)
self._call_module_finalizers(lib, key)
raise ExtensionFailed(key, e) from e
else:
self.__extensions[key] = lib
def load_extension(self, name: str) -> None:
"""Loads an extension.
An extension is a python module that contains commands, cogs, or
listeners.
        An extension must define a global function, ``extension_setup``, which
        serves as the entry point called when the extension is loaded. This
        entry point must take a single argument, the ``bot``.
Parameters
----------
name: :class:`str`
The extension name to load. It must be dot separated like
regular Python imports if accessing a sub-module. e.g.
``foo.test`` if you want to import ``foo/test.py``.
Raises
------
ExtensionNotFound
The extension could not be imported.
ExtensionAlreadyLoaded
The extension is already loaded.
ExtensionMissingEntryPoint
            The extension does not have an ``extension_setup`` function.
ExtensionFailed
The extension or its setup function had an execution error.
"""
if name in self.__extensions:
raise ExtensionAlreadyLoaded(name)
spec = importlib.util.find_spec(name)
if spec is None:
raise ExtensionNotFound(name)
self._load_from_module_spec(spec, name)
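    # Illustrative sketch only: a minimal extension module satisfying the
    # contract described above. The module path and cog name are made-up
    # placeholders; the only hard requirement is the global
    # ``extension_setup(bot)`` entry point, optionally paired with a
    # ``cog_teardown(bot)`` clean-up hook (see unload_extension below):
    #
    #   # cogs/greetings.py
    #   def extension_setup(bot):
    #       bot.add_cog(GreetingsCog(bot))   # GreetingsCog is a hypothetical Cog subclass
    #
    #   def cog_teardown(bot):
    #       pass                             # optional clean-up on unload
    #
    #   # elsewhere:
    #   bot.load_extension('cogs.greetings')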
def unload_extension(self, name: str) -> None:
"""Unloads an extension.
When the extension is unloaded, all commands, listeners, and cogs are
removed from the bot and the module is un-imported.
The extension can provide an optional global function,
``cog_teardown``, to do miscellaneous clean-up if necessary. This
function takes a single parameter, the ``bot``, similar to
``extension_setup`` from :meth:`~.Bot.load_extension`.
Parameters
------------
name: :class:`str`
The extension name to unload. It must be dot separated like
regular Python imports if accessing a sub-module. e.g.
``foo.test`` if you want to import ``foo/test.py``.
Raises
-------
ExtensionNotLoaded
The extension was not loaded.
"""
lib = self.__extensions.get(name)
if lib is None:
raise ExtensionNotLoaded(name)
self._remove_module_references(lib.__name__)
self._call_module_finalizers(lib, name)
def reload_extension(self, name: str) -> None:
"""Atomically reloads an extension.
This replaces the extension with the same extension, only refreshed.
This is equivalent to a :meth:`unload_extension` followed by
a :meth:`load_extension` except done in an atomic way. That is, if an
operation fails mid-reload then the bot will roll-back to the prior
working state.
Parameters
------------
name: :class:`str`
The extension name to reload. It must be dot separated like
regular Python imports if accessing a sub-module. e.g.
``foo.test`` if you want to import ``foo/test.py``.
Raises
-------
ExtensionNotLoaded
The extension was not loaded.
ExtensionNotFound
The extension could not be imported.
ExtensionMissingEntryPoint
            The extension does not have an ``extension_setup`` function.
ExtensionFailed
The extension setup function had an execution error.
"""
lib = self.__extensions.get(name)
if lib is None:
raise ExtensionNotLoaded(name)
# get the previous module states from sys modules
modules = {
name: module
for name, module in sys.modules.items()
if _is_submodule(lib.__name__, name)
}
try:
# Unload and then load the module...
self._remove_module_references(lib.__name__)
self._call_module_finalizers(lib, name)
self.load_extension(name)
except Exception:
# if the load failed, the remnants should have been
# cleaned from the load_extension function call
# so let's load it from our old compiled library.
lib.extension_setup(self)
self.__extensions[name] = lib
# revert sys.modules back to normal and raise back to caller
sys.modules.update(modules)
raise
@property
def extensions(self) -> Mapping[str, types.ModuleType]:
"""Mapping[:class:`str`, :class:`py:types.ModuleType`]: A read-only
mapping of extension name to extension.
"""
return types.MappingProxyType(self.__extensions)
@property
def help_command(self) -> Optional[HelpCommand]:
return self._help_command
@help_command.setter
def help_command(self, value: Optional[HelpCommand]) -> None:
if value is not None:
if not isinstance(value, HelpCommand):
raise TypeError('help_command must be a subclass '
'of HelpCommand')
if self._help_command is not None:
self._help_command._remove_from_bot(self)
self._help_command = value
value._add_to_bot(self)
elif self._help_command is not None:
self._help_command._remove_from_bot(self)
self._help_command = None
else:
self._help_command = None
async def get_prefix(self, message: Message) -> Any:
"""|coro|
Retrieves the prefix the bot is listening to with the message as
a context.
Parameters
----------
message: Union[:class:`fortnitepy.FriendMessage`, :class:`fortnitepy.PartyMessage`]
The message context to get the prefix of.
Returns
--------
Union[List[:class:`str`], :class:`str`]
A list of prefixes or a single prefix that the bot is
listening for.
""" # noqa
prefix = ret = self.command_prefix
if callable(prefix):
if asyncio.iscoroutinefunction(prefix):
ret = await prefix(self, message)
else:
ret = prefix(self, message)
if not isinstance(ret, str):
try:
ret = list(ret)
except TypeError:
# It's possible that a generator raised this exception. Don't
# replace it with our own error if that's the case.
if isinstance(ret, collections.abc.Iterable):
raise
raise TypeError('command_prefix must be plain string, '
'iterable of strings, or callable '
'returning either of these, not '
'{}'.format(ret.__class__.__name__))
if not ret:
raise ValueError('Iterable command_prefix must contain at '
'least one prefix')
return ret
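    # Illustrative sketch only: ``command_prefix`` may be a plain string, an
    # iterable of strings, or an (optionally async) callable such as the
    # hypothetical one below, which is then passed when the bot is constructed:
    #
    #   def get_custom_prefix(bot, message):
    #       return ['!', '?']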
async def get_context(self, message: Message, *,
cls: Context = Context) -> Context:
r"""|coro|
Returns the invocation context from the message.
        This is a lower-level counterpart to :meth:`.process_commands`
        that allows users more fine-grained control over the processing.
The returned context is not guaranteed to be a valid invocation
context, :attr:`.Context.valid` must be checked to make sure it is.
If the context is not valid then it is not a valid candidate to be
invoked under :meth:`~.Bot.invoke`.
Parameters
----------
message: Union[:class:`fortnitepy.FriendMessage`, :class:`fortnitepy.PartyMessage`]
The message to get the invocation context from.
cls
The factory class that will be used to create the context.
By default, this is :class:`.Context`. Should a custom
class be provided, it must be similar enough to :class:`.Context`\'s
interface.
Returns
-------
:class:`.Context`
The invocation context. The type of this can change via the
``cls`` parameter.
""" # noqa
view = StringView(message.content)
ctx = cls(prefix=None, view=view, bot=self, message=message)
prefix = await self.get_prefix(message)
invoked_prefix = prefix
if isinstance(prefix, str):
if not view.skip_string(prefix):
return ctx
else:
try:
if message.content.startswith(tuple(prefix)):
for element in prefix:
if view.skip_string(element):
invoked_prefix = element
break
else:
invoked_prefix = None
else:
return ctx
except TypeError:
if not isinstance(prefix, list):
raise TypeError('get_prefix must return either a string '
'or a list of string, not '
'{}'.format(prefix.__class__.__name__))
for value in prefix:
if not isinstance(value, str):
raise TypeError('Iterable command_prefix or list '
'returned from get_prefix must '
'contain only strings, not '
'{}'.format(value.__class__.__name__))
raise
invoker = view.get_word()
ctx.invoked_with = invoker
ctx.prefix = invoked_prefix
ctx.command = self.all_commands.get(invoker)
return ctx
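    # Illustrative sketch only: a hypothetical Context subclass passed through
    # the ``cls`` factory argument (the ``created_at`` attribute is made up and
    # the sketch assumes ``import time`` is available):
    #
    #   class TimedContext(Context):
    #       def __init__(self, **attrs):
    #           super().__init__(**attrs)
    #           self.created_at = time.monotonic()
    #
    #   ctx = await bot.get_context(message, cls=TimedContext)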
def _print_error(self, ctx: Context, error: Exception) -> None:
print(
'Ignoring exception in command {}:'.format(ctx.command),
file=sys.stderr
)
traceback.print_exception(
type(error),
error,
error.__traceback__,
file=sys.stderr
)
async def wait_for_futures(self, futures: ListOrTuple, *,
check: Optional[callable] = None,
timeout: Optional[int] = None,
                               cancel: bool = False) -> Optional[asyncio.Future]:
def _cancel_futs(pending_futures: Set[asyncio.Future]) -> None:
for p in pending_futures:
if not p.cancelled():
p.cancel()
pending = futures
while pending:
done, pending = await asyncio.wait(
pending,
return_when=asyncio.FIRST_COMPLETED,
timeout=timeout
)
# Set should only contain one value
for future in done:
if check is None or check(future):
if cancel:
_cancel_futs(pending)
return future
async def _wait_for_error_return(self, futures: List[asyncio.Future],
ctx: Context,
error: Exception) -> None:
def check(future):
return future.result() is False
ret = await self.wait_for_futures(futures, check=check)
if isinstance(ret, asyncio.Future):
self._print_error(ctx, error)
def dispatch_error(self, ctx: Context, error: Exception) -> None:
if self._event_has_handler('command_error'):
futures = self.dispatch_event('command_error', ctx, error)
asyncio.ensure_future(self._wait_for_error_return(
futures,
ctx,
error
))
else:
self._print_error(ctx, error)
async def invoke(self, ctx: Context) -> None:
"""|coro|
Invokes the command given under the invocation context and
handles all the internal event dispatch mechanisms.
Parameters
-----------
ctx: :class:`.Context`
The invocation context to invoke.
"""
if ctx.command is not None:
self.dispatch_event('command', ctx)
try:
if await self.can_run(ctx, call_once=True):
await ctx.command.invoke(ctx)
else:
raise CheckFailure('The global check once functions '
'failed.')
except CommandError as exc:
await ctx.command.dispatch_error(ctx, exc)
else:
self.dispatch_event('command_completion', ctx)
elif ctx.invoked_with:
exc = CommandNotFound('Command "{}" is not found'
''.format(ctx.invoked_with))
self.dispatch_error(ctx, exc)
async def process_commands(self, message: Message) -> None:
"""|coro|
This function processes the commands that have been registered
to the bot and other groups. Without this coroutine, none of the
commands will be triggered.
By default, this coroutine is called automatically when a new
message is received.
This is built using other low level tools, and is equivalent to a
call to :meth:`~.Bot.get_context` followed by a call to
:meth:`~.Bot.invoke`.
Parameters
-----------
message: Union[:class:`fortnitepy.FriendMessage`, :class:`fortnitepy.PartyMessage`]
The message to process commands for.
""" # noqa
if message.author.id == self.user.id:
return
ctx = await self.get_context(message)
await self.invoke(ctx)
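    # Illustrative sketch only: because process_commands is just get_context
    # followed by invoke, a subclass can customize dispatching. ``MyBot`` and
    # the ``blocked_ids`` set are hypothetical, assuming the commands bot class
    # is importable as ``Bot``:
    #
    #   class MyBot(Bot):
    #       async def process_commands(self, message):
    #           if message.author.id in self.blocked_ids:
    #               return
    #           ctx = await self.get_context(message)
    #           await self.invoke(ctx)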
| 1.796875 | 2 |
LeetCodeSolutions/python/64_Minimum_Path_Sum.py | ChuanleiGuo/AlgorithmsPlayground | 1 | 5160 | <filename>LeetCodeSolutions/python/64_Minimum_Path_Sum.py
class Solution(object):
def minPathSum(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
m, n = len(grid), len(grid[0])
dp = [[0] * n for _ in range(m)]
for i in range(m):
for j in range(n):
if i == 0 and j == 0:
dp[i][j] = grid[0][0]
elif i == 0:
dp[i][j] = grid[i][j] + dp[i][j - 1]
elif j == 0:
dp[i][j] = grid[i][j] + dp[i - 1][j]
else:
dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
return dp[m - 1][n - 1]
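# A quick sanity check of the DP above (grid values chosen for illustration);
# guarded so importing this module stays side-effect free.
if __name__ == "__main__":
    grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    # Expected minimum path sum is 7 (1 -> 3 -> 1 -> 1 -> 1).
    print(Solution().minPathSum(grid))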
| 3.5 | 4 |
paths_win.py | tankbusta/rescache | 15 | 5161 | import _winreg
import os
def get_shared_cache_folder():
"""
Look in the registry for the configured cache folder.
    If there is no entry, None is returned.
:return:
"""
_winreg.aReg = _winreg.ConnectRegistry(None, _winreg.HKEY_CURRENT_USER)
try:
key = _winreg.OpenKey(_winreg.aReg, r"SOFTWARE\CCP\EVEONLINE")
path, _ = _winreg.QueryValueEx(key, "CACHEFOLDER")
except OSError:
return None
return path
def set_shared_cache_folder(folder_path):
if not os.path.isdir(folder_path):
try:
os.makedirs(folder_path)
except OSError:
raise ValueError("Could not create directory {}".format(folder_path))
folder_path = os.path.normpath(folder_path) + os.sep
key_eveonline = _winreg.CreateKey(_winreg.aReg, r"SOFTWARE\CCP\EVEONLINE")
_winreg.SetValueEx(key_eveonline, "CACHEFOLDER", 0, _winreg.REG_SZ, folder_path)
key_eveprobe = _winreg.CreateKey(_winreg.aReg, r"SOFTWARE\CCP\EVEPROBE")
_winreg.SetValueEx(key_eveprobe, "CACHEFOLDER", 0, _winreg.REG_SZ, folder_path)
def get_index_path(hint):
return hint
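# Minimal usage sketch (Windows-only, like the rest of this module); guarded so
# importing the module keeps having no side effects.
if __name__ == "__main__":
    print("Configured cache folder: %s" % get_shared_cache_folder())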
| 2.703125 | 3 |
venv/lib/python3.8/site-packages/dateparser/data/date_translation_data/ebu.py | yuta-komura/vishnu | 1 | 5162 | <reponame>yuta-komura/vishnu
# -*- coding: utf-8 -*-
info = {
"name": "ebu",
"date_order": "DMY",
"january": [
"mweri wa mbere",
"mbe"
],
"february": [
"mweri wa kaĩri",
"kai"
],
"march": [
"mweri wa kathatũ",
"kat"
],
"april": [
"mweri wa kana",
"kan"
],
"may": [
"mweri wa gatano",
"gat"
],
"june": [
"mweri wa gatantatũ",
"gan"
],
"july": [
"mweri wa mũgwanja",
"mug"
],
"august": [
"mweri wa kanana",
"knn"
],
"september": [
"mweri wa kenda",
"ken"
],
"october": [
"mweri wa ikũmi",
"iku"
],
"november": [
"mweri wa ikũmi na ũmwe",
"imw"
],
"december": [
"mweri wa ikũmi na kaĩrĩ",
"igi"
],
"monday": [
"njumatatu",
"tat"
],
"tuesday": [
"njumaine",
"ine"
],
"wednesday": [
"njumatano",
"tan"
],
"thursday": [
"aramithi",
"arm"
],
"friday": [
"njumaa",
"maa"
],
"saturday": [
"njumamothii",
"nmm"
],
"sunday": [
"kiumia",
"kma"
],
"am": [
"ki"
],
"pm": [
"ut"
],
"year": [
"mwaka"
],
"month": [
"mweri"
],
"week": [
"kiumia"
],
"day": [
"mũthenya"
],
"hour": [
"ithaa"
],
"minute": [
"ndagĩka"
],
"second": [
"sekondi"
],
"relative-type": {
"1 year ago": [
"last year"
],
"0 year ago": [
"this year"
],
"in 1 year": [
"next year"
],
"1 month ago": [
"last month"
],
"0 month ago": [
"this month"
],
"in 1 month": [
"next month"
],
"1 week ago": [
"last week"
],
"0 week ago": [
"this week"
],
"in 1 week": [
"next week"
],
"1 day ago": [
"ĩgoro"
],
"0 day ago": [
"ũmũnthĩ"
],
"in 1 day": [
"rũciũ"
],
"0 hour ago": [
"this hour"
],
"0 minute ago": [
"this minute"
],
"0 second ago": [
"now"
]
},
"locale_specific": {},
"skip": [
" ",
".",
",",
";",
"-",
"/",
"'",
"|",
"@",
"[",
"]",
","
]
}
| 1.679688 | 2 |
deepa2/preptrain/__init__.py | debatelab/deepa2 | 0 | 5163 | """Preprocessing DeepA2 datasets for LM training"""
# flake8: noqa
from deepa2.preptrain.t2tpreprocessor import T2TPreprocessor
| 1.109375 | 1 |
setup.py | martinfarrow/awspk | 0 | 5164 | #!/usr/bin/env python3
from setuptools import setup, find_packages
setup(name='awspk',
version='0.1',
description='A aws cli pen knife with loads of interested stuff',
author='<NAME>',
author_email='<EMAIL>',
py_modules=['awspk'],
license='LICENSE',
)
| 1.15625 | 1 |
pyclicker/lib/python3.7/site-packages/Xlib/display.py | JayRovacsek/pyautoclick | 1 | 5165 | # Xlib.display -- high level display object
#
# Copyright (C) 2000 <NAME> <<EMAIL>>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
# Python modules
import types
# Python 2/3 compatibility.
from six import create_unbound_method
# Xlib modules
from . import error
from . import ext
from . import X
# Xlib.protocol modules
from .protocol import display as protocol_display
from .protocol import request, event, rq
# Xlib.xobjects modules
from .xobject import resource
from .xobject import drawable
from .xobject import fontable
from .xobject import colormap
from .xobject import cursor
_resource_baseclasses = {
'resource': resource.Resource,
'drawable': drawable.Drawable,
'window': drawable.Window,
'pixmap': drawable.Pixmap,
'fontable': fontable.Fontable,
'font': fontable.Font,
'gc': fontable.GC,
'colormap': colormap.Colormap,
'cursor': cursor.Cursor,
}
_resource_hierarchy = {
'resource': ('drawable', 'window', 'pixmap',
'fontable', 'font', 'gc',
'colormap', 'cursor'),
'drawable': ('window', 'pixmap'),
'fontable': ('font', 'gc')
}
class _BaseDisplay(protocol_display.Display):
resource_classes = _resource_baseclasses.copy()
# Implement a cache of atom names, used by Window objects when
# dealing with some ICCCM properties not defined in Xlib.Xatom
def __init__(self, *args, **keys):
protocol_display.Display.__init__(self, *args, **keys)
self._atom_cache = {}
def get_atom(self, atomname, only_if_exists=0):
if atomname in self._atom_cache:
return self._atom_cache[atomname]
r = request.InternAtom(display = self, name = atomname, only_if_exists = only_if_exists)
# don't cache NONE responses in case someone creates this later
if r.atom != X.NONE:
self._atom_cache[atomname] = r.atom
return r.atom
class Display(object):
def __init__(self, display = None):
self.display = _BaseDisplay(display)
# Create the keymap cache
self._keymap_codes = [()] * 256
self._keymap_syms = {}
self._update_keymap(self.display.info.min_keycode,
(self.display.info.max_keycode
- self.display.info.min_keycode + 1))
# Translations for keysyms to strings.
self.keysym_translations = {}
# Find all supported extensions
self.extensions = []
self.class_extension_dicts = {}
self.display_extension_methods = {}
# a dict that maps the event name to the code
# or, when it's an event with a subcode, to a tuple of (event,subcode)
# note this wraps the dict so you address it as
# extension_event.EXTENSION_EVENT_NAME rather than
# extension_event["EXTENSION_EVENT_NAME"]
self.extension_event = rq.DictWrapper({})
exts = self.list_extensions()
# Go through all extension modules
for extname, modname in ext.__extensions__:
if extname in exts:
# Import the module and fetch it
__import__('Xlib.ext.' + modname)
mod = getattr(ext, modname)
info = self.query_extension(extname)
self.display.set_extension_major(extname, info.major_opcode)
# Call initialiasation function
mod.init(self, info)
self.extensions.append(extname)
# Finalize extensions by creating new classes
for class_name, dictionary in self.class_extension_dicts.items():
origcls = self.display.resource_classes[class_name]
self.display.resource_classes[class_name] = type(origcls.__name__,
(origcls,),
dictionary)
# Problem: we have already created some objects without the
# extensions: the screen roots and default colormaps.
# Fix that by reinstantiating them.
for screen in self.display.info.roots:
screen.root = self.display.resource_classes['window'](self.display, screen.root.id)
screen.default_colormap = self.display.resource_classes['colormap'](self.display, screen.default_colormap.id)
def get_display_name(self):
"""Returns the name used to connect to the server, either
provided when creating the Display object, or fetched from the
        environment variable $DISPLAY."""
return self.display.get_display_name()
def fileno(self):
"""Returns the file descriptor number of the underlying socket.
        This method is provided to allow Display objects to be passed to
select.select()."""
return self.display.fileno()
def close(self):
"""Close the display, freeing the resources that it holds."""
self.display.close()
def set_error_handler(self, handler):
"""Set the default error handler which will be called for all
unhandled errors. handler should take two arguments as a normal
request error handler, but the second argument (the request) will
be None. See section Error Handling."""
self.display.set_error_handler(handler)
def flush(self):
"""Flush the request queue, building and sending the queued
requests. This can be necessary in applications that never wait
for events, and in threaded applications."""
self.display.flush()
def sync(self):
"""Flush the queue and wait until the server has processed all
the queued requests. Use this e.g. when it is important that
        errors caused by a certain request are trapped."""
# Do a light-weight replyrequest to sync. There must
# be a better way to do it...
self.get_pointer_control()
def next_event(self):
"""Return the next event. If there are no events queued, it will
block until the next event is fetched from the server."""
return self.display.next_event()
def pending_events(self):
"""Return the number of events queued, i.e. the number of times
that Display.next_event() can be called without blocking."""
return self.display.pending_events()
def has_extension(self, extension):
"""Check if both the server and the client library support the X
extension named extension."""
return extension in self.extensions
def create_resource_object(self, type, id):
"""Create a resource object of type for the integer id. type
should be one of the following strings:
resource
drawable
window
pixmap
fontable
font
gc
colormap
cursor
This function can be used when a resource ID has been fetched
        e.g. from a resource or a command line argument. Resource
objects should never be created by instantiating the appropriate
class directly, since any X extensions dynamically added by the
library will not be available.
"""
return self.display.resource_classes[type](self.display, id)
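        # Illustrative sketch only -- the numeric window ID below is a made-up
        # example of an ID received from another client or a command line:
        #
        #   d = Display()
        #   win = d.create_resource_object('window', 0x2a00005)
        #   print(win.get_wm_name())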
# We need this to handle display extension methods
def __getattr__(self, attr):
try:
function = self.display_extension_methods[attr]
return types.MethodType(function, self)
except KeyError:
raise AttributeError(attr)
###
### display information retrieval
###
def screen(self, sno = None):
if sno is None:
return self.display.info.roots[self.display.default_screen]
else:
return self.display.info.roots[sno]
def screen_count(self):
"""Return the total number of screens on the display."""
return len(self.display.info.roots)
def get_default_screen(self):
"""Return the number of the default screen, extracted from the
display name."""
return self.display.get_default_screen()
###
### Extension module interface
###
def extension_add_method(self, object, name, function):
"""extension_add_method(object, name, function)
Add an X extension module method. OBJECT is the type of
object to add the function to, a string from this list:
display
resource
drawable
window
pixmap
fontable
font
gc
colormap
cursor
NAME is the name of the method, a string. FUNCTION is a
normal function whose first argument is a 'self'.
"""
if object == 'display':
if hasattr(self, name):
raise AssertionError('attempting to replace display method: %s' % name)
self.display_extension_methods[name] = function
else:
class_list = (object, ) + _resource_hierarchy.get(object, ())
for class_name in class_list:
cls = _resource_baseclasses[class_name]
if hasattr(cls, name):
raise AssertionError('attempting to replace %s method: %s' % (class_name, name))
method = create_unbound_method(function, cls)
# Maybe should check extension overrides too
try:
self.class_extension_dicts[class_name][name] = method
except KeyError:
self.class_extension_dicts[class_name] = { name: method }
def extension_add_event(self, code, evt, name = None):
"""extension_add_event(code, evt, [name])
Add an extension event. CODE is the numeric code, and EVT is
the event class. EVT will be cloned, and the attribute _code
of the new event class will be set to CODE.
If NAME is omitted, it will be set to the name of EVT. This
name is used to insert an entry in the DictWrapper
extension_event.
"""
newevt = type(evt.__name__, evt.__bases__,
evt.__dict__.copy())
newevt._code = code
self.display.add_extension_event(code, newevt)
if name is None:
name = evt.__name__
setattr(self.extension_event, name, code)
def extension_add_subevent(self, code, subcode, evt, name = None):
"""extension_add_subevent(code, evt, [name])
Add an extension subevent. CODE is the numeric code, subcode
is the sub-ID of this event that shares the code ID with other
sub-events and EVT is the event class. EVT will be cloned, and
the attribute _code of the new event class will be set to CODE.
If NAME is omitted, it will be set to the name of EVT. This
name is used to insert an entry in the DictWrapper
extension_event.
"""
newevt = type(evt.__name__, evt.__bases__,
evt.__dict__.copy())
newevt._code = code
self.display.add_extension_event(code, newevt, subcode)
if name is None:
name = evt.__name__
# store subcodes as a tuple of (event code, subcode) in the
# extension dict maintained in the display object
setattr(self.extension_event, name, (code,subcode))
def add_extension_error(self, code, err):
"""add_extension_error(code, err)
Add an extension error. CODE is the numeric code, and ERR is
the error class.
"""
self.display.add_extension_error(code, err)
###
### keymap cache implementation
###
# The keycode->keysym map is stored in a list with 256 elements.
# Each element represents a keycode, and the tuple elements are
# the keysyms bound to the key.
# The keysym->keycode map is stored in a mapping, where the keys
# are keysyms. The values are a sorted list of tuples with two
# elements each: (index, keycode)
# keycode is the code for a key to which this keysym is bound, and
# index is the keysyms index in the map for that keycode.
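    # For example (hypothetical keycode/keysym values), a key producing 'a'/'A'
    # could be represented as:
    #   self._keymap_codes[38] == (97, 65, 97, 65)   # XK_a, XK_A, ...
    #   self._keymap_syms[97] == [(0, 38)]           # keysym 'a': index 0 on keycode 38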
def keycode_to_keysym(self, keycode, index):
"""Convert a keycode to a keysym, looking in entry index.
Normally index 0 is unshifted, 1 is shifted, 2 is alt grid, and 3
is shift+alt grid. If that key entry is not bound, X.NoSymbol is
returned."""
try:
return self._keymap_codes[keycode][index]
except IndexError:
return X.NoSymbol
def keysym_to_keycode(self, keysym):
"""Look up the primary keycode that is bound to keysym. If
several keycodes are found, the one with the lowest index and
lowest code is returned. If keysym is not bound to any key, 0 is
returned."""
try:
return self._keymap_syms[keysym][0][1]
except (KeyError, IndexError):
return 0
def keysym_to_keycodes(self, keysym):
"""Look up all the keycodes that is bound to keysym. A list of
tuples (keycode, index) is returned, sorted primarily on the
lowest index and secondarily on the lowest keycode."""
try:
# Copy the map list, reversing the arguments
return map(lambda x: (x[1], x[0]), self._keymap_syms[keysym])
except KeyError:
return []
def refresh_keyboard_mapping(self, evt):
"""This method should be called once when a MappingNotify event
is received, to update the keymap cache. evt should be the event
object."""
if isinstance(evt, event.MappingNotify):
if evt.request == X.MappingKeyboard:
self._update_keymap(evt.first_keycode, evt.count)
else:
raise TypeError('expected a MappingNotify event')
def _update_keymap(self, first_keycode, count):
"""Internal function, called to refresh the keymap cache.
"""
# Delete all sym->code maps for the changed codes
lastcode = first_keycode + count
for keysym, codes in self._keymap_syms.items():
i = 0
while i < len(codes):
code = codes[i][1]
if code >= first_keycode and code < lastcode:
del codes[i]
else:
i = i + 1
# Get the new keyboard mapping
keysyms = self.get_keyboard_mapping(first_keycode, count)
# Replace code->sym map with the new map
self._keymap_codes[first_keycode:lastcode] = keysyms
# Update sym->code map
code = first_keycode
for syms in keysyms:
index = 0
for sym in syms:
if sym != X.NoSymbol:
if sym in self._keymap_syms:
symcodes = self._keymap_syms[sym]
symcodes.append((index, code))
symcodes.sort()
else:
self._keymap_syms[sym] = [(index, code)]
index = index + 1
code = code + 1
###
### client-internal keysym to string translations
###
def lookup_string(self, keysym):
"""Return a string corresponding to KEYSYM, or None if no
reasonable translation is found.
"""
s = self.keysym_translations.get(keysym)
if s is not None:
return s
import Xlib.XK
return Xlib.XK.keysym_to_string(keysym)
def rebind_string(self, keysym, newstring):
"""Change the translation of KEYSYM to NEWSTRING.
If NEWSTRING is None, remove old translation if any.
"""
if newstring is None:
try:
del self.keysym_translations[keysym]
except KeyError:
pass
else:
self.keysym_translations[keysym] = newstring
###
### X requests
###
def intern_atom(self, name, only_if_exists = 0):
"""Intern the string name, returning its atom number. If
only_if_exists is true and the atom does not already exist, it
will not be created and X.NONE is returned."""
r = request.InternAtom(display = self.display,
name = name,
only_if_exists = only_if_exists)
return r.atom
def get_atom(self, atom, only_if_exists = 0):
"""Alias for intern_atom, using internal cache"""
return self.display.get_atom(atom, only_if_exists)
def get_atom_name(self, atom):
"""Look up the name of atom, returning it as a string. Will raise
BadAtom if atom does not exist."""
r = request.GetAtomName(display = self.display,
atom = atom)
return r.name
def get_selection_owner(self, selection):
"""Return the window that owns selection (an atom), or X.NONE if
there is no owner for the selection. Can raise BadAtom."""
r = request.GetSelectionOwner(display = self.display,
selection = selection)
return r.owner
def send_event(self, destination, event, event_mask = 0, propagate = 0,
onerror = None):
"""Send a synthetic event to the window destination which can be
a window object, or X.PointerWindow or X.InputFocus. event is the
event object to send, instantiated from one of the classes in
protocol.events. See XSendEvent(3X11) for details.
There is also a Window.send_event() method."""
request.SendEvent(display = self.display,
onerror = onerror,
propagate = propagate,
destination = destination,
event_mask = event_mask,
event = event)
def ungrab_pointer(self, time, onerror = None):
"""elease a grabbed pointer and any queued events. See
XUngrabPointer(3X11)."""
request.UngrabPointer(display = self.display,
onerror = onerror,
time = time)
def change_active_pointer_grab(self, event_mask, cursor, time, onerror = None):
"""Change the dynamic parameters of a pointer grab. See
XChangeActivePointerGrab(3X11)."""
request.ChangeActivePointerGrab(display = self.display,
onerror = onerror,
cursor = cursor,
time = time,
event_mask = event_mask)
def ungrab_keyboard(self, time, onerror = None):
"""Ungrab a grabbed keyboard and any queued events. See
XUngrabKeyboard(3X11)."""
request.UngrabKeyboard(display = self.display,
onerror = onerror,
time = time)
def allow_events(self, mode, time, onerror = None):
"""Release some queued events. mode should be one of
X.AsyncPointer, X.SyncPointer, X.AsyncKeyboard, X.SyncKeyboard,
X.ReplayPointer, X.ReplayKeyboard, X.AsyncBoth, or X.SyncBoth.
time should be a timestamp or X.CurrentTime."""
request.AllowEvents(display = self.display,
onerror = onerror,
mode = mode,
time = time)
def grab_server(self, onerror = None):
"""Disable processing of requests on all other client connections
until the server is ungrabbed. Server grabbing should be avoided
as much as possible."""
request.GrabServer(display = self.display,
onerror = onerror)
def ungrab_server(self, onerror = None):
"""Release the server if it was previously grabbed by this client."""
request.UngrabServer(display = self.display,
onerror = onerror)
def warp_pointer(self, x, y, src_window = X.NONE, src_x = 0, src_y = 0,
src_width = 0, src_height = 0, onerror = None):
"""Move the pointer relative its current position by the offsets
(x, y). However, if src_window is a window the pointer is only
moved if the specified rectangle in src_window contains it. If
src_width is 0 it will be replaced with the width of src_window -
src_x. src_height is treated in a similar way.
To move the pointer to absolute coordinates, use Window.warp_pointer()."""
request.WarpPointer(display = self.display,
onerror = onerror,
src_window = src_window,
dst_window = X.NONE,
src_x = src_x,
src_y = src_y,
src_width = src_width,
src_height = src_height,
dst_x = x,
dst_y = y)
def set_input_focus(self, focus, revert_to, time, onerror = None):
"""Set input focus to focus, which should be a window,
X.PointerRoot or X.NONE. revert_to specifies where the focus
reverts to if the focused window becomes not visible, and should
be X.RevertToParent, RevertToPointerRoot, or RevertToNone. See
XSetInputFocus(3X11) for details.
There is also a Window.set_input_focus()."""
request.SetInputFocus(display = self.display,
onerror = onerror,
revert_to = revert_to,
focus = focus,
time = time)
def get_input_focus(self):
"""Return an object with the following attributes:
focus
The window which currently holds the input
focus, X.NONE or X.PointerRoot.
revert_to
Where the focus will revert, one of X.RevertToParent,
RevertToPointerRoot, or RevertToNone. """
return request.GetInputFocus(display = self.display)
def query_keymap(self):
"""Return a bit vector for the logical state of the keyboard,
where each bit set to 1 indicates that the corresponding key is
currently pressed down. The vector is represented as a list of 32
integers. List item N contains the bits for keys 8N to 8N + 7
with the least significant bit in the byte representing key 8N."""
r = request.QueryKeymap(display = self.display)
return r.map
def open_font(self, name):
"""Open the font identifed by the pattern name and return its
font object. If name does not match any font, None is returned."""
fid = self.display.allocate_resource_id()
ec = error.CatchError(error.BadName)
request.OpenFont(display = self.display,
onerror = ec,
fid = fid,
name = name)
self.sync()
if ec.get_error():
self.display.free_resource_id(fid)
return None
else:
cls = self.display.get_resource_class('font', fontable.Font)
return cls(self.display, fid, owner = 1)
def list_fonts(self, pattern, max_names):
"""Return a list of font names matching pattern. No more than
max_names will be returned."""
r = request.ListFonts(display = self.display,
max_names = max_names,
pattern = pattern)
return r.fonts
def list_fonts_with_info(self, pattern, max_names):
"""Return a list of fonts matching pattern. No more than
max_names will be returned. Each list item represents one font
and has the following properties:
name
The name of the font.
min_bounds
max_bounds
min_char_or_byte2
max_char_or_byte2
default_char
draw_direction
min_byte1
max_byte1
all_chars_exist
font_ascent
font_descent
replies_hint
            See the description of XFontStruct in XGetFontProperty(3X11)
for details on these values.
properties
A list of properties. Each entry has two attributes:
name
The atom identifying this property.
value
A 32-bit unsigned value.
"""
return request.ListFontsWithInfo(display = self.display,
max_names = max_names,
pattern = pattern)
def set_font_path(self, path, onerror = None):
"""Set the font path to path, which should be a list of strings.
If path is empty, the default font path of the server will be
restored."""
request.SetFontPath(display = self.display,
onerror = onerror,
path = path)
def get_font_path(self):
"""Return the current font path as a list of strings."""
r = request.GetFontPath(display = self.display)
return r.paths
def query_extension(self, name):
"""Ask the server if it supports the extension name. If it is
supported an object with the following attributes is returned:
major_opcode
            The major opcode that the requests of this extension use.
first_event
            The base event code if the extension has additional events, or 0.
first_error
            The base error code if the extension has additional errors, or 0.
If the extension is not supported, None is returned."""
r = request.QueryExtension(display = self.display,
name = name)
if r.present:
return r
else:
return None
def list_extensions(self):
"""Return a list of all the extensions provided by the server."""
r = request.ListExtensions(display = self.display)
return r.names
def change_keyboard_mapping(self, first_keycode, keysyms, onerror = None):
"""Modify the keyboard mapping, starting with first_keycode.
keysyms is a list of tuples of keysyms. keysyms[n][i] will be
assigned to keycode first_keycode+n at index i."""
request.ChangeKeyboardMapping(display = self.display,
onerror = onerror,
first_keycode = first_keycode,
keysyms = keysyms)
def get_keyboard_mapping(self, first_keycode, count):
"""Return the current keyboard mapping as a list of tuples,
        starting at first_keycode and returning no more than count entries."""
r = request.GetKeyboardMapping(display = self.display,
first_keycode = first_keycode,
count = count)
return r.keysyms
def change_keyboard_control(self, onerror = None, **keys):
"""Change the parameters provided as keyword arguments:
key_click_percent
            The volume of key clicks between 0 (off) and 100 (loud).
-1 will restore default setting.
bell_percent
The base volume of the bell, coded as above.
bell_pitch
The pitch of the bell in Hz, -1 restores the default.
bell_duration
The duration of the bell in milliseconds, -1 restores
the default.
led
led_mode
led_mode should be X.LedModeOff or X.LedModeOn. If led is
provided, it should be a 32-bit mask listing the LEDs that
should change. If led is not provided, all LEDs are changed.
key
auto_repeat_mode
auto_repeat_mode should be one of X.AutoRepeatModeOff,
X.AutoRepeatModeOn, or X.AutoRepeatModeDefault. If key is
provided, that key will be modified, otherwise the global
state for the entire keyboard will be modified."""
request.ChangeKeyboardControl(display = self.display,
onerror = onerror,
attrs = keys)
def get_keyboard_control(self):
"""Return an object with the following attributes:
global_auto_repeat
X.AutoRepeatModeOn or X.AutoRepeatModeOff.
auto_repeats
A list of 32 integers. List item N contains the bits for keys
8N to 8N + 7 with the least significant bit in the byte
representing key 8N. If a bit is on, autorepeat is enabled
for the corresponding key.
led_mask
A 32-bit mask indicating which LEDs are on.
key_click_percent
The volume of key click, from 0 to 100.
bell_percent
bell_pitch
bell_duration
The volume, pitch and duration of the bell. """
return request.GetKeyboardControl(display = self.display)
def bell(self, percent = 0, onerror = None):
"""Ring the bell at the volume percent which is relative the base
volume. See XBell(3X11)."""
request.Bell(display = self.display,
onerror = onerror,
percent = percent)
def change_pointer_control(self, accel = None, threshold = None, onerror = None):
"""To change the pointer acceleration, set accel to a tuple (num,
denum). The pointer will then move num/denum times the normal
speed if it moves beyond the threshold number of pixels at once.
To change the threshold, set it to the number of pixels. -1
restores the default."""
if accel is None:
do_accel = 0
accel_num = 0
accel_denum = 0
else:
do_accel = 1
accel_num, accel_denum = accel
if threshold is None:
do_threshold = 0
else:
do_threshold = 1
request.ChangePointerControl(display = self.display,
onerror = onerror,
do_accel = do_accel,
do_thres = do_threshold,
accel_num = accel_num,
accel_denum = accel_denum,
threshold = threshold)
def get_pointer_control(self):
"""Return an object with the following attributes:
accel_num
accel_denom
The acceleration as numerator/denumerator.
threshold
The number of pixels the pointer must move before the
acceleration kicks in."""
return request.GetPointerControl(display = self.display)
def set_screen_saver(self, timeout, interval, prefer_blank, allow_exposures, onerror = None):
"""See XSetScreenSaver(3X11)."""
request.SetScreenSaver(display = self.display,
onerror = onerror,
timeout = timeout,
interval = interval,
prefer_blank = prefer_blank,
allow_exposures = allow_exposures)
def get_screen_saver(self):
"""Return an object with the attributes timeout, interval,
prefer_blanking, allow_exposures. See XGetScreenSaver(3X11) for
details."""
return request.GetScreenSaver(display = self.display)
def change_hosts(self, mode, host_family, host, onerror = None):
"""mode is either X.HostInsert or X.HostDelete. host_family is
one of X.FamilyInternet, X.FamilyDECnet or X.FamilyChaos.
host is a list of bytes. For the Internet family, it should be the
four bytes of an IPv4 address."""
request.ChangeHosts(display = self.display,
onerror = onerror,
mode = mode,
host_family = host_family,
host = host)
def list_hosts(self):
"""Return an object with the following attributes:
mode
X.EnableAccess if the access control list is used, X.DisableAccess otherwise.
hosts
The hosts on the access list. Each entry has the following attributes:
family
X.FamilyInternet, X.FamilyDECnet, or X.FamilyChaos.
name
A list of byte values, the coding depends on family. For the Internet family, it is the 4 bytes of an IPv4 address.
"""
return request.ListHosts(display = self.display)
def set_access_control(self, mode, onerror = None):
"""Enable use of access control lists at connection setup if mode
is X.EnableAccess, disable if it is X.DisableAccess."""
request.SetAccessControl(display = self.display,
onerror = onerror,
mode = mode)
def set_close_down_mode(self, mode, onerror = None):
"""Control what will happen with the client's resources at
connection close. The default is X.DestroyAll, the other values
are X.RetainPermanent and X.RetainTemporary."""
request.SetCloseDownMode(display = self.display,
onerror = onerror,
mode = mode)
def force_screen_saver(self, mode, onerror = None):
"""If mode is X.ScreenSaverActive the screen saver is activated.
If it is X.ScreenSaverReset, the screen saver is deactivated as
if device input had been received."""
request.ForceScreenSaver(display = self.display,
onerror = onerror,
mode = mode)
def set_pointer_mapping(self, map):
"""Set the mapping of the pointer buttons. map is a list of
logical button numbers. map must be of the same length as the
list returned by Display.get_pointer_mapping().
map[n] sets the
logical number for the physical button n+1. Logical number 0
disables the button. Two physical buttons cannot be mapped to the
same logical number.
If one of the buttons to be altered are
logically in the down state, X.MappingBusy is returned and the
mapping is not changed. Otherwise the mapping is changed and
X.MappingSuccess is returned."""
r = request.SetPointerMapping(display = self.display,
map = map)
return r.status
def get_pointer_mapping(self):
"""Return a list of the pointer button mappings. Entry N in the
list sets the logical button number for the physical button N+1."""
r = request.GetPointerMapping(display = self.display)
return r.map
def set_modifier_mapping(self, keycodes):
"""Set the keycodes for the eight modifiers X.Shift, X.Lock,
X.Control, X.Mod1, X.Mod2, X.Mod3, X.Mod4 and X.Mod5. keycodes
should be a eight-element list where each entry is a list of the
keycodes that should be bound to that modifier.
If any changed
key is logically in the down state, X.MappingBusy is returned and
the mapping is not changed. If the mapping violates some server
restriction, X.MappingFailed is returned. Otherwise the mapping
is changed and X.MappingSuccess is returned."""
r = request.SetModifierMapping(display = self.display,
keycodes = keycodes)
return r.status
def get_modifier_mapping(self):
"""Return a list of eight lists, one for each modifier. The list
can be indexed using X.ShiftMapIndex, X.Mod1MapIndex, and so on.
The sublists list the keycodes bound to that modifier."""
r = request.GetModifierMapping(display = self.display)
return r.keycodes
def no_operation(self, onerror = None):
"""Do nothing but send a request to the server."""
request.NoOperation(display = self.display,
onerror = onerror)
| 1.726563 | 2 |
Others/qupc/qupc2014/c/main.py | KATO-Hiro/AtCoder | 2 | 5166 | <reponame>KATO-Hiro/AtCoder<filename>Others/qupc/qupc2014/c/main.py
# -*- coding: utf-8 -*-
def main():
from string import ascii_uppercase
n, m, q_large = map(int, input().split())
s = [list(input()) for _ in range(n)]
q = [input() for _ in range(q_large)]
pos = [None for _ in range(26)]
for i in range(n):
for j in range(m):
sij = s[i][j]
if sij != "*":
index = ascii_uppercase.index(sij)
pos[index] = (i + 1, j + 1)
for qi in q:
index = ascii_uppercase.index(qi)
p = pos[index]
if p is None:
print("NA")
else:
print(p[0], p[1])
if __name__ == "__main__":
main()
| 3.0625 | 3 |
NetCatKS/DProtocol/api/interfaces/subscribers/__init__.py | dimddev/NetCatKS-CP | 0 | 5167 | <filename>NetCatKS/DProtocol/api/interfaces/subscribers/__init__.py<gh_stars>0
__author__ = 'dimd'
from zope.interface import Interface, Attribute
class IBaseResourceSubscriber(Interface):
"""
IBaseResourceSubscriber provides functionality for comparison of the signature on
    an incoming request against a candidate DProtocol implementation registered as
    IJSONResource.
    The `adapter` is the first argument of the constructor. It is used as part of the adapter pattern
    and has to be of type IJSONResource.
    The `protocol` attribute is designed to be provided by classes which implement IJSONResourceSubscriber,
    or inherit from DProtocolSubscriber. If a subclass does not provide the protocol attribute, an
    AttributeError will be raised.
"""
adapter = Attribute("The implementer have to provide implementation of IJSONResource")
protocol = Attribute("DProtocol instance")
def compare():
"""
        Designed to compare the adapter and the DProtocol signature
        and determine whether the signatures are equal
"""
class IJSONResourceSubscriber(Interface):
"""
"""
class IXMLResourceSubscriber(Interface):
"""
""" | 2.484375 | 2 |
analysis/notebooks/helper/anova.py | dpedrosac/DBSgait | 1 | 5168 | <reponame>dpedrosac/DBSgait<gh_stars>1-10
import numpy as np
import pandas as pd
from scipy.stats import f_oneway
from typing import Dict, Tuple, Set
def extract_significant_p(df: pd.DataFrame, p_value_limit: float):
"""Return a df, which replaces values that are above p_value_limit with `None`"""
return (
df.loc(axis=1)[f"p-value"]
.where(df[f"p-value"] < p_value_limit)
.dropna(axis=0, how="all")
)
def _calculate_anova(data: pd.DataFrame) -> Tuple:
"""Calculate one-way anova using each column as a different measurement."""
parameter = [column for column in data.columns if column != "configuration"][0]
data_ = [
data[data["configuration"] == configuration][parameter].T.to_numpy()
for configuration in set(data["configuration"])
]
return f_oneway(*data_)
def anova(
dataset: Dict, gait_test: str, gait_parameter: str
) -> Tuple[pd.DataFrame, Set]:
"""Calculat a one-way anova for a single gait test and gait parameter.
Parameters
----------
dataset
A dictionary, where the keys are descriptions for different subjects. The values are dataframes, which have a
pd.MultiIndex as columns. The first level describes the test paradigm, e.g. "slow" / "fast". The second level
        describes the DBS configuration, e.g. "130", "100", "OFF". The third level is the gait parameter,
e.g. stride length.
gait_test
Used to select the first level of the columns
gait_parameter
        Used to select the third level of the columns
Returns
-------
    anova_df
        A dataframe with one row per subject from `dataset` and one p-value column per foot. The values are
        one-way anova p-values comparing `gait_parameter` across all DBS configurations (including OFF) for
        this specific `gait_test`
    not_evaluated
        A set of strings describing subject/foot/configuration combinations that could not be evaluated
"""
anova_dict = {}
anova_df = pd.DataFrame()
not_evaluated = []
for patient, patient_data in dataset.items():
anova_dict[patient] = {"LeftFoot": (None, None), "RightFoot": (None, None)}
for foot in set(patient_data["foot"]):
missing_condition = None
foot_data = patient_data[
(patient_data["foot"] == foot) & (patient_data["test"] == gait_test)
][[gait_parameter, "configuration"]]
possible_configurations = {
"030",
"033",
"040",
"066",
"085",
"090",
"100",
"130",
"OFF",
}
actual_configurations = set(foot_data["configuration"])
missing_configurations = possible_configurations - actual_configurations
if missing_configurations:
not_evaluated.append(
" ".join([gait_test, patient, *missing_configurations, foot])
)
if len(missing_configurations) > (len(possible_configurations) - 2):
print(
"Not evaluating this foot, because to few configurations available."
)
continue
# print(set(foot_data.columns) - set(foot_data_valid.columns))
anova_dict[patient][foot] = _calculate_anova(foot_data)
row = pd.DataFrame(
index=[patient],
columns=pd.MultiIndex.from_arrays(
[["p-value"] * 2, ["LeftFoot", "RightFoot"]]
),
data=[
[
anova_dict[patient]["LeftFoot"][1],
anova_dict[patient]["RightFoot"][1],
]
],
)
anova_df = pd.concat([anova_df, row])
return anova_df, set(not_evaluated)
def conclude_results(
all_results: pd.DataFrame,
p_value_limit: float
) -> pd.DataFrame:
anova_overview = pd.DataFrame()
significant_results = {}
for gait_parameter in all_results.keys():
significant_results[gait_parameter] = extract_significant_p(
all_results[gait_parameter], p_value_limit=p_value_limit
)
data = [
len(all_results[gait_parameter]),
len(significant_results[gait_parameter]),
significant_results[gait_parameter].count().sum(),
]
columns = ["n_patients", "n_patients_significant", "n_feet_significant"]
anova_overview = pd.concat(
[
anova_overview,
pd.DataFrame(data=[data], columns=columns, index=[gait_parameter]),
]
)
return anova_overview
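# Minimal synthetic-data sketch of how `anova` can be called. Column names
# follow the function body above; the patient label, gait parameter name and
# the normal-distribution parameters are made up for illustration. Guarded so
# importing this helper stays side-effect free.
if __name__ == "__main__":
    np.random.seed(0)
    rows = []
    for configuration in ("OFF", "130", "100"):
        for foot in ("LeftFoot", "RightFoot"):
            for _ in range(10):
                rows.append(
                    {
                        "configuration": configuration,
                        "foot": foot,
                        "test": "slow",
                        "stride_length": np.random.normal(1.2, 0.1),
                    }
                )
    dataset = {"patient_01": pd.DataFrame(rows)}
    result_df, skipped = anova(dataset, gait_test="slow", gait_parameter="stride_length")
    print(result_df)
    print(extract_significant_p(result_df, p_value_limit=0.05))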
| 2.953125 | 3 |
bux_recorder/utils.py | roaldarbol/bux | 0 | 5169 | <gh_stars>0
import io
import os
import platform
import time
import csv
import serial
import cv2
import tkinter as tk
from tkinter.filedialog import askdirectory
from serial.tools import list_ports
# From https://raspberrypi.stackexchange.com/a/118473
def is_raspberrypi():
try:
with io.open('/sys/firmware/devicetree/base/model', 'r') as m:
if 'raspberry pi' in m.read().lower():
return(m)
except Exception:
pass
return False
def get_platform():
return platform.system()
def get_gui_coordinates(root, w, h):
# get screen width and height
ws = root.winfo_screenwidth() # width of the screen
hs = root.winfo_screenheight() # height of the screen
# calculate x and y coordinates for the Tk root window
x = (ws/2) - (w/2)
y = (hs/2) - (h/2)
return(w,h,x,y)
def handle_focus_in(button):
full_name_entry.delete(0, tk.END)
full_name_entry.config(fg='black')
def handle_focus_out(button):
full_name_entry.delete(0, tk.END)
full_name_entry.config(fg='grey')
full_name_entry.insert(0, "Example: <NAME>")
def hover(button, enter, message):
if message == "":
return
else:
button.configure(text=message)
def list_ports():
"""
Test the ports and returns a tuple with the available ports and the ones that are working.
"""
non_working_ports = []
dev_port = 0
working_ports = []
available_ports = []
while len(non_working_ports) < 6: # if there are more than 5 non working ports stop the testing.
camera = cv2.VideoCapture(dev_port)
if not camera.isOpened():
non_working_ports.append(dev_port)
# print("Port %s is not working." %dev_port)
else:
is_reading, img = camera.read()
w = camera.get(3)
h = camera.get(4)
if is_reading:
# print("Port %s is working and reads images (%s x %s)" %(dev_port,h,w))
working_ports.append(dev_port)
else:
# print("Port %s for camera ( %s x %s) is present but does not reads." %(dev_port,h,w))
available_ports.append(dev_port)
dev_port +=1
return available_ports,working_ports,non_working_ports | 2.953125 | 3 |
a2e/optimizer/hpbandster/_model_worker.py | maechler/a2e | 1 | 5170 | <filename>a2e/optimizer/hpbandster/_model_worker.py
from hpbandster.core.worker import Worker
from a2e.model import AbstractModel
from a2e.optimizer import EvaluationResultAggregator
from a2e.utility import inf_nan_to_float_max
class ModelWorker(Worker):
def __init__(
self,
model: AbstractModel,
evaluation_result_aggregator: EvaluationResultAggregator,
x_train,
y_train,
x_valid,
y_valid,
run_id,
nameserver=None,
nameserver_port=None,
logger=None,
host=None,
id=None,
timeout=None,
):
super().__init__(run_id, nameserver=nameserver, nameserver_port=nameserver_port, logger=logger, host=host, id=id, timeout=timeout)
self.model = model
self.evaluation_result_aggregator = evaluation_result_aggregator
self.x_train = x_train
self.y_train = y_train
self.x_valid = x_valid
self.y_valid = y_valid
def compute(self, config, budget, working_directory, **kwargs):
iteration, stage, actual_num_config = kwargs['config_id']
self.model.load_config(config, budget=budget, **kwargs)
evaluation_result = self.model.evaluate(
self.x_train,
self.y_train,
self.x_valid,
self.y_valid,
budget,
)
evaluation_result.add_info('iteration', iteration)
evaluation_result.add_info('stage', stage)
evaluation_result.add_info('actual_num_config', actual_num_config)
self.evaluation_result_aggregator.add_evaluation_result(evaluation_result)
return {
'loss': inf_nan_to_float_max(evaluation_result.cost),
'info': evaluation_result.info,
}
| 2.203125 | 2 |
xagents/__init__.py | schissmantics/xagents | 37 | 5171 | from xagents import a2c, acer, ddpg, dqn, ppo, td3, trpo
from xagents.a2c.agent import A2C
from xagents.acer.agent import ACER
from xagents.base import OffPolicy
from xagents.ddpg.agent import DDPG
from xagents.dqn.agent import DQN
from xagents.ppo.agent import PPO
from xagents.td3.agent import TD3
from xagents.trpo.agent import TRPO
from xagents.utils.cli import play_args, train_args, tune_args
from xagents.utils.common import register_models
__author__ = 'schissmantics'
__email__ = '<EMAIL>'
__license__ = 'MIT'
__version__ = '1.0.1'
agents = {
'a2c': {'module': a2c, 'agent': A2C},
'acer': {'module': acer, 'agent': ACER},
'dqn': {'module': dqn, 'agent': DQN},
'ppo': {'module': ppo, 'agent': PPO},
'td3': {'module': td3, 'agent': TD3},
'trpo': {'module': trpo, 'agent': TRPO},
'ddpg': {'module': ddpg, 'agent': DDPG},
}
register_models(agents)
commands = {
'train': (train_args, 'fit', 'Train given an agent and environment'),
'play': (
play_args,
'play',
'Play a game given a trained agent and environment',
),
'tune': (
tune_args,
'',
'Tune hyperparameters given an agent, hyperparameter specs, and environment',
),
}
| 1.90625 | 2 |
IsraeliQueue/__init__.py | YonLiud/Israeli-Queue | 2 | 5172 | from .IsraeliQueue import IsraeliQueue, Item, IsraeliQueueByType
| 1.109375 | 1 |
examples/MMPT/mmpt_cli/localjob.py | Este1le/fairseq | 0 | 5173 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from mmpt.utils import recursive_config
class BaseJob(object):
def __init__(self, yaml_file, dryrun=False):
self.yaml_file = yaml_file
self.config = recursive_config(yaml_file)
self.dryrun = dryrun
def submit(self, **kwargs):
raise NotImplementedError
def _normalize_cmd(self, cmd_list):
cmd_list = list(cmd_list)
yaml_index = cmd_list.index("[yaml]")
cmd_list[yaml_index] = self.yaml_file
return cmd_list
class LocalJob(BaseJob):
CMD_CONFIG = {
"local_single": [
"fairseq-train", "[yaml]", "--user-dir", "mmpt",
"--task", "mmtask", "--arch", "mmarch",
"--criterion", "mmloss",
],
"local_small": [
"fairseq-train", "[yaml]", "--user-dir", "mmpt",
"--task", "mmtask", "--arch", "mmarch",
"--criterion", "mmloss",
"--distributed-world-size", "2"
],
"local_big": [
"fairseq-train", "[yaml]", "--user-dir", "mmpt",
"--task", "mmtask", "--arch", "mmarch",
"--criterion", "mmloss",
"--distributed-world-size", "4"
],
"local_predict": ["python", "mmpt_cli/predict.py", "[yaml]"],
}
def __init__(self, yaml_file, job_type=None, dryrun=False):
super().__init__(yaml_file, dryrun)
if job_type is None:
self.job_type = "local_single"
if self.config.task_type is not None:
self.job_type = self.config.task_type
else:
self.job_type = job_type
if self.job_type in ["local_single", "local_small"]:
if self.config.fairseq.dataset.batch_size > 32:
print("decreasing batch_size to 32 for local testing?")
def submit(self):
cmd_list = self._normalize_cmd(LocalJob.CMD_CONFIG[self.job_type])
if "predict" not in self.job_type:
# append fairseq args.
from mmpt.utils import load_config
config = load_config(config_file=self.yaml_file)
for field in config.fairseq:
for key in config.fairseq[field]:
if key in ["fp16", "reset_optimizer", "reset_dataloader", "reset_meters"]: # a list of binary flag.
param = ["--" + key.replace("_", "-")]
else:
if key == "lr":
value = str(config.fairseq[field][key][0])
elif key == "adam_betas":
value = "'"+str(config.fairseq[field][key])+"'"
else:
value = str(config.fairseq[field][key])
param = [
"--" + key.replace("_", "-"),
value
]
cmd_list.extend(param)
print("launching", " ".join(cmd_list))
if not self.dryrun:
os.system(" ".join(cmd_list))
return JobStatus("12345678")
class JobStatus(object):
def __init__(self, job_id):
self.job_id = job_id
def __repr__(self):
return self.job_id
def __str__(self):
return self.job_id
def done(self):
return False
def running(self):
return False
def result(self):
if self.done():
return "{} is done.".format(self.job_id)
else:
return "{} is running.".format(self.job_id)
def stderr(self):
return self.result()
def stdout(self):
return self.result()
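# Minimal dry-run sketch. The yaml path below is a placeholder and must point
# at an existing MMPT config for `recursive_config` to succeed; dryrun=True
# only prints the assembled command instead of executing it.
if __name__ == "__main__":
    job = LocalJob("path/to/mmpt_config.yaml", dryrun=True)
    status = job.submit()
    print(status.result())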
| 1.859375 | 2 |
tron/Nubs/deprecated/tcc25m-old.py | sdss/tron | 0 | 5174 | import os.path
from tron import g, hub
from tron.Hub.Command.Encoders.ASCIICmdEncoder import ASCIICmdEncoder
from tron.Hub.Nub.TCCShellNub import TCCShellNub
from tron.Hub.Reply.Decoders.ASCIIReplyDecoder import ASCIIReplyDecoder
name = 'tcc'
def start(poller):
stop()
initCmds = ('show version', 'show users', 'show time', 'show status', 'show inst/full',
'show object/full', 'show axisconfig', 'show focus', 'axis status', 'show scale',
'mir status')
safeCmds = r'(^show )|(status$)'
d = ASCIIReplyDecoder(EOL='\r', stripChars='\n', CIDfirst=False, debug=1)
e = ASCIICmdEncoder(EOL='\r', debug=1, CIDfirst=False)
tcc = TCCShellNub(poller, [
'/usr/bin/ssh', '-1', '-e', 'none', '-a', '-x', '-i',
os.path.expanduser('~/.ssh/tron'), '-T', 'tccuser@tcc25m'
],
initCmds=initCmds,
safeCmds=safeCmds,
needsAuth=True,
name=name,
encoder=e,
decoder=d,
logDir=os.path.join(g.logDir, name),
debug=1)
hub.addActor(tcc)
def stop():
n = hub.findActor(name)
if n:
hub.dropActor(n)
del n
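# Added note (assumption: this nub module is loaded by tron's hub machinery, which
# supplies the poller event loop): start(poller) opens an ssh session to tccuser@tcc25m,
# wraps it in a TCCShellNub that replays the initCmds above, and registers it with the
# hub under the name 'tcc'. stop() looks the actor up by that name and drops it, which
# is why start() calls stop() first so that restarts are idempotent.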
| 2.015625 | 2 |
src/PtDb/test.py | ptphp/PyLib | 1 | 5175 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding=utf-8 -*-
'''
Created on 2013-3-31
@author: Joseph
'''
import PtDb
if __name__ == '__main__':
PtDb.config = {
'sqlite':{
'type':'sqlite',
'dbname':"data1.db"
},
'default':{
'type':'mysql',
'host':'localhost',
'port':3306,
'dbname':'game110_dev',
'dbuser':'root',
'dbpass':'root',
'charset':'utf8',
},
'default1':{
'type':'mysql',
'host':'localhost',
'port':3306,
'dbname':'game110_dev',
'dbuser':'root',
'dbpass':'<PASSWORD>',
'charset':'utf8',
},
}
PtDb.init('sqlite').open("test.db")
PtDb.init('sqlite').open("test1.db")
PtDb.init()
print PtDb.init().getAll("select * from orders")
print PtDb.init().getOne("select * from orders limit 1")
| 2.40625 | 2 |
services/object_storage/tests/integ/test_object_storage_bulk_operations.py | honzajavorek/oci-cli | 0 | 5176 | # coding: utf-8
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
import filecmp
import json
import pytest
import oci
import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage
import os
import random
import shutil
import six
import string
from tests import util
from tests import test_config_container
from mimetypes import guess_type
OBJECTS_TO_CREATE_IN_BUCKET_FOR_BULK_GET = 100
OBJECTS_TO_CREATE_IN_FOLDER_FOR_BULK_PUT = 20
CONTENT_STRING_LENGTH = 5000
MID_SIZED_FILE_IN_MEBIBTYES = 20
LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES = 150 # Default multipart is 128MiB
# Holds the objects we create and their content so that we can verify results
bulk_get_object_to_content = {}
bulk_get_prefix_to_object = {
'a/b/c/d': [],
'a/b/c': [],
'a/b': [],
'/a': [],
'': []
}
bulk_get_bucket_name = None
bulk_put_large_files = set()
bulk_put_mid_sized_files = set()
root_bulk_put_folder = None
bulk_put_bucket_name = None
@pytest.fixture
def vcr_fixture(request):
with test_config_container.create_vcr(cassette_library_dir='services/object_storage/tests/cassettes').use_cassette('object_storage_bulk_operations_{name}.yml'.format(name=request.function.__name__)):
yield
# Generate test data for different operations:
#
# Bulk Get: create a new bucket and populate it with some objects, then tear it all down afterwards
# Bulk Put: create a folder structure containing small and large files, then tear it all down afterwards
# Bulk Delete: uses the folders and files generated for bulk put
@pytest.fixture(scope='module', autouse=True)
def generate_test_data(object_storage_client):
global bulk_get_object_to_content, bulk_get_bucket_name, root_bulk_put_folder, bulk_put_large_files, bulk_put_mid_sized_files, bulk_put_bucket_name
# Create a test bucket
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageBulkGetTest_{}'.format(util.random_number_string())
create_bucket_request.compartment_id = util.COMPARTMENT_ID
util.clear_test_data(object_storage_client, util.NAMESPACE, util.COMPARTMENT_ID, create_bucket_request.name)
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
bulk_get_bucket_name = create_bucket_request.name
    # Create items at various hierarchy levels (to be surfaced as different directories on disk)
for i in range(OBJECTS_TO_CREATE_IN_BUCKET_FOR_BULK_GET):
if i % 5 == 4:
object_name = 'a/b/c/d/Object_{}'.format(i)
bulk_get_prefix_to_object['a/b/c/d'].append(object_name)
elif i % 5 == 3:
object_name = 'a/b/c/Object_{}'.format(i)
bulk_get_prefix_to_object['a/b/c'].append(object_name)
elif i % 5 == 2:
object_name = 'a/b/Object_{}'.format(i)
bulk_get_prefix_to_object['a/b'].append(object_name)
elif i % 5 == 1:
# This is equivalent to a/ on the file system because we drop the leading slash (we drop path separators from the front to avoid unexpected results)
object_name = '/a/Object_{}'.format(i)
bulk_get_prefix_to_object['/a'].append(object_name)
else:
# At the root of the bucket
object_name = 'Object_{}'.format(i)
bulk_get_prefix_to_object[''].append(object_name)
object_content = generate_random_string(CONTENT_STRING_LENGTH)
object_storage_client.put_object(util.NAMESPACE, create_bucket_request.name, object_name, object_content)
bulk_get_object_to_content[object_name] = object_content
# makedirs creates all subfolders recursively
root_bulk_put_folder = 'tests/temp/bulk_put_{}'.format(util.random_number_string())
bulk_put_folder_leaf = '{}/subfolder1/subfolder2/subfolder3'.format(root_bulk_put_folder)
if not os.path.exists(bulk_put_folder_leaf):
os.makedirs(bulk_put_folder_leaf)
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageBulkPutTest_{}'.format(util.random_number_string())
create_bucket_request.compartment_id = util.COMPARTMENT_ID
util.clear_test_data(object_storage_client, util.NAMESPACE, util.COMPARTMENT_ID, create_bucket_request.name)
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
bulk_put_bucket_name = create_bucket_request.name
subfolders = ['', 'subfolder1', 'subfolder1/subfolder2', 'subfolder1/subfolder2/subfolder3']
for subfolder in subfolders:
if subfolder == '':
full_folder = root_bulk_put_folder
else:
full_folder = os.path.join(root_bulk_put_folder, subfolder)
for i in range(OBJECTS_TO_CREATE_IN_FOLDER_FOR_BULK_PUT + 1):
file_path = '{}/object_{}'.format(full_folder, i)
if i != 0 and i % OBJECTS_TO_CREATE_IN_FOLDER_FOR_BULK_PUT == 0:
# Put in one big file per subfolder
util.create_large_file(file_path, LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
bulk_put_large_files.add(file_path)
elif i != 0 and i % 10 == 0:
# Put in the occasional file with a reasonable size so that we can force multipart
util.create_large_file(file_path, MID_SIZED_FILE_IN_MEBIBTYES)
bulk_put_mid_sized_files.add(file_path)
else:
with open(file_path, 'w') as f:
f.write(generate_random_string(CONTENT_STRING_LENGTH))
yield
# Tear down stuff by deleting all the things and then deleting the buckets
delete_bucket_and_all_items(object_storage_client, bulk_get_bucket_name)
delete_bucket_and_all_items(object_storage_client, bulk_put_bucket_name)
# Remove all directories recursively
shutil.rmtree(root_bulk_put_folder)
@util.skip_while_rerecording
def test_normalize_object_name_path():
assert '/this/is/a/path' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('/this/is/a/path')
assert '/this/is/a/path' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('/this/is/a/path', '/')
assert '/this/is/a/path' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('\\this\\is\\a\\path', '\\')
assert '/this/is/a/path' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('\\this/is/a\\path', '\\')
assert 'thisisapath' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('thisisapath')
assert 'thisisapath' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('thisisapath', '/')
assert 'thisisapath' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('thisisapath', '\\')
@util.skip_while_rerecording
def test_get_all_objects_in_bucket(vcr_fixture):
download_folder = 'tests/temp/get_all_{}'.format(bulk_get_bucket_name)
result = invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder])
print(result.output)
# Ensure that content matches
for object_name in bulk_get_object_to_content:
if object_name[0] == '/' or object_name[0] == '\\':
file_path = os.path.join(download_folder, object_name[1:])
else:
file_path = os.path.join(download_folder, object_name)
with open(file_path, 'r') as content_file:
content = content_file.read()
assert content == bulk_get_object_to_content[object_name]
assert len(bulk_get_object_to_content) == get_count_of_files_in_folder_and_subfolders(download_folder)
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_get_directory_and_subdirectories(vcr_fixture):
download_folder = 'tests/temp/get_directory_and_subdirectories_{}'.format(bulk_get_bucket_name)
# This should get us a/b/<object>, a/b/c/<object> and a/b/c/d/<object>
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder, '--prefix', 'a/b'])
for object_name in bulk_get_prefix_to_object['a/b']:
file_path = os.path.join(download_folder, object_name)
with open(file_path, 'r') as content_file:
content = content_file.read()
assert content == bulk_get_object_to_content[object_name]
for object_name in bulk_get_prefix_to_object['a/b/c']:
file_path = os.path.join(download_folder, object_name)
with open(file_path, 'r') as content_file:
content = content_file.read()
assert content == bulk_get_object_to_content[object_name]
for object_name in bulk_get_prefix_to_object['a/b/c/d']:
file_path = os.path.join(download_folder, object_name)
with open(file_path, 'r') as content_file:
content = content_file.read()
assert content == bulk_get_object_to_content[object_name]
assert len(bulk_get_prefix_to_object['a/b']) + len(bulk_get_prefix_to_object['a/b/c']) + len(bulk_get_prefix_to_object['a/b/c/d']) == get_count_of_files_in_folder_and_subfolders(download_folder)
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_get_directory_no_subdirectory(vcr_fixture):
download_folder = 'tests/temp/get_directory_only_{}'.format(bulk_get_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder, '--prefix', 'a/b/c/', '--delimiter', '/'])
for object_name in bulk_get_prefix_to_object['a/b/c']:
file_path = os.path.join(download_folder, object_name)
with open(file_path, 'r') as content_file:
content = content_file.read()
assert content == bulk_get_object_to_content[object_name]
assert len(bulk_get_prefix_to_object['a/b/c']) == get_count_of_files_in_folder_and_subfolders(download_folder)
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_get_files_skipped():
download_folder = 'tests/temp/skip_and_replace_{}'.format(bulk_get_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder])
# Sanity check
assert len(bulk_get_object_to_content) == get_count_of_files_in_folder_and_subfolders(download_folder)
# We should skip over all objects since there is no --overwrite. There should be prompts
result = invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert 'Are you sure you want to overwrite it?' in result.output
assert len(parsed_result['skipped-objects']) == len(bulk_get_object_to_content)
# We should skip over all objects since we say --no-overwrite. Additionally there should be no prompts
result = invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder, '--no-overwrite'])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert 'Are you sure you want to overwrite it?' not in result.output
assert len(parsed_result['skipped-objects']) == len(bulk_get_object_to_content)
# We should skip over no objects since we --overwrite
result = invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder, '--overwrite'])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert len(parsed_result['skipped-objects']) == 0
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_get_no_objects(vcr_fixture):
download_folder = 'tests/temp/no_objects_{}'.format(bulk_get_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder, '--prefix', 'batman'])
assert 0 == get_count_of_files_in_folder_and_subfolders(download_folder)
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_get_multipart(object_storage_client):
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageBulkGetMultipartsTest_{}'.format(util.random_number_string())
create_bucket_request.compartment_id = util.COMPARTMENT_ID
util.clear_test_data(object_storage_client, util.NAMESPACE, util.COMPARTMENT_ID, create_bucket_request.name)
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
large_file_root_dir = os.path.join('tests', 'temp', 'multipart_get_large_files')
if not os.path.exists(large_file_root_dir):
os.makedirs(large_file_root_dir)
util.create_large_file(os.path.join(large_file_root_dir, '1.bin'), LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
util.create_large_file(os.path.join(large_file_root_dir, '2.bin'), LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
util.create_large_file(os.path.join(large_file_root_dir, '3.bin'), LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
util.create_large_file(os.path.join(large_file_root_dir, '4.bin'), LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
util.create_large_file(os.path.join(large_file_root_dir, '5.bin'), LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
util.create_large_file(os.path.join(large_file_root_dir, '6.bin'), 1) # Creates a 1 MiB file for variety
invoke([
'os', 'object', 'bulk-upload',
'--namespace', util.NAMESPACE,
'--bucket-name', create_bucket_request.name,
'--src-dir', large_file_root_dir
])
large_file_verify_dir = os.path.join('tests', 'temp', 'multipart_get_large_files_verify')
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', create_bucket_request.name, '--download-dir', large_file_verify_dir, '--multipart-download-threshold', '128'])
assert get_count_of_files_in_folder_and_subfolders(large_file_verify_dir) == 6
assert filecmp.cmp(os.path.join(large_file_root_dir, '1.bin'), os.path.join(large_file_verify_dir, '1.bin'))
assert filecmp.cmp(os.path.join(large_file_root_dir, '2.bin'), os.path.join(large_file_verify_dir, '2.bin'))
assert filecmp.cmp(os.path.join(large_file_root_dir, '3.bin'), os.path.join(large_file_verify_dir, '3.bin'))
assert filecmp.cmp(os.path.join(large_file_root_dir, '4.bin'), os.path.join(large_file_verify_dir, '4.bin'))
assert filecmp.cmp(os.path.join(large_file_root_dir, '5.bin'), os.path.join(large_file_verify_dir, '5.bin'))
assert filecmp.cmp(os.path.join(large_file_root_dir, '6.bin'), os.path.join(large_file_verify_dir, '6.bin'))
shutil.rmtree(large_file_root_dir)
shutil.rmtree(large_file_verify_dir)
delete_bucket_and_all_items(object_storage_client, create_bucket_request.name)
# Since we've created a reasonable number of objects in this test suite, it's a good opportunity to test using the --all and --limit parameters
@util.skip_while_rerecording
def test_list_all_objects_operations(vcr_fixture):
result = invoke(['os', 'object', 'list', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--all'])
parsed_result = json.loads(result.output)
assert len(parsed_result['data']) == OBJECTS_TO_CREATE_IN_BUCKET_FOR_BULK_GET
assert 'next-start-with' not in result.output
result = invoke(['os', 'object', 'list', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--all', '--page-size', '20'])
parsed_result = json.loads(result.output)
assert len(parsed_result['data']) == OBJECTS_TO_CREATE_IN_BUCKET_FOR_BULK_GET
assert 'next-start-with' not in result.output
result = invoke(['os', 'object', 'list', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--limit', '47'])
parsed_result = json.loads(result.output)
assert len(parsed_result['data']) == 47
assert 'next-start-with' in result.output
result = invoke(['os', 'object', 'list', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--limit', '33', '--page-size', '3'])
parsed_result = json.loads(result.output)
assert len(parsed_result['data']) == 33
assert 'next-start-with' in result.output
# Bulk puts objects, uses multipart where appropriate (when we breach the default of 128MiB)
@util.skip_while_rerecording
def test_bulk_put_default_options():
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', root_bulk_put_folder])
# No failures or skips and we uploaded everything
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
assert len(parsed_result['uploaded-objects']) == get_count_of_files_in_folder_and_subfolders(root_bulk_put_folder)
# Pull everything down and verify that the files match (everything in source appears in destination and they are equal)
download_folder = 'tests/temp/verify_files_{}'.format(bulk_put_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--download-dir', download_folder])
object_name_set = set()
for dir_name, subdir_list, file_list in os.walk(root_bulk_put_folder):
for file in file_list:
source_file_path = os.path.join(dir_name, file)
downloaded_file_path = source_file_path.replace(root_bulk_put_folder, download_folder)
assert os.path.exists(downloaded_file_path)
assert filecmp.cmp(source_file_path, downloaded_file_path, shallow=False)
# Sanity check that we're reporting back that we uploaded the right files
assert get_object_name_from_path(root_bulk_put_folder, source_file_path) in parsed_result['uploaded-objects']
object_name_set.add(get_object_name_from_path(root_bulk_put_folder, source_file_path))
# If we try and put it in the same bucket without --overwrite then everything should be skipped. There should be prompts
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', root_bulk_put_folder])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert 'Are you sure you want to overwrite it?' in result.output
assert set(parsed_result['skipped-objects']) == object_name_set
assert parsed_result['upload-failures'] == {}
assert parsed_result['uploaded-objects'] == {}
# If we say to --no-overwrite then everything should be skipped. There should be no prompts
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', root_bulk_put_folder, '--no-overwrite'])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert 'Are you sure you want to overwrite it?' not in result.output
assert set(parsed_result['skipped-objects']) == object_name_set
assert parsed_result['upload-failures'] == {}
assert parsed_result['uploaded-objects'] == {}
# Now we force it
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', root_bulk_put_folder, '--overwrite'])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
assert len(parsed_result['uploaded-objects']) == len(object_name_set)
for object_name in object_name_set:
assert object_name in parsed_result['uploaded-objects']
shutil.rmtree(download_folder)
# Bulk puts objects with --content-type as auto
@util.skip_while_rerecording
def test_bulk_put_auto_content_type():
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', root_bulk_put_folder, '--content-type', 'auto', '--overwrite'])
# No failures or skips and we uploaded everything
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
assert len(parsed_result['uploaded-objects']) == get_count_of_files_in_folder_and_subfolders(root_bulk_put_folder)
# Pull everything down and verify that the files match (everything in source appears in destination and they are equal)
download_folder = 'tests/temp/verify_files_{}'.format(bulk_put_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--download-dir', download_folder])
object_name_set = set()
for dir_name, subdir_list, file_list in os.walk(root_bulk_put_folder):
for file in file_list:
source_file_path = os.path.join(dir_name, file)
downloaded_file_path = source_file_path.replace(root_bulk_put_folder, download_folder)
assert os.path.exists(downloaded_file_path)
assert filecmp.cmp(source_file_path, downloaded_file_path, shallow=False)
assert guess_type(source_file_path) == guess_type(downloaded_file_path)
# Sanity check that we're reporting back that we uploaded the right files
assert get_object_name_from_path(root_bulk_put_folder, source_file_path) in parsed_result['uploaded-objects']
object_name_set.add(get_object_name_from_path(root_bulk_put_folder, source_file_path))
shutil.rmtree(download_folder)
# Tests that multipart params are applied:
#
# - Try to upload with a part size of 10MiB (this will force the large and mid-sized files to be multipart uploaded)
# - Try to upload with multipart disabled
@util.skip_while_rerecording
def test_bulk_put_with_multipart_params(object_storage_client):
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageBulkPutMultipartsTest_{}'.format(util.random_number_string())
create_bucket_request.compartment_id = util.COMPARTMENT_ID
util.clear_test_data(object_storage_client, util.NAMESPACE, util.COMPARTMENT_ID, create_bucket_request.name)
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
result = invoke([
'os', 'object', 'bulk-upload',
'--namespace', util.NAMESPACE,
'--bucket-name', create_bucket_request.name,
'--src-dir', root_bulk_put_folder,
'--part-size', '10'
])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
assert len(parsed_result['uploaded-objects']) == get_count_of_files_in_folder_and_subfolders(root_bulk_put_folder)
result = invoke([
'os', 'object', 'bulk-upload',
'--namespace', util.NAMESPACE,
'--bucket-name', create_bucket_request.name,
'--src-dir', root_bulk_put_folder,
'--no-multipart',
'--overwrite'
])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
assert len(parsed_result['uploaded-objects']) == get_count_of_files_in_folder_and_subfolders(root_bulk_put_folder)
delete_bucket_and_all_items(object_storage_client, create_bucket_request.name)
@util.skip_while_rerecording
def test_bulk_put_with_prefix():
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', root_bulk_put_folder, '--object-prefix', 'bulk_put_prefix_test/'])
# No failures or skips and we uploaded everything
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
assert len(parsed_result['uploaded-objects']) == get_count_of_files_in_folder_and_subfolders(root_bulk_put_folder)
download_folder = 'tests/temp/verify_files_bulk_put_prefix_{}'.format(bulk_put_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--download-dir', download_folder, '--prefix', 'bulk_put_prefix_test/'])
actual_download_folder = os.path.join(download_folder, 'bulk_put_prefix_test')
for dir_name, subdir_list, file_list in os.walk(root_bulk_put_folder):
for file in file_list:
source_file_path = os.path.join(dir_name, file)
downloaded_file_path = source_file_path.replace(root_bulk_put_folder, actual_download_folder)
assert os.path.exists(downloaded_file_path)
assert filecmp.cmp(source_file_path, downloaded_file_path, shallow=False)
# Sanity check that we're reporting back that we uploaded the right files
assert 'bulk_put_prefix_test/{}'.format(get_object_name_from_path(root_bulk_put_folder, source_file_path)) in parsed_result['uploaded-objects']
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_bulk_put_with_non_existent_folder():
fake_directory = 'tests/folder/not/exist'
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', fake_directory])
assert 'UsageError' in result.output
assert 'The specified --src-dir {} (expanded to: {}) does not exist'.format(fake_directory, fake_directory) in result.output
@util.skip_while_rerecording
def test_bulk_put_get_delete_with_inclusions(object_storage_client):
inclusion_test_folder = os.path.join('tests', 'temp', 'os_bulk_upload_inclusion_test')
if not os.path.exists(inclusion_test_folder):
os.makedirs(inclusion_test_folder)
# Make some files for include/exclude
folders_to_files = {
'': ['test_file1.txt', 'test_file2.png'],
'subfolder': ['blah.pdf', 'hello.txt', 'testfile3.png'],
'subfolder/subfolder2': ['xyz.jpg', 'blag.txt', 'byz.jpg', 'testfile4.png']
}
for folder, files in six.iteritems(folders_to_files):
folder_path = os.path.join(inclusion_test_folder, folder)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
for file in files:
file_path = os.path.join(folder_path, file)
with open(file_path, 'w') as f:
                # For non-text extension types this won't create a valid file, but for testing it is probably OK
f.write(generate_random_string(CONTENT_STRING_LENGTH))
result = invoke([
'os',
'object',
'bulk-upload',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--src-dir', inclusion_test_folder,
'--object-prefix', 'inclusion_test/',
'--include', '*.txt', # Matches test_file1.txt, subfolder/hello.txt, subfolder/subfolder2/blag.txt
'--include', 'subfolder/*.png', # Matches subfolder/testfile3.png, subfolder/subfolder2/testfile4.png
'--include', 'subfolder/[b]lah.pdf', # Matches subfolder/blah.pdf
'--include', '*/[ax]yz.jpg' # Matches subfolder/subfolder2/xyz.jpg
])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
expected_uploaded_files = [
'{}{}'.format('inclusion_test/', 'test_file1.txt'),
'{}{}'.format('inclusion_test/', 'subfolder/hello.txt'),
'{}{}'.format('inclusion_test/', 'subfolder/subfolder2/blag.txt'),
'{}{}'.format('inclusion_test/', 'subfolder/testfile3.png'),
'{}{}'.format('inclusion_test/', 'subfolder/subfolder2/testfile4.png'),
'{}{}'.format('inclusion_test/', 'subfolder/blah.pdf'),
'{}{}'.format('inclusion_test/', 'subfolder/subfolder2/xyz.jpg')
]
# Check that we uploaded what we said we did
assert len(parsed_result['uploaded-objects']) == len(expected_uploaded_files)
for f in expected_uploaded_files:
assert f in parsed_result['uploaded-objects']
download_folder_base = os.path.join('tests', 'temp', 'verify_os_bulk_upload_inclusion_test')
verify_downloaded_folders_for_inclusion_exclusion_tests(
expected_uploaded_files=expected_uploaded_files,
source_folder=inclusion_test_folder,
download_folder=download_folder_base,
download_prefix_no_slash='inclusion_test'
)
# Download objects with inclusions to make sure that works
target_download_folder = os.path.join(download_folder_base, 'get_with_include')
invoke([
'os', 'object', 'bulk-download',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--download-dir', target_download_folder,
'--prefix', 'inclusion_test/',
'--include', '*.txt',
'--include', 'subfolder/*.png',
'--include', 'subfolder/blah.pdf',
])
expected_uploaded_files.remove('{}{}'.format('inclusion_test/', 'subfolder/subfolder2/xyz.jpg')) # This is not in our --include switches
assert not os.path.exists(os.path.join(target_download_folder, 'inclusion_test', 'subfolder', 'subfolder2', 'xyz.jpg'))
for expected_file in expected_uploaded_files:
target_file = os.path.join(target_download_folder, expected_file)
original_file = target_file.replace(os.path.join(target_download_folder, 'inclusion_test'), inclusion_test_folder)
assert os.path.exists(target_file)
assert filecmp.cmp(original_file, target_file, shallow=False)
# Download a specific object with inclusions
invoke([
'os', 'object', 'bulk-download',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--download-dir', target_download_folder,
'--prefix', 'inclusion_test/',
'--include', 'subfolder/subfolder2/xyz.jpg'
])
assert os.path.exists(os.path.join(target_download_folder, 'inclusion_test', 'subfolder', 'subfolder2', 'xyz.jpg'))
# Delete objects with inclusions
result = invoke([
'os', 'object', 'bulk-delete',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--prefix', 'inclusion_test/',
'--include', '*.txt',
'--include', 'subfolder/blah.pdf',
'--dry-run'
])
parsed_dry_run_result = parse_json_response_from_mixed_output(result.output)
assert len(parsed_dry_run_result['deleted-objects']) == 4
result = invoke([
'os', 'object', 'bulk-delete',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--prefix', 'inclusion_test/',
'--include', '*.txt',
'--include', 'subfolder/blah.pdf',
'--force'
])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['delete-failures'] == {}
assert set(parsed_result['deleted-objects']) == set(parsed_dry_run_result['deleted-objects'])
list_objects_responses = oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects(
client=object_storage_client,
request_id=None,
namespace=util.NAMESPACE,
bucket_name=bulk_put_bucket_name,
prefix='inclusion_test/',
start=None,
end=None,
limit=1000,
delimiter=None,
fields='name',
retrieve_all=True
)
remaining_objects = []
for response in list_objects_responses:
remaining_objects.extend(map(lambda obj: obj.name, response.data.objects))
assert len(remaining_objects) == 3
assert '{}{}'.format('inclusion_test/', 'subfolder/testfile3.png') in remaining_objects
assert '{}{}'.format('inclusion_test/', 'subfolder/subfolder2/testfile4.png') in remaining_objects
assert '{}{}'.format('inclusion_test/', 'subfolder/subfolder2/xyz.jpg') in remaining_objects
shutil.rmtree(target_download_folder)
shutil.rmtree(inclusion_test_folder)
@util.skip_while_rerecording
def test_bulk_put_get_delete_with_exclusions(object_storage_client):
exclusion_test_folder = os.path.join('tests', 'temp', 'os_bulk_upload_exclusion_test')
if not os.path.exists(exclusion_test_folder):
os.makedirs(exclusion_test_folder)
# Make some files for include/exclude
folders_to_files = {
'': ['test_file1.txt', 'test_file2.png'],
'subfolder': ['blah.pdf', 'hello.txt', 'testfile3.png'],
'subfolder/subfolder2': ['xyz.jpg', 'blag.txt', 'byz.jpg', 'testfile4.png']
}
for folder, files in six.iteritems(folders_to_files):
folder_path = os.path.join(exclusion_test_folder, folder)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
for file in files:
file_path = os.path.join(folder_path, file)
with open(file_path, 'w') as f:
                # For non-text extension types this won't create a valid file, but for testing it is probably OK
f.write(generate_random_string(CONTENT_STRING_LENGTH))
result = invoke([
'os',
'object',
'bulk-upload',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--src-dir', exclusion_test_folder,
'--object-prefix', 'exclusion_test/',
'--exclude', '*.txt',
'--exclude', '*.ps1', # Shouldn't match anything
'--exclude', 'subfolder/subfolder2/xyz.jpg',
'--exclude', 'subfolder/[spqr]lah.pdf' # blah.pdf should still be included because it's not slah.pdf, plah.pdf, qlah.pdf or rlah.pdf
])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
expected_uploaded_files = [
'{}{}'.format('exclusion_test/', 'test_file2.png'),
'{}{}'.format('exclusion_test/', 'subfolder/blah.pdf'),
'{}{}'.format('exclusion_test/', 'subfolder/testfile3.png'),
'{}{}'.format('exclusion_test/', 'subfolder/subfolder2/byz.jpg'),
'{}{}'.format('exclusion_test/', 'subfolder/subfolder2/testfile4.png')
]
# Check that we uploaded what we said we did
assert len(parsed_result['uploaded-objects']) == len(expected_uploaded_files)
for f in expected_uploaded_files:
assert f in parsed_result['uploaded-objects']
download_folder_base = os.path.join('tests', 'temp', 'verify_os_bulk_upload_exclusion_test')
verify_downloaded_folders_for_inclusion_exclusion_tests(
expected_uploaded_files=expected_uploaded_files,
source_folder=exclusion_test_folder,
download_folder=download_folder_base,
download_prefix_no_slash='exclusion_test'
)
# Download objects with exclusions to make sure that works
target_download_folder = os.path.join(download_folder_base, 'get_with_exclude')
invoke([
'os', 'object', 'bulk-download',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--download-dir', target_download_folder,
'--prefix', 'exclusion_test/',
'--exclude', '*.jpg',
'--exclude', 'subfolder/subfolder2/*.png',
'--exclude', 'subfolder/blah.pdf',
])
assert not os.path.exists(os.path.join(target_download_folder, 'exclusion_test', 'subfolder', 'blah.pdf'))
assert not os.path.exists(os.path.join(target_download_folder, 'exclusion_test', 'subfolder', 'subfolder2', 'byz.jpg'))
assert not os.path.exists(os.path.join(target_download_folder, 'exclusion_test', 'subfolder', 'subfolder2', 'testfile4.png'))
assert get_count_of_files_in_folder_and_subfolders(target_download_folder) == 2
assert os.path.exists(os.path.join(target_download_folder, 'exclusion_test', 'test_file2.png'))
assert os.path.exists(os.path.join(target_download_folder, 'exclusion_test', 'subfolder', 'testfile3.png'))
assert filecmp.cmp(
os.path.join(exclusion_test_folder, 'test_file2.png'),
os.path.join(target_download_folder, 'exclusion_test', 'test_file2.png')
)
assert filecmp.cmp(
os.path.join(exclusion_test_folder, 'subfolder', 'testfile3.png'),
os.path.join(target_download_folder, 'exclusion_test', 'subfolder', 'testfile3.png')
)
# Delete objects with exclusions
result = invoke([
'os', 'object', 'bulk-delete',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--prefix', 'exclusion_test/',
'--exclude', '*.jpg',
'--exclude', 'subfolder/blah.pdf',
'--dry-run'
])
parsed_dry_run_result = parse_json_response_from_mixed_output(result.output)
assert len(parsed_dry_run_result['deleted-objects']) == 3
result = invoke([
'os', 'object', 'bulk-delete',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--prefix', 'exclusion_test/',
'--exclude', '*.jpg',
'--exclude', 'subfolder/blah.pdf',
'--force'
])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['delete-failures'] == {}
assert set(parsed_result['deleted-objects']) == set(parsed_dry_run_result['deleted-objects'])
list_objects_responses = oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects(
client=object_storage_client,
request_id=None,
namespace=util.NAMESPACE,
bucket_name=bulk_put_bucket_name,
prefix='exclusion_test/',
start=None,
end=None,
limit=1000,
delimiter=None,
fields='name',
retrieve_all=True
)
remaining_objects = []
for response in list_objects_responses:
remaining_objects.extend(map(lambda obj: obj.name, response.data.objects))
assert len(remaining_objects) == 2
assert '{}{}'.format('exclusion_test/', 'subfolder/blah.pdf') in remaining_objects
assert '{}{}'.format('exclusion_test/', 'subfolder/subfolder2/byz.jpg') in remaining_objects
shutil.rmtree(target_download_folder)
shutil.rmtree(exclusion_test_folder)
@util.skip_while_rerecording
def test_delete_when_no_objects_in_bucket(vcr_fixture, object_storage_client):
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageBulkDelete_{}'.format(util.random_number_string())
create_bucket_request.compartment_id = util.COMPARTMENT_ID
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', create_bucket_request.name])
assert 'There are no objects to delete in {}'.format(create_bucket_request.name) in result.output
delete_bucket_and_all_items(object_storage_client, create_bucket_request.name)
@util.skip_while_rerecording
def test_delete_dry_run(vcr_fixture):
# Dry-run against entire bucket
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--dry-run'])
parsed_result = json.loads(result.output)
assert set(parsed_result['deleted-objects']) == set(bulk_get_object_to_content.keys())
# Dry-run against a folder and all subfolders
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--prefix', 'a/b/', '--dry-run'])
parsed_result = json.loads(result.output)
expected_objects = set().union(bulk_get_prefix_to_object['a/b'], bulk_get_prefix_to_object['a/b/c'], bulk_get_prefix_to_object['a/b/c/d'])
assert set(parsed_result['deleted-objects']) == expected_objects
# Dry-run against a folder and no subfolders
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--prefix', 'a/b/', '--delimiter', '/', '--dry-run'])
parsed_result = json.loads(result.output)
assert set(parsed_result['deleted-objects']) == set(bulk_get_prefix_to_object['a/b'])
@util.skip_while_rerecording
def test_delete(object_storage_client):
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageBulkDelete_{}'.format(random.randint(0, 1000000))
create_bucket_request.compartment_id = util.COMPARTMENT_ID
util.clear_test_data(object_storage_client, util.NAMESPACE, util.COMPARTMENT_ID, create_bucket_request.name)
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', create_bucket_request.name, '--src-dir', root_bulk_put_folder])
num_objects_to_delete = get_count_of_files_in_folder_and_subfolders(root_bulk_put_folder)
# Sanity check that the bucket has things in it
assert get_number_of_objects_in_bucket(object_storage_client, create_bucket_request.name) > 0
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', create_bucket_request.name])
if num_objects_to_delete >= 1000:
confirm_prompt = 'WARNING: This command will delete at least {} objects. Are you sure you wish to continue?'.format(num_objects_to_delete)
else:
confirm_prompt = 'WARNING: This command will delete {} objects. Are you sure you wish to continue?'.format(num_objects_to_delete)
assert confirm_prompt in result.output
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', create_bucket_request.name, '--force'])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['delete-failures'] == {}
assert len(parsed_result['deleted-objects']) == num_objects_to_delete
# Check that the bucket is now empty
assert get_number_of_objects_in_bucket(object_storage_client, create_bucket_request.name) == 0
delete_bucket_and_all_items(object_storage_client, create_bucket_request.name)
@util.skip_while_rerecording
def test_bulk_operation_table_output_query(object_storage_client):
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageTableOutput_{}'.format(util.random_number_string())
create_bucket_request.compartment_id = util.COMPARTMENT_ID
util.clear_test_data(object_storage_client, util.NAMESPACE, util.COMPARTMENT_ID, create_bucket_request.name)
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', create_bucket_request.name, '--src-dir', root_bulk_put_folder, '--output', 'table', '--query', "[?action=='Uploaded'].{file: file, \"opc-content-md5\": \"opc-content-md5\"}"])
assert 'file' in result.output
assert 'opc-content-md5' in result.output
assert 'etag' not in result.output
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--dry-run', '--output', 'table'])
assert 'action' in result.output
assert 'object' in result.output
assert '/a/Object_1' in result.output
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--dry-run', '--output', 'table', '--query', "[?object=='Object_0'][object]"])
assert 'action' not in result.output
assert '/a/Object_1' not in result.output
assert 'Object_0' in result.output
target_download_folder = os.path.join('tests', 'temp', create_bucket_request.name)
result = invoke([
'os', 'object', 'bulk-download',
'--namespace', util.NAMESPACE,
'--bucket-name', create_bucket_request.name,
'--download-dir', target_download_folder,
'--output', 'table',
])
delete_bucket_and_all_items(object_storage_client, create_bucket_request.name)
shutil.rmtree(target_download_folder)
def invoke(commands, debug=False, ** args):
if debug is True:
commands = ['--debug'] + commands
return util.invoke_command(commands, ** args)
def get_count_of_files_in_folder_and_subfolders(directory):
file_count = 0
for dir_name, subdir_list, file_list in os.walk(directory):
file_count = file_count + len(file_list)
return file_count
def generate_random_string(length):
if test_config_container.using_vcr_with_mock_responses():
return 'a' * length
else:
return ''.join(random.choice(string.ascii_lowercase) for i in range(length))
# Pull JSON data out of output which may have stuff other than JSON in it. Assumes that nothing
# comes after the JSON data
def parse_json_response_from_mixed_output(output):
lines = output.split('\n')
json_str = ''
object_begun = False
for line in lines:
if object_begun or line.startswith('{'):
object_begun = True
json_str += line
return json.loads(json_str)
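# Added illustration (not part of the original tests): the bulk commands print progress
# text before the JSON document, e.g.
#   parse_json_response_from_mixed_output('Uploaded a.txt\n{"skipped-objects": []}')
# returns {'skipped-objects': []} -- everything before the first line starting with '{'
# is dropped and the remainder is handed to json.loads.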
# For the bulk operations, object names are taken from the file path of the thing we uploaded. Normalize to
# / in the paths (Windows can go both ways) then chop the front bit off
def get_object_name_from_path(path_root, full_path):
return full_path.replace(os.sep, '/').replace(path_root + '/', '')
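# Added illustration: get_object_name_from_path('tests/temp/bulk_put_1',
# 'tests/temp/bulk_put_1/subfolder1/object_3') returns 'subfolder1/object_3'; on Windows
# any backslash separators are normalised to '/' first so the prefix strip still works.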
def delete_bucket_and_all_items(object_storage_client, bucket_name):
list_object_responses = oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects(
client=object_storage_client,
request_id=None,
namespace=util.NAMESPACE,
bucket_name=bucket_name,
prefix=None,
start=None,
end=None,
limit=1000,
delimiter=None,
fields='name',
retrieve_all=True
)
for response in list_object_responses:
for obj in response.data.objects:
object_storage_client.delete_object(util.NAMESPACE, bucket_name, obj.name)
object_storage_client.delete_bucket(util.NAMESPACE, bucket_name)
def get_number_of_objects_in_bucket(object_storage_client, bucket_name):
list_object_responses = oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects(
client=object_storage_client,
request_id=None,
namespace=util.NAMESPACE,
bucket_name=bucket_name,
prefix=None,
start=None,
end=None,
limit=1000,
delimiter=None,
fields='name',
retrieve_all=True
)
num_objects_in_bucket = 0
for response in list_object_responses:
num_objects_in_bucket = num_objects_in_bucket + len(response.data.objects)
return num_objects_in_bucket
def verify_downloaded_folders_for_inclusion_exclusion_tests(expected_uploaded_files, source_folder, download_folder, download_prefix_no_slash):
# Download uploaded files and check they are the same
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--download-dir', download_folder, '--prefix', download_prefix_no_slash + '/'])
    # The strings in the expected_uploaded_files array use "/" as the separator, which does not match native paths on Windows. Using os.path.normpath converts
    # them to "\" on Windows so our matching/comparison works; on Linux/Unix/macOS it has no effect.
normalized_expected_uploaded_files = []
for euf in expected_uploaded_files:
normalized_expected_uploaded_files.append(os.path.normpath(euf))
actual_download_folder = os.path.join(download_folder, download_prefix_no_slash)
files_compared = 0
for dir_name, subdir_list, file_list in os.walk(source_folder):
for file in file_list:
source_file_path = os.path.join(dir_name, file)
downloaded_file_path = source_file_path.replace(source_folder, actual_download_folder)
if downloaded_file_path.replace(actual_download_folder, download_prefix_no_slash) in normalized_expected_uploaded_files:
files_compared += 1
assert os.path.exists(downloaded_file_path)
assert filecmp.cmp(source_file_path, downloaded_file_path, shallow=False)
assert files_compared == len(expected_uploaded_files)
shutil.rmtree(actual_download_folder)
| 2.140625 | 2 |
Extras/benchmark/simple-benchmark.py | yunhaom94/redis-writeanywhere | 0 | 5177 | #!/usr/bin/python3
import random
import string
import time
import subprocess
import os
import redis
import threading
def generate_string(string_size, size, dict):
'''
https://stackoverflow.com/questions/16308989/fastest-method-to-generate-big-random-string-with-lower-latin-letters
'''
for i in range(size):
min_lc = ord(b'a')
len_lc = 26
key = bytearray(random.getrandbits(8*string_size).to_bytes(string_size, 'big'))
for i, b in enumerate(key):
key[i] = min_lc + b % len_lc # convert 0..255 to 97..122
key = key.decode()
val = key
dict[key] = val
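# Added remark (not in the original): because 256 is not a multiple of 26, the
# "b % len_lc" mapping above lands on 'a'-'v' slightly more often than on 'w'-'z'.
# That is harmless for a benchmark payload; it just means keys are not perfectly uniform.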
if __name__ == "__main__":
    size = 1000 # TODO: make this a command-line argument
port = 7000
FNULL = open(os.devnull, 'w')
string_size = 100000
partition = int(size/4)
print("generating test sets")
d1 = {}
d2 = {}
d3 = {}
d4 = {}
t1 = threading.Thread(target=generate_string, args = (string_size, partition, d1))
t2 = threading.Thread(target=generate_string, args = (string_size, partition, d2))
t3 = threading.Thread(target=generate_string, args = (string_size, partition, d3))
t4 = threading.Thread(target=generate_string, args = (string_size, partition, d4))
t1.start()
t2.start()
t3.start()
t4.start()
    t1.join()
    t2.join()
    t3.join()
    t4.join()
test_set = {}
test_set.update(d1)
test_set.update(d2)
test_set.update(d3)
test_set.update(d4)
print(len(test_set))
print("running tests...")
r = redis.StrictRedis(host='localhost', port=port, db=0)
start = time.time()
print("testing set")
for k,v in test_set.items():
r.set(k, v)
r.wait(3, 0)
print("testing get")
for k,v in test_set.items():
r.get(k)
r.wait(3, 0)
end = time.time()
runtime = end - start
ops = size * 2
throughput = float(ops/runtime)
latency = float(1/throughput)
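    # Worked example of the figures above (illustrative numbers, not a measurement):
    # size = 1000 keys gives ops = 2000; if the run took 4.0 s, then
    # throughput = 2000 / 4.0 = 500 ops/s and latency = 1 / 500 = 0.002 s per operation.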
print("total run time: {runtime}s \n\
number of total operations with 50% Set and 50% Get: {ops} \n\
avg. throughput: {throughput} ops/s \n\
avg. latency: {latency} s".format(
runtime=runtime,
ops=ops,
throughput=throughput,
latency=latency
))
| 3.125 | 3 |
challenges/015-setintersection.py | Widdershin/CodeEval | 0 | 5178 | <reponame>Widdershin/CodeEval
"""
https://www.codeeval.com/browse/30/
Set Intersection
Challenge Description:
You are given two sorted list of numbers (ascending order). The lists
themselves are comma delimited and the two lists are semicolon
delimited. Print out the intersection of these two sets.
Input Sample:
File containing two lists of ascending order sorted integers, comma
delimited, one per line. E.g.
1,2,3,4;4,5,6
20,21,22;45,46,47
7,8,9;8,9,10,11,12
Output Sample:
Print out the ascending order sorted intersection of the two lists,
one per line. Print empty new line in case the lists have
no intersection. E.g.
4
8,9
"""
###### IO Boilerplate ######
import sys
if len(sys.argv) < 2:
input_file_name = "15-setintersection-in.txt"
else:
input_file_name = sys.argv[1]
with open(input_file_name) as input_file:
input_lines = map(lambda x: x.strip(), filter(lambda x: x != '', input_file.readlines()))
###### /IO Boilerplate ######
def main():
for line in input_lines:
string_sets = line.split(';')
sets = [set(string_set.split(',')) for string_set in string_sets]
        intersection = sorted(sets[0].intersection(sets[1]), key=int)  # sort numerically; a plain string sort would put "10" before "9"
print ",".join(intersection)
if __name__ == '__main__':
main()
| 4.1875 | 4 |
python/arch/api/table/session.py | GentleWang1011/eggroll | 1 | 5179 | <gh_stars>1-10
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import datetime
import threading
from typing import Iterable
import six
from arch.api import WorkMode, Backend
from arch.api.table.table import Table
from eggroll.core.constants import StoreTypes
def build_session(job_id=None,
work_mode: WorkMode = WorkMode.STANDALONE,
backend: Backend = Backend.EGGROLL2,
persistent_engine: StoreTypes = StoreTypes.ROLLPAIR_LMDB):
from arch.api.table import eggroll_util
if backend.is_eggroll():
from arch.api.table.eggroll import session_impl
eggroll_session = eggroll_util.build_eggroll_session(work_mode=work_mode, job_id=job_id)
session = session_impl.FateSessionImpl(eggroll_session, work_mode, persistent_engine)
elif backend.is_spark():
from arch.api.table.pyspark import session_impl
eggroll_session = eggroll_util.build_eggroll_session(work_mode=work_mode, job_id=job_id)
session = session_impl.FateSessionImpl(eggroll_session, work_mode, persistent_engine)
elif backend.is_eggroll2():
from eggroll.core.session import session_init
from arch.api.table.eggroll2 import session_impl
options = {}
if work_mode == WorkMode.STANDALONE:
options['eggroll.session.deploy.mode'] = "standalone"
elif work_mode == WorkMode.CLUSTER:
options['eggroll.session.deploy.mode'] = "cluster"
er_session = session_init(session_id=job_id, options=options)
session = session_impl.FateSessionImpl(er_session, work_mode, persistent_engine)
else:
raise ValueError(f"work_mode: {work_mode} not supported")
return session
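# Hedged usage sketch (added for illustration; the job id is made up and a working
# standalone EggRoll/FATE environment is assumed):
#
#     from arch.api import WorkMode, Backend
#     session = build_session(job_id="example_job_001",
#                             work_mode=WorkMode.STANDALONE,
#                             backend=Backend.EGGROLL2)
#     FateSession.set_instance(session)  # helpers like get_data_table() then reach it via get_instance()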
@six.add_metaclass(abc.ABCMeta)
class FateSession(object):
_instance: 'FateSession' = None
__lock = threading.Lock()
@staticmethod
def set_instance(instance):
if not FateSession._instance:
with FateSession.__lock:
if not FateSession._instance:
FateSession._instance = instance
@staticmethod
def get_instance():
return FateSession._instance
@abc.abstractmethod
def get_persistent_engine(self):
pass
@abc.abstractmethod
def table(self,
name,
namespace,
partition,
persistent,
in_place_computing,
create_if_missing,
error_if_exist) -> Table:
pass
@abc.abstractmethod
def parallelize(self,
data: Iterable,
include_key,
name,
partition,
namespace,
persistent,
chunk_size,
in_place_computing,
create_if_missing,
error_if_exist) -> Table:
pass
@abc.abstractmethod
def cleanup(self, name, namespace, persistent):
pass
# noinspection PyPep8Naming
@abc.abstractmethod
def generateUniqueId(self):
pass
@abc.abstractmethod
def get_session_id(self):
pass
@abc.abstractmethod
def stop(self):
pass
@staticmethod
def get_data_table(name, namespace):
"""
return data table instance by table name and table name space
:param name: table name of data table
:param namespace: table name space of data table
:return:
data table instance
"""
return FateSession.get_instance().table(name=name,
namespace=namespace,
create_if_missing=False,
persistent=True,
error_if_exist=False,
in_place_computing=False,
partition=1)
@staticmethod
def save_data_table_meta(kv, data_table_name, data_table_namespace):
"""
save data table meta information
:param kv: v should be serialized by JSON
:param data_table_name: table name of this data table
:param data_table_namespace: table name of this data table
:return:
"""
from arch.api.utils.core import json_dumps
data_meta_table = FateSession.get_instance().table(name="%s.meta" % data_table_name,
namespace=data_table_namespace,
partition=1,
create_if_missing=True,
error_if_exist=False,
persistent=True,
in_place_computing=False)
for k, v in kv.items():
data_meta_table.put(k, json_dumps(v))
@staticmethod
def get_data_table_meta(key, data_table_name, data_table_namespace):
"""
get data table meta information
:param key:
:param data_table_name: table name of this data table
:param data_table_namespace: table name of this data table
:return:
"""
from arch.api.utils.core import json_loads
data_meta_table = FateSession.get_instance().table(name="%s.meta" % data_table_name,
namespace=data_table_namespace,
create_if_missing=True,
error_if_exist=False,
in_place_computing=False,
persistent=True,
partition=1)
if data_meta_table:
value_bytes = data_meta_table.get(key, use_serialize=False)
if value_bytes:
return json_loads(value_bytes)
else:
return None
else:
return None
@staticmethod
def get_data_table_metas(data_table_name, data_table_namespace):
"""
get data table meta information
:param data_table_name: table name of this data table
:param data_table_namespace: table name of this data table
:return:
"""
from arch.api.utils.core import json_loads
data_meta_table = FateSession.get_instance().table(name="%s.meta" % data_table_name,
namespace=data_table_namespace,
partition=1,
persistent=True,
in_place_computing=False,
create_if_missing=True,
error_if_exist=False)
if data_meta_table:
metas = dict()
for k, v in data_meta_table.collect(use_serialize=False):
metas[k] = json_loads(v)
return metas
else:
return None
@staticmethod
def clean_table(namespace, regex_string='*'):
try:
FateSession.get_instance().cleanup(name=regex_string, namespace=namespace, persistent=False)
except Exception as e:
print(e)
@staticmethod
def save_data(kv_data: Iterable,
name,
namespace,
partition=1,
persistent: bool = True,
create_if_missing=True,
error_if_exist=False,
in_version: bool = False,
version_log=None):
"""
save data into data table
:param version_log:
:param in_version:
:param kv_data:
:param name: table name of data table
:param namespace: table namespace of data table
:param partition: number of partition
:param persistent: bool = True,
:param create_if_missing:
:param error_if_exist:
:return:
data table instance
"""
from arch.api.utils import version_control
data_table = FateSession.get_instance().table(name=name,
namespace=namespace,
partition=partition,
persistent=persistent,
in_place_computing=False,
create_if_missing=create_if_missing,
error_if_exist=error_if_exist)
data_table.put_all(kv_data)
if in_version:
version_log = "[AUTO] save data at %s." % datetime.datetime.now() if not version_log else version_log
version_control.save_version(name=name, namespace=namespace, version_log=version_log)
return data_table
| 1.84375 | 2 |
experiments/vitchyr/vaes/learn_swirl_vae.py | Asap7772/rail-rl-franka-eval | 0 | 5180 | """
VAE on the swirl task.
Basically, VAEs don't work. It's probably because the prior isn't very good
and/or because the learning signal is pretty weak when both the encoder and
decoder change quickly. However, I tried also alternating between the two,
and that didn't seem to help.
"""
from torch.distributions import Normal
from torch.optim import Adam
import torch
import numpy as np
import matplotlib.pyplot as plt
from torch import nn as nn
import railrl.torch.pytorch_util as ptu
SWIRL_RATE = 1
T = 10
BS = 128
N_BATCHES = 2000
N_VIS = 1000
HIDDEN_SIZE = 32
VERBOSE = False
def swirl_data(batch_size):
t = np.random.uniform(size=batch_size, low=0, high=T)
x = t * np.cos(t * SWIRL_RATE) / T
y = t * np.sin(t * SWIRL_RATE) / T
data = np.array([x, y]).T
noise = np.random.randn(batch_size, 2) / (T * 2)
return data + noise, t.reshape(-1, 1)
def swirl_t_to_data(t):
x = t * np.cos(t * SWIRL_RATE) / T
y = t * np.sin(t * SWIRL_RATE) / T
return np.array([x, y]).T
def kl_to_prior(means, log_stds, stds):
"""
KL between a Gaussian and a standard Gaussian.
https://stats.stackexchange.com/questions/60680/kl-divergence-between-two-multivariate-gaussians
"""
return 0.5 * (
- 2 * log_stds # log std_prior = 0
- 1 # d = 1
+ stds ** 2
+ means ** 2
)
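# Editor's addition (hedged sketch, not part of the original script): a quick self-check
# of the closed form above against torch.distributions.kl_divergence. Assumes a torch
# version that ships kl_divergence (>= 0.4), which is newer than the Variable-style API
# used elsewhere in this file.
def check_kl_to_prior_against_torch():
    from torch.distributions import kl_divergence
    means = torch.randn(5, 1)
    log_stds = torch.randn(5, 1)
    stds = log_stds.exp()
    reference = kl_divergence(
        Normal(means, stds),
        Normal(torch.zeros_like(means), torch.ones_like(means)),
    )
    assert torch.allclose(kl_to_prior(means, log_stds, stds), reference, atol=1e-5)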
class Encoder(nn.Sequential):
def encode(self, x):
return self.get_encoding_and_suff_stats(x)[0]
def get_encoding_and_suff_stats(self, x):
output = self(x)
means, log_stds = (
output[:, 0:1], output[:, 1:2]
)
stds = log_stds.exp()
epsilon = ptu.Variable(torch.randn(*means.size()))
latents = epsilon * stds + means
latents = latents
return latents, means, log_stds, stds
class Decoder(nn.Sequential):
def decode(self, latents):
output = self(latents)
means, log_stds = output[:, 0:2], output[:, 2:4]
distribution = Normal(means, log_stds.exp())
return distribution.sample()
def t_to_xy(t):
if len(t.shape) == 2:
t = t[:, 0]
x = t * np.cos(t * SWIRL_RATE) / T
y = t * np.sin(t * SWIRL_RATE) / T
return np.array([x, y]).T
def pretrain_encoder(encoder, opt):
losses = []
for _ in range(1000):
x_np, y_np = swirl_data(BS)
x = ptu.np_to_var(x_np)
y = ptu.np_to_var(y_np)
y_hat = encoder.encode(x)
loss = ((y_hat - y) ** 2).mean()
opt.zero_grad()
loss.backward()
opt.step()
losses.append(loss.data.numpy())
if VERBOSE:
x_np, y_np = swirl_data(N_VIS)
x = ptu.np_to_var(x_np)
y_hat = encoder.encode(x)
y_hat_np = y_hat.data.numpy()
x_hat_np = t_to_xy(y_hat_np[:, 0])
plt.subplot(2, 1, 1)
plt.plot(np.array(losses))
plt.title("Training Loss")
plt.subplot(2, 1, 2)
plt.plot(x_np[:, 0], x_np[:, 1], '.')
plt.plot(x_hat_np[:, 0], x_hat_np[:, 1], '.')
plt.title("Samples")
plt.legend(["Samples", "Estimates"])
plt.show()
def train_encoder(encoder, decoder, encoder_opt):
batch, true_latents = swirl_data(BS)
batch = ptu.np_to_var(batch)
latents, means, log_stds, stds = encoder.get_encoding_and_suff_stats(
batch
)
kl = kl_to_prior(means, log_stds, stds)
latents = encoder.encode(batch)
decoder_output = decoder(latents)
decoder_means = decoder_output[:, 0:2]
decoder_log_stds = decoder_output[:, 2:4]
distribution = Normal(decoder_means, decoder_log_stds.exp())
reconstruction_log_prob = distribution.log_prob(batch).sum(dim=1)
# elbo = - kl + reconstruction_log_prob
# loss = - elbo.mean()
loss = - reconstruction_log_prob.mean()
# This is the second place where we cheat:
latent_loss = ((ptu.np_to_var(true_latents) - latents) ** 2).mean()
loss = loss# + latent_loss
encoder_opt.zero_grad()
loss.backward()
encoder_opt.step()
return loss
def train_decoder(encoder, decoder, decoder_opt):
batch, true_latents = swirl_data(BS)
batch = ptu.np_to_var(batch)
latents = encoder.encode(batch)
decoder_output = decoder(latents)
decoder_means = decoder_output[:, 0:2]
decoder_log_stds = decoder_output[:, 2:4]
distribution = Normal(decoder_means, decoder_log_stds.exp())
reconstruction_log_prob = distribution.log_prob(batch).sum(dim=1)
loss = - reconstruction_log_prob.mean()
decoder_opt.zero_grad()
loss.backward()
decoder_opt.step()
return loss
def train_alternating(*_):
encoder = Encoder(
nn.Linear(2, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, 2),
)
encoder_opt = Adam(encoder.parameters())
decoder = Decoder(
nn.Linear(1, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, 4),
)
decoder_opt = Adam(decoder.parameters())
encoder_losses = []
decoder_losses = []
for _ in range(100):
for _ in range(N_BATCHES):
encoder_losses.append(
train_encoder(encoder, decoder, encoder_opt).data.numpy()
)
for _ in range(N_BATCHES):
decoder_losses.append(
train_decoder(encoder, decoder, decoder_opt).data.numpy()
)
# Visualize
vis_samples_np, true_latents_np = swirl_data(N_VIS)
vis_samples = ptu.np_to_var(vis_samples_np)
true_xy_mean_np = t_to_xy(true_latents_np)
latents = encoder.encode(vis_samples)
reconstructed_samples = decoder.decode(latents).data.numpy()
generated_samples = decoder.decode(
ptu.Variable(torch.randn(*latents.shape))
).data.numpy()
plt.subplot(2, 2, 1)
plt.plot(np.array(encoder_losses))
plt.title("Encoder Loss")
plt.subplot(2, 2, 2)
plt.plot(np.array(decoder_losses))
plt.title("Decoder Loss")
plt.subplot(2, 3, 4)
plt.plot(generated_samples[:, 0], generated_samples[:, 1], '.')
plt.title("Generated Samples")
plt.subplot(2, 3, 5)
plt.plot(reconstructed_samples[:, 0], reconstructed_samples[:, 1], '.')
estimated_means = t_to_xy(latents.data.numpy())
# plt.plot(estimated_means[:, 0], estimated_means[:, 1], '.')
plt.title("Reconstruction")
# plt.legend(["Samples", "Projected Latents"])
plt.subplot(2, 3, 6)
plt.plot(vis_samples_np[:, 0], vis_samples_np[:, 1], '.')
plt.plot(true_xy_mean_np[:, 0], true_xy_mean_np[:, 1], '.')
plt.title("Original Samples")
plt.legend(["Original", "True means"])
plt.show()
def train():
encoder = Encoder(
nn.Linear(2, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, 2),
)
encoder_opt = Adam(encoder.parameters())
# This is the first place that we cheat. However, this pretraining isn't
# needed if you just add the loss to the training (see below)
# pretrain_encoder(encoder, encoder_opt)
decoder = Decoder(
nn.Linear(1, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, 4),
)
decoder_opt = Adam(decoder.parameters())
print("Done training encoder")
losses = []
kls = []
log_probs = []
for _ in range(N_BATCHES):
batch, true_latents = swirl_data(BS)
batch = ptu.np_to_var(batch)
latents, means, log_stds, stds = encoder.get_encoding_and_suff_stats(
batch
)
kl = kl_to_prior(means, log_stds, stds)
latents = encoder.encode(batch)
# decoder_output = decoder(latents.detach())
decoder_output = decoder(latents)
decoder_means = decoder_output[:, 0:2]
decoder_log_stds = decoder_output[:, 2:4]
distribution = Normal(decoder_means, decoder_log_stds.exp())
reconstruction_log_prob = distribution.log_prob(batch).sum(dim=1)
elbo = - kl + reconstruction_log_prob
loss = - elbo.mean()
# This is the second place where we cheat:
latent_loss = ((ptu.np_to_var(true_latents) - latents) ** 2).mean()
loss = loss + latent_loss
decoder_opt.zero_grad()
encoder_opt.zero_grad()
loss.backward()
decoder_opt.step()
encoder_opt.step()
losses.append(loss.data.numpy())
kls.append(kl.mean().data.numpy())
log_probs.append(reconstruction_log_prob.mean().data.numpy())
# Visualize
vis_samples_np, true_latents_np = swirl_data(N_VIS)
vis_samples = ptu.np_to_var(vis_samples_np)
true_xy_mean_np = t_to_xy(true_latents_np)
latents = encoder.encode(vis_samples)
reconstructed_samples = decoder.decode(latents).data.numpy()
generated_samples = decoder.decode(
ptu.Variable(torch.randn(*latents.shape))
).data.numpy()
plt.subplot(2, 3, 1)
plt.plot(np.array(losses))
plt.title("Training Loss")
plt.subplot(2, 3, 2)
plt.plot(np.array(kls))
plt.title("KLs")
plt.subplot(2, 3, 3)
plt.plot(np.array(log_probs))
plt.title("Log Probs")
plt.subplot(2, 3, 4)
plt.plot(generated_samples[:, 0], generated_samples[:, 1], '.')
plt.title("Generated Samples")
plt.subplot(2, 3, 5)
plt.plot(reconstructed_samples[:, 0], reconstructed_samples[:, 1], '.')
estimated_means = t_to_xy(latents.data.numpy())
plt.plot(estimated_means[:, 0], estimated_means[:, 1], '.')
plt.title("Reconstruction")
plt.subplot(2, 3, 6)
plt.plot(vis_samples_np[:, 0], vis_samples_np[:, 1], '.')
plt.plot(true_xy_mean_np[:, 0], true_xy_mean_np[:, 1], '.')
plt.title("Original Samples")
plt.legend(["Original", "True means"])
plt.show()
if __name__ == '__main__':
train_alternating()
# train()
| 2.53125 | 3 |
litex/build/openfpgaloader.py | JosephBushagour/litex | 0 | 5181 | #
# This file is part of LiteX.
#
# Copyright (c) 2020 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.tools import write_to_file
from litex.build.generic_programmer import GenericProgrammer
# openFPGAloader ------------------------------------------------------------------------------------------
class OpenFPGALoader(GenericProgrammer):
needs_bitreverse = False
def __init__(self, board):
self.board = board
def load_bitstream(self, bitstream_file):
cmd = ["openFPGALoader", "--board", self.board, "--bitstream", bitstream_file]
self.call(cmd)
def flash(self, address, data_file):
cmd = ["openFPGALoader", "--board", self.board, "--write-flash", "--bitstream", data_file]
if address:
cmd.append("--offset")
cmd.append(address)
self.call(cmd)
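# Editor's addition (hedged usage sketch): the board name and file paths below are
# placeholders, and openFPGALoader must be installed and on PATH for the calls to work.
#
#   prog = OpenFPGALoader(board="arty_a7")
#   prog.load_bitstream("build/top.bit")                      # load into SRAM
#   prog.flash(address="0x0", data_file="build/top.bit")      # write to SPI flash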
| 2.296875 | 2 |
NutriBuddiAPIServices/ImageClassifier/NutriBuddiClassifier/Classifier/FoodClassifier.py | NutriBuddi/NutriBuddi | 2 | 5182 | class FoodClassifier:
#Class Attributes:
#model - the underlying keras model
#labels - the labels to be associated with the activation of each output neuron.
#Labels must be the same size as the output layer of the neural network.
def __init__(self, modelpath, labels, min_confidence = 0.6):
from keras.models import load_model
from keras.applications.resnet50 import ResNet50
self.resnet = ResNet50(include_top=False,weights='imagenet',pooling='max',input_shape=(224,224,3))
self.extModel = load_model(modelpath)
if(isinstance(labels,str)):
            #it's a file path
from os.path import exists
if(exists(labels)):
f = open(labels,'r')
x = f.readlines()
y = []
for i in x:
y.append(i.split('\n')[0])
self.labels = y
else:
self.labels = labels
        self.num_classes = len(self.labels)
self.min_confidence=min_confidence
def predict(self,img):
import os
from PIL import Image
from keras.preprocessing.image import img_to_array
import numpy as np
#check if image is a filepath
if(isinstance(img,str)):
if(not os.path.exists(img)):
print("Error: Invalid File Path")
return ""
else:
            #if it's a filepath, convert to a PIL image
img = Image.open(img)
#resize image
#shape from model input
shape = self.resnet.input_shape
imgr = img.resize(shape[1:3])
x = img_to_array(imgr).reshape((1,shape[1],shape[2],shape[3]))
#predict
features = self.resnet.predict(x)
prediction = self.extModel.predict(features)
#get max of predictions and return label(s)
predIdx = np.argmax(prediction)
if(prediction[0,predIdx]<self.min_confidence):
return ""
else:
return self.labels[predIdx]
def set_extModel(self,model):
self.extModel = model
def get_extModel(self):
return self.extModel
def set_labels(self,labels):
self.labels = labels
def get_labels(self):
return self.labels
def set_min_confidence(self,conf):
self.min_confidence=conf
def get_min_confidence(self):
return self.min_confidence
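# Editor's addition (hedged usage sketch): the file paths below are placeholders rather
# than files shipped with this repository.
#
#   clf = FoodClassifier("models/extModel.h5", "models/labels.txt", min_confidence=0.6)
#   print(clf.predict("photos/meal.jpg"))   # returns "" when confidence is below the threshold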
def generate_features_from_directory(location,target_image_count,model=None):
#generates feature maps from the convolutional layers of ResNet50 using all
#images from the directory
#INPUT:
#directory containing NESTED DIRECTORIES of images. (Very Important)
#the number of feature maps to generate for each image class
#OUTPUT:
    #an npy file per class containing the 2048-dimensional feature vectors
    #produced by ResNet50's convolutional layers
#data is generated in batches of 32
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.resnet50 import ResNet50
from os import listdir
from os.path import isdir
#create the model, if not defined
if model==None:
model = ResNet50(weights='imagenet',include_top=False,pooling='max')
#create the data generation
datagen = ImageDataGenerator()
    #for each class directory in the given location
if(not isdir(location)):
print("could not find location: " + location)
return
for label in listdir(location):
#first check that its a directory
label_path = location+'/'+label
if(not isdir(label_path)):
continue
#create the data generator
        #Output size is 224x224 to fit the ResNet50 input
print("Generating feature maps for " + label + "...")
generator = datagen.flow_from_directory(
label_path,
target_size = (224,224),
batch_size = 32,
class_mode=None)
#use ResNet50 to create the features
features = model.predict_generator(generator,target_image_count/32)
#features = np.reshape(features,(features.shape[0],features.shape[3]))
#save the features in a numpy binary
np.save(location+'/'+label+'.npy', features)
def create_data_set(data_path,output_folder,save_to_file=True):
#combines all npy files into one large file with their respective labels
#INPUTS:
    #a directory containing npy files of all the different classes
#Outputs:
#training array and training labels
#label array is returned as a one hot encoding
#label names
from os.path import isdir
from os import listdir
import numpy as np
#find out how many classes
num_classes = 0
label_names = []
if(not isdir(data_path)):
print("Could not find directory: "+ data_path)
return
data_contents = listdir(data_path)
for f in data_contents:
if(f.endswith('.npy')):
num_classes +=1
label_names.append(f.split('.')[0])
if(num_classes==0):
print("Could not find any data files in directory: "+data_path)
return
#generate one-hot label vectors
labels = np.zeros([num_classes,num_classes])
for i in range(0,num_classes):
labels[i][i]=1
    #load all arrays into memory.
    #In the future, this might need to be done on a high-RAM machine,
    #or another way of concatenating the data will be needed
arrays = []
sizes = []
for f in data_contents:
if(f.endswith('.npy')):
arr = np.load(data_path+'/'+f)
sizes.append(arr.shape[0])
arrays.append(arr)
X = np.vstack([arr for arr in arrays])
#load the labels into memory
labelcodes = []
for i in range(0,num_classes):
labelcodes.append(np.vstack([labels[i]]*sizes[i]))
y = np.vstack([l for l in labelcodes])
if(save_to_file):
np.save(output_folder+'/data_set.npy',X)
np.save(output_folder+'/label_codes.npy',y)
with open(output_folder+"/labels.txt","w") as output:
output.write("".join([label + '\n' for label in label_names]))
return X,y,label_names
def train_classifier_from_images(train_dir,train_size,val_dir,val_size,output_dir):
#INPUTS:
#train_dir is the directory containig the training images
#test_dir is the directory containing the validation images
#output_dir is the directory to save the trained model
#train_size is the number of images to generate for each training class
#val_size is the number of images to generate for each validation class
#OUTPUTS
#A model that takes as input a 2048-vector of feature maps and outputs
#a prediction of what an image with those features might be.
#The labels file is also placed in this directory
#The model created is an SVM with softmax activation.
from time import time
from keras.applications.resnet50 import ResNet50
from keras.models import Sequential
from keras.optimizers import SGD
from keras.regularizers import l2
from keras.layers import Dense
from sklearn.utils import shuffle
from keras.callbacks import EarlyStopping, ModelCheckpoint
#import ResNet50 without top layer
print("Loading the ResNet50 Network...")
resnet = ResNet50(weights='imagenet',include_top=False,pooling='max')
#create the training and validation datasets for each class
print("Generating Training Set...")
generate_features_from_directory(train_dir,train_size,model=resnet)
print("Generating Testing Set...")
generate_features_from_directory(val_dir,val_size,model=resnet)
#create the combined dataset
print("Combining datasets...")
X_train,y_train,labels = create_data_set(train_dir,output_dir+"/train",save_to_file=True)
X_val,y_val,labels = create_data_set(val_dir,output_dir+"/validation",save_to_file=True)
#shuffle the train data
X_train,y_train = shuffle(X_train,y_train)
num_classes = len(labels)
#create the extension model
print("Creating extension model...")
extModel = Sequential()
extModel.add(Dense(num_classes,input_shape=(2048,), activation='softmax', W_regularizer=l2(0.01)))
extModel.compile(loss='hinge',optimizer=SGD(lr=0.01,momentum=0.9),metrics=["accuracy"])
#callbacks
checkpoint = ModelCheckpoint(output_dir + "/extModel"+str(int(time()))+".h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode='auto')
with open(output_dir+"/labels.txt","w") as output:
output.write("".join([label + '\n' for label in labels]))
#train model
print("Training...")
extModel.fit(X_train,y_train,
batch_size=32,
epochs=50,
validation_data=(X_val,y_val),
callbacks = [checkpoint,early])
return extModel
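# Editor's addition (hedged usage sketch): directory names and image counts are
# placeholders; each directory must contain one sub-directory per class, as required by
# flow_from_directory in generate_features_from_directory above.
#
#   model = train_classifier_from_images("data/train", 2048, "data/val", 512, "models")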
def add_to_train(train_dir,image,label, resnet):
#INPUTS
    #train_dir - the directory that contains all the npy files
#image - the path to the image being added
#resnet - the resnet model to be used for feature determination
#label - the name of the item
#Appends the features of the new item to the training set data for that label
from PIL import Image
from os.path import exists
from keras.preprocessing.image import img_to_array
if(isinstance(image,str)):
if(not exists(image)):
print("Error: Invalid File Path")
return ""
else:
            #if it's a filepath, convert to a PIL image
img = Image.open(image)
shape = resnet.input_shape
imgr = img.resize(shape[1:3])
x = img_to_array(imgr).reshape((1,shape[1],shape[2],shape[3]))
#predict
features = resnet.predict(x)
import numpy as np
npyname = train_dir+'/'+label+'.npy'
if(not exists(npyname)):
np.save(npyname,features)
else:
fullset = np.load(npyname)
newset = np.append(fullset,features,axis=0)
np.save(npyname,newset)
| 3.359375 | 3 |
freehackquest_libclient_py/__init__.py | freehackquest/libfhqcli-py | 0 | 5183 | <reponame>freehackquest/libfhqcli-py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020-2021 FreeHackQuest Team <<EMAIL>>
"""This file was automatically generated by fhq-server
Version: v0.2.47
Date: 2022-01-01 07:15:35
"""
from freehackquest_libclient_py.freehackquest_client import FreeHackQuestClient
| 0.855469 | 1 |
exchange_sockets/bitstamp_websocket.py | SpiralDevelopment/crypto-hft-data | 31 | 5184 | from exchange_sockets.exchange_websocket import ExchangeWebSocket
from singletones.custom_logger import MyLogger
import websocket
import threading
from time import sleep
from time import time
import json
import ssl
logger = MyLogger()
class BitstampWebsocket(ExchangeWebSocket):
def __init__(self, pairs_n_streams):
super().__init__('Bitstamp', pairs_n_streams)
self.possible_streams = ['live_trades', 'diff_order_book']
self.streams = []
def init_streams(self):
for pair, streams in self.pairs_n_streams.items():
for sub_stream in streams.split(','):
if self.has_stream(sub_stream):
cur = dict()
cur['event'] = 'bts:subscribe'
cur['data'] = {'channel': "{}_{}".format(sub_stream, pair)}
self.streams.append(cur)
def start_multiple_websocket(self, init_streams=True):
super().start_multiple_websocket(init_streams=init_streams)
websocket.enableTrace(True)
self.ws = websocket.WebSocketApp("wss://ws.bitstamp.net",
on_open=self.__on_open,
on_message=self.__on_message,
on_error=self.__on_error,
on_close=self.__on_close)
self.wst = threading.Thread(target=lambda: self.ws.run_forever(sslopt={'cert_reqs': ssl.CERT_NONE}))
self.wst.daemon = True
self.wst.start()
logger.debug("Started thread")
# Wait for connect before continuing
conn_timeout = 15
        while (not self.ws.sock or not self.ws.sock.connected) and conn_timeout:
sleep(1)
conn_timeout -= 1
if not conn_timeout:
logger.error("%s Couldn't connect to %s! Exiting.",
self.node,
self.exchange)
self.close_socket()
else:
logger.info('{} socket is started:\n{}\n{}'.format(self.exchange,
self.node,
str(self.streams)))
def save_trades(self, message):
data = message['data']
channel = message['channel']
symbol = channel.split('_')[-1]
stream = channel[:-(len(symbol) + 1)]
append_data = "{},{},{},{}\n".format(data['timestamp'],
data['price'],
data['amount'],
data['type'])
self.file_manager.save_data_to_file(self.exchange,
stream,
symbol,
append_data)
def save_level2_orderbook(self, message):
data = message['data']
channel = message['channel']
symbol = channel.split('_')[-1]
stream = channel[:-(len(symbol) + 1)]
all_data = {}
data_time = data['timestamp']
for side in ['bids', 'asks']:
for cur in data[side]:
if not all_data.get(symbol, None):
all_data[symbol] = []
price = cur[0]
size = cur[1]
all_data[symbol].append("{},{},{}\n".format(
data_time,
price,
size if side == "bids" else "-{}".format(size)))
for symbol, l2_ob_data in all_data.items():
for l2_ob in l2_ob_data:
self.file_manager.save_data_to_file(self.exchange,
stream,
symbol,
l2_ob)
def __on_message(self, ws, message):
if message is None:
return
try:
self.last_msg_time = int(time())
message = json.loads(message)
channel = message['channel']
if channel.startswith('diff_order_book'):
self.save_level2_orderbook(message)
elif channel.startswith('live_trades'):
self.save_trades(message)
except Exception as e:
logger.debug(str(e))
def __on_error(self, ws, error):
self.on_error = True
logger.error("On error\n{}\n{} {}".format(self.node,
self.exchange,
error))
def __on_close(self, ws):
logger.info("On close\n{}".format(self.exchange))
def __on_open(self, ws):
logger.info("On Open\n{}".format(self.exchange))
if self.streams:
for stream in self.streams:
logger.info('Subscribing to %s', json.dumps(stream))
self.ws.send(json.dumps(stream))
sleep(2)
else:
logger.error('%s. Stream is not initialized', self.exchange)
def close_socket(self):
self.exited = True
if self.ws:
self.ws.close()
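# Editor's addition (hedged usage sketch): the pair and stream names below follow the
# format expected by init_streams (a dict mapping pair -> comma-separated stream names).
#
#   ws = BitstampWebsocket({"btcusd": "live_trades,diff_order_book"})
#   ws.start_multiple_websocket()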
| 2.375 | 2 |
src/data_loading.py | katerakelly/pytorch-maml | 565 | 5185 | <gh_stars>100-1000
import numpy as np
import random
import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import Sampler
import torchvision.transforms as transforms
from dataset import Omniglot, MNIST
'''
Helpers for loading class-balanced few-shot tasks
from datasets
'''
class ClassBalancedSampler(Sampler):
'''
Samples class-balanced batches from 'num_cl' pools each
of size 'num_inst'
If 'batch_cutoff' is None, indices for iterating over batches
of the entire dataset will be returned
Otherwise, indices for the number of batches up to the batch_cutoff
will be returned
(This is to allow sampling with replacement across training iterations)
'''
def __init__(self, num_cl, num_inst, batch_cutoff=None):
self.num_cl = num_cl
self.num_inst = num_inst
self.batch_cutoff = batch_cutoff
def __iter__(self):
'''return a single list of indices, assuming that items will be grouped by class '''
# First construct batches of 1 instance per class
batches = [[i+j*self.num_inst for i in torch.randperm(self.num_inst)] for j in range(self.num_cl)]
batches = [[batches[j][i] for j in range(self.num_cl)] for i in range(self.num_inst)]
# Shuffle within each batch so that classes don't always appear in same order
for sublist in batches:
random.shuffle(sublist)
if self.batch_cutoff is not None:
random.shuffle(batches)
batches = batches[:self.batch_cutoff]
batches = [item for sublist in batches for item in sublist]
return iter(batches)
def __len__(self):
return 1
def get_data_loader(task, batch_size=1, split='train'):
# NOTE: batch size here is # instances PER CLASS
if task.dataset == 'mnist':
normalize = transforms.Normalize(mean=[0.13066, 0.13066, 0.13066], std=[0.30131, 0.30131, 0.30131])
dset = MNIST(task, transform=transforms.Compose([transforms.ToTensor(), normalize]), split=split)
else:
normalize = transforms.Normalize(mean=[0.92206, 0.92206, 0.92206], std=[0.08426, 0.08426, 0.08426])
dset = Omniglot(task, transform=transforms.Compose([transforms.ToTensor(), normalize]), split=split)
sampler = ClassBalancedSampler(task.num_cl, task.num_inst, batch_cutoff = (None if split != 'train' else batch_size))
loader = DataLoader(dset, batch_size=batch_size*task.num_cl, sampler=sampler, num_workers=1, pin_memory=True)
return loader
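# Editor's addition (hedged usage sketch): assumes a few-shot `task` object exposing
# .dataset, .num_cl and .num_inst as used above; names are illustrative.
#
#   train_loader = get_data_loader(task, batch_size=5, split='train')
#   for images, labels in train_loader:
#       pass  # each batch is class-balanced: batch_size instances per class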
| 2.65625 | 3 |
DoChaP-db/UnusedScripts/main.py | Tal-Shay-Group/DoChaP | 2 | 5186 | <filename>DoChaP-db/UnusedScripts/main.py<gh_stars>1-10
#!/usr/bin/python
import sys
import os
sys.path.append(os.getcwd())
from Director import Director
from OrthologsBuilder import *
from SpeciesDB import *
if __name__ == "__main__":
inputDict = {}
for inarg in sys.argv[1:]:
try:
splitArg = inarg.strip("-").split("=")
if splitArg[0] in ("download", "withEns"):
inputDict[splitArg[0]] = splitArg[1]
else:
raise ValueError("Wrong input arguments. only accepts arguments 'download' and 'withEns'")
        except (AttributeError, IndexError):
raise ValueError("Make sure that input arguments are argumentName=argumentValue")
species = ['M_musculus', 'H_sapiens', 'R_norvegicus', 'D_rerio', 'X_tropicalis']
download = inputDict['download'] == 'True'
withEns = inputDict['withEns'] == 'True'
print("Running DBbuilder with Download {} and withENS {}".format(download, withEns))
print(type(download))
print(type(withEns))
director = Director()
orthologs = OrthologsBuilder(species=species, download=download)
director.setBuilder(orthologs)
director.collectFromSource(download=download)
spl = len(species)
spnum = 1
for sp in species:
print("===========Current Species: {}===========".format(sp))
dbBuild = dbBuilder(sp, download=download, withEns=withEns)
dbBuild.create_tables_db(merged=False)
dbBuild.fill_in_db(merged=False)
print("Filling {} completed!".format(dbBuild.dbName))
if spnum == 1:
dbBuild.create_tables_db(merged=True)
dbBuild.fill_in_db(merged=True)
if spnum == spl:
dbBuild.create_index()
dbBuild.AddOrthology(orthologs.OrthoTable)
spnum += 1
print("Filling {} completed!".format(dbBuild.dbName))
| 2.28125 | 2 |
tests/constants.py | eigenein/sqlitemap | 4 | 5187 | # See also: https://stackoverflow.com/questions/3694276/what-are-valid-table-names-in-sqlite
good_table_names = [
'foo',
'123abc',
'123abc.txt',
'123abc-ABC.txt',
'foo""bar',
'😀',
'_sqlite',
]
# See also: https://stackoverflow.com/questions/3694276/what-are-valid-table-names-in-sqlite
bad_table_names = [
'"',
'"foo"',
'sqlite_',
'sqlite_reserved',
]
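# Editor's addition (hedged sketch): these lists are typically consumed by parametrized
# tests; `create_table` is a hypothetical helper (not sqlitemap's real API), and the
# exception type is likewise an assumption.
#
#   import pytest
#
#   @pytest.mark.parametrize('name', good_table_names)
#   def test_accepts_good_table_name(name):
#       create_table(name)  # should not raise
#
#   @pytest.mark.parametrize('name', bad_table_names)
#   def test_rejects_bad_table_name(name):
#       with pytest.raises(ValueError):
#           create_table(name)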
| 1.53125 | 2 |
TWLight/settings/base.py | amire80/TWLight | 0 | 5188 | <filename>TWLight/settings/base.py
# -*- coding: utf-8 -*-
"""
Base settings for twlight project.
This is not intended to be used as the live settings file for a project and will
not work as one. You should instead use production.py, local.py, heroku.py, or
another file that you write. These files should live in the settings directory;
start with 'from .base import *'; and proceed to add or override settings as
appropriate to their context. In particular, you will need to set ALLOWED_HOSTS
before your app will run.
If you want to use production settings, you are now done. If not, you will also
need to set the environment variables indicated in the README.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
import json
from django.contrib import messages
from django.urls import reverse_lazy
from django.utils.translation import gettext_lazy as _
# Import available locales from Faker, so we can determine what languages we fake in tests.
from faker.config import AVAILABLE_LOCALES as FAKER_AVAILABLE_LOCALES
# We're going to replace Django's default logging config.
import logging.config
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TWLIGHT_HOME = os.path.dirname(
os.path.dirname(os.path.abspath(os.path.join(os.path.abspath(__file__), os.pardir)))
)
TWLIGHT_ENV = os.environ.get("TWLIGHT_ENV")
# An atypical way of setting django languages for TranslateWiki integration:
# https://translatewiki.net/wiki/Thread:Support/_The_following_issue_is_unconfirmed,_still_to_be_investigated._Adding_TheWikipediaLibrary_Card_Platform_TranslateWiki
# Get the language codes from the locale directories, and compare them to the
# languages in Wikimedia CLDR. Use language autonyms from Wikimedia.
# We periodically pull:
# https://raw.githubusercontent.com/wikimedia/language-data/master/data/language-data.json
# into locale/language-data.json
def get_languages_from_locale_subdirectories(dir):
current_languages = []
language_data_json = open(os.path.join(dir, "language-data.json"))
languages = json.loads(language_data_json.read())["languages"]
for locale_dir in os.listdir(dir):
if os.path.isdir(os.path.join(dir, locale_dir)):
for lang_code, lang_data in languages.items():
autonym = lang_data[-1]
if locale_dir == lang_code:
current_languages += [(lang_code, autonym)]
return sorted(set(current_languages))
# Get the intersection of available Faker locales and the specified language set.
def get_django_faker_languages_intersection(languages):
languages_intersection = []
for locale in FAKER_AVAILABLE_LOCALES:
for i, (djlang_code, djlang_name) in enumerate(languages):
# Exclude common English locales from random test selection; English often works while others are broken.
if (
locale == djlang_code
and locale != "en"
and locale != "en_US"
and locale != "en_GB"
):
languages_intersection += [locale]
return sorted(set(languages_intersection))
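# Editor's note (illustrative shapes only, values are examples): the two helpers above
# produce structures like
#   LANGUAGES = [("de", "Deutsch"), ("en", "English"), ("fr", "français"), ...]
#   FAKER_LOCALES = ["de", "fr", ...]   # Faker locales that overlap with LANGUAGES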
# ------------------------------------------------------------------------------
# ------------------------> core django configurations <------------------------
# ------------------------------------------------------------------------------
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"django.contrib.admin",
"django.contrib.admindocs",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"whitenoise.runserver_nostatic", # Not a django app; replaces staticfiles
"django.contrib.staticfiles",
"django.contrib.sites", # required by django.contrib.comments
]
THIRD_PARTY_APPS = [
"annoying",
"crispy_forms",
"reversion",
"dal",
"dal_select2",
"django_comments",
"django_cron",
"django_filters",
"modeltranslation",
# DO NOT CONFUSE THIS with requests, the Python URL library! This is
# django-request, the user analytics package.
"request",
"django_countries",
"rest_framework",
"rest_framework.authtoken",
"django_extensions",
]
TWLIGHT_APPS = [
"TWLight.i18n",
"TWLight.users",
"TWLight.resources",
"TWLight.applications",
"TWLight.emails",
"TWLight.graphs",
"TWLight.comments",
"TWLight.api",
"TWLight.ezproxy",
]
# dal (autocomplete_light) and modeltranslation must go before django.contrib.admin.
INSTALLED_APPS = THIRD_PARTY_APPS + DJANGO_APPS + TWLIGHT_APPS
# CRON CONFIGURATION
# ------------------------------------------------------------------------------
CRON_CLASSES = [
"TWLight.crons.BackupCronJob",
"TWLight.crons.SendCoordinatorRemindersCronJob",
"TWLight.crons.UserRenewalNoticeCronJob",
"TWLight.crons.ProxyWaitlistDisableCronJob",
"TWLight.crons.UserUpdateEligibilityCronJob",
"TWLight.crons.ClearSessions",
]
# REST FRAMEWORK CONFIG
# ------------------------------------------------------------------------------
REST_FRAMEWORK = {
"DEFAULT_VERSIONING_CLASS": "rest_framework.versioning.NamespaceVersioning"
}
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
# WhiteNoise should be loaded before everything but security.
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
# LocaleMiddleware must go after Session (and Cache, if used), but before
# Common.
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.contrib.admindocs.middleware.XViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
# The default storage backend relies on sessions.
# That’s why SessionMiddleware must be enabled and appear before
# MessageMiddleware.
"django.contrib.messages.middleware.MessageMiddleware",
]
# DEBUG
# ------------------------------------------------------------------------------
# By setting this an an environment variable, it is easy to switch debug on in
# servers to do a quick test.
# DEBUG SHOULD BE FALSE ON PRODUCTION for security reasons.
DEBUG = bool(os.environ.get("DEBUG", "False").lower() == "true")
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# WMF sysadmins strongly prefer mysql, so use that.
# If you're deploying to Heroku, heroku.py will override this.
DATABASES = {
"default": {
"ENGINE": "django.db.backends.mysql",
"NAME": os.environ.get("DJANGO_DB_NAME", None),
"USER": os.environ.get("DJANGO_DB_USER", None),
"PASSWORD": os.environ.get("DJANGO_DB_PASSWORD", None),
"HOST": os.environ.get("DJANGO_DB_HOST", None),
"PORT": "3306",
# This is critical for handling Unicode data due to stupid properties
# of MySQL; see https://stackoverflow.com/questions/2108824/mysql-incorrect-string-value-error-when-save-unicode-string-in-django .
"OPTIONS": {
"charset": "utf8mb4",
"init_command": "SET sql_mode='STRICT_ALL_TABLES'; SET storage_engine='INNODB';",
},
}
}
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("SECRET_KEY")
# In production, this list should contain the URL of the server and nothing
# else, for security reasons. For local testing '*' is OK.
ALLOWED_HOSTS = os.environ.get("ALLOWED_HOSTS", "localhost 127.0.0.1 [::1]").split(" ")
# Let Django know about external URLs in case they differ from internal
# Needed to be added for /admin
USE_X_FORWARDED_HOST = True
REQUEST_BASE_URL = os.environ.get("REQUEST_BASE_URL", None)
ROOT_URLCONF = "TWLight.urls"
WSGI_APPLICATION = "TWLight.wsgi.application"
SITE_ID = 1
# Overwrite messages.ERROR to use danger instead, to play nice with bootstrap
MESSAGE_TAGS = {messages.ERROR: "danger"}
# INTERNATIONALIZATION CONFIGURATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = "en" # Sets site default language.
# https://django-modeltranslation.readthedocs.io/en/latest/installation.html#advanced-settings
MODELTRANSLATION_DEFAULT_LANGUAGE = (
LANGUAGE_CODE # sets the modeltranslation default language.
)
LOCALE_PATHS = [
# makemessages looks for locale/ in the top level, not the project level.
os.path.join(os.path.dirname(BASE_DIR), "locale")
]
# We're letting the file-based translation contributions dictate the languages
# available to the system. This keeps our column and index count for db-stored
# translations as low as possible while allowing translatewiki contributions to
# be used without reconfiguring the site.
LANGUAGES = get_languages_from_locale_subdirectories(LOCALE_PATHS[0])
FAKER_LOCALES = get_django_faker_languages_intersection(LANGUAGES)
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"OPTIONS": {
# Reiterating the default so we can add to it later.
"context_processors": (
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.request",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
),
# We cache templates by default.
"loaders": [
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
],
},
}
]
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, "collectedstatic")
STATIC_URL = "/static/"
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# MEDIA FILE CONFIGURATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/1.8/topics/files/
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "media")
MEDIA_URL = "/media/"
# ------------------------------------------------------------------------------
# -----------------> third-party and TWLight configurations <-------------------
# ------------------------------------------------------------------------------
CRISPY_TEMPLATE_PACK = "bootstrap3"
# EZPROXY CONFIGURATION
# ------------------------------------------------------------------------------
TWLIGHT_EZPROXY_URL = os.environ.get("TWLIGHT_EZPROXY_URL", None)
TWLIGHT_EZPROXY_SECRET = os.environ.get("TWLIGHT_EZPROXY_SECRET", None)
# OAUTH CONFIGURATION
# ------------------------------------------------------------------------------
LOGIN_URL = reverse_lazy("oauth_login")
LOGIN_REDIRECT_URL = reverse_lazy("users:home")
AUTHENTICATION_BACKENDS = [
"TWLight.users.oauth.OAuthBackend",
"django.contrib.auth.backends.ModelBackend",
]
TWLIGHT_OAUTH_PROVIDER_URL = os.environ.get("TWLIGHT_OAUTH_PROVIDER_URL", None)
TWLIGHT_OAUTH_CONSUMER_KEY = os.environ.get("TWLIGHT_OAUTH_CONSUMER_KEY", None)
TWLIGHT_OAUTH_CONSUMER_SECRET = os.environ.get("TWLIGHT_OAUTH_CONSUMER_SECRET", None)
# API CONFIGURATION
# ------------------------------------------------------------------------------
TWLIGHT_API_PROVIDER_ENDPOINT = os.environ.get("TWLIGHT_API_PROVIDER_ENDPOINT", None)
# COMMENTS CONFIGURATION
# ------------------------------------------------------------------------------
COMMENTS_APP = "TWLight.comments"
# REVERSION CONFIGURATION
# ------------------------------------------------------------------------------
# See https://django-reversion.readthedocs.org/ .
# We are NOT using reversion middleware, because that creates revisions when
# save() is called in the context of some http requests, but not on all database
# saves. This makes it untestable. Instead we decorate the Application.save().
# DJMAIL CONFIGURATION
# ------------------------------------------------------------------------------
DJMAIL_REAL_BACKEND = os.environ.get(
"DJANGO_EMAIL_BACKEND", "django.core.mail.backends.console.EmailBackend"
)
EMAIL_BACKEND = "djmail.backends.async.EmailBackend"
EMAIL_HOST = os.environ.get("DJANGO_EMAIL_HOST", "localhost")
EMAIL_PORT = 25
EMAIL_HOST_USER = ""
EMAIL_HOST_PASSWORD = ""
EMAIL_USE_TLS = False
INSTALLED_APPS += ["djmail"]
# DJANGO_REQUEST CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE += ["request.middleware.RequestMiddleware"]
# The following are set for privacy purposes. Note that, if some amount of
# geographic tracking is desired, there is a REQUEST_ANONYMOUS_IP setting which
# scrubs the last octet of the IP address, which could be used instead of
# REQUEST_LOG_IP. There is not a way to get semi-granular user tracking (such
# as tracking only authenticated vs anonymous users).
REQUEST_LOG_IP = False
REQUEST_LOG_USER = False
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# We're replacing the default logging config to get better control of the
# mail_admins behavior.
LOGGING_CONFIG = None
logging.config.dictConfig(
{
"version": 1,
"disable_existing_loggers": False,
"filters": {
"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"},
"require_debug_true": {"()": "django.utils.log.RequireDebugTrue"},
},
"formatters": {
"django.server": {
"()": "django.utils.log.ServerFormatter",
"format": "[%(server_time)s] %(message)s",
}
},
"handlers": {
"nodebug_console": {
"level": "WARNING",
"filters": ["require_debug_false"],
"class": "logging.StreamHandler",
},
"debug_console": {
"level": "INFO",
"filters": ["require_debug_true"],
"class": "logging.StreamHandler",
},
"django.server": {
"level": "INFO",
"class": "logging.StreamHandler",
"formatter": "django.server",
},
},
"loggers": {
"django": {
"handlers": ["nodebug_console", "debug_console"],
"level": os.environ.get("DJANGO_LOG_LEVEL", "INFO"),
},
"django.server": {
"handlers": ["django.server"],
"level": os.environ.get("DJANGO_LOG_LEVEL", "INFO"),
"propagate": False,
},
"TWLight": {
"handlers": ["nodebug_console", "debug_console"],
"level": os.environ.get("DJANGO_LOG_LEVEL", "INFO"),
},
},
}
)
| 1.742188 | 2 |
modelator_py/util/tla/_optable.py | informalsystems/modelator-py | 0 | 5189 | <reponame>informalsystems/modelator-py
"""Table of operators."""
# Copyright 2020 by California Institute of Technology
# Copyright (c) 2008-2013 INRIA and Microsoft Corporation
# All rights reserved. Licensed under 3-clause BSD.
#
# This module is based on the file:
#
# <https://github.com/tlaplus/tlapm/blob/main/src/optable.ml>
#
import pprint
from .ast import Nodes as nodes
# open Builtin
# type fixity =
# | Nonfix
# | Prefix | Postfix
# | Infix of assoc
class Fixity:
pass
class Nonfix(Fixity):
pass
class Prefix(Fixity):
pass
class Postfix(Fixity):
pass
class Infix(Fixity):
def __init__(self, assoc):
self.assoc = assoc
# and assoc =
# | Left | Non | Right
class Assoc:
pass
class Left(Assoc):
pass
class Right(Assoc):
pass
class Non(Assoc):
pass
# and dom =
# (* primitive operators *)
# | Logic | Sets | Modal
# (* user-definable operators *)
# | User
dom = {"Logic", "Sets", "Modal", "User"}
# type prec = int * int
class Prec:
def __init__(self, a, b):
self.a = a
self.b = b
# let withdef (name, prec, fix, als, defn) = (
# name, prec, fix, als, Some defn);;
def withdef(tuple_):
name, prec, fix, als, defn = tuple_
return (name, prec, fix, als, defn)
# let tlaops = [
# Logic,
# List.map withdef [
# '=>', ( 1, 1), Infix(Non()), [], Implies ;
# '<=>', ( 2, 2), Infix(Non()), [ '\\equiv' ], Equiv ;
# '/\\', ( 3, 3), Infix(Left()), [ '\\land' ], Conj ;
# '\\/', ( 3, 3), Infix(Left()), [ '\\lor' ], Disj ;
# '~', ( 4, 4), Prefix, [ '\\neg' ; '\\lnot' ], Neg ;
# '=', ( 5, 5), Infix(Non()), [], Eq ;
# '#', ( 5, 5), Infix(Non()), [ '/=' ], Neq ;
# ] ;
# Sets,
# List.map withdef [
# 'SUBSET', ( 8, 8), Prefix, [], SUBSET ;
# 'UNION', ( 8, 8), Prefix, [], UNION ;
# 'DOMAIN', ( 9, 9), Prefix, [], DOMAIN ;
# '\\subseteq', ( 5, 5), Infix(Non()), [], Subseteq ;
# '\\in', ( 5, 5), Infix(Non()), [], Mem ;
# '\\notin', ( 5, 5), Infix(Non()), [], Notmem ;
# '\\', ( 8, 8), Infix(Non()), [], Setminus ;
# '\\cap', ( 8, 8), Infix(Left()), [ '\\intersect' ], Cap ;
# '\\cup', ( 8, 8), Infix(Left()), [ '\\union' ], Cup ;
# ] ;
# Sets,
# [ '\\X', (10,13), Prefix, [ '\\times' ], None ] ;
# Modal,
# List.map withdef [
# ''', (15,15), Postfix, [], Prime ;
# '~>', ( 2, 2), Infix(Non()), [ '\\leadsto' ], Leadsto ;
# 'ENABLED', ( 4,15), Prefix, [], ENABLED ;
# 'UNCHANGED', ( 4,15), Prefix, [], UNCHANGED ;
# '\\cdot', ( 5,14), Infix(Left()), [], Cdot ;
# '-+->', ( 2, 2), Infix(Non()), [], Actplus ;
# '[]', ( 4,15), Prefix, [], Box true ;
# '<>', ( 4,15), Prefix, [], Diamond ;
# ] ;
# User,
# List.map (fun (name, prec, fix, als) -> (name, prec, fix, als, None)) [
# '^', (14,14), Infix(Non()), [] ;
# '/', (13,13), Infix(Non()), [] ;
# '*', (13,13), Infix(Left()), [] ;
# '-.', (12,12), Prefix, [ '-' ] ;
# '-', (11,11), Infix(Left()), [] ;
# '+', (10,10), Infix(Left()), [] ;
# '^+', (15,15), Postfix, [] ;
# '^*', (15,15), Postfix, [] ;
# '^#', (15,15), Postfix, [] ;
# '<', ( 5, 5), Infix(Non()), [] ;
# '=<', ( 5, 5), Infix(Non()), [ '<=' ; '\\leq' ] ;
# '>', ( 5, 5), Infix(Non()), [] ;
# '>=', ( 5, 5), Infix(Non()), [ '\\geq' ] ;
# '...', ( 9, 9), Infix(Non()), [] ;
# '..', ( 9, 9), Infix(Non()), [] ;
# '|', (10,11), Infix(Left()), [] ;
# '||', (10,11), Infix(Left()), [] ;
# '&&', (13,13), Infix(Left()), [] ;
# '&', (13,13), Infix(Left()), [] ;
# '$$', ( 9,13), Infix(Left()), [] ;
# '$', ( 9,13), Infix(Left()), [] ;
# '??', ( 9,13), Infix(Left()), [] ;
# '%%', (10,11), Infix(Left()), [] ;
# '%', (10,11), Infix(Non()), [ '\\mod' ] ;
# '##', ( 9,13), Infix(Left()), [] ;
# '++', (10,10), Infix(Left()), [] ;
# '--', (11,11), Infix(Left()), [] ;
# '**', (13,13), Infix(Left()), [] ;
# '//', (13,13), Infix(Non()), [] ;
# '^^', (14,14), Infix(Non()), [] ;
# '@@', ( 6, 6), Infix(Left()), [] ;
# '!!', ( 9,13), Infix(Non()), [] ;
# '|-', ( 5, 5), Infix(Non()), [] ;
# '|=', ( 5, 5), Infix(Non()), [] ;
# '-|', ( 5, 5), Infix(Non()), [] ;
# '=|', ( 5, 5), Infix(Non()), [] ;
# '<:', ( 7, 7), Infix(Non()), [] ;
# ':>', ( 7, 7), Infix(Non()), [] ;
# ':=', ( 5, 5), Infix(Non()), [] ;
# '::=', ( 5, 5), Infix(Non()), [] ;
# '(+)', (10,10), Infix(Left()), [ '\\oplus' ] ;
# '(-)', (11,11), Infix(Left()), [ '\\ominus' ] ;
# '(.)', (13,13), Infix(Left()), [ '\\odot' ] ;
# '(/)', (13,13), Infix(Non()), [ '\\oslash' ] ;
# '(\\X)', (13,13), Infix(Left()), [ '\\otimes' ] ;
# '\\uplus', ( 9,13), Infix(Left()), [] ;
# '\\sqcap', ( 9,13), Infix(Left()), [] ;
# '\\sqcup', ( 9,13), Infix(Left()), [] ;
# '\\div', (13,13), Infix(Non()), [] ;
# '\\wr', ( 9,14), Infix(Non()), [] ;
# '\\star', (13,13), Infix(Left()), [] ;
# '\\o', (13,13), Infix(Left()), [ '\\circ' ] ;
# '\\bigcirc', (13,13), Infix(Left()), [] ;
# '\\bullet', (13,13), Infix(Left()), [] ;
# '\\prec', ( 5, 5), Infix(Non()), [] ;
# '\\succ', ( 5, 5), Infix(Non()), [] ;
# '\\preceq', ( 5, 5), Infix(Non()), [] ;
# '\\succeq', ( 5, 5), Infix(Non()), [] ;
# '\\sim', ( 5, 5), Infix(Non()), [] ;
# '\\simeq', ( 5, 5), Infix(Non()), [] ;
# '\\ll', ( 5, 5), Infix(Non()), [] ;
# '\\gg', ( 5, 5), Infix(Non()), [] ;
# '\\asymp', ( 5, 5), Infix(Non()), [] ;
# '\\subset', ( 5, 5), Infix(Non()), [] ;
# '\\supset', ( 5, 5), Infix(Non()), [] ;
# '\\supseteq', ( 5, 5), Infix(Non()), [] ;
# '\\approx', ( 5, 5), Infix(Non()), [] ;
# '\\cong', ( 5, 5), Infix(Non()), [] ;
# '\\sqsubset', ( 5, 5), Infix(Non()), [] ;
# '\\sqsubseteq', ( 5, 5), Infix(Non()), [] ;
# '\\sqsupset', ( 5, 5), Infix(Non()), [] ;
# '\\sqsupseteq', ( 5, 5), Infix(Non()), [] ;
# '\\doteq', ( 5, 5), Infix(Non()), [] ;
# '\\propto', ( 5, 5), Infix(Non()), [] ;
# ] ;
# ]
def _generate_tlaops():
tlaops = [
(
"Logic",
[
("=>", (1, 1), Infix(Non()), list(), nodes.Implies()),
("<=>", (2, 2), Infix(Non()), ["\\equiv"], nodes.Equiv()),
("/\\", (3, 3), Infix(Left()), ["\\land"], nodes.Conj()),
("\\/", (3, 3), Infix(Left()), ["\\lor"], nodes.Disj()),
("~", (4, 4), Prefix(), ["\\neg", "\\lnot"], nodes.Neg()),
("=", (5, 5), Infix(Non()), list(), nodes.Eq()),
("#", (5, 5), Infix(Non()), ["/="], nodes.Neq()),
],
),
(
"Sets",
[
("SUBSET", (8, 8), Prefix(), list(), nodes.SUBSET()),
("UNION", (8, 8), Prefix(), list(), nodes.UNION()),
("DOMAIN", (9, 9), Prefix(), list(), nodes.DOMAIN()),
("\\subseteq", (5, 5), Infix(Non()), list(), nodes.Subseteq()),
("\\in", (5, 5), Infix(Non()), list(), nodes.Mem()),
("\\notin", (5, 5), Infix(Non()), [], nodes.Notmem()),
("\\", (8, 8), Infix(Non()), ["\\setminus"], nodes.Setminus()),
("\\cap", (8, 8), Infix(Left()), ["\\intersect"], nodes.Cap()),
("\\cup", (8, 8), Infix(Left()), ["\\union"], nodes.Cup()),
("\\X", (10, 13), Infix(Left()), ["\\times"], None),
],
),
(
"Modal",
[
("'", (15, 15), Postfix(), list(), nodes.Prime()),
("~>", (2, 2), Infix(Non()), ["\\leadsto"], nodes.LeadsTo()),
("ENABLED", (4, 15), Prefix(), list(), nodes.ENABLED()),
("UNCHANGED", (4, 15), Prefix(), list(), nodes.UNCHANGED()),
("\\cdot", (5, 14), Infix(Left()), list(), nodes.Cdot()),
("-+->", (2, 2), Infix(Non()), list(), nodes.WhilePlus()),
("[]", (4, 15), Prefix(), list(), nodes.Box(True)),
("<>", (4, 15), Prefix(), list(), nodes.Diamond()),
],
),
(
"User",
[
(name, prec, fix, als, None)
for name, prec, fix, als in [
("^", (14, 14), Infix(Non()), list()),
("/", (13, 13), Infix(Non()), list()),
("*", (13, 13), Infix(Left()), list()),
("-.", (12, 12), Prefix(), ["-"]),
("-", (11, 11), Infix(Left()), list()),
("+", (10, 10), Infix(Left()), list()),
("^+", (15, 15), Postfix(), list()),
("^*", (15, 15), Postfix(), list()),
("^#", (15, 15), Postfix(), list()),
("<", (5, 5), Infix(Non()), list()),
("=<", (5, 5), Infix(Non()), ["<=", "\\leq"]),
(">", (5, 5), Infix(Non()), list()),
(">=", (5, 5), Infix(Non()), ["\\geq"]),
("...", (9, 9), Infix(Non()), list()),
("..", (9, 9), Infix(Non()), list()),
("|", (10, 11), Infix(Left()), list()),
("||", (10, 11), Infix(Left()), list()),
("&&", (13, 13), Infix(Left()), list()),
("&", (13, 13), Infix(Left()), list()),
("$$", (9, 13), Infix(Left()), list()),
("$", (9, 13), Infix(Left()), list()),
("??", (9, 13), Infix(Left()), list()),
("%%", (10, 11), Infix(Left()), list()),
("%", (10, 11), Infix(Non()), ["\\mod"]),
("##", (9, 13), Infix(Left()), list()),
("++", (10, 10), Infix(Left()), list()),
("--", (11, 11), Infix(Left()), list()),
("**", (13, 13), Infix(Left()), list()),
("//", (13, 13), Infix(Non()), list()),
("^^", (14, 14), Infix(Non()), list()),
("@@", (6, 6), Infix(Left()), list()),
("!!", (9, 13), Infix(Non()), list()),
("|-", (5, 5), Infix(Non()), list()),
("|=", (5, 5), Infix(Non()), list()),
("-|", (5, 5), Infix(Non()), list()),
("=|", (5, 5), Infix(Non()), list()),
("<:", (7, 7), Infix(Non()), list()),
(":>", (7, 7), Infix(Non()), list()),
(":=", (5, 5), Infix(Non()), list()),
("::=", (5, 5), Infix(Non()), list()),
("(+)", (10, 10), Infix(Left()), ["\\oplus"]),
("(-)", (11, 11), Infix(Left()), ["\\ominus"]),
("(.)", (13, 13), Infix(Left()), ["\\odot"]),
("(/)", (13, 13), Infix(Non()), ["\\oslash"]),
("(\\X)", (13, 13), Infix(Left()), ["\\otimes"]),
("\\uplus", (9, 13), Infix(Left()), list()),
("\\sqcap", (9, 13), Infix(Left()), list()),
("\\sqcup", (9, 13), Infix(Left()), list()),
("\\div", (13, 13), Infix(Non()), list()),
("\\wr", (9, 14), Infix(Non()), list()),
("\\star", (13, 13), Infix(Left()), list()),
("\\o", (13, 13), Infix(Left()), ["\\circ"]),
("\\bigcirc", (13, 13), Infix(Left()), list()),
("\\bullet", (13, 13), Infix(Left()), list()),
("\\prec", (5, 5), Infix(Non()), list()),
("\\succ", (5, 5), Infix(Non()), list()),
("\\preceq", (5, 5), Infix(Non()), list()),
("\\succeq", (5, 5), Infix(Non()), list()),
("\\sim", (5, 5), Infix(Non()), list()),
("\\simeq", (5, 5), Infix(Non()), list()),
("\\ll", (5, 5), Infix(Non()), list()),
("\\gg", (5, 5), Infix(Non()), list()),
("\\asymp", (5, 5), Infix(Non()), list()),
("\\subset", (5, 5), Infix(Non()), list()),
("\\supset", (5, 5), Infix(Non()), list()),
("\\supseteq", (5, 5), Infix(Non()), list()),
("\\approx", (5, 5), Infix(Non()), list()),
("\\cong", (5, 5), Infix(Non()), list()),
("\\sqsubset", (5, 5), Infix(Non()), list()),
("\\sqsubseteq", (5, 5), Infix(Non()), list()),
("\\sqsupset", (5, 5), Infix(Non()), list()),
("\\sqsupseteq", (5, 5), Infix(Non()), list()),
("\\doteq", (5, 5), Infix(Non()), list()),
("\\propto", (5, 5), Infix(Non()), list()),
]
],
),
]
return tlaops
# type tlaop = {
# name : string ;
# prec : prec ;
# fix : fixity ;
# dom : dom ;
# defn : Builtin.builtin option ;
# }
class TLAOP:
def __init__(self, name, prec, fixity, dom, defn):
self.name = name # str
self.prec = prec # Prec
self.fix = fixity # Fixity
self.dom = dom
self.defn = defn
def __repr__(self):
return (
f"TLAOP({self.name}, {self.prec}, " f"{self.fix}, {self.dom}, {self.defn})"
)
# let optable =
# let module H = Hashtbl in
# let tab = H.create 109 in
# List.iter begin
# fun (dom, ops) ->
# List.iter begin
# fun (name, prec, fix, als, defn) ->
# let op = { name = name ;
# prec = prec ;
# fix = fix ; dom = dom ;
# defn = defn }
# in
# H.add tab name op ;
# List.iter (fun s -> H.add tab s op) als
# end ops
# end tlaops ;
# tab
def _generate_optable():
tlaops = _generate_tlaops()
optable = dict()
for dom, ops in tlaops:
for name, prec, fixity, alternatives, defn in ops:
op = TLAOP(name, prec, fixity, dom, defn)
optable.setdefault(name, list())
optable[name].append(op)
for s in alternatives:
optable.setdefault(s, list())
optable[s].append(op)
return optable
optable = _generate_optable()
# pprint.pprint(optable)
# let nonfix name defn =
# { name = name ; prec = (-1, -1) ;
# fix = Nonfix ; dom = User ; defn = defn }
#
# let lookup name =
# if Hashtbl.mem optable name then
# Hashtbl.find optable name
# else
# nonfix name None
#
# (** Mapping from builtins to standard tlaops *)
# let standard_form b =
# match b with
# | TRUE -> nonfix 'TRUE' (Some TRUE)
# | FALSE -> nonfix 'FALSE' (Some FALSE)
# | Implies -> lookup '=>'
# | Equiv -> lookup '<=>'
# | Conj -> lookup '/\\'
# | Disj -> lookup '\\/'
# | Neg -> lookup '~'
# | Eq -> lookup '='
# | Neq -> lookup '#'
# | Divides ->
# {
# name = '?|';
# prec = (10, 11);
# fix = Infix(Non());
# dom = Logic;
# defn = Some Divides;
# }
#
# | STRING -> nonfix 'STRING' (Some STRING)
# | BOOLEAN -> nonfix 'BOOLEAN' (Some BOOLEAN)
# | SUBSET -> lookup 'SUBSET'
# | UNION -> lookup 'UNION'
# | DOMAIN -> lookup 'DOMAIN'
# | Subseteq -> lookup '\\subseteq'
# | Mem -> lookup '\\in'
# | Notmem -> lookup '\\notin'
# | Setminus -> lookup '\\'
# | Cap -> lookup '\\cap'
# | Cup -> lookup '\\cup'
#
# | Prime -> lookup '''
# | StrongPrime -> lookup '''
# | Leadsto -> lookup '~>'
# | ENABLED -> lookup 'ENABLED'
# | UNCHANGED -> lookup 'UNCHANGED'
# | Cdot -> lookup '\\cdot'
# | Actplus -> lookup '-+->'
# | Box _ -> lookup '[]'
# | Diamond -> lookup '<>'
#
# | Plus -> { (lookup '+') with defn = Some Plus }
# | Minus -> { (lookup '-') with defn = Some Minus }
# | Uminus -> { (lookup '-.') with defn = Some Uminus ; name = '-' }
# | Times -> { (lookup '*') with defn = Some Times }
# | Ratio -> { (lookup '/') with defn = Some Ratio }
# | Quotient -> { (lookup '\\div') with defn = Some Quotient }
# | Remainder -> { (lookup '%') with defn = Some Remainder }
# | Exp -> { (lookup '^') with defn = Some Exp }
# | Lteq -> { (lookup '=<') with defn = Some Lteq }
# | Lt -> { (lookup '<') with defn = Some Lt }
# | Gteq -> { (lookup '>=') with defn = Some Gteq }
# | Gt -> { (lookup '>') with defn = Some Gt }
# | Range -> { (lookup '..') with defn = Some Range }
# | Nat -> nonfix 'Nat' (Some Nat)
# | Int -> nonfix 'Int' (Some Int)
# | Real -> nonfix 'Real' (Some Real)
# | Infinity -> nonfix 'Infinity' (Some Infinity)
#
# | Seq -> nonfix 'Seq' (Some Seq)
# | Len -> nonfix 'Len' (Some Len)
# | BSeq -> nonfix 'BSeq' (Some BSeq)
# | Append -> nonfix 'Append' (Some Append)
# | Cat -> { (lookup '\\o') with defn = Some Cat }
# | Head -> nonfix 'Head' (Some Head)
# | Tail -> nonfix 'Tail' (Some Tail)
# | SubSeq -> nonfix 'SubSeq' (Some SubSeq)
# | SelectSeq -> nonfix 'SelectSeq' (Some SelectSeq)
#
# | OneArg -> { (lookup ':>') with defn = Some OneArg }
# | Extend -> { (lookup '@@') with defn = Some Extend }
# | Print -> nonfix 'Print' (Some Print)
# | PrintT -> nonfix 'PrintT' (Some PrintT)
# | Assert -> nonfix 'Assert' (Some Assert)
# | JavaTime -> nonfix 'JavaTime' (Some JavaTime)
# | TLCGet -> nonfix 'TLCGet' (Some TLCGet)
# | TLCSet -> nonfix 'TLCSet' (Some TLCSet)
# | Permutations -> nonfix 'Permutations' (Some Permutations)
# | SortSeq -> nonfix 'SortSeq' (Some SortSeq)
# | RandomElement -> nonfix 'RandomElement' (Some RandomElement)
# | Any -> nonfix 'Any' (Some Any)
# | ToString -> nonfix 'ToString' (Some ToString)
#
# | Unprimable -> nonfix 'Unprimable' None
# | Irregular -> nonfix 'Irregular' None
# ;;
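# Editor's addition (hedged sketch, not part of the original module): a Python
# counterpart to the OCaml `nonfix` and `lookup` helpers commented above. Note that
# `optable` maps each name to a *list* of TLAOP entries here, so the first match is
# returned, whereas the OCaml version stores a single operator per key.
def _nonfix(name, defn):
    return TLAOP(name, (-1, -1), Nonfix(), "User", defn)
def _lookup(name):
    ops = optable.get(name)
    return ops[0] if ops else _nonfix(name, None)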
| 2.171875 | 2 |
DIE/UI/FunctionViewEx.py | a1ext/DIE | 5 | 5190 | <filename>DIE/UI/FunctionViewEx.py<gh_stars>1-10
import networkx as nx
from awesome.context import ignored
import sark
import idaapi
import idautils
import idc
from idaapi import PluginForm
from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5
if use_qt5:
_QSortFilterProxyModel = QtCore.QSortFilterProxyModel
_MatchRecursive = QtCore.Qt.MatchRecursive
_MatchExactly = QtCore.Qt.MatchExactly
_PositionAtTop = QtWidgets.QAbstractItemView.PositionAtTop
else:
_QSortFilterProxyModel = QtGui.QSortFilterProxyModel
_MatchRecursive = QtCore.Qt.MatchFlag.MatchRecursive
_MatchExactly = QtCore.Qt.MatchFlag.MatchExactly
_PositionAtTop = QtWidgets.QAbstractItemView.ScrollHint.PositionAtTop
import DIE.UI.Die_Icons
import DIE.UI.ValueViewEx
import DIE.UI.ParserView
import DIE.UI.BPView
import DIE.Lib.IDAConnector
import DIE.Lib.DIEDb
import DIE.Lib.BpHandler
import sark.ui
class FunctionView(PluginForm):
"""
DIE Function View
"""
def __init__(self):
super(FunctionView, self).__init__()
self.value_view = None
self.bp_handler = None
self.die_icons = None
self.die_db = None
self.highligthed_items = []
def Show(self):
# Reset highlighted items
self.highligthed_items = []
return PluginForm.Show(self,
"Function View",
options=PluginForm.FORM_PERSIST)
def OnCreate(self, form):
"""
Called when the plugin form is created
"""
self.value_view = DIE.UI.ValueViewEx.get_view()
self.bp_handler = DIE.Lib.BpHandler.get_bp_handler()
self.die_icons = DIE.UI.Die_Icons.get_die_icons()
self.die_db = DIE.Lib.DIEDb.get_db()
# Get parent widget
self.parent = form_to_widget(form)
self.functionModel = QtGui.QStandardItemModel()
self.functionTreeView = QtWidgets.QTreeView()
self.functionTreeView.setExpandsOnDoubleClick(False)
#self.functionTreeView.setSortingEnabled(True)
delegate = TreeViewDelegate(self.functionTreeView)
self.functionTreeView.setItemDelegate(delegate)
self.functionTreeView.doubleClicked.connect(self.itemDoubleClickSlot)
self._model_builder(self.functionModel)
self.functionTreeView.setModel(self.functionModel)
self.functionTreeView.setColumnWidth(0, 200)
self.functionTreeView.setColumnWidth(1, 20)
self.functionTreeView.setColumnWidth(2, 20)
self.functionTreeView.setColumnWidth(3, 20)
self.functionTreeView.setColumnWidth(4, 250)
self.functionTreeView.setColumnWidth(5, 100)
self.functionTreeView.setColumnWidth(6, 20)
self.functionTreeView.setColumnWidth(7, 450)
self.functionTreeView.setColumnWidth(8, 20)
self.functionTreeView.setColumnWidth(9, 450)
# Context menus
self.functionTreeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.functionTreeView.customContextMenuRequested.connect(self.onCustomContextMenu)
# Actions
self.context_menu_param = None # Parameter to be passed to context menu slots
action_exclude_func = QtWidgets.QAction("Exclude Function", self.functionTreeView, triggered=lambda: self.on_exclude_func(self.context_menu_param))
action_exclude_func_adrs = QtWidgets.QAction("Exclude All Function Calls", self.functionTreeView, triggered=lambda: self.on_exclude_func_adrs(self.context_menu_param))
action_exclude_ea = QtWidgets.QAction("Exclude Address", self.functionTreeView, triggered=lambda: self.on_exclude_ea(self.context_menu_param))
action_exclude_library = QtWidgets.QAction("Exclude Library", self.functionTreeView, triggered=lambda: self.on_exclude_library(self.context_menu_param))
action_value_detail = QtWidgets.QAction("Inspect Value Details", self.functionTreeView, triggered=lambda: self.on_value_detail(self.context_menu_param))
action_show_callgraph = QtWidgets.QAction("Show Call-Graph", self.functionTreeView, triggered=lambda: self.on_show_callgraph(self.context_menu_param))
# Function ContextMenu
self.function_context_menu = QtWidgets.QMenu(self.functionTreeView)
self.function_context_menu.addAction(action_exclude_func)
self.function_context_menu.addAction(action_exclude_library)
self.function_context_menu.addAction(action_exclude_func_adrs)
# Function ea ContextMenu
self.ea_context_menu = QtWidgets.QMenu(self.functionTreeView)
self.ea_context_menu.addAction(action_exclude_ea)
self.ea_context_menu.addAction(action_show_callgraph)
# Argument value ContextMenu
self.value_context_menu = QtWidgets.QMenu(self.functionTreeView)
self.value_context_menu.addAction(action_value_detail)
        # Thread ComboBox
threads = []
if self.die_db is not None:
threads = self.die_db.get_thread_list()
thread_id_list = []
thread_id_list.append("All Threads")
for thread in threads:
thread_id_list.append(str(thread.thread_num))
self.thread_id_combo = QtWidgets.QComboBox()
self.thread_id_combo.addItems(thread_id_list)
self.thread_id_combo.activated[str].connect(self.on_thread_combobox_change)
self.thread_id_label = QtWidgets.QLabel("Thread: ")
# Toolbar
self.function_toolbar = QtWidgets.QToolBar()
self.function_toolbar.addWidget(self.thread_id_label)
self.function_toolbar.addWidget(self.thread_id_combo)
# Grid
layout = QtWidgets.QGridLayout()
layout.addWidget(self.function_toolbar)
layout.addWidget(self.functionTreeView)
self.parent.setLayout(layout)
def OnClose(self, form):
idaapi.msg("Closed\n")
def isVisible(self):
"""
Is functionview visible
@return: True if visible, otherwise False
"""
try:
return self.functionTreeView.isVisible()
except:
return False
def _model_builder(self, model):
"""
Build the function model.
@param model: QStandardItemModel object
"""
model.clear() # Clear the model
root_node = model.invisibleRootItem()
self._make_model_headers(model)
if self.die_db is None:
return
# Add db functions to the model
for function in self.die_db.get_functions():
item_list_func = self._make_function_item(function)
if function.is_lib_func: # Color library function
for tmp_item in item_list_func:
tmp_item.setBackground(QtGui.QColor(184, 223, 220))
item_function = item_list_func[0]
root_node.appendRow(item_list_func)
# Add function contexts ea\occurrences for the current function
func_context_dict = self.die_db.get_function_context_dict(function)
for function_context_ea in func_context_dict:
function_context_list = func_context_dict[function_context_ea]
if not len(function_context_list) > 0:
continue
item_func_context_list = self._make_function_ea_item(function_context_list[0])
item_func_context_ea = item_func_context_list[0]
item_function.appendRow(item_func_context_list)
occurrence_num = 0
for function_context in function_context_list:
item_func_context_list = self._make_func_occur_item(function_context, occurrence_num)
item_func_context = item_func_context_list[0]
item_func_context_ea.appendRow(item_func_context_list)
self._insert_thread_data(item_function, function_context.thread_id)
self._insert_thread_data(item_func_context_ea, function_context.thread_id)
# Add function arguments to each context
current_call_values = self.die_db.get_call_values(function_context)
current_ret_values = self.die_db.get_return_values(function_context)
                    current_ret_arg_value = self.die_db.get_return_arg_value(function_context)
for arg_index in xrange(0, function.arg_num):
try:
current_arg = self.die_db.get_function_arg(function, arg_index)
self._add_model_arg_value(item_func_context,
current_call_values[arg_index],
current_ret_values[arg_index],
current_arg.name,
current_arg.type)
except IndexError:
break
ret_arg = self.die_db.get_function_arg(function, -1)
if ret_arg is None:
ret_arg_type = "VOID"
else:
ret_arg_type = ret_arg.type
# Add return argument
self._add_model_arg_value(item_func_context,
None,
                                              current_ret_arg_value,
"ret_arg",
ret_arg_type)
# Increment occurrence counter
occurrence_num += 1
# Add non-executed function to the model
# for func_ea in idautils.Functions():
# func_name = DIE.Lib.IDAConnector.get_function_name(func_ea)
#
# if self.die_db.get_function_by_name(func_name) is None:
# item_list_func = self._make_nonexec_function_time(func_name)
#
# if function.is_lib_func: # Color library function
# for tmp_item in item_list_func:
# tmp_item.setBackground(QtGui.QColor(255, 0, 0, 127))
#
# root_node.appendRow(item_list_func)
def _make_model_headers(self, model):
"""
Set the model horizontal header data
@param model: the QStandardItemModel which headers should be set
"""
### Function Header
item_header = QtGui.QStandardItem("Function")
item_header.setToolTip("Function Name")
model.setHorizontalHeaderItem(0, item_header)
### Call number header
item_header = QtGui.QStandardItem("#")
        item_header.setToolTip("Number of calls performed to this function")
model.setHorizontalHeaderItem(1, item_header)
### Indirect Header
item_header = QtGui.QStandardItem("I")
item_header.setToolTip("Indirect Call")
model.setHorizontalHeaderItem(2, item_header)
        ### New Function Header
item_header = QtGui.QStandardItem("N")
item_header.setToolTip("New Function")
model.setHorizontalHeaderItem(3, item_header)
        ### Argument Type Header
item_header = QtGui.QStandardItem("Type")
item_header.setToolTip("Argument Type")
model.setHorizontalHeaderItem(4, item_header)
        ### Argument Name Header
item_header = QtGui.QStandardItem("Name")
item_header.setToolTip("Argument Name")
model.setHorizontalHeaderItem(5, item_header)
### Call Value Icon Header
item_header = QtGui.QStandardItem("")
model.setHorizontalHeaderItem(6, item_header)
### Call Value Header
item_header = QtGui.QStandardItem("Call Value")
item_header.setToolTip("Argument`s value on function call")
model.setHorizontalHeaderItem(7, item_header)
### Return Value Icon Header
item_header = QtGui.QStandardItem("")
model.setHorizontalHeaderItem(8, item_header)
### Return Value Header
item_header = QtGui.QStandardItem("Return Value")
item_header.setToolTip("Argument`s value on function return")
model.setHorizontalHeaderItem(9, item_header)
def _make_thread_id_data(self, thread_id):
"""
        Delimit thread_id data in order to support filtering/sorting on multi-thread data items
@param thread_id: thread id to normalize
@return: a normalized string of the thread_id to be used sa data for ThreadId_Role
"""
return "t%st" % str(thread_id)
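    # Illustrative note (not part of the original source): the delimiting scheme
    # above produces, for example:
    #
    #     self._make_thread_id_data(123)   # -> "t123t"
    #
    # so that a regexp such as ".*t123t.*" can match an item whose aggregated
    # thread data contains the delimited id (e.g. "t123tt5672t").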
def _insert_thread_data(self, item, thread_id):
"""
Insert thread_id data into a model item.
The value found in thread_id argument will be delimited by the _make_thread_id_data function
        (e.g. thread_id 123 will become 't123t')
        the delimited value will then be appended to a string of concatenated (unique) child-item thread-ids
        (for example, an item data value can be "t123tt5672tt111112t" for threads 123, 5672 and 111112)
@param item: the model item to add the data to
@param thread_id: thread_id number
@return: True if thread data was successfully added to item, otherwise False
"""
try:
current_thread_id = self._make_thread_id_data(thread_id)
thread_data = item.data(role=DIE.UI.ThreadId_Role)
if thread_data is None:
item.setData(current_thread_id, role=DIE.UI.ThreadId_Role)
elif not current_thread_id in thread_data:
item.setData(thread_data + current_thread_id, role=DIE.UI.ThreadId_Role)
return True
except Exception as ex:
idaapi.msg("Error while inserting thread data: %s\n" %ex)
return False
def _make_function_item(self, function):
"""
Build a tree item for a function name (level-0)
@param function: dbFunction object
        @return: QStandardItemModel item for the function
"""
function_txt = "%s" % function.function_name
item_function = QtGui.QStandardItem(self.die_icons.icon_function, function_txt)
item_function.setData(function, role=DIE.UI.Function_Role)
function_count = self.die_db.count_function_occurs(function)
item_function_count = QtGui.QStandardItem(str(function_count))
item_function_count.setEditable(False)
item_function.setEditable(False)
item_list = [item_function,
item_function_count,
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem()]
return item_list
def _make_nonexec_function_time(self, function_name):
"""
Build a tree item for a function name (for a non-executed function)
@type: String
@param function_name: Function name
@return:
"""
item_function = QtGui.QStandardItem(self.die_icons.icon_function, function_name)
item_function_count = QtGui.QStandardItem("0")
item_function_count.setEditable(False)
item_function.setEditable(False)
item_list = [item_function, item_function_count]
return item_list
def _make_function_ea_item(self, function_context):
"""
Build a tree item for a function_ea node (level-1)
@param function_context: a dbFunction_Context object
        @return: QStandardItemModel item for the function context
"""
calling_function_start = None
with ignored(sark.exceptions.SarkNoFunction):
calling_function_start = sark.Function(function_context.calling_ea).startEA
if calling_function_start is not None:
call_offset = function_context.calling_ea - calling_function_start
func_ea_txt = "%s+%s" % (function_context.calling_func_name, hex(call_offset))
else:
func_ea_txt = "[%s]:%s" % (function_context.calling_func_name, hex(function_context.calling_ea))
item_func_context_ea = QtGui.QStandardItem(func_ea_txt)
item_func_context_ea.setEditable(False)
item_func_context_ea.setData(hex(function_context.calling_ea), role=QtCore.Qt.ToolTipRole)
item_func_context_ea.setData(function_context, role=DIE.UI.FunctionContext_Role)
item_func_context_ea.setData(id(function_context), role=DIE.UI.ContextId_Role) # Used for module look-ups
item_func_is_indirect = QtGui.QStandardItem()
item_func_is_indirect.setEditable(False)
if function_context.is_indirect:
item_func_is_indirect.setIcon(self.die_icons.icon_v)
item_func_is_new = QtGui.QStandardItem()
item_func_is_new.setEditable(False)
if function_context.is_new_func:
item_func_is_new.setIcon(self.die_icons.icon_v)
item_list = [item_func_context_ea,
QtGui.QStandardItem(),
item_func_is_indirect,
item_func_is_new,
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem()]
return item_list
def _make_func_occur_item(self, function_context, occur_num):
"""
Build a tree item for function occurrence (level-2)
@param function_context: a dbFunction_Context object
@param occur_num: occurrence number
        @return: QStandardItemModel item for the function occurrence
"""
func_occur_txt = "Occur %s" % str(occur_num)
item_func_context = QtGui.QStandardItem(func_occur_txt)
item_func_context.setColumnCount(5)
item_func_context.setEditable(False)
item_func_context.setData(function_context, role=DIE.UI.FunctionContext_Role)
item_func_context.setData(id(function_context), role=DIE.UI.ContextId_Role) # Used for module look-ups
item_func_context.setData(self._make_thread_id_data(function_context.thread_id), role=DIE.UI.ThreadId_Role)
item_list = [item_func_context,
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem()]
return item_list
def _add_model_arg_value(self, parent, call_value, ret_value, arg_name, arg_type, nest_depth=0):
"""
Add a debug value
@param parent:
@param call_value:
@param ret_value:
@param arg_name:
@param arg_type:
@return:
"""
arg_count = parent.rowCount()
this_row_item = QtGui.QStandardItem("")
this_row_item.setData(parent.data(role=DIE.UI.ThreadId_Role), role=DIE.UI.ThreadId_Role) # Inherit thread data from parent
# Set indentation for argument types (for nested values)
arg_ident = " " * nest_depth
arg_ident_type = arg_ident + arg_type
item_parsed_val_flag_call = QtGui.QStandardItem()
item_parsed_val_call = QtGui.QStandardItem()
item_parsed_val_flag_ret = QtGui.QStandardItem()
item_parsed_val_ret = QtGui.QStandardItem()
# Get Call Value
if call_value is not None:
parsed_vals = self.die_db.get_parsed_values(call_value)
this_row_item.setData(parsed_vals, role=DIE.UI.CallValue_Role)
if parsed_vals is not None and len(parsed_vals) > 0:
is_guessed, best_val = self.die_db.get_best_parsed_val(parsed_vals)
item_parsed_val_call = QtGui.QStandardItem(best_val.data)
if is_guessed:
item_parsed_val_flag_call.setIcon(self.die_icons.icon_question)
                if len(parsed_vals) > 1:  # If more than 1 item, show a combo-box
item_parsed_val_call.setData(parsed_vals, role=DIE.UI.ParsedValuesRole)
item_parsed_val_flag_call.setIcon(self.die_icons.icon_more)
else:
item_parsed_val_call.setData(parsed_vals[0], role=DIE.UI.ParsedValueRole)
else:
parsed_val_data = "NULL"
if call_value.derref_depth == 0:
parsed_val_data = "!MAX_DEREF!"
if call_value.raw_value is not None:
parsed_val_data = hex(call_value.raw_value)
if len(call_value.nested_values) > 0 or call_value.reference_flink is not None:
parsed_val_data = ""
item_parsed_val_call = QtGui.QStandardItem(parsed_val_data)
# Get return value
if ret_value is not None:
parsed_vals = self.die_db.get_parsed_values(ret_value)
this_row_item.setData(parsed_vals, role=DIE.UI.RetValue_Role)
# If len(parsed_vals)>1 create a combobox delegate.
if parsed_vals:
is_guessed, best_val = self.die_db.get_best_parsed_val(parsed_vals)
item_parsed_val_ret = QtGui.QStandardItem(best_val.data)
if is_guessed:
item_parsed_val_flag_ret.setIcon(self.die_icons.icon_question)
                if len(parsed_vals) > 1:  # If more than 1 item, show a combo-box
item_parsed_val_ret.setData(parsed_vals, role=DIE.UI.ParsedValuesRole)
item_parsed_val_flag_ret.setIcon(self.die_icons.icon_more)
else:
item_parsed_val_ret.setData(parsed_vals[0], role=DIE.UI.ParsedValueRole)
else:
parsed_val_data = "NULL"
if ret_value.derref_depth == 0:
parsed_val_data = "!MAX_DEREF!"
if ret_value.raw_value is not None:
parsed_val_data = hex(ret_value.raw_value)
if ret_value.nested_values or ret_value.reference_flink is not None:
parsed_val_data = ""
item_parsed_val_ret = QtGui.QStandardItem(parsed_val_data)
parent.setChild(arg_count, 0, this_row_item)
parent.setChild(arg_count, 1, QtGui.QStandardItem())
parent.setChild(arg_count, 2, QtGui.QStandardItem())
parent.setChild(arg_count, 3, QtGui.QStandardItem())
parent.setChild(arg_count, 4, QtGui.QStandardItem(arg_ident_type))
parent.setChild(arg_count, 5, QtGui.QStandardItem(arg_name))
parent.setChild(arg_count, 6, item_parsed_val_flag_call)
parent.setChild(arg_count, 7, item_parsed_val_call)
parent.setChild(arg_count, 8, item_parsed_val_flag_ret)
parent.setChild(arg_count, 9, item_parsed_val_ret)
# If current object contains reference values, add them to the module
self._add_model_arg_ref(this_row_item, call_value, ret_value, nest_depth)
# If current object is a container object, Add its members to the module
self._add_model_container_members(this_row_item, call_value, ret_value, nest_depth)
def _add_model_arg_ref(self, parent, call_value, ret_value, nest_depth=0):
"""
Add a reference value to module
@param parent:
@param call_value:
@param ret_value:
@param nest_depth:
@return:
"""
# If call debug value is a reference
if call_value is not None:
if call_value.reference_flink is not None and not call_value.is_definitely_parsed:
ref_val_call = self.die_db.get_dbg_value(call_value.reference_flink)
ref_val_ret = None
# Try to get the same reference from the return debug value.
if ret_value is not None and ret_value.type == call_value.type:
if ret_value.reference_flink is not None and not ret_value.is_definitely_parsed:
ref_val_ret = self.die_db.get_dbg_value(ret_value.reference_flink)
self._add_model_arg_value(parent, ref_val_call, ref_val_ret, ref_val_call.name, ref_val_call.type, nest_depth+1)
# If return debug value is a reference (and call value is not)
elif ret_value is not None:
if ret_value.reference_flink is not None and not ret_value.is_definitely_parsed:
ref_val = self.die_db.get_dbg_value(ret_value.reference_flink)
self._add_model_arg_value(parent, None, ref_val, ref_val.name, ref_val.type, nest_depth+1)
def _add_model_container_members(self, parent, call_value, ret_value, nest_depth=0):
"""
Add container members to module
@param parent:
@param call_value:
@param ret_value:
@param nest_depth:
@return:
"""
# If call value is a container type (struct\union\etc)
if call_value is not None and call_value.nested_values is not None:
if call_value.nested_values:
for index in xrange(0, len(call_value.nested_values)):
nested_val_call = self.die_db.get_dbg_value(call_value.nested_values[index])
nested_val_ret = None
# Try to get the same member from the return debug value.
if ret_value is not None and ret_value.type == call_value.type:
if ret_value.nested_values is not None:
if ret_value.nested_values:
nested_val_ret = self.die_db.get_dbg_value(ret_value.nested_values[index])
self._add_model_arg_value(parent, nested_val_call, nested_val_ret, nested_val_call.name, nested_val_call.type, nest_depth+1)
# If return value is a container type (and call value is not)
elif ret_value is not None:
if ret_value.nested_values is not None:
if ret_value.nested_values:
for nested_value in ret_value.nested_values:
nested_val_ret = self.die_db.get_dbg_value(nested_value)
self._add_model_arg_value(parent,
None,
nested_val_ret,
nested_val_ret.name,
nested_val_ret.type,
nest_depth+1)
def reset_function_count(self, thread_id=None):
"""
Reset the function count and set the count according to currently selected thread_id
@param thread_id: currently selected thread_id
"""
root_item = self.functionModel.item(0, 0)
rows = root_item.rowCount()
thread_id = self.thread_id_combo.currentText()
for row in xrange(0, rows):
cur_item = root_item.child(row, 0)
function = cur_item.data(role=DIE.UI.Function_Role)
if function is not None:
count = 0
                if thread_id is None or thread_id == "All Threads":
count = self.die_db.count_function_occurs(function)
else:
count = self.die_db.count_function_occurs(function, int(thread_id))
func_count_item = root_item.child(row, 1)
func_count_item.setText(str(count))
###############################################################################################
# Highlight Items.
def highlight_item(self, item):
"""
Highlight a single item
@param item: module item
"""
try:
item.setBackground(QtGui.QColor('yellow'))
cur_font = item.font()
cur_font.setBold(True)
item.setFont(cur_font)
except Exception as ex:
idaapi.msg("Error while highlighting item: %s\n" %ex)
def highlight_item_row(self, item):
"""
highlight the entire row containing a table item
@param item: table item
"""
try:
if not item.index().isValid():
return
parent = item.parent()
if parent is None:
parent = item
if not parent.hasChildren():
self.highlight_item(parent)
return
row = item.row()
column_num = parent.columnCount()
for column in xrange(0, column_num):
if self.functionModel.hasIndex(row, column, parent.index()):
cur_index = self.functionModel.index(row, column, parent.index())
self.highlight_item(self.functionModel.itemFromIndex(cur_index))
persistent_index = QtCore.QPersistentModelIndex(cur_index)
self.highligthed_items.append(persistent_index)
except Exception as ex:
idaapi.msg("Error while highlighting item row: %s\n" % ex)
def clear_highlights(self):
"""
Clear all highlighted items
@return:
"""
try:
self.functionTreeView.collapseAll()
for persistent_index in self.highligthed_items:
if persistent_index.isValid():
item = self.functionModel.itemFromIndex(persistent_index)
item.setBackground(QtGui.QColor('white'))
cur_font = item.font()
cur_font.setBold(False)
item.setFont(cur_font)
self.highligthed_items = []
except Exception as ex:
idaapi.msg("Error while clearing highlights: %s\n" % ex)
###############################################################################################
# Find Items.
def find_function(self, function_name):
"""
Find and highlight a function in current module
@param function_name: Function name
"""
self.clear_highlights()
matched_items = self.functionModel.findItems(function_name)
for item in matched_items:
self.functionTreeView.expand(item.index())
self.functionTreeView.scrollTo(item.index(), _PositionAtTop)
self.highlight_item_row(item)
def find_context_list(self, context_list):
"""
Find and highlight a list of function contexts
@param context_list: list of function contexts (of type dbFunction_Context)
"""
try:
self.clear_highlights()
root_index = self.functionModel.index(0, 0)
if not root_index.isValid():
return
for func_context in context_list:
context_id = id(func_context)
matched_items = self.functionModel.match(root_index, DIE.UI.ContextId_Role, context_id, -1, _MatchRecursive | _MatchExactly)
for index in matched_items:
if not index.isValid():
continue
# Do not highlight "ea root" items, only occurrences of it.
if not index.data().startswith("Occur"):
continue
item = self.functionModel.itemFromIndex(index)
self.functionTreeView.expand(index)
self.functionTreeView.scrollTo(index, _PositionAtTop)
self.highlight_item_row(item)
return True
except Exception as ex:
idaapi.msg("Error while looking up function context in FunctionView: %s\n" % ex)
return False
###############################################################################################
# Slots.
# @QtCore.Slot(QtCore.QModelIndex)
def itemDoubleClickSlot(self, index):
"""
TreeView DoubleClicked Slot.
@param index: QModelIndex object of the clicked tree index item.
@return:
"""
function = index.data(role=DIE.UI.Function_Role)
if function is not None:
ea = function.function_start
if function.is_lib_func:
ea = function.proto_ea
            if ea is not None and ea != idc.BADADDR:
idc.Jump(ea)
return True
func_context = index.data(role=DIE.UI.FunctionContext_Role)
if func_context is not None:
ea = func_context.calling_ea
            if ea is not None and ea != idc.BADADDR:
idc.Jump(ea)
return True
# @QtCore.Slot(QtCore.QPoint)
def onCustomContextMenu(self, point):
index = self.functionTreeView.indexAt(point)
is_function_item = index.data(role=DIE.UI.Function_Role)
is_func_context_item = index.data(role=DIE.UI.FunctionContext_Role)
is_value_item = index.data(role=DIE.UI.ParsedValueRole)
if is_function_item is not None:
self.context_menu_param = is_function_item
self.function_context_menu.exec_(self.functionTreeView.mapToGlobal(point))
if is_func_context_item is not None:
self.context_menu_param = is_func_context_item
self.ea_context_menu.exec_(self.functionTreeView.mapToGlobal(point))
if is_value_item is not None:
self.context_menu_param = is_value_item
self.value_context_menu.exec_(self.functionTreeView.mapToGlobal(point))
# @QtCore.Slot(str)
def on_exclude_func(self, function):
if not isinstance(function, DIE.Lib.DIEDb.dbFunction):
if function is not None:
                raise ValueError("Wrong value sent to 'on_exclude_func': %s. expected dbFunction" % function.__class__)
else:
                raise ValueError("Wrong value sent to 'on_exclude_func'")
self.bp_handler.add_bp_funcname_exception(function.function_name)
return
# @QtCore.Slot(str)
def on_exclude_func_adrs(self, function):
if not isinstance(function, DIE.Lib.DIEDb.dbFunction):
if function is not None:
                raise ValueError("Wrong value sent to 'on_exclude_func_adrs': %s. expected dbFunction" % function.__class__)
else:
raise ValueError("Wrong value sent to 'on_exclude_func_adrs'")
func_context_list = self.die_db.get_function_context_list(function)
for func_context in func_context_list:
self.bp_handler.add_bp_ea_exception(func_context.calling_ea)
return
# @QtCore.Slot(str)
def on_exclude_ea(self, function_context):
if not isinstance(function_context, DIE.Lib.DIEDb.dbFunction_Context):
if function_context is not None:
                raise ValueError("Wrong value sent to 'on_exclude_ea': %s. expected dbFunction_Context" % function_context.__class__)
else:
raise ValueError("Wrong value sent to 'on_exclude_ea'")
self.bp_handler.add_bp_ea_exception(function_context.calling_ea)
return
# @QtCore.Slot(str)
def on_show_callgraph(self, function_context):
if not isinstance(function_context, DIE.Lib.DIEDb.dbFunction_Context):
if function_context is not None:
                raise ValueError("Wrong value sent to 'on_show_callgraph': %s. expected dbFunction_Context" % function_context.__class__)
else:
raise ValueError("Wrong value sent to 'on_show_callgraph'")
graph = nx.DiGraph()
call_graph = self.die_db.get_call_graph_to(function_context)
if not call_graph:
            idaapi.msg("No Execution Graph\n")
return
for ctxt_node in call_graph:
(from_address, to_address) = ctxt_node
graph.add_edge(from_address, to_address)
function_name = self.die_db.get_function_name(function_context.function)
viewer = sark.ui.NXGraph(graph, "Callgraph for {}".format(function_name), handler=sark.ui.AddressNodeHandler())
viewer.Show()
return
# @QtCore.Slot(str)
def on_exclude_library(self, function):
if not isinstance(function, DIE.Lib.DIEDb.dbFunction):
if function is not None:
                raise ValueError("Wrong value sent to 'on_exclude_library': %s. expected dbFunction" % function.__class__)
else:
                raise ValueError("Wrong value sent to 'on_exclude_library'")
if function.is_lib_func and function.lib_name is not None:
self.bp_handler.add_module_exception(function.lib_name)
return
# @QtCore.Slot(str)
def on_value_detail(self, value):
if not self.value_view.isVisible():
self.value_view.Show()
self.value_view.find_value(value)
return
def on_thread_combobox_change(self, thread_id):
self.reset_function_count(thread_id) # reset function count according to currently selected thread
if thread_id == "All Threads":
if not self.functionTreeView.model() is self.functionModel:
self.functionTreeView.setModel(self.functionModel)
return
hidden_threads = ".*" + self._make_thread_id_data(thread_id) + ".*"
threadProxyModel = _QSortFilterProxyModel()
threadProxyModel.setFilterRole(DIE.UI.ThreadId_Role)
threadProxyModel.setFilterRegExp(hidden_threads)
threadProxyModel.setSourceModel(self.functionModel)
self.functionTreeView.setModel(threadProxyModel)
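    # Illustrative note (not part of the original source): for a selected thread
    # id of "123" the filter regexp built above is ".*t123t.*", so the proxy model
    # keeps only rows whose ThreadId_Role data contains the delimited id produced
    # by _make_thread_id_data().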
def on_valueview_button(self):
value_view = DIE.UI.ValueViewEx.get_view()
value_view.Show()
def on_pluginsview_button(self):
plugins_view = DIE.UI.ParserView.get_view()
plugins_view.Show()
def on_bpview_button(self):
bp_view = DIE.UI.BPView.get_view()
bp_view.Show()
###############################################################################################
# View Delegates.
class TreeViewDelegate(QtWidgets.QStyledItemDelegate):
"""
Delegate for parsed value viewing in the tree view
"""
def __init__(self, parent):
QtWidgets.QStyledItemDelegate.__init__(self, parent)
self.parent = parent
def createEditor(self, parent, option, index):
parsed_val_list = index.data(role=DIE.UI.ParsedValuesRole)
        # Show combobox only if parsed_value has two or more items.
if parsed_val_list is not None and len(parsed_val_list) > 1:
lines = []
for parsed_val in parsed_val_list:
line_txt = "%d, %s, %s" % (parsed_val.score, parsed_val.data, parsed_val.description)
lines.append(line_txt)
combo_box = QtWidgets.QComboBox(parent)
combo_box.addItems(lines)
return combo_box
def setEditorData(self, editor, index):
editor.blockSignals(True)
editor.setCurrentIndex(int(index.model().data(index)))
editor.blockSignals(False)
# Singleton
function_view = None
def initialize():
global function_view
function_view = FunctionView()
def get_view():
return function_view
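# Illustrative usage sketch (not part of the original module). It assumes the
# plugin has already called initialize(); the function name is a made-up example:
#
#     initialize()
#     fv = get_view()
#     fv.Show()
#     fv.find_function("CreateFileA")   # expand and highlight a traced function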
| 1.929688 | 2 |
peerbot/PeerBot.py | danerprog/PeerHostedDiscordBot | 0 | 5191 | <reponame>danerprog/PeerHostedDiscordBot
from peerbot.PeerBotStateMachine import PeerBotStateMachine
from utils.Logger import Logger
import discord
class PeerBot(discord.Client):
def __init__(self, args):
self.args = args
self.isBotReady = False
super().__init__()
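    # Illustrative usage sketch (not part of the original source). The ids and
    # token below are placeholders; the real values come from the peer bot setup:
    #
    #     bot = PeerBot({'userId': 111111111, 'protocolChannelId': 222222222})
    #     bot.run('DISCORD_BOT_TOKEN')   # discord.Client.run starts the event loop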
async def on_ready(self):
stringifiedUserId = str(self.args['userId'])
self.logger = Logger.getLogger("PeerBot - " + stringifiedUserId)
self.logger.trace("on_ready called")
self.stateMachine = PeerBotStateMachine(await self._getStateMachineArgs(self.args))
self.isBotReady = True
self.stateMachine.start()
async def on_message(self, message):
if self.isBotReady:
self.logger.trace("on_message called")
self.stateMachine.execute(message)
async def _getStateMachineArgs(self, args):
return {
'user' : await self.fetch_user(int(args['userId'])),
'protocolChannel' : await self.fetch_channel(int(args['protocolChannelId'])),
'appInfo' : await self.application_info()
} | 2.421875 | 2 |
dags/oss_know/oss_know_dags/dags_github/dag_github_init_issues_timeline.py | ynang/airflow-jobs-1 | 4 | 5192 | from datetime import datetime
from airflow import DAG
from airflow.operators.python import PythonOperator
# v0.0.1
from oss_know.libs.base_dict.variable_key import NEED_INIT_GITHUB_ISSUES_TIMELINE_REPOS, GITHUB_TOKENS, \
OPENSEARCH_CONN_DATA, PROXY_CONFS
from oss_know.libs.util.proxy import KuaiProxyService, ProxyManager, GithubTokenProxyAccommodator
from oss_know.libs.util.token import TokenManager
with DAG(
dag_id='github_init_issues_timeline_v1',
schedule_interval=None,
start_date=datetime(2000, 1, 1),
catchup=False,
tags=['github'],
) as dag:
def scheduler_init_github_issues_timeline(ds, **kwargs):
return 'End:scheduler_init_github_issues_timeline'
op_scheduler_init_github_issues_timeline = PythonOperator(
task_id='op_scheduler_init_github_issues_timeline',
python_callable=scheduler_init_github_issues_timeline
)
def do_init_github_issues_timeline(params):
from airflow.models import Variable
from oss_know.libs.github import init_issues_timeline
github_tokens = Variable.get(GITHUB_TOKENS, deserialize_json=True)
opensearch_conn_info = Variable.get(OPENSEARCH_CONN_DATA, deserialize_json=True)
proxy_confs = Variable.get(PROXY_CONFS, deserialize_json=True)
proxies = []
for line in proxy_confs['reserved_proxies']:
proxies.append(f'http://{line}')
proxy_service = KuaiProxyService(proxy_confs['api_url'], proxy_confs['orderid'])
proxy_manager = ProxyManager(proxies, proxy_service)
token_manager = TokenManager(github_tokens)
proxy_accommodator = GithubTokenProxyAccommodator(token_manager, proxy_manager, shuffle=True,
policy=GithubTokenProxyAccommodator.POLICY_FIXED_MAP)
owner = params["owner"]
repo = params["repo"]
# since = params["since"]
since = None
init_issues_timeline.init_sync_github_issues_timeline(opensearch_conn_info, owner, repo,
proxy_accommodator, since)
return params
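    # Illustrative note (not part of the original DAG): the Airflow variable
    # NEED_INIT_GITHUB_ISSUES_TIMELINE_REPOS is assumed to hold a JSON list of
    # {"owner": ..., "repo": ...} entries, e.g.
    #
    #     [{"owner": "apache", "repo": "airflow"}]
    #
    # (the concrete repository is only an example).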
need_do_init_ops = []
from airflow.models import Variable
need_init_github_issues_timeline_repos = Variable.get(NEED_INIT_GITHUB_ISSUES_TIMELINE_REPOS,
deserialize_json=True)
for need_init_github_issues_timeline_repo in need_init_github_issues_timeline_repos:
op_do_init_github_issues_timeline = PythonOperator(
task_id='op_do_init_github_issues_timeline_{owner}_{repo}'.format(
owner=need_init_github_issues_timeline_repo["owner"],
repo=need_init_github_issues_timeline_repo["repo"]),
python_callable=do_init_github_issues_timeline,
op_kwargs={'params': need_init_github_issues_timeline_repo},
)
op_scheduler_init_github_issues_timeline >> op_do_init_github_issues_timeline
| 2.21875 | 2 |
conans/conan.py | laundry-96/conan | 2 | 5193 | <reponame>laundry-96/conan
import sys
from conans.client.command import main
def run():
main(sys.argv[1:])
if __name__ == '__main__':
run()
| 1.390625 | 1 |
tests/apps/persons/test_cms_plugins_person.py | lunika/richie | 0 | 5194 | # -*- coding: utf-8 -*-
"""
Unit tests for the Person plugin and its model
"""
from django import forms
from django.conf import settings
from django.test import TestCase
from cms.api import add_plugin, create_page
from cmsplugin_plain_text.cms_plugins import PlaintextPlugin
from djangocms_picture.cms_plugins import PicturePlugin
from richie.apps.core.factories import FilerImageFactory, UserFactory
from richie.apps.core.helpers import create_i18n_page
from richie.apps.persons.cms_plugins import PersonPlugin
from richie.apps.persons.factories import PersonFactory
from richie.apps.persons.models import PersonPluginModel
class PersonPluginTestCase(TestCase):
"""
Test that PersonPlugin correctly displays a Person's page placeholders content
"""
def test_cms_plugins_person_form_page_choices(self):
"""
The form to create a person plugin should only list person pages in the select box.
"""
class PersonPluginModelForm(forms.ModelForm):
"""A form for testing the choices in the select box"""
class Meta:
model = PersonPluginModel
exclude = ()
person = PersonFactory()
other_page_title = "other page"
create_page(other_page_title, "richie/fullwidth.html", settings.LANGUAGE_CODE)
plugin_form = PersonPluginModelForm()
self.assertIn(person.get_full_name(), plugin_form.as_table())
self.assertNotIn(other_page_title, plugin_form.as_table())
def test_cms_plugins_person_render(self):
"""
Test that a PersonPlugin correctly renders person's page specific information
"""
# Create a filer fake image
staff = UserFactory(is_staff=True, is_superuser=True)
image = FilerImageFactory(owner=staff)
# Create a Person
person = PersonFactory()
person_page = person.extended_object
# Add portrait to related placeholder
portrait_placeholder = person_page.placeholders.get(slot="portrait")
add_plugin(
portrait_placeholder,
PicturePlugin,
"en",
**{"picture": image, "attributes": {"alt": "portrait description"}}
)
add_plugin(
portrait_placeholder,
PicturePlugin,
"fr",
**{"picture": image, "attributes": {"alt": "description du portrait"}}
)
# A resume to related placeholder
resume_placeholder = person_page.placeholders.get(slot="resume")
add_plugin(
resume_placeholder, PlaintextPlugin, "en", **{"body": "A short resume"}
)
add_plugin(
resume_placeholder, PlaintextPlugin, "fr", **{"body": "Un résumé court"}
)
# Create a page to add the plugin to
page = create_i18n_page({"en": "A page", "fr": "Une page"})
placeholder = page.placeholders.get(slot="maincontent")
add_plugin(placeholder, PersonPlugin, "en", **{"person": person})
add_plugin(placeholder, PersonPlugin, "fr", **{"person": person})
page.publish("en")
page.publish("fr")
# Check the page content in English
url = page.get_absolute_url(language="en")
response = self.client.get(url)
# Person's name should be present as a link to the cms page
# And CMS page title should be in title attribute of the link
self.assertContains(
response,
'<a href="{url}" title="{page_title}">'.format(
url=person_page.get_absolute_url(), page_title=person_page.get_title()
),
status_code=200,
)
self.assertContains(response, person.get_full_name(), html=True)
# Person's portrait and its properties should be present
# pylint: disable=no-member
self.assertContains(response, image.file.name)
# Short resume should be present
self.assertContains(
response,
'<div class="person-plugin__content__text">A short resume</div>',
html=True,
)
# The person's full name should be wrapped in a h2
self.assertContains(
response,
'<h2 class="person-plugin__content__title">{:s}</h2>'.format(
person.get_full_name()
),
html=True,
)
# Same checks in French
url = page.get_absolute_url(language="fr")
response = self.client.get(url)
self.assertContains(
response,
'<a href="{url}" title="{page_title}">'.format(
url=person_page.get_absolute_url(), page_title=person_page.get_title()
),
status_code=200,
)
# pylint: disable=no-member
self.assertContains(response, image.file.name)
self.assertContains(
response,
'<div class="person-plugin__content__text">Un résumé court</div>',
html=True,
)
| 2.359375 | 2 |
mathics/core/subexpression.py | Mathics3/mathics-core | 90 | 5195 | # cython: language_level=3
# -*- coding: utf-8 -*-
from mathics.core.expression import Expression
from mathics.core.symbols import Atom, Symbol
from mathics.core.atoms import Integer
from mathics.builtin.base import MessageException
"""
This module provides some infrastructure to deal with SubExpressions.
"""
def _pspec_span_to_tuple(pspec, expr):
"""
This function takes an expression and a Mathics
`Span` Expression and returns a tuple with the positions
of the leaves.
"""
start = 1
stop = None
step = 1
leaves = pspec.leaves
if len(leaves) > 3:
raise MessageException("Part", "span", leaves)
if len(leaves) > 0:
start = leaves[0].get_int_value()
if len(leaves) > 1:
stop = leaves[1].get_int_value()
if stop is None:
if leaves[1].get_name() == "System`All":
stop = None
else:
raise MessageException("Part", "span", pspec)
else:
stop = stop - 1 if stop > 0 else len(expr.leaves) + stop
if len(pspec.leaves) > 2:
step = leaves[2].get_int_value()
if start is None or step is None:
raise MessageException("Part", "span", pspec)
if start == 0 or stop == 0:
# index 0 is undefined
raise MessageException("Part", "span", Integer(0))
if start < 0:
start = len(expr.leaves) - start
else:
start = start - 1
if stop is None:
stop = 0 if step < 0 else len(expr.leaves) - 1
stop = stop + 1 if step > 0 else stop - 1
return tuple(k for k in range(start, stop, step))
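# Illustrative note (not part of the original module): for an expression with at
# least four leaves, a Mathics span such as 1;;4;;2 (Span[1, 4, 2]) is converted
# by _pspec_span_to_tuple into the 0-based index tuple (0, 2), i.e. the first and
# third leaves.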
class ExpressionPointer(object):
"""
This class represents a reference to a leaf in an expression.
Supports a minimal part of the basic interface of `mathics.core.symbols.BaseElement`.
"""
def __init__(self, expr, pos=None):
"""
Initializes a ExpressionPointer pointing to the leaf in position `pos`
of `expr`.
expr: can be an Expression, a Symbol, or another ExpressionPointer
pos: int or None
If `pos==0`, then the pointer points to the `head` of the expression.
If `pos` is `None`, it points out the whole expression.
"""
if pos is None:
if type(expr) is ExpressionPointer:
self.parent = expr.parent
self.position = expr.position
else:
self.parent = expr
self.position = None
else:
self.parent = expr
self.position = pos
def __str__(self) -> str:
return "%s[[%s]]" % (self.parent, self.position)
def __repr__(self) -> str:
return self.__str__()
@property
def original(self):
return None
@original.setter
def original(self, value):
raise ValueError("Expression.original is write protected.")
@property
def head(self):
pos = self.position
if pos is None:
return self.parent.head
elif pos == 0:
return self.parent.head.head
return self.parent.leaves[pos - 1].head
@head.setter
def head(self, value):
raise ValueError("ExpressionPointer.head is write protected.")
@property
def leaves(self):
pos = self.position
if pos is None:
return self.parent.leaves
elif pos == 0:
            return self.parent.head.leaves
return self.parent.leaves[pos - 1].leaves
@leaves.setter
def leaves(self, value):
raise ValueError("ExpressionPointer.leaves is write protected.")
def get_head_name(self):
return self.head.get_name()
def is_atom(self):
pos = self.position
if pos is None:
return self.parent.is_atom()
elif pos == 0:
return self.parent.head.is_atom()
return self.parent.leaves[pos - 1].is_atom()
def to_expression(self):
parent = self.parent
p = self.position
if p == 0:
if isinstance(parent, Symbol):
return parent
else:
return parent.head.copy()
else:
leaf = self.parent.leaves[p - 1]
if isinstance(leaf, Atom):
return leaf
else:
return leaf.copy()
def replace(self, new):
"""
This method replaces the value pointed out by a `new` value.
"""
# First, look for the ancestor that is not an ExpressionPointer,
# keeping the positions of each step:
parent = self.parent
pos = [self.position]
while type(parent) is ExpressionPointer:
position = parent.position
if position is None:
parent = parent.parent
continue
pos.append(parent.position)
parent = parent.parent
# At this point, we hit the expression, and we have
# the path to reach the position
i = pos.pop()
try:
while pos:
if i == 0:
parent = parent._head
else:
parent = parent.elements[i - 1]
i = pos.pop()
except Exception:
raise MessageException("Part", "span", pos)
# Now, we have a pointer to an element in a true `Expression`.
# Now, set it to the new value.
if i == 0:
parent.set_head(new)
else:
parent.set_element(i - 1, new)
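# Illustrative sketch (not part of the original module) of how an
# ExpressionPointer overwrites a single leaf in place; the expression
# construction is shown schematically:
#
#     expr = Expression(Symbol("System`Plus"), Integer(1), Integer(2))
#     ExpressionPointer(expr, 2).replace(Integer(5))   # expr is now Plus[1, 5]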
class SubExpression(object):
"""
This class represents a Subexpression of an existing Expression.
Assignment to a subexpression results in the change of the original Expression.
"""
def __new__(cls, expr, pos=None):
"""
`expr` can be an `Expression`, a `ExpressionPointer` or
another `SubExpression`
`pos` can be `None`, an integer value or an `Expression` that
indicates a subset of leaves in the original `Expression`.
If `pos` points out to a single whole leaf of `expr`, then
returns an `ExpressionPointer`.
"""
# If pos is a list, take the first element, and
# store the remainder.
if type(pos) in (tuple, list):
pos, rem_pos = pos[0], pos[1:]
if len(rem_pos) == 0:
rem_pos = None
else:
rem_pos = None
# Trivial conversion: if pos is an `Integer`, convert
# to a Python native int
if type(pos) is Integer:
pos = pos.get_int_value()
# pos == `System`All`
elif isinstance(pos, Symbol) and pos.get_name() == "System`All":
pos = None
elif type(pos) is Expression:
if pos.has_form("System`List", None):
tuple_pos = [i.get_int_value() for i in pos.leaves]
if any([i is None for i in tuple_pos]):
raise MessageException("Part", "pspec", pos)
                pos = tuple(tuple_pos)
elif pos.has_form("System`Span", None):
pos = _pspec_span_to_tuple(pos, expr)
else:
raise MessageException("Part", "pspec", pos)
if pos is None or type(pos) is int:
if rem_pos is None:
return ExpressionPointer(expr, pos)
else:
return SubExpression(ExpressionPointer(expr, pos), rem_pos)
elif type(pos) is tuple:
self = super(SubExpression, cls).__new__(cls)
self._headp = ExpressionPointer(expr.head, 0)
self._elementsp = [
SubExpression(ExpressionPointer(expr, k + 1), rem_pos) for k in pos
]
return self
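    # Illustrative note (not part of the original source): __new__ returns an
    # ExpressionPointer when `pos` resolves to a single position (or None), and a
    # SubExpression wrapping one pointer per selected leaf when `pos` resolves to
    # a tuple of positions (a System`List or System`Span part specification).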
def is_atom(self):
return False
def __str__(self):
return (
self.head.__str__()
+ "[\n"
+ ",\n".join(["\t " + leaf.__str__() for leaf in self.leaves])
+ "\n\t]"
)
def __repr__(self):
return self.__str__()
@property
def head(self):
return self._headp
@head.setter
def head(self, value):
raise ValueError("SubExpression.head is write protected.")
def get_head_name(self):
return self._headp.parent.get_head_name()
@property
def elements(self):
return self._elementsp
@elements.setter
def elements(self, value):
raise ValueError("SubExpression.leaves is write protected.")
@property
def leaves(self):
return self._elementsp
@leaves.setter
def leaves(self, value):
raise ValueError("SubExpression.leaves is write protected.")
def to_expression(self):
return Expression(
self._headp.to_expression(),
*(leaf.to_expression() for leaf in self._elementsp)
)
def replace(self, new):
"""
Asigns `new` to the subexpression, according to the logic of `mathics.core.walk_parts`
"""
if (new.has_form("List", None) or new.get_head_name() == "System`List") and len(
new.leaves
) == len(self._elementsp):
for leaf, sub_new in zip(self._elementsp, new.leaves):
leaf.replace(sub_new)
else:
for leaf in self._elementsp:
leaf.replace(new)
| 2.765625 | 3 |
pyopenproject/business/services/command/configuration/find.py | webu/pyopenproject | 5 | 5196 | <filename>pyopenproject/business/services/command/configuration/find.py<gh_stars>1-10
from pyopenproject.api_connection.exceptions.request_exception import RequestError
from pyopenproject.api_connection.requests.get_request import GetRequest
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.business.services.command.configuration.configuration_command import ConfigurationCommand
from pyopenproject.model.configuration import Configuration
class Find(ConfigurationCommand):
def __init__(self, connection):
"""Constructor for class Find, from ConfigurationCommand.
:param connection: The connection data
"""
super().__init__(connection)
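    # Illustrative usage sketch (not part of the original file); `connection`
    # is assumed to be an already-configured OpenProject connection object:
    #
    #     configuration = Find(connection).execute()   # -> Configuration instance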
def execute(self):
try:
json_obj = GetRequest(self.connection, f"{self.CONTEXT}").execute()
return Configuration(json_obj)
except RequestError as re:
raise BusinessError("Error listing configuration") from re
| 2.609375 | 3 |
lib/python/treadmill/tests/api/cell_test.py | vrautela/treadmill | 1 | 5197 | <gh_stars>1-10
"""Cell API tests.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import mock
from treadmill import admin
from treadmill.api import cell
class ApiCellTest(unittest.TestCase):
"""treadmill.api.cell tests."""
def setUp(self):
self.cell = cell.API()
def tearDown(self):
pass
@mock.patch('treadmill.context.AdminContext.conn',
mock.Mock(return_value=admin.Admin(None, None)))
@mock.patch('treadmill.admin.Cell.list', mock.Mock(return_value=[]))
def test_list(self):
"""Dummy test for treadmill.api.cell._list()"""
self.cell.list()
cell_admin = admin.Cell(None)
self.assertTrue(cell_admin.list.called)
@mock.patch('treadmill.context.AdminContext.conn',
mock.Mock(return_value=admin.Admin(None, None)))
@mock.patch('treadmill.admin.Cell.get',
mock.Mock(return_value={'cell': 'ny-999-cell'}))
def test_get(self):
"""Dummy test for treadmill.api.cell.get()"""
cell_admin = admin.Cell(None)
self.cell.get('some-cell')
cell_admin.get.assert_called_with('some-cell')
@mock.patch('treadmill.context.AdminContext.conn',
mock.Mock(return_value=admin.Admin(None, None)))
@mock.patch('treadmill.admin.Cell.get',
mock.Mock(return_value={'cell': 'ny-999-cell'}))
@mock.patch('treadmill.admin.Cell.create', mock.Mock())
def test_create(self):
"""Dummy test for treadmill.api.cell.create()"""
cell_admin = admin.Cell(None)
self.cell.create('some-cell', {'location': 'ny',
'treadmillid': 'treadmld',
'version': 'v3'})
cell_admin.get.assert_called_with('some-cell', dirty=True)
if __name__ == '__main__':
unittest.main()
| 2.46875 | 2 |
src/python_package/__init__.py | microsoft/ai-python-package | 3 | 5198 | <filename>src/python_package/__init__.py<gh_stars>1-10
# -------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in project root for information.
# -------------------------------------------------------------
"""Python Package Template"""
from __future__ import annotations
__version__ = "0.0.2"
| 1.804688 | 2 |
tests/test_integration_partition.py | themoodymann/piChain | 8 | 5199 | <gh_stars>1-10
"""Integration test: Test partition of piChain nodes.
Note: run tests with default setting values in config.py.
"""
import time
from tests.util import MultiNodeTest
class MultiNodeTestPartition(MultiNodeTest):
def test_scenario30_partition(self):
self.start_processes_with_test_scenario(30, 5)
time.sleep(8)
self.terminate_processes()
node0_blocks = self.extract_committed_blocks_single_process(0)
node1_blocks = self.extract_committed_blocks_single_process(1)
node2_blocks = self.extract_committed_blocks_single_process(2)
node3_blocks = self.extract_committed_blocks_single_process(3)
node4_blocks = self.extract_committed_blocks_single_process(4)
assert len(node0_blocks) > 0
assert node0_blocks == node1_blocks
assert node2_blocks == node1_blocks
assert node3_blocks == node1_blocks
assert node4_blocks == node1_blocks
def test_scenario31_partition(self):
self.start_processes_with_test_scenario(31, 5)
time.sleep(8)
self.terminate_processes()
node0_blocks = self.extract_committed_blocks_single_process(0)
node1_blocks = self.extract_committed_blocks_single_process(1)
node2_blocks = self.extract_committed_blocks_single_process(2)
node3_blocks = self.extract_committed_blocks_single_process(3)
node4_blocks = self.extract_committed_blocks_single_process(4)
assert len(node0_blocks) > 0
assert node0_blocks == node1_blocks
assert node2_blocks == node1_blocks
assert node3_blocks == node1_blocks
assert node4_blocks == node1_blocks
def test_scenario32_partition(self):
self.start_processes_with_test_scenario(32, 5)
time.sleep(15)
self.terminate_processes()
node0_blocks = self.extract_committed_blocks_single_process(0)
node1_blocks = self.extract_committed_blocks_single_process(1)
node2_blocks = self.extract_committed_blocks_single_process(2)
node3_blocks = self.extract_committed_blocks_single_process(3)
node4_blocks = self.extract_committed_blocks_single_process(4)
assert len(node0_blocks) > 0
assert node0_blocks == node1_blocks
assert node2_blocks == node1_blocks
assert node3_blocks == node1_blocks
assert node4_blocks == node1_blocks
| 2.5 | 2 |