max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
upcfcardsearch/c8.py | ProfessorSean/Kasutamaiza | 0 | 3900 | <gh_stars>0
import discord
from discord.ext import commands
from discord.utils import get
class c8(commands.Cog, name="c8"):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name='Sacrosanct_Devouring_Pyre', aliases=['c8'])
async def example_embed(self, ctx):
embed = discord.Embed(title='Sacrosanct Devouring Pyre',
color=0xBC5A84)
embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2308475.jpg')
embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3', inline=True)
embed.add_field(name='Type', value='Trap/Normal', inline=False)
embed.add_field(name='Card Effect', value='Tribute 2 monsters, then target 2 monsters; destroy those targets. You can only activate 1 "Sacrosanct Devouring Pyre" per turn.', inline=False)
embed.set_footer(text='Set Code: ANCF')
await ctx.send(embed=embed)
def setup(bot: commands.Bot):
bot.add_cog(c8(bot)) | 2.765625 | 3 |
SVMmodel_withSKF.py | tameney22/DCI-Capstone | 0 | 3901 | """
This script uses the preprocessed data to train the SVM model that performs
the classification. I am using Stratified K-Fold Cross-Validation so that class
imbalance does not bias the estimate of the model's accuracy.
REFERENCE: https://medium.com/@bedigunjit/simple-guide-to-text-classification-nlp-using-svm-and-naive-bayes-with-python-421db3a72d34
"""
import numpy as np
import pandas as pd
from sklearn import model_selection, svm
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import StratifiedKFold
# Open the preprocessed CSV
df = pd.read_csv("preprocessed.csv", index_col=0)
print(df.head())
print("SPLITTING TRAIN-TEST")
x = df["Text"]
y = df["PublicationTitle"]
train_x, test_x, train_y, test_y = model_selection.train_test_split(
df["Text"], df["PublicationTitle"], test_size=0.3)
# Label encode the target variable to transform categorical data of string
# type into numerical values the model can understand
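# e.g. LabelEncoder().fit_transform(["cat", "dog", "cat"]) -> array([0, 1, 0])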
encoder = LabelEncoder()
# train_y = encoder.fit_transform(train_y)
# test_y = encoder.fit_transform(test_y)
# Word vectorization
# turning a collection of text documents into numerical feature vectors.
# We are using Term Frequency - Inverse Document Frequency (TF-IDF).
tfidf_vect = TfidfVectorizer(max_features=5000)
tfidf_vect.fit(df["Text"])
# train_x_tfidf = tfidf_vect.transform(train_x)
# test_x_tfidf = tfidf_vect.transform(test_x)
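# Note: transform() returns a sparse matrix of shape (n_documents, vocabulary_size),
# with at most 5000 columns here because of max_features=5000.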
x_tfidf = tfidf_vect.transform(df["Text"])
y = encoder.fit_transform(y)
# print(tfidf_vect.vocabulary_)
# Fit the training dataset to the classifier
print("TRAINING THE MODEL")
SVM = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto')
skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=1)
accuracies = []
fold = 1
for train_idx, test_idx in skf.split(x, y):
print("Working on fold", fold)
x_train_fold, x_test_fold = x_tfidf[train_idx], x_tfidf[test_idx]
y_train_fold, y_test_fold = y[train_idx], y[test_idx]
SVM.fit(x_train_fold, y_train_fold)
acc = SVM.score(x_test_fold, y_test_fold)
print("Acc", fold, ":", acc)
accuracies.append(acc)
fold += 1
print("ACCURACIES:", accuracies)
print("Max Accuracy:", np.max(accuracies))
print("Min Accuracy:", np.min(accuracies))
print("Mean of Accuracies:", np.mean(accuracies))
print("STD of Accuracies:", np.std(accuracies))
# print("RUNNING TEST PREDICTIONS")
# predictions = SVM.predict(test_x_tfidf)
# # Calculate accuracy score
# accuracy = accuracy_score(test_y, predictions)
# print("Accuracy:", str(accuracy * 100) + "%")
| 3.53125 | 4 |
red_dwarf/entrypoints/project_management.py | JesseMaitland/red-dwarf | 0 | 3902 | <gh_stars>0
from rsterm import EntryPoint
from red_dwarf.project import provide_project_context, ProjectContext
class InitProject(EntryPoint):
@provide_project_context
def run(self, project_context: ProjectContext) -> None:
project_context.init_project()
| 1.390625 | 1 |
src/consensus.py | dschwoerer/samscripts | 0 | 3903 | <reponame>dschwoerer/samscripts
#! /usr/bin/env python
# Copyright <NAME>, 2015. www.sovic.org
#
# Creates a pileup from a given SAM/BAM file, and calls consensus bases (or variants).
import os
import sys
import operator
import subprocess
def increase_in_dict(dict_counter, value):
try:
dict_counter[value] += 1
except:
dict_counter[value] = 1
def process_mpileup_line(
line,
line_number,
ret_variant_list,
ret_vcf_list,
ret_snp_count,
ret_insertion_count,
ret_deletion_count,
ret_num_undercovered_bases,
ret_num_called_bases,
ret_num_correct_bases,
ret_coverage_sum,
coverage_threshold,
verbose=False,
):
# Split the line, and perform a sanity check.
split_line = line.strip().split("\t")
if len(split_line) < 5 or len(split_line) > 6:
sys.stderr.write(line + "\n")
return 0
ref_name = split_line[0]
position = split_line[1]
ref_base = split_line[2]
coverage = split_line[3]
original_bases = split_line[4]
if len(split_line) == 6:
qualities = split_line[5]
bases = ""
# Replace the '.' and ',' signs with the actual reference base.
i = 0
while i < len(original_bases):
if original_bases[i] == "." or original_bases[i] == ",":
bases += ref_base
else:
bases += original_bases[i]
i += 1
base_counts = {}
insertion_count = 0
current_base_deletion_count = 0
deletion_count = 0
insertion_event_counts = {}
deletion_event_counts = {}
end_counts = 0
# print 'position: %s' % position;
# print 'bases: "%s"' % bases;
# print 'line_number: %d' % line_number;
# print line;
# print '';
# sys.stdout.flush();
i = 0
while i < len(bases):
base = bases[i]
if base == r"^":
# This is the starting position of a read. It encodes two
# symbols: '^' marking the read start and a char marking the
# mapping quality of the read.
# increase_in_dict(base_counts, bases[i + 1].upper());
i += 1
# Advance by only 1 here; the shared i += 1 at the bottom of the loop does the rest.
elif base == r"$":
# This marks the end of a read.
end_counts += 1
elif base == r"*":
# This is a deletion, just count it.
current_base_deletion_count += 1
elif base == r"-":
# This marks the occurrence of deletions. It is a composite object
# consisting of: the special character '-', the number of the deleted bases
# and the actual bases that are deleted (these bases follow the current position).
# In our approach, we ignore this case, because we count deletions one by one
# through the '*' character.
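# Worked example (illustrative): for a pileup chunk "-2AC" the parser reads the
# count 2, records the deletion event "AC" and skips those characters, while the
# deleted positions themselves still appear later as '*' symbols.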
# Get the number of bases that need to be skipped in the string.
j = i + 1
while bases[j] in "0123456789":
j += 1
num_bases = int(bases[(i + 1) : j])
skip_bases = (j - i) + num_bases - 1
deletion_count += 1
deletion = bases[j : (j + num_bases)].upper()
increase_in_dict(deletion_event_counts, deletion)
# Skip the length of the numeric entry plus the actual number of bases
# that need to be skipped.
i += skip_bases
elif base == r"+":
# This marks the occurrence of an insertion. It is a composite object
# consisting of: the special character '+', the number of the inserted bases
# and the actual bases that are inserted (these bases follow the current position).
# Similar to the deletion marking, but here we actually care about the bases,
# and we need to make an allele-aware count.
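# Worked example (illustrative): for a pileup chunk "+2AT" the parser reads the
# count 2 and records the inserted allele "AT" in insertion_event_counts, so
# different inserted sequences are counted separately (allele-aware).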
# Get the number of bases that are inserted;
j = i + 1
while bases[j] in "0123456789":
j += 1
num_bases = int(bases[(i + 1) : j])
skip_bases = (j - i) + num_bases - 1
insertion_count += 1
insertion = bases[j : (j + num_bases)].upper()
increase_in_dict(insertion_event_counts, insertion)
i += skip_bases
else:
increase_in_dict(base_counts, bases[i].upper())
i += 1
# TODO: An additional problematic case, discovered on 03.11.2014 when analyzing BWA-MEM's mpileup.
# There are pileup bases that do not have any actual bases, but only the '*' symbols. How should this be handled properly?
# Example line from the mpileup file:
# gi|48994873|gb|U00096.2|_Escherichia_coli_str._K-12_substr._MG1655,_complete_genome 1938202 T 20 ******************** 8,2*#-;)$B>2$1&D-
# I chose to handle them as undercovered bases.
non_indel_coverage_current_base = int(coverage) - current_base_deletion_count
if verbose == True:
sys.stdout.write("%s\nbase_counts: %s\n" % (line.strip(), str(base_counts)))
# EDIT: Previously I compared the total coverage of the current base with the coverage threshold.
# However, the total coverage also accounts for the deletions denoted with the '*' sign, which I think
# isn't relevant, as deletions are counted before they occur, and at that point it is already decided whether there is going
# to be a deletion event. If we wound up at this base (i.e. this base didn't get skipped because of a deletion
# consensus), then the deletions on this base are ignored.
# if (int(coverage) < coverage_threshold or int(coverage) == current_base_deletion_count):
# if (non_indel_coverage_current_base < coverage_threshold):
if int(coverage) < coverage_threshold:
ret_num_undercovered_bases[0] += 1
# ret_coverage_sum[0] += 0;
ret_coverage_sum[0] += int(coverage)
# TODO: Should I count total coverage of this base, or the non_indel_coverage_current_base?
sorted_base_counts = [["A", 0], ["C", 0], ["T", 0], ["G", 0]]
sorted_base_counts = sorted(
list(base_counts.items()), key=operator.itemgetter(1)
)
try:
most_common_base_count = sorted_base_counts[-1][1]
except Exception as e:
most_common_base_count = 0
pass
# variant_line = 'undercovered1\tpos = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s' % (position, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
# ret_variant_list.append(variant_line);
variant_line = (
"undercovered1\tpos = %s\tref = %s\tcoverage = %d\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s"
% (
position,
ref_name,
int(coverage),
str(sorted_base_counts),
str(insertion_event_counts),
str(deletion_event_counts),
)
)
ret_variant_list.append(variant_line)
### VCF output ###
qual = 1000
info = "DP=%s;TYPE=snp" % (coverage)
ref_field = ref_base
alt_field = "N"
vcf_line = "%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s" % (
ref_name,
position,
ref_field,
alt_field,
qual,
info,
)
ret_vcf_list.append(vcf_line)
##################
else:
ret_num_called_bases[0] += 1
ret_coverage_sum[0] += int(coverage)
# TODO: Should I count total coverage of this base, or the non_indel_coverage_current_base?
most_common_base_count = 0
### Handling base consensus.
sorted_base_counts = sorted(
list(base_counts.items()), key=operator.itemgetter(1)
)
try:
most_common_base_count = sorted_base_counts[-1][1]
except Exception as e:
pass
# sys.stderr.write(str(e) + '\n');
# sys.stderr.write('sorted_base_counts:\n');
# sys.stderr.write(str(sorted_base_counts) + '\n');
# sys.stderr.write('base_counts:\n');
# sys.stderr.write(str(base_counts) + '\n');
# sys.stderr.write('original_bases:\n');
# sys.stderr.write(str(original_bases) + '\n');
# sys.stderr.write('line:\n');
# sys.stderr.write(line.strip() + '\n');
# most_common_base_count = 0;
# Allow for the case where there are multiple equally good choices.
# In this case, we prefer the choice which is equal to the reference.
is_good = False
for base_count in sorted_base_counts:
if base_count[1] == most_common_base_count:
if base_count[0] == ref_base:
is_good = True
break
if is_good == False:
if len(sorted_base_counts) > 0:
ret_snp_count[0] += 1
# ret_variant_list.append(line_number);
variant_line = (
"SNP\tpos = %s\tref = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s"
% (
position,
ref_name,
int(coverage),
non_indel_coverage_current_base,
most_common_base_count,
ref_base,
("{}")
if (len(sorted_base_counts) == 0)
else (str(sorted_base_counts[-1][0])),
str(sorted_base_counts),
str(insertion_event_counts),
str(deletion_event_counts),
line.strip(),
)
)
ret_variant_list.append(variant_line)
### VCF output ###
alt_base = (
("{}")
if (len(sorted_base_counts) == 0)
else (str(sorted_base_counts[-1][0]))
)
qual = 1000
info = "DP=%s;TYPE=snp" % (coverage)
ref_field = ref_base
alt_field = alt_base
vcf_line = "%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s" % (
ref_name,
position,
ref_field,
alt_field,
qual,
info,
)
ret_vcf_list.append(vcf_line)
##################
else:
sys.stderr.write(
"\nWarning: a SNP was detected, but there were no bases in the sorted_base_counts!"
)
variant_line = (
"SNP\tpos = %s\tref = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s"
% (
position,
ref_name,
int(coverage),
non_indel_coverage_current_base,
most_common_base_count,
ref_base,
("{}")
if (len(sorted_base_counts) == 0)
else (str(sorted_base_counts[-1][0])),
str(sorted_base_counts),
str(insertion_event_counts),
str(deletion_event_counts),
line.strip(),
)
)
sys.stderr.write("\n")
else:
ret_num_correct_bases[0] += 1
if verbose == True:
sys.stdout.write("Reference base: %s\n" % (ref_base))
sys.stdout.write("Consensus base: %s\n\n" % (base_count[0]))
# if (int(position) == 100000 or int(position) == 1000000 or int(position) == 2000000 or int(position) == 3000000 or int(position) == 4000000):
# print '\nTEST\tpos = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s\n' % (position, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
### Handling indel consensus.
### Use a different coverage threshold here. We are interested even in the reads
### which had a '*' at the current position (because we don't know where the deletion ends).
non_indel_coverage_next_base = (
int(coverage) - end_counts - deletion_count - insertion_count
)
if (
non_indel_coverage_next_base + deletion_count + insertion_count
) > coverage_threshold:
# Sanity check, just to see if there actually were any insertions (to avoid index out of bounds error).
# If there are insertions, get the most common one.
if len(list(insertion_event_counts.keys())) > 0:
sorted_insertion_counts = sorted(
list(insertion_event_counts.items()), key=operator.itemgetter(1)
)
most_common_insertion_count = sorted_insertion_counts[-1][1]
most_common_insertion_length = len(sorted_insertion_counts[-1][0])
insertion_unique = (
True
if (
sum(
[
int(insertion_count[1] == most_common_insertion_count)
for insertion_count in sorted_insertion_counts
]
)
== 1
)
else False
)
else:
most_common_insertion_count = 0
most_common_insertion_length = 0
insertion_unique = False
# Sanity check, just to see if there actually were any deletions (to avoid index out of bounds error).
# If there are deletions, get the most common one.
if len(list(deletion_event_counts.keys())) > 0:
sorted_deletion_counts = sorted(
list(deletion_event_counts.items()), key=operator.itemgetter(1)
)
most_common_deletion_count = sorted_deletion_counts[-1][1]
most_common_deletion_length = len(sorted_deletion_counts[-1][0])
deletion_unique = (
True
if (
sum(
[
int(deletion_count[1] == most_common_deletion_count)
for deletion_count in sorted_deletion_counts
]
)
== 1
)
else False
)
else:
most_common_deletion_count = 0
most_common_deletion_length = 0
deletion_unique = False
if (
most_common_insertion_count > most_common_deletion_count
and most_common_insertion_count > non_indel_coverage_next_base
):
# In this case, insertions are a clear winner.
if insertion_unique == True:
# ret_insertion_count[0] += most_common_insertion_length;
ret_insertion_count[0] += 1
ret_num_called_bases[0] += most_common_insertion_length
# variant_line = 'insertion\t%d\t%s\t%s\t%s\t%s' % (most_common_insertion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
# ret_variant_list.append(variant_line);
try:
temp_sorted_bc = sorted_base_counts[-1][0]
except:
temp_sorted_bc = 0
indel_length = most_common_insertion_length
variant_line = (
"ins\tpos = %s\tref = %s\tnon_indel_cov_next = %d\tnon_indel_cov_curr = %d\tmost_common_insertion_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s"
% (
position,
ref_name,
non_indel_coverage_next_base,
non_indel_coverage_current_base,
most_common_insertion_count,
ref_base,
temp_sorted_bc,
str(sorted_base_counts),
str(insertion_event_counts),
str(deletion_event_counts),
line.strip(),
)
)
ret_variant_list.append(variant_line)
### Insertions in the VCF format specify the position where an insertion occurs. The REF field contains the reference base, while the ALT field contains the reference base plus the inserted bases.
### VCF output ###
alt_base = (
("{}")
if (len(sorted_base_counts) == 0)
else (str(sorted_base_counts[-1][0]))
)
qual = 1000
info = "DP=%s;TYPE=ins" % (coverage)
ref_field = ref_base
alt_field = "%s%s" % (ref_base, sorted_insertion_counts[-1][0])
vcf_line = "%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s" % (
ref_name,
position,
ref_field,
alt_field,
qual,
info,
)
ret_vcf_list.append(vcf_line)
##################
elif (
most_common_deletion_count > most_common_insertion_count
and most_common_deletion_count > non_indel_coverage_next_base
):
# In this case, deletions are a clear winner.
if deletion_unique == True:
# ret_deletion_count[0] += most_common_deletion_length;
ret_deletion_count[0] += 1
# variant_line = 'deletion\t%d\t%s\t%s\t%s\t%s' % (most_common_deletion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
# ret_variant_list.append(variant_line);
# return most_common_deletion_length;
variant_line = (
"del\tpos = %s\tref = %s\tnon_indel_cov_next = %d\tnon_indel_cov_curr = %d\tmost_common_deletion_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s"
% (
position,
ref_name,
non_indel_coverage_next_base,
non_indel_coverage_current_base,
most_common_deletion_count,
ref_base,
sorted_base_counts[-1][0],
str(sorted_base_counts),
str(insertion_event_counts),
str(deletion_event_counts),
line.strip(),
)
)
ret_variant_list.append(variant_line)
### Deletions in the VCF format specify the position where a deletion occurs: the first base is a non-deleted base, and the following bases are the deleted ones.
### VCF output ###
alt_base = (
("{}")
if (len(sorted_base_counts) == 0)
else (str(sorted_base_counts[-1][0]))
)
qual = 1000
info = "DP=%s;TYPE=del" % (coverage)
ref_field = "%s%s" % (ref_base, sorted_deletion_counts[-1][0])
alt_field = ref_base
vcf_line = "%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s" % (
ref_name,
position,
ref_field,
alt_field,
qual,
info,
)
ret_vcf_list.append(vcf_line)
##################
return most_common_deletion_length
else:
# In this case, either the base count consensus wins, or the
# insertion/deletion count is ambiguous.
pass
return 0
def process_mpileup(
alignments_path,
reference_path,
mpileup_path,
coverage_threshold,
output_prefix,
thread_id=0,
bed_position="",
):
fp = None
try:
fp = open(mpileup_path, "r")
except IOError:
sys.stderr.write(
'ERROR: Could not open file "%s" for reading!\n' % mpileup_path
)
return None
ret_variant_list = []
ret_vcf_list = []
ret_snp_count = [0]
ret_insertion_count = [0]
ret_deletion_count = [0]
ret_num_undercovered_bases = [0]
ret_num_called_bases = [0]
ret_num_correct_bases = [0]
ret_coverage_sum = [0]
# lines = fp.readlines();
fp_variant = None
fp_vcf = None
if output_prefix != "":
if not os.path.exists(os.path.dirname(output_prefix)):
os.makedirs(os.path.dirname(output_prefix))
variant_file = "%s-cov_%d.variant.csv" % (output_prefix, coverage_threshold)
fp_variant = open(variant_file, "w")
vcf_file = "%s-cov_%d.variant.vcf" % (output_prefix, coverage_threshold)
fp_vcf = open(vcf_file, "w")
fp_vcf.write("##fileformat=VCFv4.0\n")
fp_vcf.write("##fileDate=20150409\n")
fp_vcf.write("##source=%s\n" % (" ".join(sys.argv)))
fp_vcf.write("##reference=%s\n" % reference_path)
fp_vcf.write('##INFO=<ID=DP,Number=1,Type=Integer,Description="Raw Depth">\n')
fp_vcf.write(
'##INFO=<ID=TYPE,Number=A,Type=String,Description="Type of each allele (snp, ins, del, mnp, complex)">\n'
)
fp_vcf.write(
'##INFO=<ID=AF,Number=1,Type=Float,Description="Allele Frequency">\n'
)
fp_vcf.write(
'##INFO=<ID=SB,Number=1,Type=Integer,Description="Phred-scaled strand bias at this position">\n'
)
fp_vcf.write(
'##INFO=<ID=DP4,Number=4,Type=Integer,Description="Counts for ref-forward bases, ref-reverse, alt-forward and alt-reverse bases">\n'
)
fp_vcf.write(
'##INFO=<ID=INDEL,Number=0,Type=Flag,Description="Indicates that the variant is an INDEL.">\n'
)
fp_vcf.write(
'##INFO=<ID=CONSVAR,Number=0,Type=Flag,Description="Indicates that the variant is a consensus variant (as opposed to a low frequency variant).">\n'
)
fp_vcf.write(
'##INFO=<ID=HRUN,Number=1,Type=Integer,Description="Homopolymer length to the right of report indel position">\n'
)
fp_vcf.write("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n")
fp_vcf.flush()
use_bed = False
bed_chromosome = ""
bed_pos_start = 0
# bed_pos_end = len(lines);
bed_pos_end = -1
if bed_position != "":
bed_split = bed_position.split(":")
if len(bed_split) != 2:
use_bed = False
else:
bed_chromosome = bed_split[0]
bed_pos_split = bed_split[1].split("-")
if len(bed_pos_split) != 2:
use_bed = False
else:
bed_pos_start = int(bed_pos_split[0])
bed_pos_end = int(bed_pos_split[1])
use_bed = True
sys.stderr.write("Using location specified through commandline:\n")
sys.stderr.write('\tChromosome: "%s"\n' % bed_chromosome)
sys.stderr.write("\tStart: %d\n" % bed_pos_start)
sys.stderr.write("\tEnd: %d\n\n" % bed_pos_end)
# i = 0;
i = 0 if (use_bed == False) else max((bed_pos_start - 10), 0)
j = 0
# while (i < bed_pos_end): # len(lines)):
num_bases_to_skip = 0
for line in fp:
# line = lines[i];
if num_bases_to_skip > 0:
num_bases_to_skip -= 1
continue
if use_bed == True:
line_split = line.strip().split("\t")
if len(line_split) > 2 and line_split[0] == bed_chromosome:
current_pos = int(line_split[1])
if current_pos < bed_pos_start or current_pos >= bed_pos_end:
i += 1
j += 1
continue
else:
# print line_split[0];
# print bed_chromosome;
i += 1
j += 1
continue
if thread_id == 0:
if (j % 1000) == 0:
sys.stderr.write(
"\r[%d] snps = %d, insertions = %d, deletions = %d, undercovered = %d, coverage = %.2f"
% (
i,
ret_snp_count[0],
ret_insertion_count[0],
ret_deletion_count[0],
ret_num_undercovered_bases[0],
(float(ret_coverage_sum[0]) / float((i + 1))),
)
)
sys.stderr.flush()
variant_list_length = len(ret_variant_list)
vcf_list_length = len(ret_vcf_list)
num_bases_to_skip = process_mpileup_line(
line,
i,
ret_variant_list,
ret_vcf_list,
ret_snp_count,
ret_insertion_count,
ret_deletion_count,
ret_num_undercovered_bases,
ret_num_called_bases,
ret_num_correct_bases,
ret_coverage_sum,
coverage_threshold,
verbose=use_bed,
)
if len(ret_variant_list) > variant_list_length and fp_variant != None:
fp_variant.write("\n".join(ret_variant_list[variant_list_length:]) + "\n")
fp_variant.flush()
if len(ret_vcf_list) > vcf_list_length and fp_vcf != None:
fp_vcf.write("\n".join(ret_vcf_list[vcf_list_length:]) + "\n")
fp_vcf.flush()
i += num_bases_to_skip
i += 1
j += 1
# if (i > 10000):
# break;
fp.close()
sys.stderr.write("\n")
if fp_variant != None:
fp_variant.close()
if fp_vcf != None:
fp_vcf.close()
summary_lines = ""
summary_lines += "alignments_file: %s\n" % alignments_path
summary_lines += "mpileup_file: %s\n" % mpileup_path
summary_lines += "coverage_threshold: %d\n" % coverage_threshold
summary_lines += "snp_count: %d\n" % ret_snp_count[0]
summary_lines += "insertion_count: %d\n" % ret_insertion_count[0]
summary_lines += "deletion_count: %d\n" % ret_deletion_count[0]
summary_lines += "num_undercovered_bases: %d\n" % ret_num_undercovered_bases[0]
summary_lines += "num_called_bases: %d\n" % ret_num_called_bases[0]
summary_lines += "num_correct_bases: %d\n" % ret_num_correct_bases[0]
summary_lines += "average_coverage: %.2f\n" % (
(float(ret_coverage_sum[0]) / float((i + 1)))
)
sys.stderr.write(summary_lines + "\n")
sys.stderr.write("\n")
if output_prefix != "":
# summary_file = output_prefix + '.conssum';
summary_file = "%s-cov_%d.variant.sum" % (output_prefix, coverage_threshold)
try:
fp_sum = open(summary_file, "w")
fp_sum.write(summary_lines)
fp_sum.close()
return summary_file
except IOError:
sys.stderr.write(
'ERROR: Could not open file "%s" for writing!\n' % (summary_file)
)
return None
return None
def main(
alignments_path,
reference_path,
coverage_threshold,
output_prefix,
thread_id=0,
bed_position="",
):
# Sanity checking the existence of the file, and the correctness of its extension.
# Also, if the input file is a SAM file, convert it to a sorted BAM.
alignments_path_bam = alignments_path
if os.path.exists(alignments_path) == False:
sys.stderr.write('ERROR: File "%s" does not exist!\n' % alignments_path)
return
if alignments_path.endswith("sam"):
# Determine the path where the new BAM file will be generated.
dir_name = os.path.dirname(alignments_path)
if dir_name == "":
dir_name = "."
alignments_path_bam = (
dir_name
+ "/"
+ os.path.splitext(os.path.basename(alignments_path))[0]
+ ".bam"
)
alignments_path_bam_exists = os.path.exists(alignments_path_bam)
# Check if a BAM file with the given name already exists.
if alignments_path_bam_exists == False or (
alignments_path_bam_exists == True
and os.path.getmtime(alignments_path)
> os.path.getmtime(alignments_path_bam)
):
# Convert the SAM file to a sorted BAM file.
command = "samtools view -bS %s | samtools sort - %s" % (
alignments_path,
os.path.splitext(alignments_path_bam)[0],
)
sys.stderr.write(command + "\n")
subprocess.call(command, shell=True)
# Create the BAM index file.
command = "samtools index %s %s.bai" % (
alignments_path_bam,
alignments_path_bam,
)
subprocess.call(command, shell=True)
elif alignments_path.endswith("bam") == False:
sys.stderr.write(
'ERROR: File extension needs to be either .sam or .bam! Input file path: "%s".\n'
% alignments_path
)
return
# Convert the sorted BAM file to a mpileup file if it doesn't exist yet.
mpileup_path = "%s.mpileup" % alignments_path_bam
mpileup_exists = os.path.exists(mpileup_path)
if mpileup_exists == False or (
mpileup_exists == True
and os.path.getmtime(alignments_path) > os.path.getmtime(mpileup_path)
):
command = "samtools mpileup -B -d 1000000 -Q 0 -A -f %s %s > %s.mpileup" % (
reference_path,
alignments_path_bam,
alignments_path_bam,
)
subprocess.call(command, shell=True)
sys.stderr.write('Processing file "%s"...\n' % alignments_path)
sys.stderr.write('Reference file "%s"...\n' % reference_path)
sys.stderr.write("Coverage threshold: %d\n" % coverage_threshold)
summary_file = process_mpileup(
alignments_path,
reference_path,
("%s.mpileup" % alignments_path_bam),
coverage_threshold,
output_prefix,
thread_id,
bed_position,
)
def CollectSummaries(
sam_files, prefix_for_intermediate_results, collective_output_file
):
fp_collect = None
try:
fp_collect = open(collective_output_file, "w")
except IOError:
sys.stderr.write(
'ERROR: Could not open file "%s" for writing!\n' % collective_output_file
)
return
for sam_file in sam_files:
summary_file = prefix_for_intermediate_results + ".sum"
try:
fp_sum = open(summary_file, "r")
lines = fp_sum.readlines()
fp_sum.close()
except IOError:
sys.stderr.write(
'ERROR: Could not open file "%s" for reading!\n' % summary_file
)
continue
fp_collect.write("".join(lines) + "\n")
fp_collect.close()
if __name__ == "__main__":
# if (len(sys.argv) < 5):
# sys.stderr.write('Usage:\n');
# sys.stderr.write('\t%s <reference_file_path> coverage_threshold <collective_output_file> <{sb}am_file_1> [<{sb}am_file_2> <{sb}am_file_3> ...]\n' % sys.argv[0]);
# sys.stderr.write('\t(If <collective_output_file> is equal to "-", no files will be written to disk.)\n');
# exit(1);
if len(sys.argv) < 5:
sys.stderr.write("Usage:\n")
sys.stderr.write(
"\t%s <reference_file_path> coverage_threshold <output_prefix> <{sb}am_file_> [position]\n"
% sys.argv[0]
)
sys.stderr.write(
'\t(If <collective_output_file> is equal to "-", no files will be written to disk.)\n'
)
sys.stderr.write(
'\tPosition parameter is a string specifying "chromosome:start-end"\n\n'
)
exit(1)
reference_file = sys.argv[1]
coverage_threshold = int(sys.argv[2])
output_prefix = sys.argv[3]
sam_file = sys.argv[4]
bed_position = ""
if len(sys.argv) > 5:
bed_position = sys.argv[5]
# sys.stderr.write('bed_position: "%s"\n\n' % bed_position);
processes = []
if output_prefix == "-":
output_prefix = os.path.splitext(sam_file)[0]
main(sam_file, reference_file, coverage_threshold, output_prefix, 0, bed_position)
# if (output_prefix != '-'):
# CollectSummaries([sam_file], output_prefix, output_prefix + '.variant.sum');
| 2.4375 | 2 |
web/addons/account_payment/wizard/account_payment_populate_statement.py | diogocs1/comps | 0 | 3904 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
class account_payment_populate_statement(osv.osv_memory):
_name = "account.payment.populate.statement"
_description = "Account Payment Populate Statement"
_columns = {
'lines': fields.many2many('payment.line', 'payment_line_rel_', 'payment_id', 'line_id', 'Payment Lines')
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
line_obj = self.pool.get('payment.line')
res = super(account_payment_populate_statement, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
line_ids = line_obj.search(cr, uid, [
('move_line_id.reconcile_id', '=', False),
('bank_statement_line_id', '=', False),
('move_line_id.state','=','valid')])
line_ids.extend(line_obj.search(cr, uid, [
('move_line_id.reconcile_id', '=', False),
('order_id.mode', '=', False),
('move_line_id.state','=','valid')]))
domain = '[("id", "in", '+ str(line_ids)+')]'
doc = etree.XML(res['arch'])
nodes = doc.xpath("//field[@name='lines']")
for node in nodes:
node.set('domain', domain)
res['arch'] = etree.tostring(doc)
return res
def populate_statement(self, cr, uid, ids, context=None):
line_obj = self.pool.get('payment.line')
statement_obj = self.pool.get('account.bank.statement')
statement_line_obj = self.pool.get('account.bank.statement.line')
currency_obj = self.pool.get('res.currency')
voucher_obj = self.pool.get('account.voucher')
voucher_line_obj = self.pool.get('account.voucher.line')
move_line_obj = self.pool.get('account.move.line')
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
line_ids = data['lines']
if not line_ids:
return {'type': 'ir.actions.act_window_close'}
statement = statement_obj.browse(cr, uid, context['active_id'], context=context)
for line in line_obj.browse(cr, uid, line_ids, context=context):
ctx = context.copy()
ctx['date'] = line.ml_maturity_date  # was value_date earlier, but this field no longer exists
amount = currency_obj.compute(cr, uid, line.currency.id,
statement.currency.id, line.amount_currency, context=ctx)
if not line.move_line_id.id:
continue
context = dict(context, move_line_ids=[line.move_line_id.id])
result = voucher_obj.onchange_partner_id(cr, uid, [], partner_id=line.partner_id.id, journal_id=statement.journal_id.id, amount=abs(amount), currency_id= statement.currency.id, ttype='payment', date=line.ml_maturity_date, context=context)
if line.move_line_id:
voucher_res = {
'type': 'payment',
'name': line.name,
'partner_id': line.partner_id.id,
'journal_id': statement.journal_id.id,
'account_id': result['value'].get('account_id', statement.journal_id.default_credit_account_id.id),
'company_id': statement.company_id.id,
'currency_id': statement.currency.id,
'date': line.date or time.strftime('%Y-%m-%d'),
'amount': abs(amount),
'period_id': statement.period_id.id,
}
voucher_id = voucher_obj.create(cr, uid, voucher_res, context=context)
voucher_line_dict = {}
for line_dict in result['value']['line_cr_ids'] + result['value']['line_dr_ids']:
move_line = move_line_obj.browse(cr, uid, line_dict['move_line_id'], context)
if line.move_line_id.move_id.id == move_line.move_id.id:
voucher_line_dict = line_dict
if voucher_line_dict:
voucher_line_dict.update({'voucher_id': voucher_id})
voucher_line_obj.create(cr, uid, voucher_line_dict, context=context)
st_line_id = statement_line_obj.create(cr, uid, {
'name': line.order_id.reference or '?',
'amount': - amount,
'partner_id': line.partner_id.id,
'statement_id': statement.id,
'ref': line.communication,
}, context=context)
line_obj.write(cr, uid, [line.id], {'bank_statement_line_id': st_line_id})
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 1.851563 | 2 |
libqtile/widget/imapwidget.py | akloster/qtile | 1 | 3905 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import imaplib
import re
import keyring
from libqtile.log_utils import logger
from libqtile.widget import base
class ImapWidget(base.ThreadPoolText):
"""Email IMAP widget
This widget will scan one of your imap email boxes and report the number of
unseen messages present. I've configured it to only work with imap with
ssl. Your password is obtained from the Gnome Keyring.
Writing your password to the keyring initially is as simple as (changing
out <userid> and <password> for your userid and password):
1) create the file ~/.local/share/python_keyring/keyringrc.cfg with the
following contents::
[backend]
default-keyring=keyring.backends.Gnome.Keyring
keyring-path=/home/<userid>/.local/share/keyring/
2) Execute the following python shell script once::
#!/usr/bin/env python3
import keyring
user = <userid>
password = <password>
keyring.set_password('imapwidget', user, password)
mbox names must include the path to the mbox (except for the default
INBOX). So, for example if your mailroot is ``~/Maildir``, and you want to
look at the mailbox at HomeMail/fred, the mbox setting would be:
``mbox="~/Maildir/HomeMail/fred"``. Note the nested sets of quotes! Labels
can be whatever you choose, of course.
Widget requirements: keyring_.
.. _keyring: https://pypi.org/project/keyring/
"""
defaults = [
('mbox', '"INBOX"', 'mailbox to fetch'),
('label', 'INBOX', 'label for display'),
('user', None, 'email username'),
('server', None, 'email server name'),
]
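# Example use in a qtile bar (values below are placeholders, not defaults):
# widget.ImapWidget(user='jdoe', server='imap.example.com', mbox='"INBOX"', label='Mail')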
def __init__(self, **config):
base.ThreadPoolText.__init__(self, "", **config)
self.add_defaults(ImapWidget.defaults)
password = keyring.get_password('imapwidget', self.user)
if password is not None:
self.password = password
else:
logger.critical('Gnome Keyring Error')
def poll(self):
im = imaplib.IMAP4_SSL(self.server, 993)
if self.password == '<PASSWORD>':
self.text = 'Gnome Keyring Error'
else:
im.login(self.user, self.password)
status, response = im.status(self.mbox, '(UNSEEN)')
self.text = response[0].decode()
self.text = self.label + ': ' + re.sub(r'\).*$', '', re.sub(r'^.*N\s', '', self.text))
im.logout()
return self.text
| 1.773438 | 2 |
game/views/tests/game_definition_view_test.py | dimadk24/english-fight-api | 0 | 3906 | <reponame>dimadk24/english-fight-api
from rest_framework.response import Response
from rest_framework.test import APIClient
from game.models import GameDefinition, AppUser
def create_game_definition(api_client: APIClient) -> Response:
return api_client.post("/api/game_definition")
def get_game_definition(api_client: APIClient, game_def_id: str) -> Response:
return api_client.get(f"/api/game_definition/{game_def_id}")
def test_returns_game_def_to_the_current_user_by_hash_id(api_client):
post_game_def_response = create_game_definition(api_client)
assert post_game_def_response.status_code == 201
game_def_id = post_game_def_response.data["id"]
assert isinstance(game_def_id, str)
get_game_def_response = get_game_definition(api_client, game_def_id)
assert get_game_def_response.status_code == 200
assert get_game_def_response.data == post_game_def_response.data
def test_returns_game_def_to_another_user_by_hash_id(api_client):
post_game_def_response = create_game_definition(api_client)
assert post_game_def_response.status_code == 201
game_def_id = post_game_def_response.data["id"]
assert isinstance(game_def_id, str)
user2 = AppUser.objects.create(vk_id=2, username=2)
api_client.force_authenticate(user2)
get_game_def_response = get_game_definition(api_client, game_def_id)
assert get_game_def_response.status_code == 200
assert get_game_def_response.data == post_game_def_response.data
def test_game_def_not_found_by_int_id(api_client):
post_game_def_response = create_game_definition(api_client)
assert post_game_def_response.status_code == 201
game_def_id = post_game_def_response.data["id"]
int_game_def_id = GameDefinition.objects.get(pk=game_def_id).id.id
assert isinstance(int_game_def_id, int)
get_game_def_response = get_game_definition(
api_client, str(int_game_def_id)
)
assert get_game_def_response.status_code == 404
assert get_game_def_response.data == {"detail": "Страница не найдена."}
def test_game_def_permission_denied_if_started(api_client):
post_game_def_response = create_game_definition(api_client)
game_def_id = post_game_def_response.data["id"]
GameDefinition.objects.filter(id=game_def_id).update(started=True)
get_game_def_response = get_game_definition(api_client, game_def_id)
assert get_game_def_response.status_code == 403
assert get_game_def_response.data == {
'detail': 'К игре уже нельзя подключиться'
}
| 2.4375 | 2 |
2-Python-Fundamentals (Jan 2021)/Course-Exercises-and-Exams/08-Text-Processing/01_Lab/02-Repeat-Strings.py | karolinanikolova/SoftUni-Software-Engineering | 0 | 3907 | # 2. Repeat Strings
# Write a program that reads a list of strings. Each string is repeated N times, where N is the length of the string. Print the concatenated string.
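# Example: input "aa bb" -> "aa" * 2 + "bb" * 2 -> output "aaaabbbb"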
strings = input().split()
output_string = ""
for string in strings:
N = len(string)
output_string += string * N
print(output_string)
| 4.375 | 4 |
cloudkeeperV1/plugins/cleanup_aws_loadbalancers/test/test_args.py | mesosphere/cloudkeeper | 99 | 3908 | from cklib.args import get_arg_parser, ArgumentParser
from cloudkeeper_plugin_cleanup_aws_loadbalancers import CleanupAWSLoadbalancersPlugin
def test_args():
arg_parser = get_arg_parser()
CleanupAWSLoadbalancersPlugin.add_args(arg_parser)
arg_parser.parse_args()
assert ArgumentParser.args.cleanup_aws_loadbalancers is False
assert ArgumentParser.args.cleanup_aws_loadbalancers_age == "7 days"
| 2.09375 | 2 |
mayan/apps/metadata/migrations/0011_auto_20180917_0645.py | prezi/mayan-edms | 4 | 3909 | <reponame>prezi/mayan-edms<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-09-17 06:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('metadata', '0010_auto_20180823_2353'),
]
operations = [
migrations.AlterField(
model_name='documentmetadata',
name='value',
field=models.CharField(blank=True, db_index=True, help_text='The actual value stored in the metadata type field for the document.', max_length=255, null=True, verbose_name='Value'),
),
migrations.AlterField(
model_name='metadatatype',
name='name',
field=models.CharField(help_text='Name used by other apps to reference this metadata type. Do not use python reserved words, or spaces.', max_length=48, unique=True, verbose_name='Name'),
),
]
| 1.703125 | 2 |
algorithm/dfs/boj_1260.py | ruslanlvivsky/python-algorithm | 3 | 3910 | <filename>algorithm/dfs/boj_1260.py
def dfs(V):
print(V, end=' ')
visited[V] = True
for n in graph[V]:
if not visited[n]:
dfs(n)
def dfs_s(V):
stack = [V]
visited[V] = True
while stack:
now = stack.pop()
print(now, end=' ')
for n in graph[now]:
if not visited[n]:
stack.append(n)
visited[n] = True
def bfs(V):
visited[V] = True
queue = [V]
while queue:
now = queue.pop(0)
print(now, end=' ')
for n in graph[now]:
if not visited[n]:
queue.append(n)
visited[n] = True
N, M, V = map(int, input().strip().split())
visited = [False] * (N + 1)
graph = [[] for _ in range(N + 1)]
for i in range(M):
a, b = map(int, input().strip().split())
graph[a].append(b)
graph[b].append(a)
for i in range(1, N + 1):
graph[i].sort()
dfs(V)
visited = [False] * (N + 1)
print()
bfs(V)
| 3.375 | 3 |
redirector.py | UKPLab/DiGAT | 8 | 3911 | from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
__author__ = "<NAME>, <NAME>, and <NAME>"
__copyright__ = "Copyright 2013-2015 UKP TU Darmstadt"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__license__ = "ASL"
class Redirector(webapp.RequestHandler):
def get(self):
self.redirect("/argunit/home")
def post(self):
self.redirect("/argunit/home")
application = webapp.WSGIApplication(
[('/.*', Redirector)],
debug=True)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
| 2.453125 | 2 |
imgaug/augmenters/flip.py | pAoenix/image-Augmented | 1 | 3912 | <reponame>pAoenix/image-Augmented
"""
Augmenters that apply mirroring/flipping operations to images.
Do not import directly from this file, as the categorization is not final.
Use instead ::
from imgaug import augmenters as iaa
and then e.g. ::
seq = iaa.Sequential([
iaa.Fliplr((0.0, 1.0)),
iaa.Flipud((0.0, 1.0))
])
List of augmenters:
* Fliplr
* Flipud
"""
from __future__ import print_function, division, absolute_import
from .. import parameters as iap
import numpy as np
import six.moves as sm
from .meta import Augmenter
class Fliplr(Augmenter): # pylint: disable=locally-disabled, unused-variable, line-too-long
"""
Flip/mirror input images horizontally.
Parameters
----------
p : number or StochasticParameter, optional(default=0)
Probability of each image to get flipped.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.Fliplr(0.5)
would horizontally flip/mirror 50 percent of all input images.
>>> aug = iaa.Fliplr(1.0)
would horizontally flip/mirror all input images.
"""
def __init__(self, p=0, name=None, deterministic=False, random_state=None):
super(Fliplr, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
self.p = iap.handle_probability_param(p, "p")
def _augment_images(self, images, random_state, parents, hooks):
nb_images = len(images)
samples = self.p.draw_samples((nb_images,), random_state=random_state)
for i in sm.xrange(nb_images):
if samples[i] == 1:
images[i] = np.fliplr(images[i])
return images
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
arrs_flipped = self._augment_images(
[heatmaps_i.arr_0to1 for heatmaps_i in heatmaps],
random_state=random_state,
parents=parents,
hooks=hooks
)
for heatmaps_i, arr_flipped in zip(heatmaps, arrs_flipped):
heatmaps_i.arr_0to1 = arr_flipped
return heatmaps
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
nb_images = len(keypoints_on_images)
samples = self.p.draw_samples((nb_images,), random_state=random_state)
for i, keypoints_on_image in enumerate(keypoints_on_images):
if samples[i] == 1:
width = keypoints_on_image.shape[1]
for keypoint in keypoints_on_image.keypoints:
keypoint.x = (width - 1) - keypoint.x
return keypoints_on_images
def get_parameters(self):
return [self.p]
class Flipud(Augmenter): # pylint: disable=locally-disabled, unused-variable, line-too-long
"""
Flip/mirror input images vertically.
Parameters
----------
p : number or StochasticParameter, optional(default=0)
Probability of each image to get flipped.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.Flipud(0.5)
would vertically flip/mirror 50 percent of all input images.
>>> aug = iaa.Flipud(1.0)
would vertically flip/mirror all input images.
"""
def __init__(self, p=0, name=None, deterministic=False, random_state=None):
super(Flipud, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
self.p = iap.handle_probability_param(p, "p")
def _augment_images(self, images, random_state, parents, hooks):
nb_images = len(images)
samples = self.p.draw_samples((nb_images,), random_state=random_state)
for i in sm.xrange(nb_images):
if samples[i] == 1:
images[i] = np.flipud(images[i])
return images
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
arrs_flipped = self._augment_images(
[heatmaps_i.arr_0to1 for heatmaps_i in heatmaps],
random_state=random_state,
parents=parents,
hooks=hooks
)
for heatmaps_i, arr_flipped in zip(heatmaps, arrs_flipped):
heatmaps_i.arr_0to1 = arr_flipped
return heatmaps
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
nb_images = len(keypoints_on_images)
samples = self.p.draw_samples((nb_images,), random_state=random_state)
for i, keypoints_on_image in enumerate(keypoints_on_images):
if samples[i] == 1:
height = keypoints_on_image.shape[0]
for keypoint in keypoints_on_image.keypoints:
keypoint.y = (height - 1) - keypoint.y
return keypoints_on_images
def get_parameters(self):
return [self.p]
| 2.53125 | 3 |
2. Add Two Numbers DC(12-1-21).py | Dharaneeshwar/Leetcode | 4 | 3913 | # Time Complexity - O(n) ; Space Complexity - O(n)
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
carry = 0
out = temp = ListNode()
while l1 is not None and l2 is not None:
tempsum = l1.val + l2.val
tempsum += carry
if tempsum > 9:
carry = tempsum//10
tempsum %= 10
else:
carry = 0
temp.next = ListNode(tempsum)
temp = temp.next
l1 = l1.next
l2 = l2.next
if l1:
while l1:
tempsum = l1.val + carry
if tempsum > 9:
carry = tempsum//10
tempsum %= 10
else:
carry = 0
temp.next = ListNode(tempsum)
temp = temp.next
l1 = l1.next
elif l2:
while l2:
tempsum = l2.val + carry
if tempsum > 9:
carry = tempsum//10
tempsum %= 10
else:
carry = 0
temp.next = ListNode(tempsum)
temp = temp.next
l2 = l2.next
if carry:
temp.next = ListNode(carry)
return out.next | 3.703125 | 4 |
deepgp_dsvi/demos/step_function.py | dks28/Deep-Gaussian-Process | 21 | 3914 | <reponame>dks28/Deep-Gaussian-Process
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from gpflow.kernels import White, RBF
from gpflow.likelihoods import Gaussian
from deep_gp import DeepGP
np.random.seed(0)
tf.random.set_seed(0)
def get_data():
Ns = 300
Xs = np.linspace(-0.5, 1.5, Ns)[:, None]
N, M = 50, 25
X = np.random.uniform(0, 1, N)[:, None]
Z = np.random.uniform(0, 1, M)[:, None]
f_step = lambda x: 0. if x < 0.5 else 1.
Y = np.reshape([f_step(x) for x in X], X.shape) + np.random.randn(
*X.shape) * 1e-2
return Xs, X, Y, Z
def make_deep_GP(num_layers, X, Y, Z):
kernels = []
layer_sizes = []
for l in range(num_layers):
kernel = RBF(lengthscales=0.2, variance=1.0) + White(variance=1e-5)
kernels.append(kernel)
layer_sizes.append(1)
dgp = DeepGP(X, Y, Z, kernels, layer_sizes, Gaussian(), num_samples=100)
# init hidden layers to be near deterministic
for layer in dgp.layers[:-1]:
layer.q_sqrt.assign(layer.q_sqrt * 1e-5)
return dgp
if __name__ == '__main__':
Xs, X_train, Y_train, Z = get_data()
dgp = make_deep_GP(3, X_train, Y_train, Z)
optimizer = tf.optimizers.Adam(learning_rate=0.01, epsilon=1e-08)
for _ in range(1500):
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(dgp.trainable_variables)
objective = -dgp.elbo((X_train, Y_train))
gradients = tape.gradient(objective, dgp.trainable_variables)
optimizer.apply_gradients(zip(gradients, dgp.trainable_variables))
print(f"ELBO: {-objective.numpy()}")
samples, _, _ = dgp.predict_all_layers(Xs, num_samples=50, full_cov=True)
plt.plot(Xs, samples[-1].numpy()[:, :, 0].T, color='r', alpha=0.3)
plt.title('Deep Gaussian Process')
plt.scatter(X_train, Y_train)
plt.show() | 2.515625 | 3 |
metaspace/engine/sm/engine/annotation_lithops/moldb_pipeline.py | METASPACE2020/METASPACE | 32 | 3915 | from __future__ import annotations
import json
import logging
from contextlib import contextmanager, ExitStack
from typing import List, Dict
import pandas as pd
from lithops.storage import Storage
from lithops.storage.utils import CloudObject, StorageNoSuchKeyError
from sm.engine.annotation_lithops.build_moldb import (
build_moldb,
InputMolDb,
DbFDRData,
)
from sm.engine.annotation_lithops.calculate_centroids import (
calculate_centroids,
validate_centroids,
)
from sm.engine.annotation_lithops.executor import Executor
from sm.engine.annotation_lithops.io import (
CObj,
save_cobj,
iter_cobjects_with_prefetch,
deserialize,
)
from sm.engine.annotation_lithops.utils import jsonhash
from sm.engine.utils.db_mutex import DBMutex
from sm.engine.ds_config import DSConfig
from sm.engine.annotation.isocalc_wrapper import IsocalcWrapper
logger = logging.getLogger('annotation-pipeline')
class CentroidsCacheEntry:
def __init__(
self, executor: Executor, sm_storage: Dict, ds_config: DSConfig, moldbs: List[InputMolDb]
):
ds_hash_params = ds_config.copy()
self.ds_config = {
**ds_hash_params, # type: ignore # https://github.com/python/mypy/issues/4122
# Include the `targeted` value of databases so that a new cache entry is made if
# someone manually changes that field
'databases': [(moldb['id'], moldb['targeted']) for moldb in moldbs],
}
# Remove database_ids as it may be in a different order to moldbs
del self.ds_config['database_ids']
self.ds_hash = jsonhash(self.ds_config)
self.executor = executor
self.storage = executor.storage
self.bucket, raw_prefix = sm_storage['centroids']
self.prefix = f"{raw_prefix}/{self.ds_hash}"
self.config_key = f'{self.prefix}/ds_config.json'
self.meta_key = f'{self.prefix}/meta'
@contextmanager
def lock(self):
with DBMutex().lock(self.ds_hash, timeout=3600):
yield
def load(self):
try:
db_data_cobjs, peaks_cobjs = deserialize(
self.storage.get_object(self.bucket, self.meta_key)
)
return db_data_cobjs, peaks_cobjs
except StorageNoSuchKeyError:
return None
def save(self, db_data_cobjs: List[CObj[DbFDRData]], peaks_cobjs: List[CObj[pd.DataFrame]]):
def batch_copy(src_cobjs: List[CloudObject], dest_prefix: str, *, storage: Storage):
# If Lithops' storage supported Copy Object operations, this could be easily optimized.
# Not sure if it's worth the effort yet
result_cobjs = []
for i, data in enumerate(iter_cobjects_with_prefetch(storage, src_cobjs)):
dest_key = f'{dest_prefix}/{i:06}'
result_cobjs.append(storage.put_cloudobject(data, dest_bucket, dest_key))
return result_cobjs
dest_bucket = self.bucket
# Copy cobjs to the cache dir
new_db_data_cobjs, new_peaks_cobjs = self.executor.map(
batch_copy,
[(db_data_cobjs, f'{self.prefix}/db_data'), (peaks_cobjs, f'{self.prefix}/peaks')],
runtime_memory=1024,
)
# Save config in case it's needed for debugging
self.storage.put_cloudobject(
json.dumps(self.ds_config, indent=4), self.bucket, self.config_key
)
# Save list of cobjects. This list would be easy to reconstruct by listing keys, but
# saving a separate object as the last step of the process is helpful to confirm that
# the cache item is complete, and didn't partially fail to copy.
save_cobj(self.storage, (new_db_data_cobjs, new_peaks_cobjs), self.bucket, self.meta_key)
return new_db_data_cobjs, new_peaks_cobjs
def clear(self):
keys = self.storage.list_keys(self.bucket, self.prefix)
if keys:
logger.info(f'Clearing centroids cache {self.prefix}')
self.storage.delete_objects(self.bucket, keys)
def get_moldb_centroids(
executor: Executor,
sm_storage: Dict,
ds_config: DSConfig,
moldbs: List[InputMolDb],
debug_validate=False,
use_cache=True,
use_db_mutex=True,
):
moldb_cache = CentroidsCacheEntry(executor, sm_storage, ds_config, moldbs)
with ExitStack() as stack:
if use_db_mutex:
stack.enter_context(moldb_cache.lock())
if use_cache:
cached_val = moldb_cache.load()
else:
cached_val = None
moldb_cache.clear()
if cached_val:
db_data_cobjs, peaks_cobjs = cached_val
logger.info(
f'Loaded {len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak segms from cache'
)
else:
formula_cobjs, db_data_cobjs = build_moldb(executor, ds_config, moldbs)
isocalc_wrapper = IsocalcWrapper(ds_config)
peaks_cobjs = calculate_centroids(executor, formula_cobjs, isocalc_wrapper)
if debug_validate:
validate_centroids(executor, peaks_cobjs)
moldb_cache.save(db_data_cobjs, peaks_cobjs)
logger.info(f'Saved {len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak segms to cache')
return db_data_cobjs, peaks_cobjs
| 1.820313 | 2 |
gui/main_window/node_editor/items/connector_top_item.py | anglebinbin/Barista-tool | 1 | 3916 | from PyQt5.QtWidgets import QMenu
from gui.main_window.node_editor.items.connector_item import ConnectorItem
class ConnectorTopItem(ConnectorItem):
""" Class to provide top connector functionality """
def __init__(self, index, nodeItem, nodeEditor, parent=None):
super(ConnectorTopItem, self).__init__(index, nodeItem, nodeEditor, parent)
def isTopConnector(self):
""" Returns whether the connector is a top connector (implementation for parent class) """
return True
def isInPlace(self):
""" Returns whether the connector is connected to a in-place working layer
A top connector is in place if any connected bottom connector is in place.
(implementation for parent class) """
for connection in self._connections:
if connection.getIsInPlace():
return True
return False
def getConnectedNodes(self):
""" Returns a list of node items, connected to this connector (implementation for parent class) """
nodes = list()
# for each connection get the node connected to the bottom of the connection
for connection in self._connections:
connectionsBottomConnector = connection.getBottomConnector()
if connectionsBottomConnector is not None:
nodes.append(connectionsBottomConnector.getNodeItem())
return nodes
def addConnection(self, connection):
""" Adds a connection to the connector and sets the start of the connection to this connectors position
(implementation for parent class) """
self._connections.append(connection)
connection.setStart(self.scenePos())
def updateConnectionPositions(self):
""" Updates the connected connections, sets the start of all connected connections to this connectors position
(implementation for parent class) """
for connection in self._connections:
connection.setStart(self.scenePos())
def contextMenuEvent(self, event):
""" Context menu for the top connector """
contextMenu = QMenu()
renameTop = contextMenu.addAction("Change name")
disconnectTop = contextMenu.addAction("Disconnect")
if self.getConnectionCount() == 0:
disconnectTop.setEnabled(False)
removeTop = contextMenu.addAction("Remove")
action = contextMenu.exec_(event.screenPos())
if action is not None:
if action == removeTop:
self._nodeEditor.tryToRemoveTopBlob(self._nodeItem.getLayerID(), self._index)
elif action == renameTop:
self._nodeEditor.tryToRenameTopBlob(self)
elif action == disconnectTop:
self._nodeEditor.disconnectTopBlob(self._nodeItem.getLayerID(), self._index)
| 3.015625 | 3 |
modules/platforms/python/pyignite/api/key_value.py | DirectXceriD/gridgain | 1 | 3917 | <gh_stars>1-10
# GridGain Community Edition Licensing
# Copyright 2019 GridGain Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License") modified with Commons Clause
# Restriction; you may not use this file except in compliance with the License. You may obtain a
# copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
# Commons Clause Restriction
#
# The Software is provided to you by the Licensor under the License, as defined below, subject to
# the following condition.
#
# Without limiting other conditions in the License, the grant of rights under the License will not
# include, and the License does not grant to you, the right to Sell the Software.
# For purposes of the foregoing, “Sell” means practicing any or all of the rights granted to you
# under the License to provide to third parties, for a fee or other consideration (including without
# limitation fees for hosting or consulting/ support services related to the Software), a product or
# service whose value derives, entirely or substantially, from the functionality of the Software.
# Any license notice or attribution required by the License must also include this Commons Clause
# License Condition notice.
#
# For purposes of the clause above, the “Licensor” is Copyright 2019 GridGain Systems, Inc.,
# the “License” is the Apache License, Version 2.0, and the Software is the GridGain Community
# Edition software provided with this notice.
from typing import Iterable, Union
from pyignite.queries.op_codes import *
from pyignite.datatypes import (
Map, Bool, Byte, Int, Long, AnyDataArray, AnyDataObject,
)
from pyignite.datatypes.key_value import PeekModes
from pyignite.queries import Query, Response
from pyignite.utils import cache_id
def cache_put(
connection: 'Connection', cache: Union[str, int], key, value,
key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Puts a value with a given key to cache (overwriting existing value if any).
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given value
should be converted.
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status if a value
is written, non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_PUT,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
return query_struct.perform(connection, {
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'value': value,
})
def cache_get(
connection: 'Connection', cache: Union[str, int], key,
key_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Retrieves a value from cache by key.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and a value
retrieved on success, non-zero status and an error description on failure.
"""
query_struct = Query(
OP_CACHE_GET,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
},
response_config=[
('value', AnyDataObject),
],
)
if result.status != 0:
return result
result.value = result.value['value']
return result
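# Usage sketch: a minimal put/get round trip through the two calls above,
# assuming `conn` is an already established pyignite Connection (connection
# setup itself is out of scope here).
#
#     res = cache_put(conn, 'my-cache', 'key1', 42)
#     assert res.status == 0
#     res = cache_get(conn, 'my-cache', 'key1')
#     if res.status == 0:
#         print(res.value)  # -> 42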
def cache_get_all(
connection: 'Connection', cache: Union[str, int], keys: Iterable,
binary=False, query_id=None,
) -> 'APIResult':
"""
Retrieves multiple key-value pairs from cache.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param keys: list of keys or tuples of (key, key_hint),
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and a dict, made of
retrieved key-value pairs, non-zero status and an error description
on failure.
"""
query_struct = Query(
OP_CACHE_GET_ALL,
[
('hash_code', Int),
('flag', Byte),
('keys', AnyDataArray()),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'keys': keys,
},
response_config=[
('data', Map),
],
)
if result.status == 0:
result.value = dict(result.value)['data']
return result
def cache_put_all(
connection: 'Connection', cache: Union[str, int], pairs: dict,
binary=False, query_id=None,
) -> 'APIResult':
"""
Puts multiple key-value pairs to cache (overwriting existing associations
if any).
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param pairs: dictionary type parameters, contains key-value pairs to save.
Each key or value can be an item of representable Python type or a tuple
of (item, hint),
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status if key-value pairs
are written, non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_PUT_ALL,
[
('hash_code', Int),
('flag', Byte),
('data', Map),
],
query_id=query_id,
)
return query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'data': pairs,
},
)
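# Batch counterpart of the sketch above, again assuming an established
# Connection `conn`; keys and values may also be (item, hint) tuples as the
# docstrings describe.
#
#     cache_put_all(conn, 'my-cache', {'a': 1, 'b': 2})
#     res = cache_get_all(conn, 'my-cache', ['a', 'b'])
#     if res.status == 0:
#         print(res.value)  # -> {'a': 1, 'b': 2}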
def cache_contains_key(
connection: 'Connection', cache: Union[str, int], key,
key_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
    Returns a value indicating whether a given key is present in cache.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param binary: pass True to keep the value in binary form. False
by default,
:param query_id: a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and a bool value
retrieved on success: `True` when key is present, `False` otherwise,
non-zero status and an error description on failure.
"""
query_struct = Query(
OP_CACHE_CONTAINS_KEY,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
},
response_config=[
('value', Bool),
],
)
if result.status == 0:
result.value = result.value['value']
return result
def cache_contains_keys(
connection: 'Connection', cache: Union[str, int], keys: Iterable,
binary=False, query_id=None,
) -> 'APIResult':
"""
Returns a value indicating whether all given keys are present in cache.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param keys: a list of keys or (key, type hint) tuples,
:param binary: pass True to keep the value in binary form. False
by default,
:param query_id: a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and a bool value
retrieved on success: `True` when all keys are present, `False` otherwise,
non-zero status and an error description on failure.
"""
query_struct = Query(
OP_CACHE_CONTAINS_KEYS,
[
('hash_code', Int),
('flag', Byte),
('keys', AnyDataArray()),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'keys': keys,
},
response_config=[
('value', Bool),
],
)
if result.status == 0:
result.value = result.value['value']
return result
def cache_get_and_put(
connection: 'Connection', cache: Union[str, int], key, value,
key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Puts a value with a given key to cache, and returns the previous value
    for that key, or a null value if there was no such key.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given value
should be converted.
:param binary: pass True to keep the value in binary form. False
by default,
:param query_id: a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and an old value
or None if a value is written, non-zero status and an error description
in case of error.
"""
query_struct = Query(
OP_CACHE_GET_AND_PUT,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'value': value,
},
response_config=[
('value', AnyDataObject),
],
)
if result.status == 0:
result.value = result.value['value']
return result
def cache_get_and_replace(
connection: 'Connection', cache: Union[str, int], key, value,
key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Puts a value with a given key to cache, returning previous value
for that key, if and only if there is a value currently mapped
for that key.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given value
should be converted.
:param binary: pass True to keep the value in binary form. False
by default,
:param query_id: a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and an old value
or None on success, non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_GET_AND_REPLACE, [
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'value': value,
},
response_config=[
('value', AnyDataObject),
],
)
if result.status == 0:
result.value = result.value['value']
return result
def cache_get_and_remove(
connection: 'Connection', cache: Union[str, int], key,
key_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Removes the cache entry with specified key, returning the value.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param binary: pass True to keep the value in binary form. False
by default,
:param query_id: a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and an old value
or None, non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_GET_AND_REMOVE, [
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
},
response_config=[
('value', AnyDataObject),
],
)
if result.status == 0:
result.value = result.value['value']
return result
def cache_put_if_absent(
connection: 'Connection', cache: Union[str, int], key, value,
key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Puts a value with a given key to cache only if the key
does not already exist.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given value
should be converted.
:param binary: (optional) pass True to keep the value in binary form. False
by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status on success,
non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_PUT_IF_ABSENT,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'value': value,
},
response_config=[
('success', Bool),
],
)
if result.status == 0:
result.value = result.value['success']
return result
def cache_get_and_put_if_absent(
connection: 'Connection', cache: Union[str, int], key, value,
key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Puts a value with a given key to cache only if the key does not
already exist.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given value
should be converted.
:param binary: (optional) pass True to keep the value in binary form. False
by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and an old value
or None on success, non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_GET_AND_PUT_IF_ABSENT,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'value': value,
},
response_config=[
('value', AnyDataObject),
],
)
if result.status == 0:
result.value = result.value['value']
return result
def cache_replace(
connection: 'Connection', cache: Union[str, int], key, value,
key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
    Puts a value with a given key to cache only if the key already exists.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given value
should be converted.
:param binary: pass True to keep the value in binary form. False
by default,
:param query_id: a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and a boolean
success code, or non-zero status and an error description if something
has gone wrong.
"""
query_struct = Query(
OP_CACHE_REPLACE,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'value': value,
},
response_config=[
('success', Bool),
],
)
if result.status == 0:
result.value = result.value['success']
return result
def cache_replace_if_equals(
connection: 'Connection', cache: Union[str, int], key, sample, value,
key_hint=None, sample_hint=None, value_hint=None,
binary=False, query_id=None,
) -> 'APIResult':
"""
Puts a value with a given key to cache only if the key already exists
and value equals provided sample.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry,
:param sample: a sample to compare the stored value with,
:param value: new value for the given key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
    :param sample_hint: (optional) Ignite data type, for which
     the given sample should be converted,
:param value_hint: (optional) Ignite data type, for which the given value
should be converted,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned
as-is in response.query_id. When the parameter is omitted, a random
value is generated,
:return: API result data object. Contains zero status and a boolean
success code, or non-zero status and an error description if something
has gone wrong.
"""
query_struct = Query(
OP_CACHE_REPLACE_IF_EQUALS,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('sample', sample_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'sample': sample,
'value': value,
},
response_config=[
('success', Bool),
],
)
if result.status == 0:
result.value = result.value['success']
return result
def cache_clear(
connection: 'Connection', cache: Union[str, int], binary=False,
query_id=None,
) -> 'APIResult':
"""
Clears the cache without notifying listeners or cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned
as-is in response.query_id. When the parameter is omitted, a random
value is generated,
:return: API result data object. Contains zero status on success,
non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_CLEAR,
[
('hash_code', Int),
('flag', Byte),
],
query_id=query_id,
)
return query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
},
)
def cache_clear_key(
connection: 'Connection', cache: Union[str, int], key,
key_hint: object=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Clears the cache key without notifying listeners or cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned
as-is in response.query_id. When the parameter is omitted, a random
value is generated,
:return: API result data object. Contains zero status on success,
non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_CLEAR_KEY,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
],
query_id=query_id,
)
return query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
},
)
def cache_clear_keys(
connection: 'Connection', cache: Union[str, int], keys: list,
binary=False, query_id=None,
) -> 'APIResult':
"""
Clears the cache keys without notifying listeners or cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param keys: list of keys or tuples of (key, key_hint),
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status on success,
non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_CLEAR_KEYS,
[
('hash_code', Int),
('flag', Byte),
('keys', AnyDataArray()),
],
query_id=query_id,
)
return query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'keys': keys,
},
)
def cache_remove_key(
connection: 'Connection', cache: Union[str, int], key,
key_hint: object=None, binary=False, query_id=None,
) -> 'APIResult':
"""
    Removes an entry with a given key, notifying listeners and cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned
as-is in response.query_id. When the parameter is omitted, a random
value is generated,
:return: API result data object. Contains zero status and a boolean
success code, or non-zero status and an error description if something
has gone wrong.
"""
query_struct = Query(
OP_CACHE_REMOVE_KEY,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
},
response_config=[
('success', Bool),
],
)
if result.status == 0:
result.value = result.value['success']
return result
def cache_remove_if_equals(
connection: 'Connection', cache: Union[str, int], key, sample,
key_hint=None, sample_hint=None,
binary=False, query_id=None,
) -> 'APIResult':
"""
Removes an entry with a given key if provided value is equal to
actual value, notifying listeners and cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry,
:param sample: a sample to compare the stored value with,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
    :param sample_hint: (optional) Ignite data type, for which
     the given sample should be converted,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned
as-is in response.query_id. When the parameter is omitted, a random
value is generated,
:return: API result data object. Contains zero status and a boolean
success code, or non-zero status and an error description if something
has gone wrong.
"""
query_struct = Query(
OP_CACHE_REMOVE_IF_EQUALS,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('sample', sample_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'sample': sample,
},
response_config=[
('success', Bool),
],
)
if result.status == 0:
result.value = result.value['success']
return result
def cache_remove_keys(
connection: 'Connection', cache: Union[str, int], keys: Iterable,
binary=False, query_id=None,
) -> 'APIResult':
"""
Removes entries with given keys, notifying listeners and cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param keys: list of keys or tuples of (key, key_hint),
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status on success,
non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_REMOVE_KEYS,
[
('hash_code', Int),
('flag', Byte),
('keys', AnyDataArray()),
],
query_id=query_id,
)
return query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'keys': keys,
},
)
def cache_remove_all(
connection: 'Connection', cache: Union[str, int], binary=False,
query_id=None,
) -> 'APIResult':
"""
Removes all entries from cache, notifying listeners and cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status on success,
non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_REMOVE_ALL,
[
('hash_code', Int),
('flag', Byte),
],
query_id=query_id,
)
return query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
},
)
def cache_get_size(
connection: 'Connection', cache: Union[str, int], peek_modes=0,
binary=False, query_id=None,
) -> 'APIResult':
"""
Gets the number of entries in cache.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param peek_modes: (optional) limit count to near cache partition
(PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache
(PeekModes.BACKUP). Defaults to all cache partitions (PeekModes.ALL),
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and a number of
cache entries on success, non-zero status and an error description
otherwise.
"""
if not isinstance(peek_modes, (list, tuple)):
if peek_modes == 0:
peek_modes = []
else:
peek_modes = [peek_modes]
query_struct = Query(
OP_CACHE_GET_SIZE,
[
('hash_code', Int),
('flag', Byte),
('peek_modes', PeekModes),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'peek_modes': peek_modes,
},
response_config=[
('count', Long),
],
)
if result.status == 0:
result.value = result.value['count']
return result
| 1.640625 | 2 |
wrt/wrt-manifest-tizen-tests/const.py | linshen/crosswalk-test-suite | 0 | 3918 | #!/usr/bin/env python
import sys, os
import itertools, shutil
path = os.path.abspath(__file__)
path = os.path.split(path)[0]
os.chdir(path)
print path
device_ssh_ip = ""
ssh_device = device_ssh_ip.split(",")
path_tcs = path + "/tcs"
path_result= path + "/result"
path_allpairs = path + "/allpairs"
path_resource = path + "/resource"
seed_file = path_allpairs + "/positive/input_seed.txt"
seed_negative = path_allpairs + "/negative"
seed_positive =path_allpairs + "/positivee"
seed_file_na = seed_negative + "/input_seed_negative.txt"
selfcomb_file = path_allpairs + "/selfcomb.txt"
output_file = path_allpairs + "/output.txt"
output_file_ne = path_allpairs + "/output_negative.txt"
report_path = path + "/report"
report_file = report_path + "/wrt-manifest-tizen-tests.xml"
report_summary_file = report_path + "/summary.xml"
sh_path = path + "/script"
log_path = report_path + "/log_"
device_path = "/home/app/content/tct/"
run_times = 3
version="6.35.1.2"
name="wrt-manifest-tizen-tests"
| 2.28125 | 2 |
api/src/opentrons/protocol_engine/commands/thermocycler/open_lid.py | Opentrons/protocol_framework | 0 | 3919 | <reponame>Opentrons/protocol_framework
"""Command models to open a Thermocycler's lid."""
from __future__ import annotations
from typing import Optional, TYPE_CHECKING
from typing_extensions import Literal, Type
from pydantic import BaseModel, Field
from ..command import AbstractCommandImpl, BaseCommand, BaseCommandCreate
from opentrons.protocol_engine.types import MotorAxis
if TYPE_CHECKING:
from opentrons.protocol_engine.state import StateView
from opentrons.protocol_engine.execution import EquipmentHandler, MovementHandler
OpenLidCommandType = Literal["thermocycler/openLid"]
class OpenLidParams(BaseModel):
"""Input parameters to open a Thermocycler's lid."""
moduleId: str = Field(..., description="Unique ID of the Thermocycler.")
class OpenLidResult(BaseModel):
"""Result data from opening a Thermocycler's lid."""
class OpenLidImpl(AbstractCommandImpl[OpenLidParams, OpenLidResult]):
"""Execution implementation of a Thermocycler's open lid command."""
def __init__(
self,
state_view: StateView,
equipment: EquipmentHandler,
movement: MovementHandler,
**unused_dependencies: object,
) -> None:
self._state_view = state_view
self._equipment = equipment
self._movement = movement
async def execute(self, params: OpenLidParams) -> OpenLidResult:
"""Open a Thermocycler's lid."""
thermocycler_state = self._state_view.modules.get_thermocycler_module_substate(
params.moduleId
)
thermocycler_hardware = self._equipment.get_module_hardware_api(
thermocycler_state.module_id
)
# move the pipettes and gantry over the trash
# do not home plunger axes because pipettes may be holding liquid
await self._movement.home(
[
MotorAxis.X,
MotorAxis.Y,
MotorAxis.RIGHT_Z,
MotorAxis.LEFT_Z,
]
)
if thermocycler_hardware is not None:
await thermocycler_hardware.open()
return OpenLidResult()
class OpenLid(BaseCommand[OpenLidParams, OpenLidResult]):
"""A command to open a Thermocycler's lid."""
commandType: OpenLidCommandType = "thermocycler/openLid"
params: OpenLidParams
result: Optional[OpenLidResult]
_ImplementationCls: Type[OpenLidImpl] = OpenLidImpl
class OpenLidCreate(BaseCommandCreate[OpenLidParams]):
"""A request to open a Thermocycler's lid."""
commandType: OpenLidCommandType = "thermocycler/openLid"
params: OpenLidParams
_CommandCls: Type[OpenLid] = OpenLid
| 2.453125 | 2 |
deep_utils/nlp/utils/utils.py | pooya-mohammadi/deep_utils | 36 | 3920 | def multiple_replace(text: str, chars_to_mapping: dict):
"""
This function is used to replace a dictionary of characters inside a text string
:param text:
:param chars_to_mapping:
:return:
"""
import re
pattern = "|".join(map(re.escape, chars_to_mapping.keys()))
return re.sub(pattern, lambda m: chars_to_mapping[m.group()], str(text))
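# Minimal usage sketch (the mapping below is only an illustrative assumption):
#     mapping = {'\u200c': ' ', '\t': ' '}
#     cleaned = multiple_replace('a\tb', mapping)  # -> 'a b'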
| 4.125 | 4 |
apps/gamedoc/models.py | mehrbodjavadi79/AIC21-Backend | 3 | 3921 | <reponame>mehrbodjavadi79/AIC21-Backend<filename>apps/gamedoc/models.py
from django.db import models
# Create your models here.
class Gamedoc(models.Model):
link = models.URLField(max_length=500)
title = models.CharField(max_length=500)
repo_name = models.CharField(max_length=512, blank=True, null=True)
user_name = models.CharField(max_length=512, blank=True, null=True)
def __str__(self):
return f'{self.title}'
| 2.203125 | 2 |
assignment/users/admin.py | LongNKCoder/SD4456_Python_Assignment_2 | 0 | 3922 | from django.contrib import admin
from users.models import Friendship
admin.site.register(Friendship)
# Register your models here.
| 1.398438 | 1 |
python/Word/demo_doc.py | davidgjy/arch-lib | 0 | 3923 | import docx
doc = docx.Document('demo.docx')
print('paragraphs number: %s' % len(doc.paragraphs))
print('1st paragraph: %s' % doc.paragraphs[0].text)
print('2nd paragraph: %s' % doc.paragraphs[1].text)
print('2nd paragraph runs: %s' % len(doc.paragraphs[1].runs))
print('1st run of 2nd paragraph: %s' % doc.paragraphs[1].runs[0].text)
print('2nd run of 2nd paragraph: %s' % doc.paragraphs[1].runs[1].text)
print('3rd run of 2nd paragraph: %s' % doc.paragraphs[1].runs[2].text)
print('4th run of 2nd paragraph: %s' % doc.paragraphs[1].runs[3].text)
| 3.390625 | 3 |
src/GL/sim/gql_ql_sims_ml_analysis.py | kylmcgr/RL-RNN-SURF | 2 | 3924 | <reponame>kylmcgr/RL-RNN-SURF<filename>src/GL/sim/gql_ql_sims_ml_analysis.py
# Analysis the data generated from on policy simulations of QL, QLP and GQL.
from BD.sim.sims import sims_analysis, merge_sim_files, extract_run_rew
from BD.util.paths import Paths
def sims_analysis_BD():
input_folder = Paths.rest_path + 'archive/beh/qlp-ml-opt/qlp-ml/'
sims_analysis(input_folder,
Paths.local_path + 'BD/to_graph_data/qlp_ml_onpolicy__stats.csv',
lambda conf: True
)
input_folder = Paths.rest_path + 'archive/beh/ql-ml-opt/ql-ml/'
sims_analysis(input_folder,
Paths.local_path + 'BD/to_graph_data/ql_ml_onpolicy_stats.csv',
lambda conf: True
)
def sims_analysis_GQL_BD():
input_folder = Paths.rest_path + 'archive/beh/gql-ml-opt/gql-ml/'
sims_analysis(input_folder,
Paths.local_path + 'BD/to_graph_data/gql_ml_onpolicy_stats.csv',
lambda conf: True
)
input_folder = Paths.rest_path + 'archive/beh/gql10d-ml-opt/gql10d-ml/'
sims_analysis(input_folder,
Paths.local_path + 'BD/to_graph_data/gql10d_ml_onpolicy_stats.csv',
lambda conf: True
)
if __name__ == '__main__':
sims_analysis_BD()
sims_analysis_GQL_BD()
data = merge_sim_files(lambda x: True, Paths.rest_path + 'archive/beh/gql10d-ml-opt/gql10d-ml/')
all_trials = extract_run_rew(data)
output_file = Paths.local_path + 'BD/to_graph_data/gql10d_all_data_ml.csv'
all_trials.to_csv(output_file, header=True)
data = merge_sim_files(lambda x: True, Paths.rest_path + 'archive/beh/gql-ml-opt/gql-ml/')
all_trials = extract_run_rew(data)
output_file = Paths.local_path + 'BD/to_graph_data/gql_all_data_ml.csv'
all_trials.to_csv(output_file, header=True)
| 1.8125 | 2 |
scripts/randomize_sw2_seed.py | epichoxha/nanodump | 0 | 3925 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import glob
import random
import struct
def get_old_seed():
with open('include/syscalls.h') as f:
code = f.read()
match = re.search(r'#define SW2_SEED (0x[a-fA-F0-9]{8})', code)
assert match is not None, 'SW2_SEED not found!'
return match.group(1)
def replace_seed(old_seed, new_seed):
with open('include/syscalls.h') as f:
code = f.read()
code = code.replace(
f'#define SW2_SEED {old_seed}',
f'#define SW2_SEED 0x{new_seed:08X}',
1
)
with open('include/syscalls.h', 'w') as f:
f.write(code)
def get_function_hash(seed, function_name, is_syscall=True):
function_hash = seed
function_name = function_name.replace('_', '')
if is_syscall and function_name[:2] == 'Nt':
function_name = 'Zw' + function_name[2:]
name = function_name + '\0'
ror8 = lambda v: ((v >> 8) & (2 ** 32 - 1)) | ((v << 24) & (2 ** 32 - 1))
for segment in [s for s in [name[i:i + 2] for i in range(len(name))] if len(s) == 2]:
partial_name_short = struct.unpack('<H', segment.encode())[0]
function_hash ^= partial_name_short + ror8(function_hash)
return function_hash
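# Worked example: hashes are seed-dependent, so only the call shape matters
# here. Nt* names are hashed via their Zw* equivalents (see the is_syscall
# branch above), e.g. with the sample seed commented out in main():
#
#     h = get_function_hash(0x1337C0DE, 'NtClose')  # hashed as 'ZwClose\0'
#     print(f'0x{h:08X}')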
def replace_syscall_hashes(seed):
with open('source/syscalls.c') as f:
code = f.read()
regex = re.compile(r'__declspec\(naked\) NTSTATUS (Nt[^(]+)')
syscall_names = re.findall(regex, code)
syscall_names = set(syscall_names)
syscall_definitions = code.split('#elif defined(__GNUC__)')[3]
for syscall_name in syscall_names:
regex = re.compile('NTSTATUS ' + syscall_name + '\\(.*?"mov ecx, (0x[A-Fa-f0-9]{8})', re.DOTALL)
match = re.search(regex, syscall_definitions)
assert match is not None, f'hash of syscall {syscall_name} not found!'
old_hash = match.group(1)
new_hash = get_function_hash(seed, syscall_name)
print(f'{syscall_name} -> {old_hash} - 0x{new_hash:08X}')
code = code.replace(
old_hash,
f'0x{new_hash:08X}'
)
with open('source/syscalls.c', 'w') as f:
f.write(code)
with open('source/syscalls-asm.asm') as f:
code = f.read()
for syscall_name in syscall_names:
regex = re.compile(syscall_name + ' PROC.*?mov ecx, 0([A-Fa-f0-9]{8})h', re.DOTALL)
match = re.search(regex, code)
assert match is not None, f'hash of syscall {syscall_name} not found!'
old_hash = match.group(1)
new_hash = get_function_hash(seed, syscall_name)
code = code.replace(
f'0{old_hash}h',
f'0{new_hash:08X}h',
1
)
with open('source/syscalls-asm.asm', 'w') as f:
f.write(code)
def replace_dinvoke_hashes(seed):
for header_file in glob.glob("include/**/*.h", recursive=True):
with open(header_file) as f:
code = f.read()
regex = re.compile(r'#define (\w+)_SW2_HASH (0x[a-fA-F0-9]{8})')
matches = re.findall(regex, code)
for function_name, old_hash in matches:
new_hash = get_function_hash(seed, function_name, is_syscall=False)
code = code.replace(
f'#define {function_name}_SW2_HASH {old_hash}',
f'#define {function_name}_SW2_HASH 0x{new_hash:08X}',
1
)
if matches:
with open(header_file, 'w') as f:
f.write(code)
def main():
new_seed = random.randint(2 ** 28, 2 ** 32 - 1)
#new_seed = 0x1337c0de
old_seed = get_old_seed()
replace_seed(old_seed, new_seed)
replace_syscall_hashes(new_seed)
replace_dinvoke_hashes(new_seed)
if os.name == 'nt':
print('done! recompile with:\nnmake -f Makefile.msvc')
else:
print('done! recompile with:\nmake -f Makefile.mingw')
if __name__ == '__main__':
main()
| 2.4375 | 2 |
checker/checker/executer.py | grimpy/hexa-a | 3 | 3926 | <filename>checker/checker/executer.py
from subprocess import run, PIPE, TimeoutExpired, CompletedProcess
from codes import exitcodes
def _error_decode(response):
stderr = ""
if response.returncode:
if response.returncode < 0:
errmsg = exitcodes.get(abs(response.returncode), "Unknown Error")
if isinstance(errmsg, dict):
errmsg = errmsg["descr"]
else:
errmsg = response.stderr
stderr = "Exit code ({}): {}".format(abs(response.returncode), errmsg)
return response.returncode, stderr
def execute(cmd, workdir=None, timeout=60):
cmd = ["/bin/bash", "-c", cmd]
try:
response = run(
cmd,
stderr=PIPE,
stdout=PIPE,
cwd=workdir,
timeout=timeout,
universal_newlines=True,
)
except TimeoutExpired:
response = CompletedProcess(
args=cmd,
returncode=124,
stderr="Timeout"
)
except:
response = CompletedProcess(
args=cmd,
returncode=-1,
stderr="Internal Checker Error"
)
response.stdout = "" if not response.stdout else str(response.stdout)
response.returncode, response.stderr = _error_decode(response)
return response | 2.328125 | 2 |
pfm/pf_command/update.py | takahi-i/pfm | 9 | 3927 | <reponame>takahi-i/pfm
import json
from pfm.pf_command.base import BaseCommand
from pfm.util.log import logger
class UpdateCommand(BaseCommand):
def __init__(self, name, forward_type,
remote_host, remote_port, local_port,
ssh_server, server_port, login_user, config):
super(UpdateCommand, self).__init__(config)
self.name = name
self.forward_type = forward_type
self.remote_host = remote_host
self.remote_port = remote_port
self.local_port = local_port
self.ssh_server = ssh_server
self.server_port = server_port
self.login_user = login_user
def run(self):
f = open(self.config_path, 'r')
targets = json.load(f)
if self.name in targets:
target = targets[self.name]
self.update(target)
else:
logger.warn("Port forward setting named " + self.name + "is not registered")
# write the target
f = open(self.config_path, 'w')
f.write(json.dumps(targets, indent=4))
f.close()
def update(self, target):
if self.forward_type is not None:
target["type"] = self.forward_type
if self.remote_host is not None:
target["remote_host"] = self.remote_host
if self.remote_port is not None:
target["remote_port"] = self.remote_port
if self.local_port is not None:
target["local_port"] = self.local_port
if self.ssh_server is not None:
target["ssh_server"] = self.ssh_server
if self.server_port is not None:
target["server_port"] = self.server_port
if self.login_user is not None:
target["login_user"] = self.login_user
| 2.578125 | 3 |
tests/atfork/test_atfork.py | luciferliu/xTools | 0 | 3928 | <reponame>luciferliu/xTools
#!/usr/bin/python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Licensed to the PSF under a Contributor Agreement.
#
# Author: <NAME> <<EMAIL>>
"""Tests for atfork."""
import os
import sys
import importlib
from xTool.compat import StringIO
import traceback
import unittest
from xTool import atfork
class AtforkTest(unittest.TestCase):
def setUp(self):
atfork.monkeypatch_os_fork_functions()
self.calls = []
self.orig_stderr = sys.stderr
self.assertFalse(
atfork._fork_lock.locked(),
"atfork._fork_lock not released by an earlier test!",
)
# Unregister calls registered by earlier tests.
atfork._prepare_call_list = []
atfork._parent_call_list = []
atfork._child_call_list = []
def tearDown(self):
# Un-monkeypatch the os module. ook.
global os
importlib.reload(os)
sys.stderr = self.orig_stderr
def _pre(self):
self.calls.append(self._pre)
def _parent(self):
self.calls.append(self._parent)
def _child(self):
self.calls.append(self._child)
def _other(self):
self.calls.append(self._other)
def _raise_pre(self):
self._pre()
raise RuntimeError("This as the first parent error expected.")
def _raise_parent(self):
self._parent()
raise RuntimeError("This as the second parent error expected.")
def _raise_child(self):
self._child()
raise RuntimeError("This child error is expected.")
def _assert_expected_parent_stderr(self, error_msg):
self.assertTrue(("first parent error" in error_msg), error_msg)
self.assertTrue(("second parent error" in error_msg), error_msg)
self.assertTrue(
(error_msg.index("first parent") < error_msg.index("second parent")),
"first and second errors out of order in:\n%r" % error_msg,
)
self.assertEqual(2, error_msg.count("RuntimeError:"))
def _assert_expected_child_stderr(self, error_msg):
self.assertTrue("child error is expected" in error_msg)
self.assertEqual(1, error_msg.count("RuntimeError:"), error_msg)
def test_monkeypatching(self):
if not hasattr(os, "fork"):
return # Nothing to test on this platform.
self.assertTrue(callable(atfork._orig_os_fork))
self.assertTrue(callable(atfork._orig_os_forkpty))
# The os module was patched, these should not be equal.
self.assertNotEqual(atfork._orig_os_fork, os.fork)
self.assertNotEqual(atfork._orig_os_forkpty, os.forkpty)
# These are the wrapped versions we patched in.
self.assertEqual(atfork.os_fork_wrapper, os.fork)
self.assertEqual(atfork.os_forkpty_wrapper, os.forkpty)
def test_register_atfork_calls(self):
# Test with both positional and keyword arguments as well as None.
atfork.atfork(self._pre, self._parent, self._child)
atfork.atfork(prepare=self._pre)
atfork.atfork(parent=self._parent)
atfork.atfork(child=self._child)
self.assertEqual([self._pre] * 2, atfork._prepare_call_list)
self.assertEqual([self._parent] * 2, atfork._parent_call_list)
self.assertEqual([self._child] * 2, atfork._child_call_list)
if __debug__:
self.assertRaises(AssertionError, atfork.atfork, 1, 2, 3)
def test_call_atfork_list(self):
self.assertEqual([], atfork._call_atfork_list([]))
self.assertEqual([], atfork._call_atfork_list([self._pre]))
def raise_something():
raise RuntimeError()
errors = atfork._call_atfork_list([raise_something] * 2)
self.assertEqual(2, len(errors))
for exc_info in errors:
self.assertEqual(RuntimeError, exc_info[0])
def _test_a_fork_wrapper(self, fork_func):
sys.stderr = StringIO() # restored in tearDown
atfork.atfork(self._raise_pre, self._raise_parent, self._raise_child)
atfork.atfork(self._other, self._other, self._other)
pid = fork_func()
if pid == 0:
try:
try:
self.assertEqual(
[self._pre, self._other, self._child, self._other], self.calls
)
self.assertFalse(atfork._fork_lock.locked())
self._assert_expected_child_stderr(sys.stderr.getvalue())
except BaseException:
try:
traceback.print_exc()
self.orig_stderr.write(sys.stderr.getvalue())
finally:
os._exit(1)
finally:
os._exit(0)
else:
self.assertEqual(
[self._pre, self._other, self._parent, self._other], self.calls
)
self.assertFalse(atfork._fork_lock.locked())
self.assertEqual(0, os.waitpid(pid, 0)[1], "error in child")
self._assert_expected_parent_stderr(sys.stderr.getvalue())
def test_os_fork_wrapper(self):
self._test_a_fork_wrapper(os.fork)
def test_os_forkpty_wrapper(self):
self._test_a_fork_wrapper(lambda: os.forkpty()[0])
def _test_fork_failure(self, orig_fork_attrname, fork_wrapper):
def failing_fork():
raise OSError(0, "testing a fork failure")
atfork.atfork(self._pre, self._parent, self._child)
orig_orig_fork = getattr(atfork, orig_fork_attrname)
try:
setattr(atfork, orig_fork_attrname, failing_fork)
try:
pid = fork_wrapper()
if pid == 0:
# This should never happen but do this just in case.
os._exit(0)
except OSError:
self.assertEqual([self._pre, self._parent], self.calls)
else:
self.fail("Fork failed to fail!")
finally:
setattr(atfork, orig_fork_attrname, orig_orig_fork)
def test_fork_wrapper_failure(self):
self._test_fork_failure("_orig_os_fork", atfork.os_fork_wrapper)
def test_forkpty_wrapper_failure(self):
self._test_fork_failure("_orig_os_forkpty", atfork.os_forkpty_wrapper)
def test_multiple_monkeypatch_safe(self):
self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper)
self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper)
atfork.monkeypatch_os_fork_functions()
self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper)
self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper)
atfork.monkeypatch_os_fork_functions()
self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper)
self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper)
if __name__ == "__main__":
unittest.main()
| 1.9375 | 2 |
IO/files/handling.py | brendano257/Zugspitze-Schneefernerhaus | 0 | 3929 | import os
from pathlib import Path
__all__ = ['list_files_recur', 'scan_and_create_dir_tree', 'get_all_data_files', 'get_subsubdirs']
def list_files_recur(path):
"""
Cheater function that wraps path.rglob().
:param Path path: path to list recursively
:return list: list of Path objects
"""
files = []
for file in path.rglob('*'):
files.append(file)
return files
def scan_and_create_dir_tree(path, file=True):
"""
Creates all the necessary directories for the file at the end of path to be created.
When specified with a filepath to a file or folder, it creates directories until the path is valid.
:param Path path: must end with a filename, else the final directory won't be created
:param bool file: Boolean, does the given path end with a file? If not, path.parts[-1] will be created
:return None:
"""
parts = path.parts
path_to_check = Path(parts[0])
for i in range(1, len(parts)):
if not path_to_check.exists():
path_to_check.mkdir()
path_to_check = path_to_check / parts[i]
if file:
pass
else:
if not path_to_check.exists():
path_to_check.mkdir()
def get_all_data_files(path, filetype):
"""
Recursively search the given directory for .xxx files.
:param Path path: Path to search
:param str filetype: str, ".type" of file to search for
:return list: list of file-like Path objects
"""
files = list_files_recur(path)
files[:] = [file for file in files if filetype in file.name]
return files
def get_subsubdirs(path):
"""
Get the second-level subdirectories of the given path.
If given path 'a/b', a sample return would be ['a/b/c/d', 'a/b/c/d2', 'a/b/c/etc']
:param str path:
:return list: list containing Path instances for all paths found two levels below the supplied path
"""
leveltwo_subdirs = []
immediate_subdirs = [os.scandir(subdir) for subdir in os.scandir(path) if Path(subdir).is_dir()]
for scan in immediate_subdirs:
for subdir in scan:
leveltwo_subdirs.append(Path(subdir)) if Path(subdir).is_dir() else None
return leveltwo_subdirs
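# Usage sketch (paths below are placeholders, not real project paths):
#     files = get_all_data_files(Path('/data/project'), '.txt')
#     subdirs = get_subsubdirs('/data/project')
#     scan_and_create_dir_tree(Path('/data/project/out/results.csv'), file=True)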
| 3.5625 | 4 |
tools/mo/openvino/tools/mo/ops/detection_output_onnx.py | ryanloney/openvino-1 | 1,127 | 3930 | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value, shape_array, set_input_shapes
from openvino.tools.mo.ops.op import Op
class ExperimentalDetectronDetectionOutput(Op):
op = 'ExperimentalDetectronDetectionOutput'
enabled = True
def __init__(self, graph, attrs):
mandatory_props = dict(
type=self.op,
op=self.op,
version='opset6',
infer=self.infer,
reverse_infer=self.reverse_infer,
type_infer=self.type_infer,
in_ports_count=4,
out_ports_count=3,
)
super().__init__(graph, mandatory_props, attrs)
def backend_attrs(self):
return [
('class_agnostic_box_regression', lambda node: str(bool(node['class_agnostic_box_regression'])).lower()),
'max_detections_per_image',
'nms_threshold',
'num_classes',
'post_nms_count',
'score_threshold',
'max_delta_log_wh',
('deltas_weights', lambda node: ','.join(map(str, node['deltas_weights'])))]
@staticmethod
def infer(node):
rois_num = node.max_detections_per_image
# boxes
node.out_port(0).data.set_shape([rois_num, 4])
# classes, scores, batch indices
# We use range(1, 1 + max(node.out_ports().keys())) instead of range(1, 3), because there are incorrectly
# generated models where ExperimentalDetectronDetectionOutput has 4 outputs.
for port_ind in range(1, 1 + max(node.out_ports().keys())):
if not node.out_port(port_ind).disconnected():
node.out_port(port_ind).data.set_shape([rois_num])
@staticmethod
def type_infer(node):
in_data_type = node.in_port(0).get_data_type()
node.out_port(0).set_data_type(in_data_type)
node.out_port(1).set_data_type(np.int32) # the second output contains class indices
node.out_port(2).set_data_type(in_data_type)
if node.is_out_port_connected(3):
node.out_port(3).set_data_type(np.int32) # the fourth output contains batch indices
@staticmethod
def reverse_infer(node):
set_input_shapes(node,
shape_array([dynamic_dimension_value, 4]),
shape_array([dynamic_dimension_value, node['num_classes'] * 4]),
shape_array([dynamic_dimension_value, node['num_classes']]),
shape_array([1, 3]))
| 1.796875 | 2 |
driver.py | FahimMahmudJoy/Physionet_2019_Sepsis | 1 | 3931 | #!/usr/bin/env python
import numpy as np, os, sys
from get_sepsis_score import load_sepsis_model, get_sepsis_score
def load_challenge_data(file):
with open(file, 'r') as f:
header = f.readline().strip()
column_names = header.split('|')
data = np.loadtxt(f, delimiter='|')
# Ignore SepsisLabel column if present.
if column_names[-1] == 'SepsisLabel':
column_names = column_names[:-1]
data = data[:, :-1]
return data
def save_challenge_predictions(file, scores, labels):
with open(file, 'w') as f:
f.write('PredictedProbability|PredictedLabel\n')
for (s, l) in zip(scores, labels):
f.write('%g|%d\n' % (s, l))
if __name__ == '__main__':
# Parse arguments.
if len(sys.argv) != 3:
raise Exception('Include the input and output directories as arguments, e.g., python driver.py input output.')
input_directory = sys.argv[1]
output_directory = sys.argv[2]
# Find files.
files = []
for f in os.listdir(input_directory):
if os.path.isfile(os.path.join(input_directory, f)) and not f.lower().startswith('.') and f.lower().endswith('psv'):
files.append(f)
if not os.path.isdir(output_directory):
os.mkdir(output_directory)
# Load model.
model = load_sepsis_model()
print(model)
# Iterate over files.
for f in files:
# Load data.
input_file = os.path.join(input_directory, f)
data = load_challenge_data(input_file)
# print(type(data))
# Make predictions.
num_rows = len(data)
scores = np.zeros(num_rows)
labels = np.zeros(num_rows)
for t in range(num_rows):
current_data = data[:t+1]
current_score, current_label = get_sepsis_score(current_data, model)
scores[t] = current_score
labels[t] = current_label
# Save results.
output_file = os.path.join(output_directory, f)
save_challenge_predictions(output_file, scores, labels)
| 2.875 | 3 |
src/LspRuntimeMonitor.py | TafsirGna/ClspGeneticAlgorithm | 0 | 3932 | <gh_stars>0
#!/usr/bin/python3.5
# -*- coding: utf-8 -*-
from collections import defaultdict
from threading import Thread
from time import perf_counter, time
from LspLibrary import bcolors
import time
import matplotlib.pyplot as plt
class LspRuntimeMonitor:
"""
"""
clockStart = None
clockEnd = None
mutation_strategy = "simple_mutation"
popsData = defaultdict(lambda: None)
outputString = ""
outputFilePath = "data/output/output.txt"
verbose = False
running = True
def __init__(self) -> None:
"""
"""
pass
@classmethod
def duration(cls):
"""
"""
return f"{cls.clockEnd - cls.clockStart} second(s)"
@classmethod
def started(cls):
"""
"""
cls.running = True
LspRuntimeMonitor.clockStart = perf_counter()
print(f"{bcolors.OKGREEN}Processing input data.{bcolors.ENDC}")
# Thread(cls.waitingAnimation())
@classmethod
def ended(cls):
"""
"""
cls.running = False
LspRuntimeMonitor.clockEnd = perf_counter()
@classmethod
def output(cls, output):
"""
"""
cls.outputString += output
if cls.verbose:
print(output)
@classmethod
def saveOutput(cls):
"""
"""
f = open(cls.outputFilePath, "w")
f.write(cls.outputString)
f.close()
@classmethod
def report(cls):
"""
"""
# Duration
durationStatement = cls.duration()
cls.output(durationStatement)
# Saving all generated output to a default file
cls.saveOutput()
cls.plotData()
@classmethod
def plotData(cls):
"""
"""
print('-----------------------------------------')
print(cls.popsData)
data = list(cls.popsData.values())[0]
# Plots
# Plotting the evolution of the minimal cost over generations
plt.plot(list(range(len(data["max"]))), data["max"])
plt.ylabel("Population maximal cost")
plt.show()
# Plotting the evolution of the minimal cost over generations
plt.plot(list(range(len(data["min"]))), data["min"])
plt.ylabel("Population minimal cost")
plt.show()
@classmethod
def waitingAnimation(cls):
"""
"""
animation = "|/-\\"
idx = 0
# while thing_not_complete():
while cls.running:
print(animation[idx % len(animation)], end="\r")
idx += 1
time.sleep(0.1) | 2.4375 | 2 |
core/github/parsers/__init__.py | goranc/GraphYourCodeVulnerability | 0 | 3933 | from .python.parser import PythonParser
all_parsers = [PythonParser]
| 1.125 | 1 |
methylcheck/predict/sex.py | FoxoTech/methylcheck | 0 | 3934 | <reponame>FoxoTech/methylcheck<filename>methylcheck/predict/sex.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
#app
import methylcheck # uses .load; get_sex uses methylprep models too and detect_array()
import logging
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def _get_copy_number(meth,unmeth):
"""function to return copy number.
requires dataframes of methylated and
unmethylated values. can be raw OR corrected"""
# minfi R version:
# log2(getMeth(object) + getUnmeth(object))
return np.log2(meth+unmeth)
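# Illustrative sanity check (made-up intensities): a probe with meth=1000 and
# unmeth=1000 gives a copy number of log2(2000) ~= 10.97.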
def get_sex(data_source, array_type=None, verbose=False, plot=False, save=False,
on_lambda=False, median_cutoff= -2, include_probe_failure_percent=True,
poobah_cutoff=20, custom_label=None, return_fig=False, return_labels=False):
"""This will calculate and predict the sex of each sample.
inputs:
=======
the "data_source" can be any one of:
path -- to a folder with csv data that contains processed sample data
path -- to a folder with the 'meth_values.pkl' and 'unmeth_values.pkl' dataframes
path -- to a folder also containing samplesheet pkl and poobah_values.pkl, if you want to compare predicted sex with actual sex.
data_containers -- object created from methylprep.run_pipeline() or methylcheck.load(path, 'meth')
tuple of (meth, unmeth) dataframes
array_type (string)
enum: {'27k','450k','epic','epic+','mouse'}
if not specified, it will load the data from data_source and determine the array for you.
median_cutoff
the minimum difference in the medians of X and Y probe copy numbers to assign male or female
(copied from the minfi sex predict function)
include_probe_failure_percent:
True: includes poobah percent per sample as column in the output table and on the plot.
Note: you must supply a 'path' as data_source to include poobah in plots.
poobah_cutoff
The maximum percent of sample probes that can fail before the sample fails. Default is 20 (percent)
Has no effect if `include_probe_failure_percent` is False.
plot
True: creates a plot, with option to `save` as image or `return_fig`.
save
True: saves the plot, if plot is True
return_fig
If True, returns a pyplot figure instead of a dataframe. Default is False.
Note: return_fig will not show a plot on screen.
return_labels: (requires plot == True)
When using poobah_cutoff, the figure only includes A-Z,1...N labels on samples on plot to make it easier to read.
So to get what sample_ids these labels correspond to, you can rerun the function with return_labels=True and it will
skip plotting and just return a dictionary with sample_ids and these labels, to embed in a PDF report if you like.
custom_label:
Option to provide a dictionary with keys as sample_ids and values as labels to apply to samples.
e.g. add more data about samples to the multi-dimensional QC plot
while providing a filepath is the easiest way, you can also pass in a data_containers object,
a list of data_containers containing raw meth/unmeth values, instead. This object is produced
by methylprep.run_pipeline, or by using methylcheck.load(filepath, format='meth') and lets you
customize the import if your files were not prepared using methylprep (non-standand CSV columns, for example)
If a `poobah_values.pkl` file can be found in path, the dataframe returned will also include
percent of probes for X and Y chromosomes that failed quality control, and warn the user if any did.
This feature won't work if a containers object or tuple of dataframes is passed in, instead of a path.
Note: ~90% of Y probes should fail if the sample is female. That chromosome is missing."""
allowed_array_types = {'27k','450k','epic','epic+','mouse'}
try:
from methylprep.files import Manifest
from methylprep.models import ArrayType
except ImportError:
raise ImportError("This function requires methylprep to be installed (pip3 install `methylprep`)")
(data_source_type, data_source) = methylcheck.load_processed._data_source_type(data_source)
# data_source_type is one of {'path', 'container', 'control', 'meth_unmeth_tuple'}
poobah=None
if data_source_type in ('path'):
# this will look for saved pickles first, then csvs or parsing the containers (which are both slower)
# the saved pickles function isn't working for batches yet.
try:
meth, unmeth = methylcheck.qc_plot._get_data(
data_containers=None, path=data_source,
compare=False, noob=False, verbose=False)
except Exception as e:
meth, unmeth = methylcheck.qc_plot._get_data(
data_containers=None, path=data_source,
compare=False, noob=True, verbose=False)
if include_probe_failure_percent == True and Path(data_source,'poobah_values.pkl').expanduser().exists():
poobah = pd.read_pickle(Path(data_source,'poobah_values.pkl').expanduser())
elif data_source_type in ('container'):
# this will look for saved pickles first, then csvs or parsing the containers (which are both slower)
# the saved pickles function isn't working for batches yet.
meth, unmeth = methylcheck.qc_plot._get_data(
data_containers=data_source, path=None,
compare=False, noob=False, verbose=False)
elif data_source_type == 'meth_unmeth_tuple':
(meth, unmeth) = data_source
if len(meth) != len(unmeth):
raise ValueError(f"WARNING: probe count mismatch: meth {len(meth)} -- unmeth {len(unmeth)}")
    if array_type is None:
# get list of X any Y probes - using .methylprep_manifest_files (or MANIFEST_DIR_PATH_LAMBDA) and auto-detected array here
array_type = ArrayType(methylcheck.detect_array(meth, on_lambda=on_lambda))
elif isinstance(array_type,str):
if array_type in allowed_array_types:
array_type = ArrayType(array_type)
else:
raise ValueError(f"Your array_type must be one of these: {allowed_array_types} or None.")
if verbose:
LOGGER.debug(array_type)
LOGGER.setLevel(logging.WARNING)
manifest = Manifest(array_type, on_lambda=on_lambda, verbose=verbose)._Manifest__data_frame # 'custom', '27k', '450k', 'epic', 'epic+'
LOGGER.setLevel(logging.INFO)
x_probes = manifest.index[manifest['CHR']=='X']
y_probes = manifest.index[manifest['CHR']=='Y']
if verbose:
LOGGER.info(f"Found {len(x_probes)} X and {len(y_probes)} Y probes")
# dataframes of meth and unmeth values for the sex chromosomes
x_meth = meth[meth.index.isin(x_probes)]
x_unmeth = unmeth[unmeth.index.isin(x_probes)]
y_meth = meth[meth.index.isin(y_probes)]
y_unmeth = unmeth[unmeth.index.isin(y_probes)]
# create empty dataframe for output
output = pd.DataFrame(index=[s for s in meth.columns], columns=['x_median','y_median','predicted_sex'])
# get median values for each sex chromosome for each sample
x_med = _get_copy_number(x_meth,x_unmeth).median()
y_med = _get_copy_number(y_meth,y_unmeth).median()
# populate output dataframe with values
output['x_median'] = output.index.map(x_med)
output['y_median'] = output.index.map(y_med)
# compute difference
median_difference = output['y_median'] - output['x_median']
# median cutoff - can be manipulated by user --- default = -2 --- used to predict sex
sex0 = ['F' if x < median_cutoff else 'M' for x in median_difference]
# NOTE for testing: GSE85566/GPL13534 (N=120) has 4 samples that are predicted as wrong sex when using -2, but work at -0.5.
# populate dataframe with predicted sex
output['predicted_sex'] = sex0
output = output.round(1)
# if poobah_df exists, calculate percent X and Y probes that failed
sample_failure_percent = {} # % of ALL probes in sample, not just X or Y
if include_probe_failure_percent == True and isinstance(poobah, pd.DataFrame):
p_value_cutoff = 0.05
X_col = []
Y_col = []
failed_samples = []
for column in poobah.columns:
sample_failure_percent[column] = round(100*len(poobah[column][poobah[column] >= p_value_cutoff].index) / len(poobah.index),1)
failed_probe_names = poobah[column][poobah[column] >= p_value_cutoff].index
failed_x_probe_names = list(set(failed_probe_names) & set(x_probes))
failed_y_probe_names = list(set(failed_probe_names) & set(y_probes))
X_percent = round(100*len(failed_x_probe_names)/poobah.index.isin(list(x_probes)).sum(),1)
Y_percent = round(100*len(failed_y_probe_names)/poobah.index.isin(list(y_probes)).sum(),1)
X_col.append(X_percent)
Y_col.append(Y_percent)
if X_percent > 10:
failed_samples.append(column)
output['X_fail_percent'] = X_col #output.index.map(X_col)
output['Y_fail_percent'] = Y_col #output.index.map(Y_col)
if failed_samples != []:
LOGGER.warning(f"{len(failed_samples)} samples had >10% of X probes fail p-value probe detection. Predictions for these may be unreliable:")
LOGGER.warning(f"{failed_samples}")
if data_source_type in ('path'):
output = _fetch_actual_sex_from_sample_sheet_meta_data(data_source, output)
if plot == True:
fig = _plot_predicted_sex(data=output, # 'x_median', 'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent'
sample_failure_percent=sample_failure_percent,
median_cutoff=median_cutoff,
include_probe_failure_percent=include_probe_failure_percent,
verbose=verbose,
save=save,
poobah_cutoff=poobah_cutoff,
custom_label=custom_label,
data_source_type=data_source_type,
data_source=data_source,
return_fig=return_fig,
return_labels=return_labels,
)
if return_labels:
return fig # these are a lookup dictionary of labels
if return_fig:
return fig
return output
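# Minimal usage sketch; the folder path is hypothetical and just needs to hold
# methylprep output (meth/unmeth pickles, optionally poobah_values.pkl and a
# sample sheet for the actual-sex comparison):
#
#   from methylcheck.predict.sex import get_sex
#   summary = get_sex('~/GSE00000_processed', plot=False)
#   print(summary[['predicted_sex', 'x_median', 'y_median']].head())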
def _plot_predicted_sex(data=pd.DataFrame(),
sample_failure_percent={},
median_cutoff= -2,
include_probe_failure_percent=True,
verbose=False,
save=False,
poobah_cutoff=20, #%
custom_label=None,
data_source_type=None,
data_source=None,
return_fig=False,
return_labels=False):
"""
data columns: ['x_median', 'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent']
- color is sex, pink or blue
- marker circle size will be larger and more faded if poobah values are worse, smaller and darker if low variance. Like a probability cloud.
- sample text is (ID, delta age)
- sex mismatches are X, matched samples are circles (if samplesheet contains actual sex data)
- omits labels for samples that have LOW failure rates, but shows IDs when failed
- adds legend of sketchy samples and labels
- show delta age on labels (using custom column dict)
- unit tests with custom label and without, and check that controls_report still works with this function
- save_fig
- return_labels, returns a lookup dict instead of plot
if there is a "custom_label" dict passed in, such as (actual_age - predicted_age), it simply adds those this label to the marker text labels.
Dicts must match the data DF index.
"""
if sample_failure_percent != {} and set(sample_failure_percent.keys()) == set(data.index):
data['sample_failure_percent'] = pd.Series(sample_failure_percent)
    elif sample_failure_percent != {}:
        LOGGER.warning("sample_failure_percent index did not align with output data index")
#sns.set_theme(style="white")
show_mismatches = None if 'sex_matches' not in data.columns else "sex_matches"
if show_mismatches:
data["sex_matches"] = data["sex_matches"].map({0:"Mismatch", 1:"Match"})
show_failure = None if 'sample_failure_percent' not in data.columns else "sample_failure_percent"
sample_sizes = (20, 600)
if show_failure: # avoid sizing dots with narrow range; gives false impression of bad samples.
poobah_range = data["sample_failure_percent"].max() - data["sample_failure_percent"].min()
if poobah_range < poobah_cutoff/2:
show_failure = None
sample_sizes = (40,40)
custom_palette = sns.set_palette(sns.color_palette(['#FE6E89','#0671B7']))
# if only one sex, make sure male is blue; female is pink
# if hasattr(output, 'actual_sex') and set(output.actual_sex) == set('M')
# if first value to be plotted is male, change palette
if hasattr(data, 'predicted_sex') and list(data.predicted_sex)[0] == 'M':
custom_palette = sns.set_palette(sns.color_palette(['#0671B7','#FE6E89']))
fig = sns.relplot(data=data,
x='x_median',
y='y_median',
hue="predicted_sex",
size=show_failure,
style=show_mismatches,
sizes=sample_sizes,
alpha=.5,
palette=custom_palette,
height=8,
aspect=1.34)
ax = fig.axes[0,0]
fig.fig.subplots_adjust(top=.95)
# for zoomed-in plots with few points close together, set the min scale to be at least 2 units.
yscale = plt.gca().get_ylim()
xscale = plt.gca().get_xlim()
if abs(yscale[1]-yscale[0]) < 2.0:
ax.set_xlim(xmin=xscale[0]-1, xmax=xscale[1]+1)
ax.set_ylim(ymin=yscale[0]-1, ymax=yscale[1]+1)
    label_lookup = {index_val: chr(i + 65) if (i < 26) else str(i - 25) for i, index_val in enumerate(data.index)}  # A-Z, then 1...N
for idx,row in data.iterrows():
if "sample_failure_percent" in row and row['sample_failure_percent'] > poobah_cutoff:
label = f"{label_lookup[idx]}, {custom_label.get(idx)}" if isinstance(custom_label, dict) and custom_label.get(idx) else label_lookup[idx]
ax.text(row['x_median'], row['y_median'], label, horizontalalignment='center', fontsize=10, color='darkred')
else:
label = f"{custom_label.get(idx)}" if isinstance(custom_label, dict) else None
if label:
ax.text(row['x_median']+0.05, row['y_median']+0.05, label, horizontalalignment='center', fontsize=10, color='grey')
if return_labels:
plt.close() # release memory
return label_lookup
if "sample_failure_percent" in data.columns:
N_failed = len(data[data['sample_failure_percent'] > poobah_cutoff].index)
N_total = len(data['sample_failure_percent'].index)
ax.set_title(f"{N_failed} of {N_total} samples failed poobah, with at least {poobah_cutoff}% of probes failing")
else:
ax.set_title(f"Predicted sex based on matching X and Y probes.")
if save:
filepath = 'predicted_sexes.png' if data_source_type != 'path' else Path(data_source,'predicted_sexes.png').expanduser()
plt.savefig(filepath, bbox_inches="tight")
if return_fig:
return fig
plt.show()
def _fetch_actual_sex_from_sample_sheet_meta_data(filepath, output):
"""output is a dataframe with Sample_ID in the index. This adds actual_sex as a column and returns it."""
# controls_report() does the same thing, and only calls get_sex() with the minimum of data to be fast, because these are already loaded. Just passes in meth/unmeth data
# Sample sheet should have 'M' or 'F' in column to match predicted sex.
# merge actual sex into processed output, if available
file_patterns = {
'sample_sheet_meta_data.pkl': 'meta',
'*_meta_data.pkl': 'meta',
'*samplesheet*.csv': 'meta',
'*sample_sheet*.csv': 'meta',
}
loaded_files = {}
for file_pattern in file_patterns:
for filename in Path(filepath).expanduser().rglob(file_pattern):
if '.pkl' in filename.suffixes:
loaded_files['meta'] = pd.read_pickle(filename)
break
if '.csv' in filename.suffixes:
loaded_files['meta'] = pd.read_csv(filename)
break
if len(loaded_files) == 1:
# methylprep v1.5.4-6 was creating meta_data files with two Sample_ID columns. Check and fix here:
# methylcheck 0.7.9 / prep 1.6.0 meta_data lacking Sample_ID when sample_sheet uses alt column names and gets replaced.
if any(loaded_files['meta'].columns.duplicated()):
loaded_files['meta'] = loaded_files['meta'].loc[:, ~loaded_files['meta'].columns.duplicated()]
LOGGER.info("Removed a duplicate Sample_ID column in samplesheet")
if 'Sample_ID' in loaded_files['meta'].columns:
loaded_files['meta'] = loaded_files['meta'].set_index('Sample_ID')
elif 'Sentrix_ID' in loaded_files['meta'].columns and 'Sentrix_Position' in loaded_files['meta'].columns:
loaded_files['meta']['Sample_ID'] = loaded_files['meta']['Sentrix_ID'].astype(str) + '_' + loaded_files['meta']['Sentrix_Position'].astype(str)
loaded_files['meta'] = loaded_files['meta'].set_index('Sample_ID')
else:
raise ValueError("Your sample sheet must have a Sample_ID column, or (Sentrix_ID and Sentrix_Position) columns.")
# fixing case of the relevant column
renamed_column = None
if ('Gender' in loaded_files['meta'].columns or 'Sex' in loaded_files['meta'].columns):
if 'Gender' in loaded_files['meta'].columns:
renamed_column = 'Gender'
elif 'Sex' in loaded_files['meta'].columns:
renamed_column = 'Sex'
else:
renamed_columns = {col:(col.title() if col.lower() in ('sex','gender') else col) for col in loaded_files['meta'].columns}
loaded_files['meta'] = loaded_files['meta'].rename(columns=renamed_columns)
if 'Gender' in renamed_columns.values():
renamed_column = 'Gender'
elif 'Sex' in renamed_columns.values():
renamed_column = 'Sex'
if renamed_column is not None:
# next, ensure samplesheet Sex/Gender (Male/Female) are recoded as M/F; controls_report() does NOT do this step, but should.
sex_values = set(loaded_files['meta'][renamed_column].unique())
#print('sex_values', sex_values)
if not sex_values.issubset(set(['M','F'])): # subset, because samples might only contain one sex
if 'Male' in sex_values or 'Female' in sex_values:
loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'Male':'M', 'Female':'F'})
elif 'male' in sex_values or 'female' in sex_values:
loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'male':'M', 'female':'F'})
elif 'MALE' in sex_values or 'FEMALE' in sex_values:
loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'MALE':'M', 'FEMALE':'F'})
elif 'm' in sex_values or 'f' in sex_values:
loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'m':'M', 'f':'F'})
else:
raise ValueError(f"Cannot compare with predicted sex because actual sexes listed in your samplesheet are not understood (expecting M or F): (found {sex_values})")
output['actual_sex'] = None
output['sex_matches'] = None
for row in output.itertuples():
try:
actual_sex = str(loaded_files['meta'].loc[row.Index].get(renamed_column))
except KeyError:
if 'Sample_ID' in output.columns:
LOGGER.warning("Sample_ID was another column in your output DataFrame; Set that to the index when you pass it in.")
raise KeyError("Could not read actual sex from meta data to compare.")
if isinstance(actual_sex, pd.Series):
LOGGER.warning(f"Multiple samples matched actual sex for {row.Index}, because Sample_ID repeats in sample sheets. Only using first match, so matches may not be accurate.")
actual_sex = actual_sex[0]
if hasattr(row,'predicted_sex'):
sex_matches = 1 if actual_sex.upper() == str(row.predicted_sex).upper() else 0
else:
sex_matches = np.nan
output.loc[row.Index, 'actual_sex'] = actual_sex
output.loc[row.Index, 'sex_matches'] = sex_matches
else:
pass # no Sex/Gender column found in samplesheet
return output
| 2.375 | 2 |
backend/accounts/migrations/0003_auto_20201115_1537.py | mahmoud-batman/quizz-app | 0 | 3935 | # Generated by Django 3.1.2 on 2020-11-15 15:37
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20201115_1531'),
]
operations = [
migrations.AlterField(
model_name='customuser',
name='user_id',
            field=models.IntegerField(blank=True, null=True, unique=True, validators=[django.core.validators.MaxValueValidator(99999), django.core.validators.MinValueValidator(10000)]),
),
]
| 1.75 | 2 |
tests/test_basics.py | sirosen/git-fortune | 0 | 3936 | <reponame>sirosen/git-fortune<filename>tests/test_basics.py
import subprocess
from git_fortune._compat import fix_line_endings
from git_fortune.version import __version__
def test_help(capfd):
subprocess.check_call(["git-fortune", "-h"])
captured = capfd.readouterr()
assert (
fix_line_endings(
"""
A fortune-like command for showing git tips
Invoke it as 'git-fortune' or 'git fortune'
"""
)
in captured.out
)
def test_version(capfd):
subprocess.check_call(["git-fortune", "--version"])
captured = capfd.readouterr()
assert "git-fortune {}".format(__version__) in captured.out
def test_tip_boxformat(capfd):
subprocess.check_call(["git-fortune", "--id", "3"])
tip3boxbody = fix_line_endings(
"""\
+-------------------------------------------------------------------------------+
| GIT TIP #3 |
| |
| `git log --graph` can show you a tree-like representation of the git history. |
| |
| Try adding in `--oneline --decorate --all`. |
| |
+-------------------------------------------------------------------------------+
"""
)
captured = capfd.readouterr()
assert captured.out == tip3boxbody
def test_tip_plainformat(capfd):
subprocess.check_call(["git-fortune", "--format", "plain", "--id", "1"])
tip1plainbody = fix_line_endings(
"Modify your last commit before pushing with `git commit --amend`.\n"
)
captured = capfd.readouterr()
assert captured.out == tip1plainbody
def test_noargs(capfd):
"""just make sure it doesn't crashfail"""
subprocess.check_call(["git-fortune"])
captured = capfd.readouterr()
assert "GIT TIP #" in captured.out # from the box format
def test_category(capfd):
"""just make sure it doesn't crashfail"""
subprocess.check_call(["git-fortune", "--category", "diff"])
captured = capfd.readouterr()
assert "GIT TIP #" in captured.out # from the box format
def test_category_and_id_mutex(capfd):
ret = subprocess.call(["git-fortune", "--category", "diff", "--id", "3"])
assert ret == 2
captured = capfd.readouterr()
assert "" == captured.out
assert "argument --id: not allowed with argument --category" in captured.err
| 2.4375 | 2 |
configs/keypoints/faster_rcnn_r50_fpn_keypoints.py | VGrondin/CBNetV2_mask_remote | 0 | 3937 | <reponame>VGrondin/CBNetV2_mask_remote<gh_stars>0
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py'
]
model = dict(
type='FasterRCNN',
# pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
# type='StandardRoIHead',
_delete_=True,
type='KeypointRoIHead',
output_heatmaps=False,
# keypoint_head=dict(
# type='HRNetKeypointHead',
# num_convs=8,
# in_channels=256,
# features_size=[256, 256, 256, 256],
# conv_out_channels=512,
# num_keypoints=5,
# loss_keypoint=dict(type='MSELoss', loss_weight=50.0)),
keypoint_decoder=dict(type='HeatmapDecodeOneKeypoint', upscale=4),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)))
)
#optimizer = dict(lr=0.002)
#lr_config = dict(step=[40, 55])
#total_epochs = 60
| 1.609375 | 2 |
maskrcnn_benchmark/layers/roi_align_rotated_3d.py | picwoon/As_built_BIM | 2 | 3938 | <reponame>picwoon/As_built_BIM
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch, math
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from SparseConvNet.sparseconvnet.tools_3d_2d import sparse_3d_to_dense_2d
import _C
class _ROIAlignRotated3D(Function):
@staticmethod
def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
ctx.save_for_backward(roi)
ctx.output_size = _pair(output_size)
ctx.spatial_scale = spatial_scale
ctx.sampling_ratio = sampling_ratio
ctx.input_shape = input.size()
# input: [4, 256, 304, 200, 7]
# roi: [171, 8]
# spatial_scale: 0.25
# output_size: [7,7,7]
# sampling_ratio: 2
output = _C.roi_align_rotated_3d_forward(
input, roi, spatial_scale, output_size[0], output_size[1], output_size[2], sampling_ratio
) # [171, 256, 7, 7]
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
rois, = ctx.saved_tensors
output_size = ctx.output_size
spatial_scale = ctx.spatial_scale
sampling_ratio = ctx.sampling_ratio
bs, ch, h, w, zsize = ctx.input_shape
grad_input = _C.roi_align_rotated_3d_backward(
grad_output,
rois,
spatial_scale,
output_size[0],
output_size[1],
output_size[2],
bs,
ch,
h,
w,
zsize,
sampling_ratio,
)
return grad_input, None, None, None, None
roi_align_rotated_3d = _ROIAlignRotated3D.apply
class ROIAlignRotated3D(nn.Module):
def __init__(self, output_size, spatial_scale, sampling_ratio):
'''
output_size:[pooled_height, pooled_width]
spatial_scale: size_of_map/size_of_original_image
sampling_ratio: how many points to use for bilinear_interpolate
'''
super(ROIAlignRotated3D, self).__init__()
self.output_size = output_size # (7,7,7)
self.spatial_scale = spatial_scale # 0.25
self.sampling_ratio = sampling_ratio # 2
def forward(self, input_s3d, rois_3d):
        '''
        input_s3d: sparse 3d tensor, converted to a dense layout before pooling
        rois_3d: 3d boxes; xyz order is the same as input_s3d,
            yaw unit is rad, anti-clockwise is positive
        For reference, the 2d ROIAlignRotated convention this op extends is:
            input: [batch_size, feature, h, w]
            rois: [n, 6] = [batch_ind, center_w, center_h, roi_width, roi_height, theta],
            theta unit: degree, anti-clockwise is positive
        Note: the order of w and h inside of input and rois is different.
        '''
input_d3d = sparse_3d_to_dense_2d(input_s3d)
output = roi_align_rotated_3d(
input_d3d, rois_3d, self.output_size, self.spatial_scale, self.sampling_ratio
)
return output
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
tmpstr += ")"
return tmpstr
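# Construction sketch matching the shapes noted in _ROIAlignRotated3D.forward;
# the exact column layout of rois_3d beyond the leading batch index is an
# assumption inferred from the [N, 8] shape (3D centre, 3D size, yaw in rad):
#
#   pooler = ROIAlignRotated3D(output_size=(7, 7, 7), spatial_scale=0.25,
#                              sampling_ratio=2)
#   pooled = pooler(sparse_feature_map, rois_3d)  # per-ROI pooled features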
| 1.71875 | 2 |
src/model/utils/utils.py | J-CITY/METADATA-EXTRACTOR | 0 | 3939 | <reponame>J-CITY/METADATA-EXTRACTOR<filename>src/model/utils/utils.py
import numpy as np
import os
from .logger import printLog
UNK = "$UNK$"
NUM = "$NUM$"
NONE = "O"
class ParrotIOError(Exception):
def __init__(self, filename):
message = "ERROR: Can not find file {}.".format(filename)
super(ParrotIOError, self).__init__(message)
# Class that iterates over CoNLL Dataset
class CoNLLDataset(object):
def __init__(self, filename, processingWord=None, processingTag=None,
maxIter=None):
self.filename = filename
self.processingWord = processingWord # function that takes a word as input
self.processingTag = processingTag # function that takes a tag as input
self.maxIter = maxIter # max number of sentences to yield
self.length = None
def __iter__(self):
niter = 0
with open(self.filename, encoding='utf-8') as f:
words, tags = [], []
for line in f:
line = line.strip() # delete spaces in start and end
if (len(line) == 0 or line.startswith("-DOCSTART-")):
if len(words) != 0:
niter += 1
if self.maxIter is not None and niter > self.maxIter:
break
yield words, tags
words, tags = [], []
else:
ls = line.split(' ')
word, tag = ls[0],ls[-1]
if self.processingWord is not None:
word = self.processingWord(word)
if self.processingTag is not None:
tag = self.processingTag(tag)
words += [word]
tags += [tag]
def __len__(self):
if self.length is None:
self.length = 0
for _ in self:
self.length += 1
return self.length
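# Usage sketch (the file name is hypothetical). The expected format is one
# space-separated "token ... tag" pair per line, with blank lines or a
# -DOCSTART- line separating sentences, e.g.
#
#   EU B-ORG
#   rejects O
#
#   dataset = CoNLLDataset("data/train.txt")
#   for words, tags in dataset:
#       ...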
#Create a dictionary from dataset
def getDictionary(datasets):
printLog("Building dictionary: ")
dictWords = set()
dictTags = set()
for dataset in datasets:
for words, tags in dataset:
dictWords.update(words)
dictTags.update(tags)
printLog("DONE: " + str(len(dictWords)) + " size")
return dictWords, dictTags
def getCharDictionary(dataset):
dictChar = set()
for words, _ in dataset:
for word in words:
dictChar.update(word)
return dictChar
#filename - path wo file with vectors
def getGloveDictionary(filename):
printLog("Building dictionary")
dictGlove = set()
with open(filename, encoding='utf-8') as f:
for line in f:
word = line.strip().split(' ')[0]
dictGlove.add(word)
printLog("DONE: "+ str(len(dictGlove)) +" tokens")
return dictGlove
def saveDictionary(dictionary, filename):
printLog("SAVE")
with open(filename, "w", encoding='utf-8') as f:
for i, word in enumerate(dictionary):
if i != len(dictionary) - 1:
f.write("{}\n".format(word))
else:
f.write(word)
def loadDictionary(filename):
try:
d = dict()
with open(filename, encoding='utf-8') as f:
for idx, word in enumerate(f):
word = word.strip()
d[word] = idx
except IOError:
raise ParrotIOError(filename)
return d
def exportCompactGloveVectors(dictionary, gloveFilename, trimmedFilename, dim):
embeddings = np.zeros([len(dictionary), dim])
with open(gloveFilename, encoding='utf-8') as f:
for line in f:
line = line.strip().split(' ')
word = line[0]
if word in dictionary:
embedding = [float(x) for x in line[1:]] #glove coords
wordID = dictionary[word]
embeddings[wordID] = np.asarray(embedding)
np.savez_compressed(trimmedFilename, embeddings=embeddings) # store glove matrix
def getCompactGloveVectors(filename):
try:
with np.load(filename) as data:
return data["embeddings"]
except IOError:
raise ParrotIOError(filename)
def getProcessingWord(dictWords=None, dictChars=None,
lowercase=False, chars=False, allowUNK=True):
def f(word):
# char ids for word
if (dictChars is not None) and (chars == True):
charIDs = []
for char in word:
if (char in dictChars):
charIDs.append(dictChars[char])
if lowercase:
word = word.lower()
if word.isdigit():
word = NUM
# word id
if (dictWords is not None):
if word in dictWords:
word = dictWords[word]
elif allowUNK:
word = dictWords[UNK]
else:
raise Exception("Unknow tag.")
if (dictChars is not None) and (chars == True):
# chars ids and word id
return charIDs, word
# word id
return word
return f
def _padSequences(sequences, padtok, maxLength):
sequencePadded, sequenceLength = [], []
for seq in sequences:
seq = list(seq)
seq_ = seq[:maxLength] + [padtok]*max(maxLength - len(seq), 0)
sequencePadded += [seq_]
sequenceLength += [min(len(seq), maxLength)]
# all sublist have same length
return sequencePadded, sequenceLength
def padSequences(sequences, padtok, nlevels=1):
if nlevels == 1:
maxLength = max(map(lambda x : len(x), sequences))
sequencePadded, sequenceLength = _padSequences(sequences,
padtok, maxLength)
elif nlevels == 2:
maxLengthWord = max([max(map(lambda x: len(x), seq))
for seq in sequences])
sequencePadded, sequenceLength = [], []
for seq in sequences:
# all words are same length
sp, sl = _padSequences(seq, padtok, maxLengthWord)
sequencePadded += [sp]
sequenceLength += [sl]
maxLengthSentence = max(map(lambda x : len(x), sequences))
sequencePadded, _ = _padSequences(sequencePadded,
[padtok]*maxLengthWord, maxLengthSentence)
sequenceLength, _ = _padSequences(sequenceLength, 0,
maxLengthSentence)
return sequencePadded, sequenceLength
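# Worked example (nlevels=1): padSequences([[3, 7], [5]], 0) returns
# ([[3, 7], [5, 0]], [2, 1]) -- each sequence is right-padded to the longest
# length and its original length is reported alongside.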
def minibatches(data, minibatchSize):
x_batch, y_batch = [], []
for (x, y) in data:
if len(x_batch) == minibatchSize:
yield x_batch, y_batch
x_batch, y_batch = [], []
if type(x[0]) == tuple:
x = zip(*x)
x_batch += [x]
y_batch += [y]
if len(x_batch) != 0:
yield x_batch, y_batch
def getChunkType(tok, idxToTag):
tagName = idxToTag[tok]
tagClass = tagName.split('-')[0]
tagType = tagName.split('-')[-1]
return tagClass, tagType
def getChunks(seq, tags):
"""Given a sequence of tags, group entities and their position
Args:
seq: [4, 4, 0, 0, ...] sequence of labels
tags: dict["O"] = 4
Returns:
list of (chunkType, chunkStart, chunkEnd)
Example:
seq = [4, 5, 0, 3]
tags = {"B-PER": 4, "I-PER": 5, "B-LOC": 3}
result = [("PER", 0, 2), ("LOC", 3, 4)]
"""
default = tags[NONE]
idxToTag = {idx: tag for tag, idx in tags.items()}
chunks = []
chunkType, chunkStart = None, None
for i, tok in enumerate(seq):
# End of a chunk 1
if tok == default and chunkType is not None:
# Add a chunk.
chunk = (chunkType, chunkStart, i)
chunks.append(chunk)
chunkType, chunkStart = None, None
# End of a chunk + start of a chunk!
elif tok != default:
tokChunkClass, tokChunkType = getChunkType(tok, idxToTag)
if chunkType is None:
chunkType, chunkStart = tokChunkType, i
elif tokChunkType != chunkType or tokChunkClass == "B":
chunk = (chunkType, chunkStart, i)
chunks.append(chunk)
chunkType, chunkStart = tokChunkType, i
else:
pass
# end condition
if chunkType is not None:
chunk = (chunkType, chunkStart, len(seq))
chunks.append(chunk)
return chunks
| 2.578125 | 3 |
noxfile.py | fatcat2/biggestContributor | 2 | 3940 | import nox
FILE_PATHS = ["utils", "main.py"]
@nox.session
def format(session):
session.install("black")
session.run("black", *FILE_PATHS)
| 1.453125 | 1 |
discordbot/economy/currencies.py | minhhoang1023/GamestonkTerminal | 1 | 3941 | import os
import df2img
import disnake
import pandas as pd
from PIL import Image
import discordbot.config_discordbot as cfg
from discordbot.config_discordbot import logger
from discordbot.helpers import autocrop_image
from gamestonk_terminal.economy import wsj_model
async def currencies_command(ctx):
"""Currencies overview [Wall St. Journal]"""
try:
# Debug user input
if cfg.DEBUG:
logger.debug("econ-currencies")
# Retrieve data
df = wsj_model.global_currencies()
df = pd.DataFrame.from_dict(df)
# Check for argument
if df.empty:
raise Exception("No available data found")
df["Last"] = pd.to_numeric(df["Last"].astype(float))
df["Chng"] = pd.to_numeric(df["Chng"].astype(float))
df["%Chng"] = pd.to_numeric(df["%Chng"].astype(float))
formats = {"Last": "{:.2f}", "Chng": "{:.2f}", "%Chng": "{:.2f}%"}
for col, value in formats.items():
df[col] = df[col].map(lambda x: value.format(x)) # pylint: disable=W0640
df = df.fillna("")
df.set_index(" ", inplace=True)
# Debug user output
if cfg.DEBUG:
logger.debug(df.to_string())
df = df[
[
"Last",
"Chng",
"%Chng",
]
]
dindex = len(df.index)
fig = df2img.plot_dataframe(
df,
fig_size=(800, (40 + (40 * dindex))),
col_width=[8, 3, 3],
tbl_cells=dict(
align="left",
height=35,
),
template="plotly_dark",
font=dict(
family="Consolas",
size=20,
),
paper_bgcolor="rgba(0, 0, 0, 0)",
)
imagefile = "econ-currencies.png"
df2img.save_dataframe(fig=fig, filename=imagefile)
image = Image.open(imagefile)
image = autocrop_image(image, 0)
image.save(imagefile, "PNG", quality=100)
image = disnake.File(imagefile)
title = "Economy: [WSJ] Currencies"
embed = disnake.Embed(title=title, colour=cfg.COLOR)
embed.set_image(url=f"attachment://{imagefile}")
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
os.remove(imagefile)
await ctx.send(embed=embed, file=image)
except Exception as e:
embed = disnake.Embed(
title="ERROR Economy: [WSJ] Currencies",
colour=cfg.COLOR,
description=e,
)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
await ctx.send(embed=embed, delete_after=30.0)
| 2.6875 | 3 |
aiovectortiler/config_handler.py | shongololo/aiovectortiler | 4 | 3942 | <filename>aiovectortiler/config_handler.py<gh_stars>1-10
import os
import yaml
import logging
logger = logging.getLogger(__name__)
class Configs:
server = None
recipes = {}
DB = None
plugins = None
@classmethod
def init_server_configs(cls, server_configs):
with open(server_configs) as s_c:
            cls.server = yaml.safe_load(s_c.read())
@classmethod
def init_layer_recipes(cls, recipe_configs):
recipe_name = None
        if '/' in recipe_configs:
            recipe_name = os.path.normpath(recipe_configs).split('/')[-1]
        # for windows
        elif '\\' in recipe_configs:
            recipe_name = os.path.normpath(recipe_configs).split('\\')[-1]
        else:
            # bare file name with no directory component
            recipe_name = recipe_configs
if recipe_name[-4:] == '.yml':
recipe_name = recipe_name[:-4]
elif recipe_name[-5:] == '.yaml':
recipe_name = recipe_name[:-5]
else:
raise FileExistsError('File in layer recipes folder does not have a YAML extension: {0}'.format(recipe_configs))
with open(recipe_configs) as r_c:
            load_recipe = yaml.safe_load(r_c.read())
cls.recipes[recipe_name] = Recipe(load_recipe)
# add the recipe name based on the file name
# this is needed by the tilejson query
cls.recipes[recipe_name].name = recipe_name
logger.info('Adding layer: {0}'.format(recipe_name))
'''
Plugins.load()
Plugins.hook('before_load', config=Configs)
def load_recipe(data):
name = data.get('name', 'default')
if name in RECIPES:
raise ValueError('Recipe with name {} already exist'.format(name))
data['name'] = name
RECIPES[name] = Recipe(data)
if len(RECIPES) == 1 and name != 'default':
RECIPES['default'] = RECIPES[data['name']]
for recipe in Configs.layers:
with Path(recipe).open() as f:
load_recipe(yaml.load(f.read()))
Plugins.hook('load', config=config, recipes=RECIPES)
'''
# the following model structures for recipes / layers / queries allows searching up the chain
# for attributes. If not found in the root recipes level then it will check the server configs.
class Recipe(dict):
def __init__(self, data):
super().__init__(data)
self.load_layers(data['layers'])
def load_layers(self, layers):
self.layers = {}
for layer in layers:
self.layers[layer['name']] = Layer(self, layer)
def __getattr__(self, attr):
return self.get(attr, Configs.server.get(attr, None))
class Layer(dict):
def __init__(self, recipe, layer_data):
self.recipe = recipe
super().__init__(layer_data)
self.load_queries(layer_data['queries'])
def load_queries(self, queries):
self.queries = []
for query in queries:
self.queries.append(Query(self, query))
def __getattr__(self, attr):
return self.get(attr, getattr(self.recipe, attr))
@property
def id(self):
return '{0}:{1}'.format(self.recipe.name, self.name)
@property
def description(self):
return self.get('description', 'no description provided')
class Query(dict):
def __init__(self, layer, data):
self.layer = layer
super().__init__(data)
def __getattr__(self, attr):
return self.get(attr, getattr(self.layer, attr))
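# Attribute lookup sketch (the key name is hypothetical): because of the chained
# __getattr__ implementations, `query.srid` returns the value set on the query
# if present, otherwise the layer's, then the recipe's, and finally whatever
# Configs.server provides (or None if the key is absent everywhere).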
| 2.375 | 2 |
volksdep/converters/__init__.py | repoww/volksdep | 271 | 3943 | from .torch2onnx import torch2onnx
from .onnx2trt import onnx2trt
from .torch2trt import torch2trt
from .base import load, save
| 1.164063 | 1 |
t2vretrieval/models/mlmatch.py | Roc-Ng/HANet | 34 | 3944 | <reponame>Roc-Ng/HANet<gh_stars>10-100
import numpy as np
import torch
import framework.ops
import t2vretrieval.encoders.mlsent
import t2vretrieval.encoders.mlvideo
import t2vretrieval.models.globalmatch
from t2vretrieval.models.criterion import cosine_sim
from t2vretrieval.models.globalmatch import VISENC, TXTENC
class RoleGraphMatchModelConfig(t2vretrieval.models.globalmatch.GlobalMatchModelConfig):
def __init__(self):
super().__init__()
self.num_verbs = 4
self.num_nouns = 6
self.attn_fusion = 'embed' # sim, embed
self.simattn_sigma = 4
self.hard_topk = 1
self.max_violation = True
self.loss_weights = None
## this config will be covered by model.json due to the functions of load and load_from_dict
self.subcfgs[VISENC] = t2vretrieval.encoders.mlvideo.MultilevelEncoderConfig()
self.subcfgs[TXTENC] = t2vretrieval.encoders.mlsent.RoleGraphEncoderConfig()
class RoleGraphMatchModel(t2vretrieval.models.globalmatch.GlobalMatchModel):
def build_submods(self):
return {
VISENC: t2vretrieval.encoders.mlvideo.MultilevelEncoder(self.config.subcfgs[VISENC]),
TXTENC: t2vretrieval.encoders.mlsent.RoleGraphEncoder(self.config.subcfgs[TXTENC])
}
def forward_video_embed(self, batch_data):
vid_fts = torch.FloatTensor(batch_data['attn_fts']).to(self.device)
vid_lens = torch.LongTensor(batch_data['attn_lens']).to(self.device)
# (batch, max_vis_len, dim_embed)
vid_sent_embeds, vid_verb_embeds, vid_noun_embeds, local_sent_embeds, logits, max_len = self.submods[VISENC](vid_fts, vid_lens)
return {
'vid_sent_embeds': vid_sent_embeds,
'vid_verb_embeds': vid_verb_embeds,
'vid_noun_embeds': vid_noun_embeds,
'local_vid_embeds': local_sent_embeds,
'vid_lens': vid_lens,
'max_len': max_len,
'logits': logits,
}
def forward_text_embed(self, batch_data):
sent_ids = torch.LongTensor(batch_data['sent_ids']).to(self.device) ## sentence
sent_lens = torch.LongTensor(batch_data['sent_lens']).to(self.device) ## length
verb_masks = torch.BoolTensor(batch_data['verb_masks']).to(self.device) ## batch*nv*max_sen_len
noun_masks = torch.BoolTensor(batch_data['noun_masks']).to(self.device)
node_roles = torch.LongTensor(batch_data['node_roles']).to(self.device) ## batch*(n_v+n_n)
rel_edges = torch.FloatTensor(batch_data['rel_edges']).to(self.device) ## batch*(1+n_v+n_n)*(1+n_v+n_n)
verb_lens = torch.sum(verb_masks, 2)
noun_lens = torch.sum(noun_masks, 2)
# sent_embeds: (batch, dim_embed)
# verb_embeds, noun_embeds: (batch, num_xxx, dim_embed)
sent_embeds, verb_embeds, noun_embeds, local_sent_embeds, sent_logits = self.submods[TXTENC](
sent_ids, sent_lens, verb_masks, noun_masks, node_roles, rel_edges)
return {
'sent_embeds': sent_embeds, 'sent_lens': sent_lens,
'verb_embeds': verb_embeds, 'verb_lens': verb_lens,
'noun_embeds': noun_embeds, 'noun_lens': noun_lens,
'sent_logits': sent_logits, 'local_sent_embeds': local_sent_embeds,
}
def generate_phrase_scores(self, vid_embeds, vid_masks, phrase_embeds, phrase_masks, mask_flag=False):
'''Args:
- vid_embeds: (batch, num_frames, embed_size)
- vid_masks: (batch, num_frames)
- phrase_embeds: (batch, num_phrases, embed_size)
- phrase_masks: (batch, num_phrases)
'''
batch_vids, num_frames, _ = vid_embeds.size()
vid_pad_masks = (vid_masks == 0).unsqueeze(1).unsqueeze(3)
batch_phrases, num_phrases, dim_embed = phrase_embeds.size()
# compute component-wise similarity
vid_2d_embeds = vid_embeds.view(-1, dim_embed)
phrase_2d_embeds = phrase_embeds.view(-1, dim_embed)
# size = (batch_vids, batch_phrases, num_frames, num_phrases)
ground_sims = cosine_sim(vid_2d_embeds, phrase_2d_embeds).view(
batch_vids, num_frames, batch_phrases, num_phrases).transpose(1, 2)
###
if mask_flag:
vid_attn_per_word = ground_sims.masked_fill(vid_pad_masks, 0) ##############
else:
vid_attn_per_word = ground_sims
vid_attn_per_word[vid_attn_per_word < 0] = 0
vid_attn_per_word = framework.ops.l2norm(vid_attn_per_word, dim=2)
if mask_flag:
vid_attn_per_word = vid_attn_per_word.masked_fill(vid_pad_masks, -1e18) #################
vid_attn_per_word = torch.softmax(self.config.simattn_sigma * vid_attn_per_word, dim=2)
if self.config.attn_fusion == 'embed':
vid_attned_embeds = torch.einsum('abcd,ace->abde', vid_attn_per_word, vid_embeds)
word_attn_sims = torch.einsum('abde,bde->abd',
framework.ops.l2norm(vid_attned_embeds),
framework.ops.l2norm(phrase_embeds))
elif self.config.attn_fusion == 'sim':
# (batch_vids, batch_phrases, num_phrases)
word_attn_sims = torch.sum(ground_sims * vid_attn_per_word, dim=2)
# sum: (batch_vid, batch_phrases)
phrase_scores = torch.sum(word_attn_sims * phrase_masks.float().unsqueeze(0), 2) \
/ torch.sum(phrase_masks, 1).float().unsqueeze(0).clamp(min=1)
return phrase_scores
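  # Shape walkthrough: with vid_embeds (Bv, F, D) and phrase_embeds (Bp, P, D),
  # ground_sims is (Bv, Bp, F, P); the softmax over the frame axis turns it into
  # attention weights, and the masked average over phrases yields the returned
  # (Bv, Bp) score matrix.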
def generate_scores(self, **kwargs):
##### shared #####
vid_lens = kwargs['vid_lens'] # (batch, )
num_frames = int(kwargs['max_len'])###########################kwargs['vid_verb_embeds'].size(1)
vid_masks = framework.ops.sequence_mask(vid_lens, num_frames, inverse=False)
# batch*max_len
##### sentence-level scores #####
sent_scores = cosine_sim(kwargs['vid_sent_embeds'], kwargs['sent_embeds'])
#######################################################
# concept scores use jaccard similarity
concept_verb_scores = self.jaccard_sim(kwargs['logits'][0], kwargs['sent_logits'][0])
concept_noun_scores = self.jaccard_sim(kwargs['logits'][1], kwargs['sent_logits'][1])
#######################################################
##### verb-level scores #####
vid_verb_embeds = kwargs['vid_verb_embeds'] # (batch, num_frames, dim_embed)
verb_embeds = kwargs['verb_embeds'] # (batch, num_verbs, dim_embed)
verb_lens = kwargs['verb_lens'] # (batch, num_verbs)
local_vid_embeds =kwargs['local_vid_embeds']
local_sent_embeds = kwargs['local_sent_embeds']
verb_masks = framework.ops.sequence_mask(torch.sum(verb_lens > 0, 1).long(),
self.config.num_verbs, inverse=False)
# sum: (batch_vids, batch_sents)
verb_scores = self.generate_phrase_scores(vid_verb_embeds, vid_masks, verb_embeds, verb_masks)
ind_verb_scores = self.generate_phrase_scores(local_vid_embeds[0], vid_masks, local_sent_embeds[0], verb_masks, True)
##### noun-level scores #####
vid_noun_embeds = kwargs['vid_noun_embeds'] # (batch, num_frames, dim_embed)
noun_embeds = kwargs['noun_embeds'] # (batch, num_nouns, dim_embed)
noun_lens = kwargs['noun_lens'] # (batch, num_nouns)
noun_masks = framework.ops.sequence_mask(torch.sum(noun_lens > 0, 1).long(),
self.config.num_nouns, inverse=False)
# sum: (batch_vids, batch_sents)
noun_scores = self.generate_phrase_scores(vid_noun_embeds, vid_masks, noun_embeds, noun_masks)
ind_noun_scores = self.generate_phrase_scores(local_vid_embeds[1], vid_masks, local_sent_embeds[1], noun_masks, True)
return sent_scores, verb_scores, noun_scores, concept_verb_scores, concept_noun_scores, ind_verb_scores, ind_noun_scores
def jaccard_sim(self, im, s):
im_bs = im.size(0)
s_bs = s.size(0)
im = im.unsqueeze(1).expand(-1, s_bs, -1)
s = s.unsqueeze(0).expand(im_bs, -1, -1)
intersection = torch.min(im, s).sum(-1)
union = torch.max(im, s).sum(-1)
score = intersection / union
return score
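  # Illustrative example: for multi-hot concept vectors im = [1, 0, 1] and
  # s = [1, 1, 0], sum(min) = 1 and sum(max) = 3, giving a Jaccard score of 1/3.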
def forward_loss(self, batch_data, step=None):
enc_outs = self.forward_video_embed(batch_data)
cap_enc_outs = self.forward_text_embed(batch_data)
enc_outs.update(cap_enc_outs)
sent_scores, verb_scores, noun_scores, concept_verb_scores, concept_noun_scores, local_verb_scores, local_noun_scores = self.generate_scores(**enc_outs)
scores = (sent_scores + verb_scores + noun_scores + local_verb_scores + local_noun_scores) / 5
scores2 = (concept_verb_scores + concept_noun_scores) / 2
sent_loss = self.criterion(sent_scores)
verb_loss = self.criterion(verb_scores)
noun_loss = self.criterion(noun_scores)
eta = 0.1
mu = 0.01
concept_verb_loss = 0.5*self.criterion(concept_verb_scores)
concept_noun_loss = 0.5*self.criterion(concept_noun_scores)
concept_loss = eta*self.criterion(scores2)
verb_concept_label = torch.FloatTensor(batch_data['verb_concept_label']).to(self.device)
noun_concept_label = torch.FloatTensor(batch_data['noun_concept_label']).to(self.device)
verb_concept_mask = torch.FloatTensor(batch_data['verb_concept_mask']).to(self.device)
noun_concept_mask = torch.FloatTensor(batch_data['noun_concept_mask']).to(self.device)
v_mask_sum = torch.sum(verb_concept_mask, dim=1)
n_mask_sum = torch.sum(noun_concept_mask, dim=1)
vbce_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['logits'][0], verb_concept_label), dim=1)
vbce_loss = mu*torch.mean(vbce_loss/v_mask_sum)
nbce_loss = torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['logits'][1], noun_concept_label), dim=1)
nbce_loss = mu*torch.mean(nbce_loss/n_mask_sum)
vbce_sent_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['sent_logits'][0], verb_concept_label), dim=1)
vbce_sent_loss = mu*torch.mean(vbce_sent_loss/v_mask_sum)
nbce_sent_loss = torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['sent_logits'][1], noun_concept_label), dim=1)
nbce_sent_loss = mu*torch.mean(nbce_sent_loss/n_mask_sum)
fusion_loss = self.criterion(scores)
if self.config.loss_weights is None:
loss = fusion_loss + 1*(vbce_loss+nbce_loss) + 1*(vbce_sent_loss+nbce_sent_loss) + concept_loss
else:
loss = self.config.loss_weights[0] * fusion_loss + \
self.config.loss_weights[1] * sent_loss + \
self.config.loss_weights[2] * verb_loss + \
self.config.loss_weights[3] * noun_loss + \
vbce_loss + nbce_loss
if step is not None and self.config.monitor_iter > 0 and step % self.config.monitor_iter == 0:
neg_scores = scores.masked_fill(torch.eye(len(scores), dtype=torch.bool).to(self.device), -1e10)
self.print_fn('\tstep %d: pos mean scores %.2f, hard neg mean scores i2t %.2f, t2i %.2f'%(
step, torch.mean(torch.diag(scores)), torch.mean(torch.max(neg_scores, 1)[0]),
torch.mean(torch.max(neg_scores, 0)[0])))
self.print_fn('\tstep %d: sent_loss %.4f, verb_loss %.4f, noun_loss %.4f, fusion_loss %.4f'%(
step, sent_loss.data.item(), verb_loss.data.item(), noun_loss.data.item(), fusion_loss.data.item()))
self.print_fn('\tstep %d: vbce_loss %.4f, nbce_loss %.4f'%(step, vbce_loss.item(), nbce_loss.item()))
self.print_fn('\tstep %d: vbce_sent_loss %.4f, nbce_sent_loss %.4f'%(step, vbce_sent_loss.item(), nbce_sent_loss.item()))
self.print_fn('\tstep %d: sim_loss %.4f, vsim_loss %.4f, nsim_loss %.4f'%(step, concept_loss.item(),
concept_verb_loss.item(), concept_noun_loss.item()))
return loss
def evaluate_scores(self, tst_reader):
K = self.config.subcfgs[VISENC].num_levels
K = K + 4
assert K == 7, 'Note that this error indicates losing other scores!'
vid_names, all_scores = [], [[] for _ in range(K)]
cap_names = tst_reader.dataset.captions
for vid_data in tst_reader:
vid_names.extend(vid_data['names'])
vid_enc_outs = self.forward_video_embed(vid_data)
for k in range(K):
all_scores[k].append([])
ijj = 0
for cap_data in tst_reader.dataset.iterate_over_captions(self.config.tst_batch_size):
cap_enc_outs = self.forward_text_embed(cap_data)
cap_enc_outs.update(vid_enc_outs)
indv_scores = self.generate_scores(**cap_enc_outs)
for k in range(K):
all_scores[k][-1].append(indv_scores[k].data.cpu().numpy())
ijj += 0
for k in range(K):
all_scores[k][-1] = np.concatenate(all_scores[k][-1], axis=1)
for k in range(K):
all_scores[k] = np.concatenate(all_scores[k], axis=0) # (n_img, n_cap)
all_scores = np.array(all_scores) # (k, n_img, n_cap)
return vid_names, cap_names, all_scores
def evaluate(self, tst_reader, return_outs=False):
vid_names, cap_names, scores = self.evaluate_scores(tst_reader)
i2t_gts = []
for vid_name in vid_names:
i2t_gts.append([])
for i, cap_name in enumerate(cap_names):
if cap_name in tst_reader.dataset.ref_captions[vid_name]:
i2t_gts[-1].append(i)
t2i_gts = {}
for i, t_gts in enumerate(i2t_gts):
for t_gt in t_gts:
t2i_gts.setdefault(t_gt, [])
t2i_gts[t_gt].append(i)
idx = [0, 1, 2, 5, 6]
fused_scores = (np.mean(scores[idx], 0) + np.mean(scores[3:5], 0))/2
metrics = self.calculate_metrics(fused_scores, i2t_gts, t2i_gts)
if return_outs:
outs = {
'vid_names': vid_names,
'cap_names': cap_names,
'scores': scores,
}
return metrics, outs
else:
return metrics
| 1.953125 | 2 |
bench_fastapi/authentication/controllers/login.py | sharkguto/teste_carga | 1 | 3945 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# login.py
# @Author : <NAME> (<EMAIL>)
# @Link :
# @Date : 12/12/2019, 11:43:07 AM
from typing import Optional, Any
from fastapi import APIRouter, Body, Depends, HTTPException
from fastapi import Header, Security
from authentication.models.users import User
from fastapi.security import HTTPBasic, HTTPBasicCredentials, APIKeyHeader
from typing import List
from starlette.responses import Response
from fastapi.encoders import jsonable_encoder
from authentication.interfaces.database import database
import jwt
from starlette.status import HTTP_400_BAD_REQUEST, HTTP_401_UNAUTHORIZED
from datetime import datetime, timedelta
from hashlib import sha256
from authentication.interfaces.token import verify_token
router = APIRouter()
security = HTTPBasic(auto_error=True)
api_key = APIKeyHeader(name="x-api-key", auto_error=True)
@router.post("/login", tags=["token"])
async def renew_token(
response: Response,
user: dict = Depends(verify_token),
x_api_key: str = Header(None),
):
response.headers["x-api-key"] = x_api_key
return {"verified": True, "user": user["email"]}
@router.put("/login", tags=["token"])
async def renew_token(response: Response, user: dict = Depends(verify_token)):
sql = """UPDATE users.tbl_users
SET token = :token WHERE
id = :id"""
token = f"{user['pwd_updated_at']}-{user['email']}-{datetime.now()}"
mhash = sha256(token.encode("utf-8"))
token = mhash.hexdigest()
await database.execute(query=sql, values={"id": user["id"], "token": token})
response.headers["x-api-key"] = jwt.encode(
{**user, **dict(exp=(datetime.now() + timedelta(hours=8)))},
token,
algorithm="HS256",
).decode()
return {"renew": True}
# @router.post("/login", dependencies=[Depends(verify_token)])
# async def renew_token(x_api_key: str = Header(None)):
# return {"ok": x_api_key}
@router.get(
"/login", response_model=User, tags=["auth"], response_model_exclude_unset=True
)
async def login_basic(
response: Response, authorization: HTTPBasicCredentials = Security(security)
):
sql = """SELECT tu.id, tu.email, tu."name", tu.linkedin_id , tu.pwd_updated_at
FROM users.tbl_users tu
WHERE tu.passwd is NOT NULL
AND tu.passwd = crypt(:<PASSWORD>,tu.<PASSWORD>)
AND tu.email = :email
AND tu.enabled = true """
users = await database.fetch_one(
query=sql,
values={"email": authorization.username, "secret": authorization.password},
)
if not users:
raise HTTPException(status_code=HTTP_401_UNAUTHORIZED)
user = jsonable_encoder(users)
sql = """SELECT tp.acl_profile as profile
FROM users.tbl_users tu inner join
users.tbl_profile_users tpu on tpu.id_users = tu.id inner join
users.tbl_profile tp on tp.id = tpu.id_profile
WHERE tu.passwd is NOT NULL
AND tu.passwd = <PASSWORD>(:<PASSWORD>,tu.<PASSWORD>)
AND tu.email = :email"""
profiles = await database.fetch_all(
query=sql,
values={"email": authorization.username, "secret": authorization.password},
)
if not profiles:
raise HTTPException(status_code=HTTP_401_UNAUTHORIZED)
user["acl"] = jsonable_encoder(profiles)
sql = """UPDATE users.tbl_users
SET token = :token WHERE
id = :id"""
token = f"{user['pwd_updated_at']}-{authorization.username}-{datetime.now()}"
mhash = sha256(token.encode("utf-8"))
token = mhash.hexdigest()
await database.execute(query=sql, values={"id": user["id"], "token": token})
response.headers["x-api-key"] = jwt.encode(
{**user, **dict(exp=(datetime.now() + timedelta(hours=8)))},
token,
algorithm="HS256",
).decode()
return user
| 2.453125 | 2 |
dronesym-python/flask-api/src/dronepool.py | dilinade/DroneSym | 1 | 3946 | <reponame>dilinade/DroneSym<filename>dronesym-python/flask-api/src/dronepool.py
#DronePool module which handles interaction with SITLs
from dronekit import Vehicle, VehicleMode, connect
from dronekit_sitl import SITL
from threading import Lock
import node, time
import mavparser
import threadrunner
drone_pool = {}
instance_count = 0
env_test = False
q = None
mq = None
lock = Lock()
class Sim(SITL, object):
def __init__(self, instance=1, home=None):
super(Sim, self).download("copter", "3.3", verbose=not env_test)
self.instance = instance
if home:
self.home = home
else:
self.home = {"lat":6.9271, "lon":79.8612, "alt": 1}
self.p = None
return
def connection_string(self):
return super(Sim, self).connection_string()[:-4] + str(5760 + self.instance * 10)
def launch(self):
home_str = str(self.home['lat']) + ',' + str(self.home['lon']) + ',0,353'
super(Sim, self).launch(["--instance", str(self.instance), "--home", home_str], await_ready=True, verbose=not env_test)
def get_sitl_status(self):
return { 'id': self.instance, 'home': self.home }
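# Port arithmetic sketch for Sim.connection_string (assuming dronekit-sitl's
# default base string 'tcp:127.0.0.1:5760'): the last four digits are replaced
# with 5760 + 10 * instance, so instance 1 -> tcp:127.0.0.1:5770, instance 2 -> 5780.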
def initialize():
global q, mq, instance_count
q = threadrunner.q
mq = threadrunner.mq
drones = node.get_drones()['drones']
if not drones:
return
for drone_id in drones:
if drone_id not in list(drone_pool.keys()):
drone = node.get_drone_by_id(drone_id)
location = drone['location']
q.put((create_new_drone, { "db_key" : drone_id, "home" : location }))
if 'status' in list(drone.keys()) and drone['status'] == 'FLYING':
q.put((resume_flight, { "drone_id" : drone_id }))
def resume_flight(kwargs):
drone_id = kwargs.get("drone_id", None)
drone = node.get_drone_by_id(drone_id)
waypoints = []
for wp in sorted(drone['waypoints']):
waypoints.append(drone['waypoints'][wp])
next_waypoint = waypoints.index(drone['waypoint'])
print (next_waypoint)
q.put((takeoff_drone, { "drone_id" : drone_id, "waypoints" : waypoints[next_waypoint:] }))
def create_new_drone(kwargs):
global instance_count
instance_count += 1
home = kwargs.get("home", None)
db_key = kwargs.get("db_key", None)
retries = 3
drone = Sim(instance_count, home)
drone.launch()
while retries > 0:
try:
drone_conn = connect(drone.connection_string(), wait_ready=True)
break
except:
print ("Retrying...")
retries -= 1
drone_pool[db_key] = drone_conn
res = { "status" : "OK", "id" : db_key }
return res
def remove_drone(kwargs):
drone_id = kwargs.get("drone_id", None)
if drone_id not in drone_pool:
return { "status" : "ERROR", "msg" : "Drone instance not found" }
drone = drone_pool[drone_id]
if drone.mode == VehicleMode('AUTO'):
return { "status" : "ERROR", "msg" : "Drone in operation" }
del drone_pool[drone_id]
return { "status" : "OK", "id" : drone_id }
def run_mission(drone, target_height, waypoints):
while True:
print(("Reaching target alt : " + str(drone.location.global_relative_frame.alt)))
if drone.location.global_relative_frame.alt >= target_height * 0.9:
break
print ('target alt reached')
mavparser.create_mission(drone, waypoints)
print ('mission acquired')
drone.mode = VehicleMode('AUTO')
print ('initiating sequence')
print ('in mission')
def attach_listener(kwargs):
attr = kwargs.get('attr', None)
fn = kwargs.get('fn', None)
attach_fn = kwargs.get('attach_fn', None)
if not fn == None and not attr == None and not attach_fn == None:
attach_fn(attr, fn)
def takeoff_drone(kwargs):
global q
drone_id = kwargs.get("drone_id", None)
target_height = kwargs.get("target_height", 10)
waypoints = kwargs.get("waypoints", None)
try:
drone = drone_pool[drone_id]
except:
raise
drone.initialize()
drone.mode = VehicleMode('GUIDED')
drone.armed = True
while not drone.armed:
time.sleep(1)
drone.simple_takeoff(target_height)
print (waypoints)
if waypoints:
run_mission(drone, target_height, waypoints)
def detach_event_listeners(drone, value, status):
drone.remove_attribute_listener('location', update_location)
drone.remove_attribute_listener('airspeed', update_airspeed)
drone.remove_attribute_listener('attitude', udpate_attitude)
drone.remove_attribute_listener('heading', update_heading)
node.update_drone(drone_id, { "location" : {"lat": value.global_relative_frame.lat, "lon": value.global_relative_frame.lon, "alt": value.global_relative_frame.alt}, "status": status})
return
def update_location(self, attr_name, value):
node.update_drone(drone_id, { "location" : {"lat": value.global_relative_frame.lat, "lon": value.global_relative_frame.lon, "alt": value.global_relative_frame.alt}, "status": "FLYING"})
command_len = len(drone.commands)
wp_len = len(waypoints)
if command_len >= wp_len :
diff = command_len - wp_len
            next_wp = max(drone.commands.next - diff, 0) % len(waypoints)
waypoint = waypoints[next_wp]
# print "df: " + `diff`
# print next_wp
node.update_drone(drone_id, { "waypoint" : waypoint })
if drone.mode == VehicleMode('LAND') and drone.location.global_relative_frame.alt <= 0.1:
detach_event_listeners(drone, value, "HALTED")
return
        if drone.commands.next == len(drone.commands):
detach_event_listeners(drone, value, "FINISHED")
return
def update_airspeed(self, attr_name, value):
node.update_drone(drone_id, {"airspeed": value})
    def update_attitude(self, attr_name, value):
node.update_drone(drone_id, { "pitch": value.pitch, 'roll': value.roll, 'yaw': value.yaw })
def update_heading(self, attr_name, value):
node.update_drone(drone_id, { "heading": value })
mq.put((attach_listener, { "attach_fn" : drone.add_attribute_listener, "attr" : 'location', "fn" : update_location }))
mq.put((attach_listener, { "attach_fn" : drone.add_attribute_listener, "attr" : 'airspeed', "fn" : update_airspeed }))
mq.put((attach_listener, { "attach_fn" : drone.add_attribute_listener, "attr" : 'attitude', "fn" : udpate_attitude }))
mq.put((attach_listener, { "attach_fn" : drone.add_attribute_listener, "attr" : 'heading', "fn" : update_heading }))
print ('took off')
return True
def land_drone(kwargs):
drone_id = kwargs.get("drone_id", None)
try:
drone = drone_pool[drone_id]
except:
raise
if not drone.armed:
return False
cmds = drone.commands
cmds.wait_ready()
cmds.clear()
drone.mode = VehicleMode('LAND')
print((drone.mode))
return True
| 2.546875 | 3 |
Problemset/longest-string-chain/longest-string-chain.py | KivenCkl/LeetCode | 7 | 3947 |
# @Title: 最长字符串链 (Longest String Chain)
# @Author: KivenC
# @Date: 2019-05-26 20:35:25
# @Runtime: 144 ms
# @Memory: 13.3 MB
from typing import List


class Solution:
# # way 1
# def longestStrChain(self, words: List[str]) -> int:
    # # Dynamic programming:
    # # dp[i] = max(dp[i], dp[j] + 1) for 0 <= j < i where words[j] is a predecessor of words[i]
# length = len(words)
# if length < 2:
# return length
# dp = [1 for _ in range(length)]
    # words.sort(key=len) # sort by increasing string length
# for i in range(1, length):
    # if i >= 1 and words[i] == words[i - 1]: # skip duplicates
# continue
# for j in range(i - 1, -1, -1):
    # if len(words[i]) - len(words[j]) > 1: # prune
# break
# if len(words[i]) == len(words[j]):
# continue
# if self.isPre(words[j], words[i]):
# dp[i] = max(dp[i], dp[j] + 1)
# return max(dp)
# def isPre(self, word1: str, word2: str) -> bool:
    # # Check whether word1 is a predecessor of word2
    # # Two-pointer approach
# # i, j, length1, length2 = 0, 0, len(word1), len(word2)
# # while i < length1 and j < length2:
# # if word1[i] == word2[j]:
# # i += 1
# # j += 1
# # if length2 - length1 == 1 and i == length1:
# # return True
# # return False
    # # Compare word1 against word2 with one character removed at each position
# if len(word1) + 1 != len(word2):
# return False
# for i in range(len(word2)):
# if word2[: i] + word2[i + 1:] == word1:
# return True
# return False
# way 2
def longestStrChain(self, words: List[str]) -> int:
import collections
length = len(words)
if length < 2:
return length
        pool = collections.defaultdict(list) # group strings by their length
dp = {}
for word in words:
pool[len(word)].append(word)
for key in sorted(pool.keys()):
if key - 1 not in pool:
continue
for word in pool[key]:
for j in range(key):
tmp = word[: j] + word[j + 1:]
if tmp in pool[key - 1]:
dp[word] = max(dp.get(word, 1), dp.get(tmp, 1) + 1)
return max(dp.values()) if dp else 1
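
# Minimal usage sketch (illustrative input, not part of the original submission):
#   Solution().longestStrChain(["a", "b", "ba", "bca", "bda", "bdca"]) == 4
#   (one longest chain: "a" -> "ba" -> "bda" -> "bdca", each step adds one letter)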
| 3.390625 | 3 |
IKFK Builder/IKFK_Builder.py | ssimbox/ssimbox-rigTools | 1 | 3948 | <reponame>ssimbox/ssimbox-rigTools
from ctrlUI_lib import createClav2, createSphere
import maya.cmds as cmds
import maya.OpenMaya as om
from functools import partial
def duplicateChain(*args):
global ogChain
global chainLen
global switcherLoc
global side
global controllerColor
global clavCheckbox
global rigGrp, ctrlGrp
ogRootchain = cmds.ls(sl = True, type = "joint")[0]
ogChain = cmds.listRelatives(ogRootchain, ad = True, type = "joint")
ogChain.append(ogRootchain)
ogChain.reverse()
side = ogRootchain[0:2]
# Initialize input from UI
scaleController = cmds.intField(scaleField_UI, q=1, v=1)
blendCheckbox = cmds.checkBox(blendCheckbox_UI, q=1, v=1)
constraintCheckBox = cmds.checkBox(constraintCheckBox_UI, q=1, v=1)
chainMenu = cmds.optionMenu("chainMenu_UI", q=1, v=1)
clavCheckbox = cmds.checkBox(clavCheckbox_UI, q=1, v=0)
if side == "l_": controllerColor = rgb=(0, 0, 255)
elif side == "r_": controllerColor = rgb=(255, 0, 0)
if chainMenu == "Leg": chainLen = 5
else: #this is totally unscalable but for now it's ok
chainLen = 3
#suffix for the new chains
newJointList = ["_ik", "_fk", "_scale"]
for newJoint in newJointList:
for i in range(chainLen):
if blendCheckbox == 0 and constraintCheckBox == 0:
cmds.error("pls, select one relation type")
break
newJointName = ogChain[i] + newJoint
#create a joint, copy their position and freeze transform
cmds.joint(n = newJointName)
cmds.matchTransform(newJointName, ogChain[i])
cmds.makeIdentity(newJointName, a = 1, t = 0, r = 1, s = 0)
#deselect to make the two different hierarchies
cmds.select(cl = 1)
cmds.parent((ogChain[0] + "_ik"), world = True)
cmds.setAttr(ogChain[0] + "_ik.visibility", 0)
cmds.setAttr(ogChain[0] + "_fk.visibility", 0)
# Create a locator used for switching IK/FK mode and snap it between two joints
switcherLoc = cmds.spaceLocator(n=side + chainMenu + "_ikfk_Switch")
switcherLocGrp = cmds.group(em=1, n=switcherLoc[0] + "_grp")
cmds.color(switcherLoc, rgb=(255, 255, 0)) #yellow
cmds.delete(cmds.pointConstraint(switcherLoc, switcherLocGrp))
cmds.parent(switcherLoc, switcherLocGrp)
cmds.delete(cmds.pointConstraint(ogChain[1], ogChain[2], switcherLocGrp))
cmds.addAttr(switcherLoc, ln="FKIK_Mode", at="short", min=0, max=1, k=1, r=1)
    cmds.move(0,0,-12, switcherLocGrp, r=1) # TODO: replace this hard-coded offset
cmds.parentConstraint(ogChain[1], switcherLocGrp, mo=1)
#remove .t, .r, .s and .v from the channelbox
for coord in ["X", "Y", "Z"]:
cmds.setAttr(switcherLoc[0] + ".translate" + coord, k=0, l=1)
cmds.setAttr(switcherLoc[0] + ".rotate" + coord, k=0, l=1)
cmds.setAttr(switcherLoc[0] + ".scale" + coord, k=0, l=1)
cmds.setAttr(switcherLoc[0] + ".visibility", k=0, l=1)
# Create hierarchy groups
rigGrp = cmds.group(em=1, n= side + chainMenu + "_rig_grp")
ctrlGrp = cmds.group(em=1, n= side + chainMenu + "_ctrl_grp")
cmds.delete(cmds.parentConstraint(ogChain[0], rigGrp))
cmds.delete(cmds.parentConstraint(ogChain[0], ctrlGrp))
cmds.parent(ctrlGrp, rigGrp)
# Execute
if blendCheckbox == 1:
blendNodeFunc(scaleController, chainMenu)
if constraintCheckBox == 1:
constraintFunc(scaleController, chainMenu)
if clavCheckbox == 1:
clavSel(scaleController)
else:
cmds.parent(ogChain[0] + "_ik", ogChain[0] + "_fk", ctrlGrp)
cmds.parent(ogChain[0] + "_fk_anim_grp", ctrlGrp)
cmds.parent(switcherLocGrp, rigGrp)
def clavSel(scaleClav):
# Select clavicle Joint moving up and put it at the top of the chain
clavJoint = cmds.pickWalk(ogChain[0], d="up")[0]
#ogChain.insert(0, clavJoint)
clavController = createClav2(clavJoint + "_anim") # Import coordinates from ctrlUI_lib
cmds.delete(cmds.pointConstraint(clavJoint, clavController))
# Create offset group, FDH and move up
clavControllerGrp = cmds.group(n=clavController + "_grp", em=1)
cmds.delete(cmds.parentConstraint(clavJoint, clavControllerGrp))
cmds.parent(clavController, clavControllerGrp)
fixedScale = scaleClav/4
cmds.scale(fixedScale, fixedScale, fixedScale, clavController)
cmds.makeIdentity(clavController, a=1)
cmds.move(0,10,0, clavControllerGrp, ws=1, r=1)
cmds.color(clavController, rgb=controllerColor)
# Move pivots on clavicle joint
piv = cmds.xform(clavJoint, q=True, ws=True, t=True)
cmds.xform(clavController, ws=True, piv=piv)
cmds.xform(clavControllerGrp, ws=True, piv=piv)
cmds.orientConstraint(clavController, clavJoint)
# Parent ik and fk chain under clavicle controller
cmds.parent((ogChain[0]+"_fk_anim_grp"),(ogChain[0] + "_ik"), (ogChain[0] + "_fk"), clavController)
cmds.parent(clavControllerGrp, ctrlGrp)
def visCheck(vis):
if vis == "Arm":
asd = True
if vis == "Leg":
asd = False
cmds.checkBox(clavCheckbox_UI, e=1, vis=asd, v=asd)
# Buttons +1 and +3
count = 0
def addOneUnit(*args):
global count
count = count + 1
cmds.intField(scaleField_UI, v=1+count, e=1)
def addThreeUnit(*args):
global count
count = count + 3
cmds.intField(scaleField_UI, v=1+count, e=1)
def blendNodeFunc(scaleController, selectChain):
# Create some blendColors node with the same name of the joint
for x in range(chainLen):
blendColorsNode = cmds.createNode("blendColors", n = ogChain[x] + "_blend")
# Connect FK and IK chains into blendColors channels and then connect the output to the original joint chain
cmds.connectAttr((ogChain[x] + "_ik.rotate"), blendColorsNode + ".color1")
cmds.connectAttr((ogChain[x] + "_fk.rotate"), blendColorsNode + ".color2")
cmds.connectAttr((blendColorsNode + ".output"), (ogChain[x] + ".rotate" ))
cmds.connectAttr(switcherLoc[0]+".FKIK_Mode", blendColorsNode + ".blender")
ikChainBuild(scaleController, selectChain)
fkControllerCreator(scaleController, selectChain)
def constraintFunc(scaleController, selectChain):
# Create some blendColors node with the same name of the joint
for x in range(chainLen):
# Setup orient constraints
cmds.parentConstraint((ogChain[x] + "_ik"), ogChain[x])
cmds.parentConstraint((ogChain[x] + "_fk"), ogChain[x])
# Setup SDK naming convention
sdkDriver = switcherLoc[0] + ".FKIK_Mode"
ikSdkDriven = ogChain[x] + "_parentConstraint1." + ogChain[x] + "_ikW0"
fkSdkDriven = ogChain[x] + "_parentConstraint1." + ogChain[x] + "_fkW1"
# Setup SDK
cmds.setAttr(sdkDriver, 0)
cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver, v=0, dv=0)
cmds.setDrivenKeyframe(fkSdkDriven, cd=sdkDriver, v=1, dv=0)
cmds.setAttr(sdkDriver, 1)
cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver, v=1, dv=1)
cmds.setDrivenKeyframe(fkSdkDriven, cd=sdkDriver, v=0, dv=1)
ikChainBuild(scaleController, selectChain)
fkControllerCreator(scaleController, selectChain)
def fkControllerCreator(fkSize, legOrArm):
orientController = cmds.optionMenu("UI_orientControllerMenu", q=1, v=1)
# Create controllers and group offsets
# Change rotation, color
for y in range(chainLen):
anim_group = cmds.group(em=1, n=ogChain[y] + "_fk_anim_grp")
fk_controller = cmds.circle(n=ogChain[y] + "_fk_anim")[0] # If not [0] it'll warn some stuff related to Maya underworld
# Set scale
cmds.scale(fkSize, fkSize, fkSize, fk_controller)
cmds.matchTransform(anim_group, ogChain[y])
cmds.delete(cmds.parentConstraint(ogChain[y], fk_controller))
cmds.parent(fk_controller, anim_group)
# Set controller orientation based on second axis
if orientController == "x": cmds.rotate(90,0,0, fk_controller)
if orientController == "y": cmds.rotate(0,90,0, fk_controller)
if orientController == "z": cmds.rotate(0,0,90, fk_controller)
# Freeze transform, delete history and set color
cmds.makeIdentity(fk_controller, a = 1, t = 1, r = 1, s = 0)
cmds.delete(fk_controller, ch = 1)
cmds.color(fk_controller, rgb=controllerColor)
# Set SDK visibility
sdkDriver = switcherLoc[0] + ".FKIK_Mode"
cmds.setAttr(sdkDriver, 1)
cmds.setDrivenKeyframe(ogChain[0] + "_fk_anim_grp.visibility", cd=sdkDriver, v=1, dv=0)
cmds.setAttr(sdkDriver, 0)
cmds.setDrivenKeyframe(ogChain[0] + "_fk_anim_grp.visibility", cd=sdkDriver, v=0, dv=1)
# Lock .t and .s attributes
#for x in ["X", "Y", "Z"]:
#cmds.setAttr(fk_controller + ".translate" + x, k=0, l=1)
#cmds.setAttr(fk_controller + ".scale" + x, k=0, l=1)
# Create ordered hierarchy
for x in reversed(range(chainLen)):
if x == 0:
continue
cmds.parent(ogChain[x] + "_fk_anim_grp", ogChain[x-1] + "_fk_anim")
# Set orientConstraint _anim controllers with _fk hierarchy
for x in range(chainLen):
cmds.parentConstraint(ogChain[x] + "_fk_anim", ogChain[x] + "_fk")
# If leg chain is selected delete toe controller, else not
if legOrArm == "Leg":
if x == (chainLen-1):
cmds.delete(ogChain[chainLen-1] + "_fk_anim_grp")
def ikChainBuild(scaleIK, HandleName):
masterIkHandle = cmds.ikHandle(sj=ogChain[0] + "_ik", ee=ogChain[2] + "_ik", sol="ikRPsolver", n=side + HandleName + "_ikHandle")
cmds.setAttr(masterIkHandle[0] + ".visibility", 0)
if HandleName == "Arm":
#print ("scaleController", scaleField_UI)
armIk(scaleIK, masterIkHandle, HandleName)
else:
#print ("scaleController", scaleField_UI)
legIK(scaleIK, masterIkHandle, HandleName)
def armIk(armIkScale, armikHandle, pvName):
ikHandJoint = cmds.joint(n=side + "hand_ik")
cmds.delete(cmds.parentConstraint(ogChain[2] + "_ik", ikHandJoint))
cmds.makeIdentity(ikHandJoint, a = 1, t = 1, r = 1, s = 0)
if side == "l_":
cmds.move(10,0,0, ikHandJoint, r=1, os=1)
else:
cmds.move(-10,0,0, ikHandJoint, r=1, os=1)
cmds.parent(ikHandJoint, ogChain[2] + "_ik")
handikHandle = cmds.ikHandle(sj=ogChain[2] + "_ik", ee=ikHandJoint, n=side + "hand_ikHandle", sol="ikSCsolver")
cmds.parent(handikHandle[0], armikHandle[0])
#create IK controller ---> CUBE
crvIkCube = cmds.curve(d=1, p=[(-0.5, 0.5, -0.5), (0.5, 0.5, -0.5), (0.5, 0.5, 0.5),
(-0.5, 0.5, 0.5), (-0.5, -0.5, 0.5), (-0.5, -0.5, -0.5),
(-0.5, 0.5, -0.5), (-0.5, 0.5, 0.5), (-0.5, -0.5, 0.5),
(0.5, -0.5, 0.5), (0.5, 0.5, 0.5), (0.5, 0.5, -0.5),
(0.5, -0.5, -0.5), (0.5, -0.5, 0.5), (0.5, -0.5, -0.5), (-0.5, -0.5, -0.5)],
k=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5], n=side + "hand_ik_anim" )
# Rename shape node
shapeList = cmds.listRelatives(crvIkCube, s = True)
cmds.rename(shapeList, crvIkCube + "Shape")
crvIkCubeGrp = cmds.group(n=crvIkCube + "_grp")
cmds.delete(cmds.parentConstraint(ogChain[2] + "_ik", crvIkCubeGrp))
cmds.color(crvIkCube, rgb=controllerColor)
cmds.scale(armIkScale, armIkScale, armIkScale, crvIkCubeGrp)
cmds.parent(armikHandle[0], crvIkCube)
pvController = createSphere(nome= side+pvName+"_PV")
findPoleVector(loc=pvController, targetHandle=armikHandle[0])
cmds.addAttr(pvController, at="enum", enumName = "------", ln="Attributes", k=1, r=1)
cmds.addAttr(pvController, ln="Follow", k=1, r=1, min=0, max=1)
cmds.addAttr(pvController, ln="Follow_Clav_Hand", k=1, r=1, min=0, max=1, dv=0.5)
# Parent ikController and PV under _rig_GRP
cmds.parent(crvIkCubeGrp, pvController + "_grp" ,rigGrp)
#set SDK visibility
sdkDriver = switcherLoc[0] + ".FKIK_Mode"
cmds.setAttr(sdkDriver, 0)
cmds.setDrivenKeyframe(crvIkCubeGrp + ".visibility", cd=sdkDriver, v=0, dv=0)
cmds.setDrivenKeyframe(pvController + "_grp.visibility", cd=sdkDriver, v=0, dv=0)
cmds.setAttr(sdkDriver, 1)
cmds.setDrivenKeyframe(crvIkCubeGrp + ".visibility", cd=sdkDriver, v=1, dv=1)
cmds.setDrivenKeyframe(pvController + "_grp.visibility", cd=sdkDriver, v=1, dv=1)
def legIK(ikFootScale, legikHandle, pvName):
ballikHandle = cmds.ikHandle(sj=ogChain[2] + "_ik", ee=ogChain[3] + "_ik", sol="ikSCsolver", n=side + "ball_ikHandle")
toeikHandle = cmds.ikHandle(sj=ogChain[3] + "_ik", ee=ogChain[4] + "_ik", sol="ikSCsolver", n=side + "toe_ikHandle")
# Create and place ik controller
ikFootControl = cmds.curve(d=2, p=[(0.997, 0, 1.789), (0, 0, 2.39), (-0.997,0,1.789), (-1.108, 0, 0), (-0.784, 0,-2.5),
(0, 0,-3), (0.784, 0, -2.5), (1.108, 0, 0), (0.997, 0, 1.789), (0, 0, 2.39)],
k=[0,1,2,3,4,5,6,7,8,9,10], n=side + "leg_anim_ik")
# Rename shape node
shapeList = cmds.listRelatives(ikFootControl, s = True)
cmds.rename(shapeList, ikFootControl + "Shape")
ikFootControlGrp = cmds.group(em=1, n=ikFootControl + "_grp")
cmds.parent(ikFootControl, ikFootControlGrp)
# Set size, freeze transform, create offset group and color
cmds.scale(ikFootScale, ikFootScale, ikFootScale, ikFootControlGrp)
cmds.move(0,-3.2,0, ikFootControl, r=1)
cmds.makeIdentity(ikFootControl, a = 1, t = 1, r = 1, s = 1)
cmds.delete(ikFootControl, ch = 1)
cmds.delete(cmds.pointConstraint(ogChain[3] + "_ik", ikFootControlGrp))
cmds.color(ikFootControl, rgb=controllerColor)
# pivot snapping on ankle joint
piv = cmds.xform(ogChain[2], q=True, ws=True, t=True)
cmds.xform(ikFootControl, ws=True, piv=piv)
cmds.parent(ballikHandle[0], toeikHandle[0], legikHandle[0], ikFootControl)
#---------- Making Pole Vector -------------#
# Pole Vector controller ---> Sphere
pvController = createSphere(nome= side+pvName+"_PV")
findPoleVector(loc=pvController, targetHandle=legikHandle[0])
cmds.addAttr(pvController, ln="Follow", k=1, r=1, min=0, max=1)
cmds.addAttr(pvController, ln="Follow_Leg_Foot", k=1, r=1, min=0, max=1, dv=0.5)
# Create attributes on ikController
cmds.addAttr(ikFootControl, at="enum",enumName = "------", ln="Attributes", k=1, r=1)
cmds.addAttr(ikFootControl, ln="Twist", k=1, r=1)
cmds.addAttr(ikFootControl, ln="Lateral_Roll", k=1, r=1)
for bone in ["Ankle", "Ball", "Toe_Tap"]:
cmds.addAttr(ikFootControl, at="enum", enumName = "------", ln=bone, k=1, r=1)
for coord in ["X", "Y", "Z"]:
cmds.addAttr(ikFootControl, ln=bone+coord, k=1, r=1)
# Parent ikController and PV under _rig_GRP
cmds.parent(ikFootControlGrp, pvController + "_grp" ,rigGrp)
# Set SDK visibility
sdkDriver = switcherLoc[0] + ".FKIK_Mode"
cmds.setAttr(sdkDriver, 0)
cmds.setDrivenKeyframe(ikFootControlGrp + ".visibility", cd=sdkDriver, v=0, dv=0)
cmds.setDrivenKeyframe(pvController + "_grp.visibility", cd=sdkDriver, v=0, dv=0)
cmds.setAttr(sdkDriver, 1)
cmds.setDrivenKeyframe(ikFootControlGrp + ".visibility", cd=sdkDriver, v=1, dv=1)
cmds.setDrivenKeyframe(pvController + "_grp.visibility", cd=sdkDriver, v=1, dv=1)
def findPoleVector(loc, targetHandle):
# This func is kinda black magic
# All credits to https://vimeo.com/66015036
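    # Sketch of the math below: project the start->mid vector onto the
    # start->end vector, then point from that projection back toward the mid
    # joint; scaling that direction and adding it to the mid joint position
    # places the pole vector in the plane of the chain, out past the bend.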
start = cmds.xform(ogChain[0], q=1, ws=1, t=1)
mid = cmds.xform(ogChain[1], q=1, ws=1, t=1)
end = cmds.xform(ogChain[2], q=1, ws=1, t=1)
startV = om.MVector(start[0], start[1], start[2])
midV = om.MVector(mid[0], mid[1], mid[2])
endV = om.MVector(end[0], end[1], end[2])
startEnd = endV - startV
startMid = midV - startV
dotP = startMid * startEnd
proj = float(dotP) / float(startEnd.length())
startEndN = startEnd.normal()
projV = startEndN * proj
arrowV = startMid - projV
arrowV*= 10 #distance from joint
finalV = arrowV + midV
cmds.xform(loc, ws=1, t=(finalV.x, finalV.y ,finalV.z))
locGrp = cmds.group(em=1, n=loc + "_grp")
#snap, parent offsetGrp, set color and then make Constraint
cmds.delete(cmds.pointConstraint(loc, locGrp))
cmds.parent(loc, locGrp)
cmds.makeIdentity(loc, a=1, t=1, r=1, s=1)
cmds.color(loc, rgb=controllerColor)
cmds.poleVectorConstraint(loc, targetHandle)
def showUI():
global chainMenu_UI
global scaleField_UI
global orientControllerMenu
global constraintCheckBox_UI
global blendCheckbox_UI
global plusOne_UI
global plusThree_UI
global clavCheckbox_UI
if cmds.window("switchModeUI", ex = 1): cmds.deleteUI("switchModeUI")
myWin = cmds.window("switchModeUI", t="IKFK Builder", w=300, h=300, s=1)
mainLayout = cmds.formLayout(nd=50)
# Useful in selecting which chain: Leg or Arm?
chainMenu_UI = cmds.optionMenu("chainMenu_UI", l="Which chain?", cc=visCheck)
cmds.menuItem(l="Leg")
cmds.menuItem(l="Arm")
constraintCheckBox_UI = cmds.checkBox(label = "orientConsts+SDK Mode", v=0,
cc= lambda state: (cmds.checkBox(blendCheckbox_UI, e=1, en=state-1)))
blendCheckbox_UI = cmds.checkBox(label = "blendColor Mode", v=0,
cc= lambda state: (cmds.checkBox(constraintCheckBox_UI, e=1, en=state-1)))
clavCheckbox_UI = cmds.checkBox(l="Clavicle", vis=0)
# Useful in orienting FK controllers as the user wishes. Maybe this can be improved
orientControllerMenu = cmds.optionMenu("UI_orientControllerMenu", l="What's the secondary axis")
cmds.menuItem(l="x")
cmds.menuItem(l="y")
cmds.menuItem(l="z")
# Scale the UI becase you'll never know
scaleControllerText = cmds.text(l="Controllers size")
scaleField_UI = cmds.intField(en=10, v=1, min=1)
plusOne_UI = cmds.button(l="+1", c=addOneUnit)
plusThree_UI = cmds.button(l="+3", c=addThreeUnit)
separator01 = cmds.separator(h=5)
separator02 = cmds.separator(h=5)
#
execButton = cmds.button(l="Duplicate Chain", c=partial(duplicateChain, blendNodeFunc, constraintFunc))
cmds.formLayout(mainLayout, e=1,
attachForm = [
(chainMenu_UI, "left", 8), (chainMenu_UI, "top", 5), (chainMenu_UI, "right", 80),
(clavCheckbox_UI, "top", 7),
(blendCheckbox_UI, "left", 5),
(separator01, "left", 1), (separator01, "right", 2),
#--------------------
(scaleField_UI, "right", 65), (scaleField_UI, "left", 5),
(plusOne_UI, "right", 5),
(plusThree_UI, "right", 5),
(scaleControllerText, "left", 5),
(separator02, "left", 1), (separator02, "right", 2),
#--------------------
(orientControllerMenu, "left", 8), (orientControllerMenu, "top", 5),
#--------------------
(execButton, "bottom", 5), (execButton, "left", 5), (execButton, "right", 5),
],
attachControl = [(clavCheckbox_UI, "left", 10, chainMenu_UI),
(constraintCheckBox_UI, "top", 5, chainMenu_UI),
(blendCheckbox_UI, "top", 5, chainMenu_UI),
(separator01, "top", 5, constraintCheckBox_UI),
(scaleField_UI, "top", 5, separator01),
(scaleControllerText, "top", 8, separator01),
(plusOne_UI, "top", 4, separator01),
(plusThree_UI, "top", 4, separator01),
(separator02, "top", 6, scaleField_UI),
(orientControllerMenu, "top", 6, separator02),
],
attachPosition = [#(clavCheckbox_UI, "right", 0, 10),
(constraintCheckBox_UI, "left", 0, 26), (blendCheckbox_UI, "right", 10, 24),
(scaleControllerText, "left", 5, 0), (scaleField_UI, "left", 110, 0), #(scaleField_UI, "right",0, 40),
(plusOne_UI, "right", 0, 45),
(plusThree_UI, "right", 0, 49)
]
)
cmds.showWindow(myWin)
showUI() | 2.21875 | 2 |
pipng/imagescale-q-m.py | nwiizo/joke | 1 | 3949 | <filename>pipng/imagescale-q-m.py
#!/usr/bin/env python3
# Copyright © 2012-13 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version. It is provided for
# educational purposes and is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import argparse
import collections
import math
import multiprocessing
import os
import sys
import Image
import Qtrac
Result = collections.namedtuple("Result", "copied scaled name")
Summary = collections.namedtuple("Summary", "todo copied scaled canceled")
def main():
size, smooth, source, target, concurrency = handle_commandline()
Qtrac.report("starting...")
summary = scale(size, smooth, source, target, concurrency)
summarize(summary, concurrency)
def handle_commandline():
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--concurrency", type=int,
default=multiprocessing.cpu_count(),
help="specify the concurrency (for debugging and "
"timing) [default: %(default)d]")
parser.add_argument("-s", "--size", default=400, type=int,
help="make a scaled image that fits the given dimension "
"[default: %(default)d]")
parser.add_argument("-S", "--smooth", action="store_true",
help="use smooth scaling (slow but good for text)")
parser.add_argument("source",
help="the directory containing the original .xpm images")
parser.add_argument("target",
help="the directory for the scaled .xpm images")
args = parser.parse_args()
source = os.path.abspath(args.source)
target = os.path.abspath(args.target)
if source == target:
args.error("source and target must be different")
if not os.path.exists(args.target):
os.makedirs(target)
return args.size, args.smooth, source, target, args.concurrency
def scale(size, smooth, source, target, concurrency):
canceled = False
jobs = multiprocessing.JoinableQueue()
results = multiprocessing.Queue()
create_processes(size, smooth, jobs, results, concurrency)
todo = add_jobs(source, target, jobs)
try:
jobs.join()
except KeyboardInterrupt: # May not work on Windows
Qtrac.report("canceling...")
canceled = True
copied = scaled = 0
while not results.empty(): # Safe because all jobs have finished
result = results.get_nowait()
copied += result.copied
scaled += result.scaled
return Summary(todo, copied, scaled, canceled)
def create_processes(size, smooth, jobs, results, concurrency):
for _ in range(concurrency):
process = multiprocessing.Process(target=worker, args=(size,
smooth, jobs, results))
process.daemon = True
process.start()
def worker(size, smooth, jobs, results):
while True:
try:
sourceImage, targetImage = jobs.get()
try:
result = scale_one(size, smooth, sourceImage, targetImage)
Qtrac.report("{} {}".format("copied" if result.copied else
"scaled", os.path.basename(result.name)))
results.put(result)
except Image.Error as err:
Qtrac.report(str(err), True)
finally:
jobs.task_done()
def add_jobs(source, target, jobs):
for todo, name in enumerate(os.listdir(source), start=1):
sourceImage = os.path.join(source, name)
targetImage = os.path.join(target, name)
jobs.put((sourceImage, targetImage))
return todo
def scale_one(size, smooth, sourceImage, targetImage):
oldImage = Image.from_file(sourceImage)
if oldImage.width <= size and oldImage.height <= size:
oldImage.save(targetImage)
return Result(1, 0, targetImage)
else:
if smooth:
scale = min(size / oldImage.width, size / oldImage.height)
newImage = oldImage.scale(scale)
else:
stride = int(math.ceil(max(oldImage.width / size,
oldImage.height / size)))
newImage = oldImage.subsample(stride)
newImage.save(targetImage)
return Result(0, 1, targetImage)
def summarize(summary, concurrency):
message = "copied {} scaled {} ".format(summary.copied, summary.scaled)
difference = summary.todo - (summary.copied + summary.scaled)
if difference:
message += "skipped {} ".format(difference)
message += "using {} processes".format(concurrency)
if summary.canceled:
message += " [canceled]"
Qtrac.report(message)
print()
if __name__ == "__main__":
main()
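
# Example invocation (hypothetical paths; the book's Image and Qtrac helper
# modules must be importable):
#   python3 imagescale-q-m.py --size 400 --smooth ./originals ./scaled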
| 2.328125 | 2 |
test/integration_test.py | NoopDog/azul | 0 | 3950 | <filename>test/integration_test.py
from abc import (
ABCMeta,
)
from concurrent.futures.thread import (
ThreadPoolExecutor,
)
from contextlib import (
contextmanager,
)
import csv
from functools import (
lru_cache,
)
import gzip
from io import (
BytesIO,
TextIOWrapper,
)
import json
import logging
import os
import random
import re
import sys
import threading
import time
from typing import (
AbstractSet,
Any,
Dict,
IO,
List,
Mapping,
Optional,
Sequence,
Tuple,
cast,
)
import unittest
from unittest import (
mock,
)
import uuid
from zipfile import (
ZipFile,
)
import attr
import chalice.cli
from furl import (
furl,
)
from google.cloud import (
storage,
)
from google.oauth2 import (
service_account,
)
from hca.dss import (
DSSClient,
)
from hca.util import (
SwaggerAPIException,
)
from humancellatlas.data.metadata.helpers.dss import (
download_bundle_metadata,
)
from more_itertools import (
first,
one,
)
from openapi_spec_validator import (
validate_spec,
)
import requests
from azul import (
CatalogName,
cached_property,
config,
drs,
)
from azul.azulclient import (
AzulClient,
AzulClientNotificationError,
)
from azul.drs import (
AccessMethod,
)
import azul.dss
from azul.es import (
ESClientFactory,
)
from azul.indexer import (
BundleFQID,
)
from azul.indexer.index_service import (
IndexService,
)
from azul.logging import (
configure_test_logging,
)
from azul.modules import (
load_app_module,
)
from azul.portal_service import (
PortalService,
)
from azul.requests import (
requests_session_with_retry_after,
)
from azul.types import (
JSON,
)
from azul_test_case import (
AlwaysTearDownTestCase,
AzulTestCase,
)
log = logging.getLogger(__name__)
# noinspection PyPep8Naming
def setUpModule():
configure_test_logging(log)
class IntegrationTestCase(AzulTestCase, metaclass=ABCMeta):
bundle_uuid_prefix: str = ''
@cached_property
def azul_client(self):
return AzulClient(prefix=self.bundle_uuid_prefix)
class IndexingIntegrationTest(IntegrationTestCase, AlwaysTearDownTestCase):
prefix_length = 2
max_bundles = 64
min_timeout = 20 * 60
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.bundle_uuid_prefix = ''.join([
str(random.choice('abcdef0123456789'))
for _ in range(cls.prefix_length)
])
def setUp(self) -> None:
super().setUp()
self.pruning_seed = random.randint(0, sys.maxsize)
@contextmanager
def subTest(self, msg: Any = None, **params: Any):
log.info('Beginning sub-test [%s] %r', msg, params)
with super().subTest(msg, **params):
try:
yield
except BaseException:
log.info('Failed sub-test [%s] %r', msg, params)
raise
else:
log.info('Successful sub-test [%s] %r', msg, params)
def test(self):
@attr.s(auto_attribs=True, kw_only=True)
class Catalog:
name: CatalogName
notifications: Mapping[BundleFQID, JSON]
@property
def num_bundles(self):
return len(self.notifications)
@property
def bundle_fqids(self) -> AbstractSet[BundleFQID]:
return self.notifications.keys()
def notifications_with_duplicates(self) -> List[JSON]:
num_duplicates = self.num_bundles // 2
notifications = list(self.notifications.values())
# Index some bundles again to test that we handle duplicate additions.
# Note: random.choices() may pick the same element multiple times so
# some notifications will end up being sent three or more times.
notifications.extend(random.choices(notifications, k=num_duplicates))
return notifications
def _wait_for_indexer():
num_bundles = sum(catalog.num_bundles for catalog in catalogs)
self.azul_client.wait_for_indexer(num_expected_bundles=num_bundles,
min_timeout=self.min_timeout)
# For faster modify-deploy-test cycles, set `delete` to False and run
# test once. Then also set `index` to False. Subsequent runs will use
# catalogs from first run. Don't commit changes to these two lines.
index = True
delete = True
if index:
self._reset_indexer()
catalogs: List[Catalog] = [
Catalog(name=catalog, notifications=self._prepare_notifications(catalog) if index else {})
for catalog in config.integration_test_catalogs
]
if index:
for catalog in catalogs:
log.info('Starting integration test for catalog %r with %i bundles from prefix %r.',
catalog, catalog.num_bundles, self.bundle_uuid_prefix)
self.azul_client.index(catalog=catalog.name,
notifications=catalog.notifications_with_duplicates())
_wait_for_indexer()
for catalog in catalogs:
self._assert_catalog_complete(catalog=catalog.name,
entity_type='files',
bundle_fqids=catalog.bundle_fqids)
for catalog in catalogs:
self._test_manifest(catalog.name)
self._test_dos_and_drs(catalog.name)
self._test_repository_files(catalog.name)
if index and delete:
for catalog in catalogs:
self.azul_client.index(catalog=catalog.name,
notifications=catalog.notifications_with_duplicates(),
delete=True)
_wait_for_indexer()
for catalog in catalogs:
self._assert_catalog_empty(catalog.name)
self._test_other_endpoints()
def _reset_indexer(self):
# While it's OK to erase the integration test catalog, the queues are
# shared by all catalogs and we can't afford to trash them in a stable
# deployment like production.
self.azul_client.reset_indexer(catalogs=config.integration_test_catalogs,
# Can't purge the queues in stable deployment as
# they may contain work for non-IT catalogs.
purge_queues=not config.is_stable_deployment(),
delete_indices=True,
create_indices=True)
def _test_other_endpoints(self):
service_paths = (
'/',
'/openapi',
'/version',
'/index/summary',
'/index/files/order',
)
service_routes = (
(config.service_endpoint(), path)
for path in service_paths
)
health_endpoints = (
config.service_endpoint(),
config.indexer_endpoint()
)
health_paths = (
'', # default keys for lambda
'/', # all keys
'/basic',
'/elasticsearch',
'/queues',
'/progress',
'/api_endpoints',
'/other_lambdas'
)
health_routes = (
(endpoint, '/health' + path)
for endpoint in health_endpoints
for path in health_paths
)
for endpoint, path in (*service_routes, *health_routes):
with self.subTest('other_endpoints', endpoint=endpoint, path=path):
self._check_endpoint(endpoint, path)
def _test_manifest(self, catalog: CatalogName):
for format_, validator, attempts in [
(None, self._check_manifest, 1),
('compact', self._check_manifest, 1),
('full', self._check_manifest, 3),
('terra.bdbag', self._check_terra_bdbag, 1)
]:
with self.subTest('manifest',
catalog=catalog,
format=format_,
attempts=attempts):
assert attempts > 0
params = dict(catalog=catalog)
if format_ is not None:
params['format'] = format_
for attempt in range(attempts):
start = time.time()
response = self._check_endpoint(config.service_endpoint(), '/manifest/files', params)
log.info('Request %i/%i took %.3fs to execute.', attempt + 1, attempts, time.time() - start)
validator(catalog, response)
@lru_cache(maxsize=None)
def _get_one_file_uuid(self, catalog: CatalogName) -> str:
filters = {'fileFormat': {'is': ['fastq.gz', 'fastq']}}
response = self._check_endpoint(endpoint=config.service_endpoint(),
path='/index/files',
query=dict(catalog=catalog,
filters=json.dumps(filters),
size=1,
order='asc',
sort='fileSize'))
hits = json.loads(response)
return one(one(hits['hits'])['files'])['uuid']
def _test_dos_and_drs(self, catalog: CatalogName):
if config.is_dss_enabled(catalog) and config.dss_direct_access:
file_uuid = self._get_one_file_uuid(catalog)
self._test_dos(catalog, file_uuid)
self._test_drs(catalog, file_uuid)
@cached_property
def _requests(self) -> requests.Session:
return requests_session_with_retry_after()
def _check_endpoint(self,
endpoint: str,
path: str,
query: Optional[Mapping[str, Any]] = None) -> bytes:
query = {} if query is None else {k: str(v) for k, v in query.items()}
url = furl(endpoint, path=path, query=query)
return self._get_url_content(url.url)
def _get_url_content(self, url: str) -> bytes:
return self._get_url(url).content
def _get_url(self, url: str, allow_redirects=True) -> requests.Response:
log.info('GET %s', url)
response = self._requests.get(url, allow_redirects=allow_redirects)
expected_statuses = (200,) if allow_redirects else (200, 301, 302)
self._assertResponseStatus(response, expected_statuses)
return response
def _assertResponseStatus(self,
response: requests.Response,
expected_statuses: Tuple[int, ...] = (200,)):
self.assertIn(response.status_code,
expected_statuses,
(response.reason, response.content))
def _check_manifest(self, _catalog: CatalogName, response: bytes):
self.__check_manifest(BytesIO(response), 'bundle_uuid')
def _check_terra_bdbag(self, catalog: CatalogName, response: bytes):
with ZipFile(BytesIO(response)) as zip_fh:
data_path = os.path.join(os.path.dirname(first(zip_fh.namelist())), 'data')
file_path = os.path.join(data_path, 'participants.tsv')
with zip_fh.open(file_path) as file:
rows = self.__check_manifest(file, 'bundle_uuid')
for row in rows:
# Terra doesn't allow colons in this column, but they may
# exist in versions indexed by TDR
self.assertNotIn(':', row['entity:participant_id'])
suffix = '__file_drs_uri'
header, *rows = rows
prefixes = [
c[:-len(suffix)]
for c in header.keys()
if c.endswith(suffix)
]
size, drs_uri, name = min(
(
int(row[prefix + '__file_size']),
row[prefix + suffix],
row[prefix + '__file_name'],
)
for row in rows
for prefix in prefixes
if row[prefix + suffix]
)
log.info('Resolving %r (%r) from catalog %r (%i bytes)',
drs_uri, name, catalog, size)
plugin = self.azul_client.repository_plugin(catalog)
drs_client = plugin.drs_client()
access = drs_client.get_object(drs_uri, access_method=AccessMethod.https)
self.assertIsNone(access.headers)
self.assertEqual('https', furl(access.url).scheme)
# Try HEAD first because it's more efficient, fall back to GET if the
# DRS implementations prohibits it, like Azul's DRS proxy of DSS.
for method in ('HEAD', 'GET'):
log.info('%s %s', method, access.url)
# For DSS, any HTTP client should do but for TDR we need to use an
# authenticated client. TDR does return a Bearer token in the `headers`
# part of the DRS response but we know that this token is the same as
# the one we're making the DRS request with.
response = drs_client.http_client.request(method, access.url)
if response.status != 403:
break
self.assertEqual(200, response.status, response.data)
self.assertEqual(size, int(response.headers['Content-Length']))
def __check_manifest(self, file: IO[bytes], uuid_field_name: str) -> List[Mapping[str, str]]:
text = TextIOWrapper(file)
reader = csv.DictReader(text, delimiter='\t')
rows = list(reader)
log.info(f'Manifest contains {len(rows)} rows.')
self.assertGreater(len(rows), 0)
self.assertIn(uuid_field_name, reader.fieldnames)
bundle_uuid = rows[0][uuid_field_name]
self.assertEqual(bundle_uuid, str(uuid.UUID(bundle_uuid)))
return rows
def _test_repository_files(self, catalog: str):
with self.subTest('repository_files', catalog=catalog):
file_uuid = self._get_one_file_uuid(catalog)
response = self._check_endpoint(endpoint=config.service_endpoint(),
path=f'/fetch/repository/files/{file_uuid}',
query=dict(catalog=catalog))
response = json.loads(response)
while response['Status'] != 302:
self.assertEqual(301, response['Status'])
response = self._get_url(response['Location']).json()
content = self._get_url_content(response['Location'])
self._validate_fastq_content(content)
def _test_drs(self, catalog: CatalogName, file_uuid: str):
repository_plugin = self.azul_client.repository_plugin(catalog)
drs = repository_plugin.drs_client()
for access_method in AccessMethod:
with self.subTest('drs', catalog=catalog, access_method=AccessMethod.https):
log.info('Resolving file %r with DRS using %r', file_uuid, access_method)
drs_uri = f'drs://{config.api_lambda_domain("service")}/{file_uuid}'
access = drs.get_object(drs_uri, access_method=access_method)
self.assertIsNone(access.headers)
if access.method is AccessMethod.https:
content = self._get_url_content(access.url)
elif access.method is AccessMethod.gs:
content = self._get_gs_url_content(access.url)
else:
self.fail(access_method)
self._validate_fastq_content(content)
def _test_dos(self, catalog: CatalogName, file_uuid: str):
with self.subTest('dos', catalog=catalog):
log.info('Resolving file %s with DOS', file_uuid)
response = self._check_endpoint(config.service_endpoint(),
path=drs.dos_object_url_path(file_uuid),
query=dict(catalog=catalog))
json_data = json.loads(response)['data_object']
file_url = first(json_data['urls'])['url']
while True:
response = self._get_url(file_url, allow_redirects=False)
# We handle redirects ourselves so we can log each request
if response.status_code in (301, 302):
file_url = response.headers['Location']
try:
retry_after = response.headers['Retry-After']
except KeyError:
pass
else:
time.sleep(int(retry_after))
else:
break
self._assertResponseStatus(response)
self._validate_fastq_content(response.content)
def _get_gs_url_content(self, url: str) -> bytes:
self.assertTrue(url.startswith('gs://'))
path = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
credentials = service_account.Credentials.from_service_account_file(path)
storage_client = storage.Client(credentials=credentials)
content = BytesIO()
storage_client.download_blob_to_file(url, content)
return content.getvalue()
def _validate_fastq_content(self, content: bytes):
# Check signature of FASTQ file.
with gzip.open(BytesIO(content)) as buf:
fastq = buf.read(1024 * 1024)
lines = fastq.splitlines()
# Assert first character of first and third line of file (see https://en.wikipedia.org/wiki/FASTQ_format).
self.assertTrue(lines[0].startswith(b'@'))
self.assertTrue(lines[2].startswith(b'+'))
def _prepare_notifications(self, catalog: CatalogName) -> Dict[BundleFQID, JSON]:
bundle_fqids = self.azul_client.list_bundles(catalog)
bundle_fqids = self._prune_test_bundles(catalog, bundle_fqids, self.max_bundles)
return {
bundle_fqid: self.azul_client.synthesize_notification(catalog, bundle_fqid)
for bundle_fqid in bundle_fqids
}
def _prune_test_bundles(self,
catalog: CatalogName,
bundle_fqids: Sequence[BundleFQID],
max_bundles: int
) -> List[BundleFQID]:
seed = self.pruning_seed
log.info('Selecting %i bundles with projects, out of %i candidates, using random seed %i.',
max_bundles, len(bundle_fqids), seed)
random_ = random.Random(x=seed)
# The same seed should give same random order so we need to have a
# deterministic order in the input list.
bundle_fqids = sorted(bundle_fqids)
random_.shuffle(bundle_fqids)
# Pick bundles off of the randomly ordered input until we have the
# desired number of bundles with project metadata.
filtered_bundle_fqids = []
for bundle_fqid in bundle_fqids:
if len(filtered_bundle_fqids) < max_bundles:
if self.azul_client.bundle_has_project_json(catalog, bundle_fqid):
filtered_bundle_fqids.append(bundle_fqid)
else:
break
return filtered_bundle_fqids
def _assert_catalog_complete(self,
catalog: CatalogName,
entity_type: str,
bundle_fqids: AbstractSet[BundleFQID]) -> None:
with self.subTest('catalog_complete', catalog=catalog):
expected_fqids = set(self.azul_client.filter_obsolete_bundle_versions(bundle_fqids))
obsolete_fqids = bundle_fqids - expected_fqids
if obsolete_fqids:
log.debug('Ignoring obsolete bundle versions %r', obsolete_fqids)
num_bundles = len(expected_fqids)
timeout = 600
indexed_fqids = set()
log.debug('Expecting bundles %s ', sorted(expected_fqids))
retries = 0
deadline = time.time() + timeout
while True:
hits = self._get_entities(catalog, entity_type)
indexed_fqids.update(
BundleFQID(bundle['bundleUuid'], bundle['bundleVersion'])
for hit in hits
for bundle in hit.get('bundles', [])
)
log.info('Detected %i of %i bundles in %i hits for entity type %s on try #%i.',
len(indexed_fqids), num_bundles, len(hits), entity_type, retries)
if len(indexed_fqids) == num_bundles:
log.info('Found the expected %i bundles.', num_bundles)
break
elif len(indexed_fqids) > num_bundles:
log.error('Found %i bundles, more than the expected %i.',
len(indexed_fqids), num_bundles)
break
elif time.time() > deadline:
log.error('Only found %i of %i bundles in under %i seconds.',
len(indexed_fqids), num_bundles, timeout)
break
else:
retries += 1
time.sleep(5)
self.assertSetEqual(indexed_fqids, expected_fqids)
entity_types = ['files', 'projects', 'samples', 'bundles']
def _assert_catalog_empty(self, catalog: CatalogName):
for entity_type in self.entity_types:
with self.subTest('catalog_empty',
catalog=catalog,
entity_type=entity_type):
hits = self._get_entities(catalog, entity_type)
self.assertEqual([], [hit['entryId'] for hit in hits])
def _get_entities(self, catalog: CatalogName, entity_type):
entities = []
size = 100
params = dict(catalog=catalog,
size=str(size))
url = furl(url=config.service_endpoint(),
path=('index', entity_type),
query_params=params
).url
while True:
response = self._get_url(url)
body = response.json()
hits = body['hits']
entities.extend(hits)
url = body['pagination']['next']
if url is None:
break
return entities
def _assert_indices_exist(self, catalog: CatalogName):
"""
Aside from checking that all indices exist this method also asserts
that we can instantiate a local ES client pointing at a real, remote
ES domain.
"""
es_client = ESClientFactory.get()
service = IndexService()
for index_name in service.index_names(catalog):
self.assertTrue(es_client.indices.exists(index_name))
class AzulClientIntegrationTest(IntegrationTestCase):
def test_azul_client_error_handling(self):
invalid_notification = {}
notifications = [invalid_notification]
self.assertRaises(AzulClientNotificationError,
self.azul_client.index,
first(config.integration_test_catalogs),
notifications)
class PortalRegistrationIntegrationTest(IntegrationTestCase):
# FIXME: Re-enable once overloading of S3 API is resolved
# https://github.com/DataBiosphere/azul/issues/2399
@unittest.skipIf(True or config.is_main_deployment(), 'Test would pollute portal DB')
def test_concurrent_portal_db_crud(self):
"""
Use multithreading to simulate multiple users simultaneously modifying
the portals database.
"""
# Currently takes about 50 seconds and creates a 25 kb db file.
n_threads = 10
n_tasks = n_threads * 10
n_ops = 5
portal_service = PortalService()
entry_format = 'task={};op={}'
def run(thread_count):
for op_count in range(n_ops):
mock_entry = cast(JSON, {
"portal_id": "foo",
"integrations": [
{
"integration_id": "bar",
"entity_type": "project",
"integration_type": "get",
"entity_ids": ["baz"]
}
],
"mock-count": entry_format.format(thread_count, op_count)
})
portal_service._crud(lambda db: list(db) + [mock_entry])
old_db = portal_service.read()
with ThreadPoolExecutor(max_workers=n_threads) as executor:
futures = [executor.submit(run, i) for i in range(n_tasks)]
self.assertTrue(all(f.result() is None for f in futures))
new_db = portal_service.read()
old_entries = [portal for portal in new_db if 'mock-count' not in portal]
self.assertEqual(old_entries, old_db)
mock_counts = [portal['mock-count'] for portal in new_db if 'mock-count' in portal]
self.assertEqual(len(mock_counts), len(set(mock_counts)))
self.assertEqual(set(mock_counts), {entry_format.format(i, j) for i in range(n_tasks) for j in range(n_ops)})
# Reset to pre-test state.
portal_service.overwrite(old_db)
class OpenAPIIntegrationTest(AzulTestCase):
def test_openapi(self):
service = config.service_endpoint()
response = requests.get(service + '/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.headers['content-type'], 'text/html')
self.assertGreater(len(response.content), 0)
# validate OpenAPI spec
response = requests.get(service + '/openapi')
response.raise_for_status()
spec = response.json()
validate_spec(spec)
class DSSIntegrationTest(AzulTestCase):
def test_patched_dss_client(self):
query = {
"query": {
"bool": {
"must_not": [
{
"term": {
"admin_deleted": True
}
}
],
"must": [
{
"exists": {
"field": "files.project_json"
}
},
{
"range": {
"manifest.version": {
"gte": "2019-04-01"
}
}
}
]
}
}
}
self.maxDiff = None
for direct in {config.dss_direct_access, False}:
for replica in 'aws', 'gcp':
if direct:
with self._failing_s3_get_object():
dss_client = azul.dss.direct_access_client()
self._test_dss_client(direct, query, dss_client, replica, fallback=True)
dss_client = azul.dss.direct_access_client()
self._test_dss_client(direct, query, dss_client, replica, fallback=False)
else:
dss_client = azul.dss.client()
self._test_dss_client(direct, query, dss_client, replica, fallback=False)
class SpecialError(Exception):
pass
def _failing_s3_get_object(self):
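        # Patch boto3 client creation so that any S3 get_object call raises
        # SpecialError, forcing the direct-access DSS client onto its fallback
        # path when _test_dss_client is called with fallback=True.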
def make_mock(**kwargs):
original = kwargs['spec']
def mock_boto3_client(service, *args, **kwargs):
if service == 's3':
mock_s3 = mock.MagicMock()
mock_s3.get_object.side_effect = self.SpecialError()
return mock_s3
else:
return original(service, *args, **kwargs)
return mock_boto3_client
return mock.patch('azul.deployment.aws.client', spec=True, new_callable=make_mock)
def _test_dss_client(self, direct: bool, query: JSON, dss_client: DSSClient, replica: str, fallback: bool):
with self.subTest(direct=direct, replica=replica, fallback=fallback):
response = dss_client.post_search(es_query=query, replica=replica, per_page=10)
bundle_uuid, _, bundle_version = response['results'][0]['bundle_fqid'].partition('.')
with mock.patch('azul.dss.logger') as captured_log:
_, manifest, metadata = download_bundle_metadata(client=dss_client,
replica=replica,
uuid=bundle_uuid,
version=bundle_version,
num_workers=config.num_dss_workers)
log.info('Captured log calls: %r', captured_log.mock_calls)
self.assertGreater(len(metadata), 0)
self.assertGreater(set(f['name'] for f in manifest), set(metadata.keys()))
for f in manifest:
self.assertIn('s3_etag', f)
# Extract the log method name and the first three words of log
# message logged. Note that the PyCharm debugger will call
# certain dunder methods on the variable, leading to failed
# assertions.
actual = [(m, ' '.join(re.split(r'[\s,]', a[0])[:3])) for m, a, k in captured_log.mock_calls]
if direct:
if replica == 'aws':
if fallback:
expected = [
('debug', 'Loading bundle %s'),
('debug', 'Loading object %s'),
('warning', 'Error accessing bundle'),
('warning', 'Failed getting bundle')
] + [
('debug', 'Loading file %s'),
('debug', 'Loading object %s'),
('warning', 'Error accessing file'),
('warning', 'Failed getting file')
] * len(metadata)
else:
expected = [
('debug', 'Loading bundle %s'),
('debug', 'Loading object %s')
] + [
('debug', 'Loading file %s'),
('debug', 'Loading object %s'), # file
('debug', 'Loading object %s') # blob
] * len(metadata)
else:
# On `gcp` the precondition check fails right away, preventing any attempts of direct access
expected = [
('warning', 'Failed getting bundle')
] + [
('warning', 'Failed getting file')
] * len(metadata)
else:
expected = []
self.assertSequenceEqual(sorted(expected), sorted(actual))
def test_get_file_fail(self):
for direct in {config.dss_direct_access, False}:
with self.subTest(direct=direct):
dss_client = azul.dss.direct_access_client() if direct else azul.dss.client()
with self.assertRaises(SwaggerAPIException) as e:
dss_client.get_file(uuid='acafefed-beef-4bad-babe-feedfa11afe1',
version='2018-11-19T232756.056947Z',
replica='aws')
self.assertEqual(e.exception.reason, 'not_found')
def test_mini_dss_failures(self):
uuid = 'acafefed-beef-4bad-babe-feedfa11afe1'
version = '2018-11-19T232756.056947Z'
with self._failing_s3_get_object():
mini_dss = azul.dss.MiniDSS(config.dss_endpoint)
with self.assertRaises(self.SpecialError):
mini_dss._get_file_object(uuid, version)
with self.assertRaises(KeyError):
mini_dss._get_blob_key({})
with self.assertRaises(self.SpecialError):
mini_dss._get_blob('/blobs/foo', {'content-type': 'application/json'})
with self.assertRaises(self.SpecialError):
mini_dss.get_bundle(uuid, version, 'aws')
with self.assertRaises(self.SpecialError):
mini_dss.get_file(uuid, version, 'aws')
with self.assertRaises(self.SpecialError):
mini_dss.get_native_file_url(uuid, version, 'aws')
class AzulChaliceLocalIntegrationTest(AzulTestCase):
url = furl(scheme='http', host='127.0.0.1', port=8000)
server = None
server_thread = None
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
app_module = load_app_module('service')
app_dir = os.path.dirname(app_module.__file__)
factory = chalice.cli.factory.CLIFactory(app_dir)
config = factory.create_config_obj()
cls.server = factory.create_local_server(app_obj=app_module.app,
config=config,
host=cls.url.host,
port=cls.url.port)
cls.server_thread = threading.Thread(target=cls.server.serve_forever)
cls.server_thread.start()
@classmethod
def tearDownClass(cls) -> None:
cls.server.shutdown()
cls.server_thread.join()
super().tearDownClass()
def test_local_chalice_health_endpoint(self):
url = self.url.copy().set(path='health').url
response = requests.get(url)
self.assertEqual(200, response.status_code)
catalog = first(config.integration_test_catalogs.keys())
def test_local_chalice_index_endpoints(self):
url = self.url.copy().set(path='index/files',
query=dict(catalog=self.catalog)).url
response = requests.get(url)
self.assertEqual(200, response.status_code)
def test_local_filtered_index_endpoints(self):
filters = {'genusSpecies': {'is': ['Homo sapiens']}}
url = self.url.copy().set(path='index/files',
query=dict(filters=json.dumps(filters),
catalog=self.catalog)).url
response = requests.get(url)
self.assertEqual(200, response.status_code)
| 1.765625 | 2 |
server/openapi_server/controllers/data_transformation_controller.py | mintproject/MINT-ModelCatalogIngestionAPI | 2 | 3951 | import connexion
import six
from openapi_server import query_manager
from openapi_server.utils.vars import DATATRANSFORMATION_TYPE_NAME, DATATRANSFORMATION_TYPE_URI
from openapi_server.models.data_transformation import DataTransformation # noqa: E501
from openapi_server import util
def custom_datasetspecifications_id_datatransformations_get(id, custom_query_name=None, username=None): # noqa: E501
"""Gets a list of data transformations related a dataset
Gets a list of data transformations related a dataset # noqa: E501
:param id: The ID of the dataspecification
:type id: str
:param custom_query_name: Name of the custom query
:type custom_query_name: str
:param username: Username to query
:type username: str
:rtype: List[DataTransformation]
"""
return query_manager.get_resource(id=id,
custom_query_name=custom_query_name,
username=username,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
def datatransformations_get(username=None, label=None, page=None, per_page=None): # noqa: E501
"""List all instances of DataTransformation
Gets a list of all instances of DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501
:param username: Name of the user graph to query
:type username: str
:param label: Filter by label
:type label: str
:param page: Page number
:type page: int
:param per_page: Items per page
:type per_page: int
:rtype: List[DataTransformation]
"""
return query_manager.get_resource(
username=username,
label=label,
page=page,
per_page=per_page,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
def datatransformations_id_delete(id, user=None): # noqa: E501
"""Delete an existing DataTransformation
Delete an existing DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501
:param id: The ID of the DataTransformation to be retrieved
:type id: str
:param user: Username
:type user: str
:rtype: None
"""
return query_manager.delete_resource(id=id,
user=user,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
def datatransformations_id_get(id, username=None): # noqa: E501
"""Get a single DataTransformation by its id
Gets the details of a given DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501
:param id: The ID of the DataTransformation to be retrieved
:type id: str
:param username: Name of the user graph to query
:type username: str
:rtype: DataTransformation
"""
return query_manager.get_resource(id=id,
username=username,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
def datatransformations_id_put(id, user=None, data_transformation=None): # noqa: E501
"""Update an existing DataTransformation
Updates an existing DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501
:param id: The ID of the DataTransformation to be retrieved
:type id: str
:param user: Username
:type user: str
:param data_transformation: An old DataTransformationto be updated
:type data_transformation: dict | bytes
:rtype: DataTransformation
"""
if connexion.request.is_json:
data_transformation = DataTransformation.from_dict(connexion.request.get_json()) # noqa: E501
return query_manager.put_resource(id=id,
user=user,
body=data_transformation,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
def datatransformations_post(user=None, data_transformation=None): # noqa: E501
"""Create one DataTransformation
Create a new instance of DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501
:param user: Username
:type user: str
:param data_transformation: Information about the DataTransformationto be created
:type data_transformation: dict | bytes
:rtype: DataTransformation
"""
if connexion.request.is_json:
data_transformation = DataTransformation.from_dict(connexion.request.get_json()) # noqa: E501
return query_manager.post_resource(
user=user,
body=data_transformation,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
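

# Routing sketch (illustrative; the actual paths come from the OpenAPI spec that
# connexion loads, so the URLs below are assumptions):
#   GET  /datatransformations       -> datatransformations_get
#   POST /datatransformations       -> datatransformations_post
#   GET  /datatransformations/{id}  -> datatransformations_id_get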
| 2.140625 | 2 |
shap/plots/monitoring.py | NunoEdgarGFlowHub/shap | 8 | 3952 | import numpy as np
import scipy
import warnings
try:
import matplotlib.pyplot as pl
import matplotlib
except ImportError:
warnings.warn("matplotlib could not be loaded!")
pass
from . import labels
from . import colors
def truncate_text(text, max_len):
if len(text) > max_len:
return text[:int(max_len/2)-2] + "..." + text[-int(max_len/2)+1:]
else:
return text
def monitoring_plot(ind, shap_values, features, feature_names=None):
""" Create a SHAP monitoring plot.
(Note this function is preliminary and subject to change!!)
A SHAP monitoring plot is meant to display the behavior of a model
over time. Often the shap_values given to this plot explain the loss
of a model, so changes in a feature's impact on the model's loss over
time can help in monitoring the model's performance.
Parameters
----------
ind : int
Index of the feature to plot.
shap_values : numpy.array
Matrix of SHAP values (# samples x # features)
features : numpy.array or pandas.DataFrame
Matrix of feature values (# samples x # features)
feature_names : list
Names of the features (length # features)
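
    Examples
    --------
    Minimal sketch (assumes a fitted tree model and feature matrix ``X``; the
    explainer choice is illustrative, not prescribed by this module)::

        explainer = shap.TreeExplainer(model)
        shap_values = explainer.shap_values(X)
        monitoring_plot(0, shap_values, X)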
"""
if str(type(features)).endswith("'pandas.core.frame.DataFrame'>"):
if feature_names is None:
feature_names = features.columns
features = features.values
pl.figure(figsize=(10,3))
ys = shap_values[:,ind]
xs = np.arange(len(ys))#np.linspace(0, 12*2, len(ys))
pvals = []
inc = 50
for i in range(inc, len(ys)-inc, inc):
#stat, pval = scipy.stats.mannwhitneyu(v[:i], v[i:], alternative="two-sided")
stat, pval = scipy.stats.ttest_ind(ys[:i], ys[i:])
pvals.append(pval)
min_pval = np.min(pvals)
min_pval_ind = np.argmin(pvals)*inc + inc
if min_pval < 0.05 / shap_values.shape[1]:
pl.axvline(min_pval_ind, linestyle="dashed", color="#666666", alpha=0.2)
pl.scatter(xs, ys, s=10, c=features[:,ind], cmap=colors.red_blue)
pl.xlabel("Sample index")
pl.ylabel(truncate_text(feature_names[ind], 30) + "\nSHAP value", size=13)
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('left')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
cb = pl.colorbar()
cb.outline.set_visible(False)
bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.7) * 20)
cb.set_label(truncate_text(feature_names[ind], 30), size=13)
pl.show() | 3.3125 | 3 |
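# A minimal usage sketch with synthetic data; the array shapes and feature names
# below are illustrative assumptions, not values prescribed by the function above.
if __name__ == "__main__":
    rs = np.random.RandomState(0)
    demo_shap_values = rs.randn(500, 3)   # (# samples x # features)
    demo_features = rs.randn(500, 3)      # matching feature matrix
    monitoring_plot(0, demo_shap_values, demo_features,
                    feature_names=["f0", "f1", "f2"])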
mod_core.py | nokia-wroclaw/innovativeproject-dbshepherd | 0 | 3953 | import re
import os
import cmd
import sys
import common
from getpass import getpass
from kp import KeePassError, get_password
from configmanager import ConfigManager, ConfigManagerError
common.init()
class ParseArgsException(Exception):
def __init__(self, msg):
self.msg = msg
class ModuleCore(cmd.Cmd):
def __init__(self, module = ''):
cmd.Cmd.__init__(self)
self.master = None
if module == '#':
self.prompt_sign = '#>'
elif module != '':
self.prompt_sign = '[' + module + ']>'
else:
self.prompt_sign = '->'
#defaults
self.ruler = '-'
#Completions
self.directories = []
self.file_server_database = []
self.file_server = []
self.do_cd('.')
configs = ConfigManager().get_config_list()
for conf in configs:
self.file_server_database.append(conf)
self.file_server.append(conf)
for srv in ConfigManager('config/' + conf + '.yaml').get_all():
self.file_server_database.append(conf + '.' + srv)
self.file_server.append(conf + '.' + srv)
for db in ConfigManager('config/' + conf + '.yaml').get(srv)['databases']:
self.file_server_database.append(conf + '.' + srv + '.' + db)
def precmd(self, line):
if not sys.stdin.isatty():
print(line)
return line
def postcmd(self, stop, line):
if not sys.stdin.isatty():
print("")
return stop
def parse_args(self, string="", n=0, m=0):
list = re.findall('"+.*"+|[a-zA-Z0-9!@#$%^&*()_+-,./<>?]+', string)
arg_counter = len(list);
if (arg_counter >= n and arg_counter <= m) or (arg_counter == n and m == 0) or n == 0:
r_list = []
for l in list:
r_list.append(l.replace('"', ''))
return (r_list, len(list))
else:
raise ParseArgsException("Incorrect number of arguments")
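	# Illustrative behaviour of parse_args (assumed from the regex above):
	#   parse_args('get "my db" now', 1, 3) -> (['get', 'my db', 'now'], 3)
	# an argument count outside the given bounds raises ParseArgsException.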
	# executes the given function (callback) on all databases
	def exec_on_config(self, callback, args, values, view = ''): # values - file.server.database
		if values == '': # run on every config file
			files = ConfigManager().get_config_list() # get the list of configuration files
			# show what the callback will be executed on
print("Exec on:")
for file in files:
print('+-',file)
ans = input("Are you sure? [NO/yes/info]: ")
			if ans == "yes": # run the callback
for file in files:
if view == 'tree': print('+-', file)
try:
servers = ConfigManager("config/" + file + ".yaml").get_all()
for srv in servers:
if view == 'tree': print("| +-", srv)
databases = servers[srv]["databases"]
for db in databases:
if view == 'tree': print("| | +-", db)
if view == 'list': print('[', file, '->', srv, '->', db, ']')
callback(file, srv, db, *args)
except ConfigManagerError as e:
print(e)
			elif ans == "info": # only report what the callback would be executed on
for file in files:
print('+-', file)
servers = ConfigManager("config/" + file + ".yaml").get_all()
for srv in servers:
print('| +-', srv)
databases = servers[srv]["databases"]
for db in databases:
print('| | +-', db)
			else: # if we decide not to execute anything
print("aborted")
		else: # if we specify exactly what to execute on
			val = values.split('.') # split into file_name.server.database
			params = len(val)
			if params == 1: # if only a file name was given, run on all servers and databases stored in it
file = val[0]
try:
servers = ConfigManager("config/" + file + ".yaml").get_all()
for srv in servers:
if view == 'tree': print("+-", srv)
databases = servers[srv]["databases"]
for db in databases:
if view == 'tree': print("| +-", db)
if view == 'list': print('[', srv, '->', db, ']')
callback(file, srv, db, *args)
except ConfigManagerError as e:
print(e)
except KeyError as e:
print(e, "is not exist")
			elif params == 2: # if a file name and a server were given, run on all databases on that server
file = val[0]
try:
servers = ConfigManager("config/" + file + ".yaml").get_all()
srv = val[1]
databases = servers[srv]["databases"]
for db in databases:
if view == 'tree': print("+-", db)
if view == 'list': print('[', db, ']')
callback(file, srv, db, *args)
except ConfigManagerError as e:
print(e)
except KeyError as e:
print(e, "is not exist")
			elif params == 3: # a file name, server and database name were given - run exactly on that database
try:
callback(val[0], val[1], val[2], *args)
except ConfigManagerError as e:
print(e)
except KeyError as e:
print(e, "is not exist")
	# returns a shortened path to the current directory - helper function
def get_shortpath(self):
path = common.get_cdir()
separator = ''
if '\\' in path:
separator = '\\'
else:
separator = '/'
start = path.find(separator)
end = path.rfind(separator, 0, len(path)-1)
if start < end:
return (path[0:start+1] + '...' + path[end:])
else:
return (path)
	# autocompletion for the cd command
def complete_cd(self, text, line, begidx, endidx):
if not text:
completions = self.directories[:]
else:
completions = [f for f in self.directories if f.startswith(text)]
return completions
	# cd command - allows moving between directories
def do_cd(self, args):
"Move to directory"
if args == '':
print(common.get_cdir())
else:
try:
common.chdir(args)
self.prompt = self.get_shortpath() + ' ' + self.prompt_sign
self.directories = []
for name in os.listdir(common.get_cdir()):
if os.path.isdir(os.path.join(common.get_cdir(), name)):
self.directories.append(name)
except FileNotFoundError as e:
print(e)
	# lists all files in the current location
def do_ls(self, args):
"List directory"
for name in os.listdir(common.get_cdir()):
print(name)
	# prints the full path of the current directory
def do_pwd(self, args):
"Print path"
print(common.get_cdir())
	# lets the user decide whether warnings should be displayed
def do_warn(self, args):
"""warn <on/off>"""
try:
(values, values_num) = self.parse_args(args, 0, 1)
if values_num == 1:
if values[0] == 'on':
print('Warnings on')
self.warn = True
elif values[0] == 'off':
print('Warnings off')
self.warn = False
else:
print('Incorrect argument.')
else:
if self.warn == True:
print('Status: on')
else:
print('Status: off')
except ParseArgsException as e:
print(e)
	# sets the master password for keepass
def do_setMaster(self,args):
"Set master password"
		if sys.stdin.isatty(): # when running as an interactive shell
p = getpass('Enter Master Password: ')
else:
p = sys.stdin.readline().rstrip()
self.master = p
def do_exit(self, *args):
return True
def do_EOF(self, line):
return True
def emptyline(self):
return False
	# We need to catch everything possible (missing file, wrong master password, etc.) and raise a single exception
def get_password(self, alias):
keepass_path = common.keepass_path
if self.master == None:
raise KeePassError("Master Password Not Set")
try:
return get_password(keepass_path, self.master, alias)
except KeePassError as e:
raise e
def connect_command_builder(self,connection, perm):
try:
command = connection["adress"] + "_" + connection["user"]+ "_" + \
self.get_password(connection["keepass"]) + "_" + str(connection["sshport"]) + "_" + str(connection["remoteport"]) + "_" + perm
except (KeyError, KeePassError) as e1:
try:
command = connection["adress"] + "_" + connection["user"]+ "_" + \
connection["passwd"] + "_" + str(connection["sshport"]) + "_" + str(connection["remoteport"]) + "_" + perm
return command
except KeyError as e2:
if isinstance(e1,KeePassError):
raise KeePassError("Unable to use Keepass(" + e1.value + ") or Password")
else:
raise KeePassError("Invalid connection in yaml file")
raise KeePassError(e1)
return command | 2.484375 | 2 |
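# The command string assembled by connect_command_builder is underscore-joined as
# "adress_user_password_sshport_remoteport_perm"; with illustrative values this
# looks like "10.0.0.5_admin_s3cret_22_5432_rw".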
code/testbed/pde1/FemPde1.py | nicolai-schwartze/Masterthesis | 1 | 3954 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 13 14:57:32 2020
@author: Nicolai
"""
import sys
import os
importpath = os.path.dirname(os.path.realpath(__file__)) + "/../"
sys.path.append(importpath)
from FemPdeBase import FemPdeBase
import numpy as np
# import from ngsolve
import ngsolve as ngs
from netgen.geom2d import unit_square
import time
import psutil
import gc
class FemPde1(FemPdeBase):
"""
**Implementation of PDE1 of the testbed:**
.. math::
- \Delta u(\mathbf{x}) = -2^{40}y^{10}(1-y)^{10}[90x^8(1-x)^{10}
- 200x^9(1-x)^9 + 90x^{10}(1-x)^8]
-2^{40}x^{10}(1-x)^{10}[90y^8(1-y)^{10}
- 200y^9(1-y)^9 + 90y^{10}(1-y)^8]
\Omega: \mathbf{x} \in [0,1]
u(\mathbf{x})|_{\partial \Omega} = 0
**with the solution:**
.. math::
u(\mathbf{x}) = 2^{40}x^{10}(1-x)^{10}y^{10}(1-y)^{10}
Attributes
----------
max_nodf: int
the maximum number of degrees of freedom that can be created in the
adaptive mesh refinement, standard value is 50000
Methods
-------
solve()
solves the pde by calling ngsolve, provides: static condensation,
adaptive mesh refinement, parallelisation (where possible), sets the
internal variables for evaluating the exact solution and calculating
the distance between exact and approx solution
also sets execution time and memory consumption
Examples
--------
>>> import numpy as np
    >>> fempde1 = FemPde1(True)
    >>> pos = np.array([0.5, 0.5])
    >>> fempde1.exact(pos)
    >>> x -> numpy.ndarray with shape (2,)
        _mesh -> ngs.comp.Mesh
        _ngs_ex -> ngs.fem.CoefficientFunction
        -> try to call solve() first
    >>> fempde1.solve()
    >>> fempde1.exact(pos)
    1.0
    >>> fempde1.approx(pos)
    0.999998924259486
    >>> fempde1.normL2()
    5.853102150391562e-07
    >>> fempde1.exec_time
    3.830256175994873
    >>> fempde1.mem_consumption
76705792
"""
def __init__(self, show_gui, max_ndof=50000):
super().__init__(show_gui)
# init protected
self._pde_string = "-laplacian(u(x)) = -(2^40*y^10*(1-y)^10*(90*x^8*(1-x)^10 - 200*x^9*(1-x)^9 + 90*x^10*(1-x)^8)) -(2^40*x^10*(1-x)^10*(90*y^8*(1-y)^10 - 200*y^9*(1-y)^9 + 90*y^10*(1-y)^8))"
self._ngs_ex = (2**(4*10))*(ngs.x**10)*((1-ngs.x)**10)*(ngs.y**10)*((1-ngs.y)**10)
# init public
self.max_ndof = max_ndof
def solve(self):
# disable garbage collector
# --------------------------------------------------------------------#
gc.disable()
while(gc.isenabled()):
time.sleep(0.1)
# --------------------------------------------------------------------#
# measure how much memory is used until here
process = psutil.Process()
memstart = process.memory_info().vms
# starts timer
tstart = time.time()
if self.show_gui:
import netgen.gui
# create mesh with initial size 0.1
self._mesh = ngs.Mesh(unit_square.GenerateMesh(maxh=0.1))
#create finite element space
self._fes = ngs.H1(self._mesh, order=2, dirichlet=".*", autoupdate=True)
# test and trail function
u = self._fes.TrialFunction()
v = self._fes.TestFunction()
# create bilinear form and enable static condensation
self._a = ngs.BilinearForm(self._fes, condense=True)
self._a += ngs.grad(u)*ngs.grad(v)*ngs.dx
# creat linear functional and apply RHS
self._f = ngs.LinearForm(self._fes)
self._f += ( \
-(2**40*ngs.y**10*(1-ngs.y)**10*(90*ngs.x**8*(1-ngs.x)**10 - 200*ngs.x**9*(1-ngs.x)**9 + 90*ngs.x**10*(1-ngs.x)**8)) \
-(2**40*ngs.x**10*(1-ngs.x)**10*(90*ngs.y**8*(1-ngs.y)**10 - 200*ngs.y**9*(1-ngs.y)**9 + 90*ngs.y**10*(1-ngs.y)**8)) )*v*ngs.dx
        # preconditioner: multigrid - what prerequisites must the problem have?
self._c = ngs.Preconditioner(self._a,"multigrid")
# create grid function that holds the solution and set the boundary to 0
self._gfu = ngs.GridFunction(self._fes, autoupdate=True) # solution
self._g = 0.0
self._gfu.Set(self._g, definedon=self._mesh.Boundaries(".*"))
# draw grid function in gui
if self.show_gui:
ngs.Draw(self._gfu)
# create Hcurl space for flux calculation and estimate error
self._space_flux = ngs.HDiv(self._mesh, order=2, autoupdate=True)
self._gf_flux = ngs.GridFunction(self._space_flux, "flux", autoupdate=True)
        # TaskManager starts threads (the default thread count is the number of cores)
with ngs.TaskManager():
# this is the adaptive loop
while self._fes.ndof < self.max_ndof:
self._solveStep()
self._estimateError()
self._mesh.Refine()
# since the adaptive loop stopped with a mesh refinement, the gfu must be
# calculated one last time
self._solveStep()
if self.show_gui:
ngs.Draw(self._gfu)
# set measured exectution time
self._exec_time = time.time() - tstart
# set measured used memory
memstop = process.memory_info().vms - memstart
self._mem_consumption = memstop
# enable garbage collector
# --------------------------------------------------------------------#
gc.enable()
gc.collect()
# --------------------------------------------------------------------#
if __name__ == "__main__":
fempde1 = FemPde1(True)
print(fempde1.pde_string)
try:
fempde1.exact(np.array([0.5,0.5]))
except:
print("Î error message above")
try:
fempde1.approx(np.array([0.5,0.5]))
except:
print("Î error message above")
fempde1.solve()
print("-------------------------------------")
print("exact(0.5, 0.5) = {}".format(fempde1.exact(np.array([0.5,0.5]))))
print("approx(0.5, 0.5) = {}".format(fempde1.approx(np.array([0.5,0.5]))))
print("L2 norm to the real solution {}".format(fempde1.normL2()))
print("solving took {} sec".format(fempde1.exec_time))
print("solving uses {} Mb".format(fempde1.mem_consumption/1000000))
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = y = np.arange(0, 1.01, 0.01)
X, Y = np.meshgrid(x, y)
zs0 = np.array([fempde1.exact(\
np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))])
Z = zs0.reshape(X.shape)
ax.plot_surface(X, Y, Z, cmap=cm.gnuplot)
fig.tight_layout()
ax.set_xlabel("X0")
ax.set_ylabel("X1")
ax.set_zlabel("f(X0, X1)")
plt.show()
fig.savefig("sol_pde_1.pdf", bbox_inches='tight')
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = y = np.arange(0, 1.01, 0.01)
X, Y = np.meshgrid(x, y)
zs0 = np.array([fempde1.approx(\
np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))])
Z = zs0.reshape(X.shape)
ax.plot_surface(X, Y, Z, cmap=cm.gnuplot)
ax.set_xlabel("X0")
ax.set_ylabel("X1")
ax.set_zlabel("f(X0,X1)")
plt.show()
| 2.578125 | 3 |
cvxpy/cvxcore/tests/python/364A_scripts/power_lines.py | jasondark/cvxpy | 38 | 3955 | <reponame>jasondark/cvxpy<filename>cvxpy/cvxcore/tests/python/364A_scripts/power_lines.py
import numpy as np
from cvxpy import *
import copy
import time
# data for power flow problem
import numpy as np
n = 12 # total number of nodes
m = 18 # number of edges (transmission lines)
k = 4 # number of generators
# transmission line capacities =
TIME = 0
Pmax = np.matrix("""
4.8005,
1.9246,
3.4274,
2.9439,
4.5652,
4.0484,
2.8259,
1.0740,
4.2856,
2.7788,
3.4617,
4.1677,
4.6873,
3.9528,
1.7051,
2.6228,
4.7419,
4.6676,
""")
Gmax = np.matrix("3; 2; 4; 7") # maximum generator power
c = np.matrix("4; 8; 5; 3") # supply generator costs
d = np.matrix("""
1.6154
2.3405
1.0868
1.5293
2.2197
1.0148
1.2083
1.3041
""")# network power demands
# graph incidence matrix
A = np.matrix(""" -1 -1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ;
0 0 -1 -1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ;
0 0 0 0 0 0 0 0 0 -1 -1 0 0 0 0 0 0 -1 ;
0 0 0 0 0 0 -1 0 0 0 0 0 0 0 -1 0 -1 0 ;
1 0 0 0 1 -1 0 0 0 0 0 0 0 0 0 0 0 0 ;
0 1 1 0 -1 0 1 -1 0 0 0 0 0 0 0 0 0 0 ;
0 0 0 1 0 0 0 0 -1 1 0 0 0 0 0 0 0 0 ;
0 0 0 0 0 0 0 1 1 0 0 0 -1 0 1 0 0 1 ;
0 0 0 0 0 0 0 0 0 0 1 -1 0 0 0 0 0 0 ;
0 0 0 0 0 0 0 0 0 0 0 1 1 -1 0 0 0 0 ;
0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 ;
0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 -1 1 0
""")
g = Variable(k)
p = Variable(m)
obj = Minimize(c.T*g)
constraints = [A*p == vstack(-g, d.T), abs(p) <= Pmax.T, 0 <= g, g <= Gmax]
prob = Problem(obj, constraints)
tic = time.time()
val = prob.solve()
toc = time.time()
TIME += toc - tic
ANSWERS.append(val)
pass #print val
pass #print g.value
# N - 1 contingency
g = Variable(k)
flows = []
obj = Minimize(c.T*g)
for i in range(m):
flows.append(Variable(m))
constraints = [g <= Gmax, 0 <= g]
for i in range(m): # N -1 redundancy
constraints.append(A*flows[i] == vstack(-g, d.T))
constraints.append( flows[i][i] == 0 )
constraints.append( abs(flows[i]) <= Pmax.T )
prob = Problem(obj, constraints)
tic = time.time()
val = prob.solve()
toc = time.time()
TIME += toc - tic
ANSWERS.append(val)
pass #print val
pass #print g.value
# the code below is not data for the problem
# it is used only to generate the network graph
# x-y coordinates
# each row is node x-y coordinates
XY = np.matrix("""
1.5 5.2;
4.9 5;
6.9 3.5;
1.9 3.5;
0.2 4.4;
3.2 4.8;
5.9 4.5;
3.9 3.6;
5.9 2.5;
3.9 3;
1.4 2.5;
0 3
""");
# node adjacency matrix
Ad = -A*A.T
Ad = Ad - np.diag(np.diag(Ad))
epsx = 0.05; epsy = 0.15; # text placing offset
# plotting
import matplotlib.pyplot as plt
for i in range(12): #plot edges
for j in range(i):
if Ad[i,j] == 1:
pass #plt.plot((XY[i,0], XY[j,0]), (XY[i,1], XY[j,1]), 'k-')
for j in range(k): #plot nodes
pass #plt.plot(XY[j,0],XY[j,1],'rs', markersize=12)
pass #plt.text(XY[j,0]-epsx,XY[j,1]+epsy,str(j+1))
for j in range(k,n):
pass #plt.plot(XY[j,0],XY[j,1],'ko')
pass #plt.axis('off')
pass #plt.savefig('pwr_net.eps')
if __name__ == '__main__':
pass #plt.show() | 2.234375 | 2 |
LeetCode/530 Minimum Absolute Difference in BST.py | gesuwen/Algorithms | 0 | 3956 | # Binary Search Tree
# Given a binary search tree with non-negative values, find the minimum absolute difference between values of any two nodes.
#
# Example:
#
# Input:
#
# 1
# \
# 3
# /
# 2
#
# Output:
# 1
#
# Explanation:
# The minimum absolute difference is 1, which is the difference between 2 and 1 (or between 2 and 3).
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def getMinimumDifference(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.minDiff = []
def travel(node):
if not node:
return
self.minDiff.append(node.val)
            travel(node.left)
            travel(node.right)
travel(root)
self.minDiff = sorted(self.minDiff)
return min(abs(a - b) for a, b in zip(self.minDiff, self.minDiff[1:]))
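# An alternative sketch: an in-order traversal of a BST yields values in sorted
# order, so the minimum gap can be tracked on the fly without storing every value.
# This variant is illustrative and separate from the solution above.
class SolutionInorder:
    def getMinimumDifference(self, root):
        self.prev = None
        self.best = float('inf')
        def inorder(node):
            if not node:
                return
            inorder(node.left)
            if self.prev is not None:
                self.best = min(self.best, node.val - self.prev)
            self.prev = node.val
            inorder(node.right)
        inorder(root)
        return self.best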
| 4.3125 | 4 |
backends/search/__init__.py | dev-easyshares/company | 0 | 3957 | <gh_stars>0
from company.choices import fr as choices
from mighty.errors import BackendError
import datetime, logging
logger = logging.getLogger(__name__)
CHOICES_APE = dict(choices.APE)
CHOICES_LEGALFORM = dict(choices.LEGALFORM)
CHOICES_SLICE = dict(choices.SLICE_EFFECTIVE)
class SearchBackend:
message = None
since_format = None
iso_format = '%Y-%m-%dT%H:%M:%S.%f%z'
def in_error(self, message):
self.message = message
def backend_error(self, msg):
raise BackendError(msg)
def companies(self, companies, response_code):
if str(response_code)[0] == '4': self.in_error(companies[0]['message'])
elif str(response_code)[0] == '5': self.in_error('error server')
return companies
def get_ape_str(self, code):
try:
return CHOICES_APE[code]
except Exception:
pass
return code
def get_legalform_str(self, code):
try:
code = int(code)
return CHOICES_LEGALFORM[code]
except Exception:
pass
return code
def get_slice_str(self, code):
try:
return CHOICES_SLICE[code]
except Exception:
pass
return code
def lastupdate(self, date):
return datetime.datetime.strptime(date, self.iso_format).strftime("%Y-%m-%d")
def since(self, date):
logger.warning(date)
return datetime.datetime.strptime(date, self.since_format).strftime("%Y-%m-%d")
def get_companies(self, companies, response_code):
raise NotImplementedError("Subclasses should implement get_companies()")
def get_company_by_siren(self, siren):
raise NotImplementedError("Subclasses should implement get_company_by_siren()")
def get_company_by_fulltext(self, fulltext):
raise NotImplementedError("Subclasses should implement get_company_by_fulltext()")
def get_active_companies(self, fulltext):
raise NotImplementedError("Subclasses should implement get_active_companies()") | 2.421875 | 2 |
app/database/database.py | luisornelasch/melp | 0 | 3958 | <reponame>luisornelasch/melp
from sqlalchemy import create_engine, engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import os
SQLALCHEMY_DATABASE_URL = os.getenv("DATABASE_URL").replace("postgres://", "postgresql+psycopg2://")
engine = create_engine(SQLALCHEMY_DATABASE_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
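# A minimal sketch of wiring this dependency into a FastAPI route; the route path
# and response payload are assumptions for illustration only.
from fastapi import Depends, FastAPI
from sqlalchemy.orm import Session

app = FastAPI()

@app.get("/health")
def health(db: Session = Depends(get_db)):
    # the yielded session stays open for the request and is closed afterwards
    return {"db_session_active": db.is_active}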
| 2.265625 | 2 |
ragweed/framework.py | soumyakoduri/ragweed | 0 | 3959 | <gh_stars>0
import sys
import os
import boto
import boto.s3.connection
import json
import inspect
import pickle
import bunch
import yaml
import ConfigParser
import rados
from boto.s3.key import Key
from nose.plugins.attrib import attr
from nose.tools import eq_ as eq
from .reqs import _make_admin_request
ragweed_env = None
suite = None
class RGWConnection:
def __init__(self, access_key, secret_key, host, port, is_secure):
self.host = host
self.port = port
self.is_secure = is_secure
self.conn = boto.connect_s3(
aws_access_key_id = access_key,
aws_secret_access_key = secret_key,
host=host,
port=port,
is_secure=is_secure,
calling_format = boto.s3.connection.OrdinaryCallingFormat(),
)
def create_bucket(self, name):
return self.conn.create_bucket(name)
def get_bucket(self, name, validate=True):
return self.conn.get_bucket(name, validate=validate)
class RGWRESTAdmin:
def __init__(self, connection):
self.conn = connection
def get_resource(self, path, params):
r = _make_admin_request(self.conn, "GET", path, params)
if r.status != 200:
raise boto.exception.S3ResponseError(r.status, r.reason)
return bunch.bunchify(json.loads(r.read()))
def read_meta_key(self, key):
return self.get_resource('/admin/metadata', {'key': key})
def get_bucket_entrypoint(self, bucket_name):
return self.read_meta_key('bucket:' + bucket_name)
def get_bucket_instance_info(self, bucket_name, bucket_id = None):
if not bucket_id:
ep = self.get_bucket_entrypoint(bucket_name)
print ep
bucket_id = ep.data.bucket.bucket_id
result = self.read_meta_key('bucket.instance:' + bucket_name + ":" + bucket_id)
return result.data.bucket_info
def check_bucket_index(self, bucket_name):
return self.get_resource('/admin/bucket',{'index' : None, 'bucket':bucket_name})
def get_obj_layout(self, key):
path = '/' + key.bucket.name + '/' + key.name
params = {'layout': None}
if key.version_id is not None:
params['versionId'] = key.version_id
print params
return self.get_resource(path, params)
def get_zone_params(self):
return self.get_resource('/admin/config', {'type': 'zone'})
class RSuite:
def __init__(self, name, bucket_prefix, zone, suite_step):
self.name = name
self.bucket_prefix = bucket_prefix
self.zone = zone
self.config_bucket = None
self.rtests = []
self.do_preparing = False
self.do_check = False
for step in suite_step.split(','):
if step == 'prepare':
self.do_preparing = True
self.config_bucket = self.zone.create_raw_bucket(self.get_bucket_name('conf'))
if step == 'check' or step == 'test':
self.do_check = True
self.config_bucket = self.zone.get_raw_bucket(self.get_bucket_name('conf'))
def get_bucket_name(self, suffix):
return self.bucket_prefix + '-' + suffix
def register_test(self, t):
self.rtests.append(t)
def write_test_data(self, test):
k = Key(self.config_bucket)
k.key = 'tests/' + test._name
k.set_contents_from_string(test.to_json())
def read_test_data(self, test):
k = Key(self.config_bucket)
k.key = 'tests/' + test._name
s = k.get_contents_as_string()
print 'read_test_data=', s
test.from_json(s)
def is_preparing(self):
return self.do_preparing
def is_checking(self):
return self.do_check
class RTestJSONSerialize(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (list, dict, str, unicode, int, float, bool, type(None))):
            return json.JSONEncoder.default(self, obj)
return {'__pickle': pickle.dumps(obj)}
def rtest_decode_json(d):
if '__pickle' in d:
return pickle.loads(str(d['__pickle']))
return d
class RPlacementRule:
def __init__(self, rule):
r = rule.split('/', 1)
self.placement_id = r[0]
if (len(r) == 2):
self.storage_class=r[1]
else:
self.storage_class = 'STANDARD'
class RBucket:
def __init__(self, zone, bucket, bucket_info):
self.zone = zone
self.bucket = bucket
self.name = bucket.name
self.bucket_info = bucket_info
try:
self.placement_rule = RPlacementRule(self.bucket_info.placement_rule)
self.placement_target = self.zone.get_placement_target(self.bucket_info.placement_rule)
except:
pass
def get_data_pool(self):
try:
# old style explicit pool
explicit_pool = self.bucket_info.bucket.pool
except:
# new style explicit pool
explicit_pool = self.bucket_info.bucket.explicit_placement.data_pool
if explicit_pool is not None and explicit_pool != '':
return explicit_pool
return self.placement_target.get_data_pool(self.placement_rule)
def get_tail_pool(self, obj_layout):
try:
placement_rule = obj_layout.manifest.tail_placement.placement_rule
except:
placement_rule = ''
if placement_rule == '':
try:
# new style
return obj_layout.manifest.tail_placement.bucket.explicit_placement.data_pool
except:
pass
try:
# old style
return obj_layout.manifest.tail_bucket.pool
except:
pass
pr = RPlacementRule(placement_rule)
return self.placement_target.get_data_pool(pr)
class RStorageClasses:
def __init__(self, config):
if hasattr(config, 'storage_classes'):
self.storage_classes = config.storage_classes
else:
try:
self.storage_classes = bunch.bunchify({ 'STANDARD': { 'data_pool': config.data_pool }})
except:
self.storage_classes = None
pass
def get(self, storage_class):
assert(self.storage_classes != None)
try:
if not storage_class:
storage_class = 'STANDARD'
sc = self.storage_classes[storage_class]
except:
eq('could not find storage class ' + storage_class, 0)
return sc
def get_all(self):
for (name, _) in self.storage_classes.iteritems():
yield name
class RPlacementTarget:
def __init__(self, name, config):
self.name = name
self.index_pool = config.index_pool
self.data_extra_pool = config.data_extra_pool
self.storage_classes = RStorageClasses(config)
if not self.data_extra_pool:
            self.data_extra_pool = self.storage_classes.get('STANDARD').data_pool
def get_data_pool(self, placement_rule):
return self.storage_classes.get(placement_rule.storage_class).data_pool
class RZone:
def __init__(self, conn):
self.conn = conn
self.rgw_rest_admin = RGWRESTAdmin(self.conn.system)
self.zone_params = self.rgw_rest_admin.get_zone_params()
self.placement_targets = {}
for e in self.zone_params.placement_pools:
self.placement_targets[e.key] = e.val
print 'zone_params:', self.zone_params
def get_placement_target(self, placement_id):
plid = placement_id
if placement_id is None or placement_id == '':
print 'zone_params=', self.zone_params
plid = self.zone_params.default_placement
try:
return RPlacementTarget(plid, self.placement_targets[plid])
except:
pass
return None
def get_default_placement(self):
        return self.get_placement_target(self.zone_params.default_placement)
def create_bucket(self, name):
bucket = self.create_raw_bucket(name)
bucket_info = self.rgw_rest_admin.get_bucket_instance_info(bucket.name)
print 'bucket_info:', bucket_info
return RBucket(self, bucket, bucket_info)
def get_bucket(self, name):
bucket = self.get_raw_bucket(name)
bucket_info = self.rgw_rest_admin.get_bucket_instance_info(bucket.name)
print 'bucket_info:', bucket_info
return RBucket(self, bucket, bucket_info)
def create_raw_bucket(self, name):
return self.conn.regular.create_bucket(name)
def get_raw_bucket(self, name):
return self.conn.regular.get_bucket(name)
def refresh_rbucket(self, rbucket):
rbucket.bucket = self.get_raw_bucket(rbucket.bucket.name)
rbucket.bucket_info = self.rgw_rest_admin.get_bucket_instance_info(rbucket.bucket.name)
class RTest:
def __init__(self):
self._name = self.__class__.__name__
self.r_buckets = []
self.init()
def create_bucket(self):
bid = len(self.r_buckets) + 1
bucket_name = suite.get_bucket_name(self._name + '.' + str(bid))
bucket_name = bucket_name.replace("_", "-")
rb = suite.zone.create_bucket(bucket_name)
self.r_buckets.append(rb)
return rb
def get_buckets(self):
for rb in self.r_buckets:
yield rb
def init(self):
pass
def prepare(self):
pass
def check(self):
pass
def to_json(self):
attrs = {}
for x in dir(self):
if x.startswith('r_'):
attrs[x] = getattr(self, x)
return json.dumps(attrs, cls=RTestJSONSerialize)
def from_json(self, s):
j = json.loads(s, object_hook=rtest_decode_json)
for e in j:
setattr(self, e, j[e])
def save(self):
suite.write_test_data(self)
def load(self):
suite.read_test_data(self)
for rb in self.r_buckets:
suite.zone.refresh_rbucket(rb)
def test(self):
suite.register_test(self)
if suite.is_preparing():
self.prepare()
self.save()
if suite.is_checking():
self.load()
self.check()
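# A small illustrative RTest subclass, assuming the zone/suite set up by RagweedEnv:
# prepare() writes an object during the 'prepare' stage and check() re-reads and
# verifies it during the 'check' stage.
class r_example_roundtrip(RTest):
    def prepare(self):
        rb = self.create_bucket()
        k = Key(rb.bucket)
        k.key = 'hello'
        k.set_contents_from_string('world')
        self.r_key_name = 'hello'

    def check(self):
        rb = self.r_buckets[0]
        k = Key(rb.bucket)
        k.key = self.r_key_name
        eq(k.get_contents_as_string(), 'world')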
def read_config(fp):
config = bunch.Bunch()
g = yaml.safe_load_all(fp)
for new in g:
print bunch.bunchify(new)
config.update(bunch.bunchify(new))
return config
str_config_opts = [
'user_id',
'access_key',
'secret_key',
'host',
'ceph_conf',
'bucket_prefix',
]
int_config_opts = [
'port',
]
bool_config_opts = [
'is_secure',
]
def dict_find(d, k):
if d.has_key(k):
return d[k]
return None
class RagweedEnv:
def __init__(self):
self.config = bunch.Bunch()
cfg = ConfigParser.RawConfigParser()
try:
path = os.environ['RAGWEED_CONF']
except KeyError:
raise RuntimeError(
'To run tests, point environment '
+ 'variable RAGWEED_CONF to a config file.',
)
with file(path) as f:
cfg.readfp(f)
for section in cfg.sections():
try:
(section_type, name) = section.split(None, 1)
if not self.config.has_key(section_type):
self.config[section_type] = bunch.Bunch()
self.config[section_type][name] = bunch.Bunch()
cur = self.config[section_type]
except ValueError:
section_type = ''
name = section
self.config[name] = bunch.Bunch()
cur = self.config
cur[name] = bunch.Bunch()
for var in str_config_opts:
try:
cur[name][var] = cfg.get(section, var)
except ConfigParser.NoOptionError:
pass
for var in int_config_opts:
try:
cur[name][var] = cfg.getint(section, var)
except ConfigParser.NoOptionError:
pass
for var in bool_config_opts:
try:
cur[name][var] = cfg.getboolean(section, var)
except ConfigParser.NoOptionError:
pass
print json.dumps(self.config)
rgw_conf = self.config.rgw
try:
self.bucket_prefix = rgw_conf.bucket_prefix
except:
self.bucket_prefix = 'ragweed'
conn = bunch.Bunch()
for (k, u) in self.config.user.iteritems():
conn[k] = RGWConnection(u.access_key, u.secret_key, rgw_conf.host, dict_find(rgw_conf, 'port'), dict_find(rgw_conf, 'is_secure'))
self.zone = RZone(conn)
self.suite = RSuite('ragweed', self.bucket_prefix, self.zone, os.environ['RAGWEED_STAGES'])
try:
self.ceph_conf = self.config.rados.ceph_conf
except:
raise RuntimeError(
'ceph_conf is missing under the [rados] section in ' + os.environ['RAGWEED_CONF']
)
self.rados = rados.Rados(conffile=self.ceph_conf)
self.rados.connect()
pools = self.rados.list_pools()
for pool in pools:
print "rados pool>", pool
def setup_module():
global ragweed_env
global suite
ragweed_env = RagweedEnv()
suite = ragweed_env.suite
| 1.867188 | 2 |
exposing/_version.py | w4k2/exposing | 0 | 3960 | <filename>exposing/_version.py
"""
``exposing``
"""
__version__ = '0.2.2'
| 1.164063 | 1 |
opensteer/teams/admin.py | reckonsys/opensteer | 5 | 3961 | <filename>opensteer/teams/admin.py
from django.contrib import admin
from opensteer.teams.models import Team, Member
admin.site.register(Team)
admin.site.register(Member)
| 1.515625 | 2 |
tests/test_utils.py | ozora-ogino/tflite-human-tracking | 3 | 3962 | <gh_stars>1-10
from src.utils import check_direction, direction_config, is_intersect
# pylint:disable=unexpected-keyword-arg
class TestCheckDirection:
def test_true(self):
"""Test true case."""
directions = {
"right": {"prev_center": [0, 0], "current_center": [20, 0], "expect": True},
"left": {"prev_center": [10, 0], "current_center": [0, 0], "expect": True},
"top": {"prev_center": [0, 10], "current_center": [0, 0], "expect": True},
"bottom": {"prev_center": [0, 0], "current_center": [0, 10], "expect": True},
}
for direction_str, args in directions.items():
expect = args.pop("expect")
result = check_direction(**args, direction=direction_config[direction_str])
assert result == expect
def test_false(self):
"""Test false case."""
directions = {
"right": {"prev_center": [0, 0], "current_center": [0, 0], "expect": False},
# This is right.
"left": {"prev_center": [0, 0], "current_center": [10, 0], "expect": False},
# This is bottom.
"top": {"prev_center": [0, 0], "current_center": [0, 10], "expect": False},
# This is top.
"bottom": {"prev_center": [0, 10], "current_center": [0, 0], "expect": False},
}
for direction_str, args in directions.items():
expect = args.pop("expect")
result = check_direction(**args, direction=direction_config[direction_str])
assert result == expect
def test_direction_none(self):
"""Check if always return true when direction is set None."""
args = [
{"prev_center": [0, 0], "current_center": [0, 0]}, # No movement.
{"prev_center": [0, 0], "current_center": [10, 0]}, # Right
{"prev_center": [10, 0], "current_center": [0, 0]}, # Left.
{"prev_center": [0, 10], "current_center": [0, 0]}, # Top.
{"prev_center": [0, 0], "current_center": [0, 10]}, # Bottom.
]
for arg in args:
# If the direction is None, always return True.
result = check_direction(**arg, direction=None)
assert result == True
class TestIsIntersect:
def test_true(self):
"""Test true case."""
args = {"A": [10, 0], "B": [10, 30], "C": [0, 10], "D": [30, 0]}
result = is_intersect(**args)
assert result == True
def test_false(self):
"""Test false case."""
args = {"A": [10, 0], "B": [10, 30], "C": [0, 10], "D": [0, 0]}
result = is_intersect(**args)
assert result == False
| 2.75 | 3 |
scpp_base/scpp_base/src/db/__init__.py | scorelab/social-currency | 4 | 3963 | _all__ = ["db_handler","coin_value_handler"] | 1.007813 | 1 |
test/test_parameter_set.py | crest-cassia/caravan_search_engine | 0 | 3964 | <reponame>crest-cassia/caravan_search_engine
import unittest
from caravan.tables import Tables
from caravan.parameter_set import ParameterSet
class ParameterSetTest(unittest.TestCase):
def setUp(self):
self.t = Tables.get()
self.t.clear()
def test_ps(self):
ps = ParameterSet(500, (2, 3, 4, 5))
self.assertEqual(ps.id, 500)
self.assertEqual(ps.params, (2, 3, 4, 5))
self.assertEqual(ps.run_ids, [])
def test_find_or_create(self):
ps = ParameterSet.find_or_create(0, 1, 2, 3)
self.assertEqual(ps.id, 0)
self.assertEqual(ps.params, (0, 1, 2, 3))
self.assertEqual(len(ParameterSet.all()), 1)
ps2 = ParameterSet.find_or_create(3, 4, 5, 6)
self.assertEqual(len(ParameterSet.all()), 2)
    def test_find_or_create_existing(self):
ps1 = ParameterSet.find_or_create(0, 1, 2, 3)
ps2 = ParameterSet.find_or_create(0, 1, 2, 3)
self.assertEqual(ps1, ps2)
self.assertEqual(len(ParameterSet.all()), 1)
def test_create_runs(self):
ps = ParameterSet.find_or_create(0, 1, 2, 3)
runs = ps.create_runs_upto(3)
self.assertEqual([r.id for r in runs], [0, 1, 2])
self.assertEqual([r.seed for r in runs], [0, 1, 2])
ps2 = ParameterSet.find_or_create(0, 1, 3, 4)
runs = ps2.create_runs_upto(3)
self.assertEqual([r.id for r in runs], [3, 4, 5])
self.assertEqual([r.seed for r in runs], [0, 1, 2])
def test_is_finished(self):
ps = ParameterSet.find_or_create(0, 1, 2, 3)
self.assertEqual(ps.is_finished(), True)
runs = ps.create_runs_upto(1)
self.assertFalse(ps.is_finished())
self.assertEqual(len(ps.finished_runs()), 0)
runs[0].store_result([1.0, 2.0, 3.0], 0, 3, 111, 222)
self.assertTrue(ps.is_finished())
self.assertEqual(len(ps.finished_runs()), 1)
def test_average_results(self):
ps = ParameterSet.find_or_create(0, 1, 2, 3)
runs = ps.create_runs_upto(3)
self.assertEqual(ps.average_results(), ())
for (i, r) in enumerate(runs):
r.store_result([1.0 + i, 2.0 + i, 3.0 + 1], 0, 3, 111, 222)
self.assertEqual(ps.average_results(), (2.0, 3.0, 4.0))
def test_all(self):
ps = ParameterSet.find_or_create(0, 1, 2, 3)
self.assertEqual(ParameterSet.all(), [ps])
ps2 = ParameterSet.find_or_create(0, 1, 2, 4)
self.assertEqual(ParameterSet.all(), [ps, ps2])
self.assertEqual(len(ParameterSet.all()), 2)
def test_find(self):
ps = ParameterSet.find_or_create(0, 1, 2, 3)
ps2 = ParameterSet.find_or_create(0, 1, 2, 4)
pid = ps2.id
self.assertEqual(pid, 1)
self.assertEqual(ParameterSet.find(1), ps2)
if __name__ == '__main__':
unittest.main()
| 2.390625 | 2 |
tests/unit/transport/s3/test_settings.py | TinkoffCreditSystems/overhave | 33 | 3965 | import pytest
from pydantic import ValidationError
from overhave.transport import OverhaveS3ManagerSettings
class TestS3ManagerSettings:
""" Unit tests for :class:`OverhaveS3ManagerSettings`. """
@pytest.mark.parametrize("test_s3_enabled", [False])
def test_disabled(self, test_s3_enabled: bool) -> None:
settings = OverhaveS3ManagerSettings(enabled=test_s3_enabled)
assert not settings.enabled
assert not settings.url
assert not settings.access_key
assert not settings.secret_key
@pytest.mark.parametrize("test_s3_enabled", [True])
def test_empty_enabled(self, test_s3_enabled: bool) -> None:
with pytest.raises(ValidationError):
OverhaveS3ManagerSettings(enabled=test_s3_enabled)
@pytest.mark.parametrize("test_s3_autocreate_buckets", [False, True], indirect=True)
@pytest.mark.parametrize("test_s3_enabled", [True], indirect=True)
def test_correct_enabled(
self,
test_s3_enabled: bool,
test_s3_autocreate_buckets: bool,
test_s3_manager_settings: OverhaveS3ManagerSettings,
) -> None:
assert test_s3_manager_settings.enabled == test_s3_enabled
assert test_s3_manager_settings.url
assert test_s3_manager_settings.access_key
assert test_s3_manager_settings.secret_key
assert test_s3_manager_settings.verify
assert test_s3_manager_settings.autocreate_buckets == test_s3_autocreate_buckets
| 2.359375 | 2 |
examples/elCmd.py | mark-nicholson/python-editline | 4 | 3966 | <reponame>mark-nicholson/python-editline
"""A generic class to build line-oriented command interpreters.
Interpreters constructed with this class obey the following conventions:
1. End of file on input is processed as the command 'EOF'.
2. A command is parsed out of each line by collecting the prefix composed
of characters in the identchars member.
3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
is passed a single argument consisting of the remainder of the line.
4. Typing an empty line repeats the last command. (Actually, it calls the
method `emptyline', which may be overridden in a subclass.)
5. There is a predefined `help' method. Given an argument `topic', it
calls the command `help_topic'. With no arguments, it lists all topics
with defined help_ functions, broken into up to three topics; documented
commands, miscellaneous help topics, and undocumented commands.
6. The command '?' is a synonym for `help'. The command '!' is a synonym
for `shell', if a do_shell method exists.
7. If completion is enabled, completing commands will be done automatically,
and completing of commands args is done by calling complete_foo() with
arguments text, line, begidx, endidx. text is string we are matching
against, all returned matches must begin with it. line is the current
input line (lstripped), begidx and endidx are the beginning and end
indexes of the text being matched, which could be used to provide
different completion depending upon which position the argument is in.
The `default' method may be overridden to intercept commands for which there
is no do_ method.
The `completedefault' method may be overridden to intercept completions for
commands that have no complete_ method.
The data member `self.ruler' sets the character used to draw separator lines
in the help messages. If empty, no ruler line is drawn. It defaults to "=".
If the value of `self.intro' is nonempty when the cmdloop method is called,
it is printed out on interpreter startup. This value may be overridden
via an optional argument to the cmdloop() method.
The data members `self.doc_header', `self.misc_header', and
`self.undoc_header' set the headers used for the help function's
listings of documented functions, miscellaneous topics, and undocumented
functions respectively.
"""
import string, sys
__all__ = ["Cmd"]
PROMPT = '(Cmd) '
IDENTCHARS = string.ascii_letters + string.digits + '_'
class ElCmd:
"""A simple framework for writing line-oriented command interpreters.
These are often useful for test harnesses, administrative tools, and
prototypes that will later be wrapped in a more sophisticated interface.
A Cmd instance or subclass instance is a line-oriented interpreter
framework. There is no good reason to instantiate Cmd itself; rather,
it's useful as a superclass of an interpreter class you define yourself
in order to inherit Cmd's methods and encapsulate action methods.
"""
prompt = PROMPT
identchars = IDENTCHARS
ruler = '='
lastcmd = ''
intro = None
doc_leader = ""
doc_header = "Documented commands (type help <topic>):"
misc_header = "Miscellaneous help topics:"
undoc_header = "Undocumented commands:"
nohelp = "*** No help on %s"
use_rawinput = False
def __init__(self, completekey='tab', stdin=None, stdout=None):
"""Instantiate a line-oriented interpreter framework.
The optional argument 'completekey' is the readline name of a
completion key; it defaults to the Tab key. If completekey is
not None and the readline module is available, command completion
is done automatically. The optional arguments stdin and stdout
specify alternate input and output file objects; if not specified,
sys.stdin and sys.stdout are used.
"""
if stdin is not None:
self.stdin = stdin
else:
self.stdin = sys.stdin
if stdout is not None:
self.stdout = stdout
else:
self.stdout = sys.stdout
self.cmdqueue = []
self.completekey = completekey
if not self.use_rawinput and self.completekey:
try:
import editline
self.editline = editline.editline("CMD",
self.stdin, self.stdout, sys.stderr)
self.editline.rl_completer = self.complete
except ImportError:
print("Failed to import editline")
pass
def cmdloop(self, intro=None):
"""Repeatedly issue a prompt, accept input, parse an initial prefix
off the received input, and dispatch to action methods, passing them
the remainder of the line as argument.
"""
self.preloop()
try:
if intro is not None:
self.intro = intro
if self.intro:
self.stdout.write(str(self.intro)+"\n")
stop = None
while not stop:
if self.cmdqueue:
line = self.cmdqueue.pop(0)
else:
if self.use_rawinput:
try:
line = input(self.prompt)
except EOFError:
line = 'EOF'
else:
self.editline.prompt = self.prompt
line = self.editline.readline()
if not len(line):
line = 'EOF'
else:
line = line.rstrip('\r\n')
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
self.postloop()
finally:
pass
def precmd(self, line):
"""Hook method executed just before the command line is
interpreted, but after the input prompt is generated and issued.
"""
return line
def postcmd(self, stop, line):
"""Hook method executed just after a command dispatch is finished."""
return stop
def preloop(self):
"""Hook method executed once when the cmdloop() method is called."""
pass
def postloop(self):
"""Hook method executed once when the cmdloop() method is about to
return.
"""
pass
def parseline(self, line):
"""Parse the line into a command name and a string containing
the arguments. Returns a tuple containing (command, args, line).
'command' and 'args' may be None if the line couldn't be parsed.
"""
line = line.strip()
if not line:
return None, None, line
elif line[0] == '?':
line = 'help ' + line[1:]
elif line[0] == '!':
if hasattr(self, 'do_shell'):
line = 'shell ' + line[1:]
else:
return None, None, line
i, n = 0, len(line)
while i < n and line[i] in self.identchars: i = i+1
cmd, arg = line[:i], line[i:].strip()
return cmd, arg, line
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
This may be overridden, but should not normally need to be;
see the precmd() and postcmd() methods for useful execution hooks.
The return value is a flag indicating whether interpretation of
commands by the interpreter should stop.
"""
cmd, arg, line = self.parseline(line)
if not line:
return self.emptyline()
if cmd is None:
return self.default(line)
self.lastcmd = line
if line == 'EOF' :
print("")
print("Bye")
sys.exit(0)
if cmd == '':
return self.default(line)
else:
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
return self.default(line)
return func(arg)
def emptyline(self):
"""Called when an empty line is entered in response to the prompt.
If this method is not overridden, it repeats the last nonempty
command entered.
"""
if self.lastcmd:
return self.onecmd(self.lastcmd)
def default(self, line):
"""Called on an input line when the command prefix is not recognized.
If this method is not overridden, it prints an error message and
returns.
"""
self.stdout.write('*** Unknown syntax: %s (%d)\n' % (line,len(line)))
def completedefault(self, *ignored):
"""Method called to complete an input line when no command-specific
complete_*() method is available.
By default, it returns an empty list.
"""
return []
def completenames(self, text, *ignored):
dotext = 'do_'+text
return [a[3:] for a in self.get_names() if a.startswith(dotext)]
def complete(self, text, state):
"""Return the next possible completion for 'text'.
If a command has not been entered, then complete against command list.
Otherwise try to call complete_<command> to get list of completions.
"""
if state == 0:
origline = self.editline.get_line_buffer()
line = origline.lstrip()
stripped = len(origline) - len(line)
begidx = self.editline.get_begidx() - stripped
endidx = self.editline.get_endidx() - stripped
if begidx>0:
cmd, args, foo = self.parseline(line)
if cmd == '':
compfunc = self.completedefault
else:
try:
compfunc = getattr(self, 'complete_' + cmd)
except AttributeError:
compfunc = self.completedefault
else:
compfunc = self.completenames
self.completion_matches = compfunc(text, line, begidx, endidx)
try:
return self.completion_matches[state]
except IndexError:
return None
def get_names(self):
# This method used to pull in base class attributes
# at a time dir() didn't do it yet.
return dir(self.__class__)
def complete_help(self, *args):
commands = set(self.completenames(*args))
topics = set(a[5:] for a in self.get_names()
if a.startswith('help_' + args[0]))
return list(commands | topics)
def do_help(self, arg):
'List available commands with "help" or detailed help with "help cmd".'
if arg:
# XXX check arg syntax
try:
func = getattr(self, 'help_' + arg)
except AttributeError:
try:
doc=getattr(self, 'do_' + arg).__doc__
if doc:
self.stdout.write("%s\n"%str(doc))
return
except AttributeError:
pass
self.stdout.write("%s\n"%str(self.nohelp % (arg,)))
return
func()
else:
names = self.get_names()
cmds_doc = []
cmds_undoc = []
help = {}
for name in names:
if name[:5] == 'help_':
help[name[5:]]=1
names.sort()
# There can be duplicates if routines overridden
prevname = ''
for name in names:
if name[:3] == 'do_':
if name == prevname:
continue
prevname = name
cmd=name[3:]
if cmd in help:
cmds_doc.append(cmd)
del help[cmd]
elif getattr(self, name).__doc__:
cmds_doc.append(cmd)
else:
cmds_undoc.append(cmd)
self.stdout.write("%s\n"%str(self.doc_leader))
self.print_topics(self.doc_header, cmds_doc, 15,80)
self.print_topics(self.misc_header, list(help.keys()),15,80)
self.print_topics(self.undoc_header, cmds_undoc, 15,80)
def print_topics(self, header, cmds, cmdlen, maxcol):
if cmds:
self.stdout.write("%s\n"%str(header))
if self.ruler:
self.stdout.write("%s\n"%str(self.ruler * len(header)))
self.columnize(cmds, maxcol-1)
self.stdout.write("\n")
def columnize(self, list, displaywidth=80):
"""Display a list of strings as a compact set of columns.
Each column is only as wide as necessary.
Columns are separated by two spaces (one was not legible enough).
"""
if not list:
self.stdout.write("<empty>\n")
return
nonstrings = [i for i in range(len(list))
if not isinstance(list[i], str)]
if nonstrings:
raise TypeError("list[i] not a string for i in %s"
% ", ".join(map(str, nonstrings)))
size = len(list)
if size == 1:
self.stdout.write('%s\n'%str(list[0]))
return
# Try every row count from 1 upwards
for nrows in range(1, len(list)):
ncols = (size+nrows-1) // nrows
colwidths = []
totwidth = -2
for col in range(ncols):
colwidth = 0
for row in range(nrows):
i = row + nrows*col
if i >= size:
break
x = list[i]
colwidth = max(colwidth, len(x))
colwidths.append(colwidth)
totwidth += colwidth + 2
if totwidth > displaywidth:
break
if totwidth <= displaywidth:
break
else:
nrows = len(list)
ncols = 1
colwidths = [0]
for row in range(nrows):
texts = []
for col in range(ncols):
i = row + nrows*col
if i >= size:
x = ""
else:
x = list[i]
texts.append(x)
while texts and not texts[-1]:
del texts[-1]
for col in range(len(texts)):
texts[col] = texts[col].ljust(colwidths[col])
self.stdout.write("%s\n"%str(" ".join(texts)))
class MyCmd(ElCmd,object):
def do_bleep(self, s):
print("bleep!")
def do_blob(self, s):
print("blob!")
def do_bob(self, s):
print("bob!")
def do_mods(self, s):
print(sys.modules.keys())
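    # A hedged sketch of argument completion (convention 7 in the module docstring):
    # complete_<cmd>(text, line, begidx, endidx) returns the candidate strings; the
    # choices listed here are purely illustrative.
    def complete_bleep(self, text, line, begidx, endidx):
        choices = ['loud', 'quiet', 'twice']
        return [c for c in choices if c.startswith(text)]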
if __name__ == '__main__':
mc = MyCmd()
mc.cmdloop()
| 2.828125 | 3 |
ecommerce-website/orders/admin.py | Shanu85/FCS_Project | 0 | 3967 | <reponame>Shanu85/FCS_Project
from django.contrib import admin
from .models import Order, receiverInfo
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
date_hierarchy = 'created_at'
list_display = ('user', 'code', 'total_price', 'shipping_status', 'created_at')
list_display_links = ('user',)
list_editable = ('shipping_status',)
list_filter = ('shipping_status', 'payment_mode', 'created_at')
list_per_page = 25
search_fields = ('user__phone_number', 'user__email', 'code')
readonly_fields = ('user','cart', 'receiver', 'payment_mode', 'shipping_status', 'code')
def total_price(self, obj):
return obj.cart.total_price
def has_add_permission(self, request):
return False
@admin.register(receiverInfo)
class receiverInfoAdmin(admin.ModelAdmin):
date_hierarchy = 'created_at'
list_display = ('id', 'full_name', 'phone_number', 'address', 'created_at')
list_display_links = ('id', 'full_name')
list_filter = ('created_at',)
list_per_page = 25
search_fields = ('full_name', 'phone_number', 'address')
readonly_fields = ('full_name', 'phone_number', 'address')
| 2.046875 | 2 |
data_structures/linked_lists/ll-kth-from-end/ll_kth.py | jeremyCtown/data-structures-and-algorithms | 0 | 3968 | <gh_stars>0
from node import Node
class LinkedList:
"""
initializes LL
"""
def __init__(self, iter=[]):
self.head = None
self._size = 0
for item in reversed(iter):
self.insert(item)
def __repr__(self):
"""
assumes head will have a val and we will need this
"""
return '<head> => {}'.format(self.head.val)
    def __str__(self):
        """ this is where we can see the list"""
        return 'LinkedList: head={}, size={}'.format(
            self.head.val if self.head else None, self._size)
def __len__(self):
"""
returns size of LL
"""
return self._size
def insert(self, val):
"""
basic insertion method for adding to front of LL
"""
self.head = Node(val, self.head)
self._size += 1
    def append(self, val):
        """
        appends node to the end of the LL
        """
        new_node = Node(val, None)
        if self.head is None:
            self.head = new_node
            self._size += 1
            return new_node
        current = self.head
        while current._next is not None:
            current = current._next
        current._next = new_node
        self._size += 1
        return new_node
def insert_before(self, val, new_val):
"""
inserts node before node at val
"""
new_node = Node(new_val)
        current = self.head
while current._next is not None:
if current._next.val == val:
new_node._next = current._next
current._next = new_node
self._size += 1
break
current = current._next
if current._next is None:
raise ValueError("Data not in list")
def insert_after(self, val, new_val):
"""
inserts node after node at val
"""
new_node = Node(new_val)
        current = self.head
        while current._next is not None:
            if current.val == val:
                new_node._next = current._next
current._next = new_node
self._size += 1
break
current = current._next
if current._next is None:
raise ValueError("Data not in list")
def kth_from_end(self, k):
"""
returns node at kth from end
"""
if self._size - k < 0:
raise AttributeError
current = self.head
for i in range(self._size - k - 1):
current = current._next
return current
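# A small usage sketch (assumes node.Node exposes val/_next as used above).
if __name__ == '__main__':
    ll = LinkedList([1, 2, 3, 4, 5])
    print(ll.kth_from_end(1).val)   # 4: the node one position from the end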
| 3.859375 | 4 |
MuonAnalysis/MomentumScaleCalibration/test/LikelihoodPdfDBReader_cfg.py | ckamtsikis/cmssw | 852 | 3969 | import FWCore.ParameterSet.Config as cms
process = cms.Process("LIKELIHOODPDFDBREADER")
# process.load("MuonAnalysis.MomentumScaleCalibration.local_CSA08_Y_cff")
process.source = cms.Source("EmptySource",
numberEventsInRun = cms.untracked.uint32(1),
firstRun = cms.untracked.uint32(1)
)
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
process.load("Geometry.CommonTopologies.globalTrackingGeometry_cfi")
process.load("RecoMuon.DetLayers.muonDetLayerGeometry_cfi")
process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
process.load("RecoMuon.TrackingTools.MuonServiceProxy_cff")
# process.source = cms.Source("PoolSource",
# fileNames = cms.untracked.vstring()
# )
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.poolDBESSource = cms.ESSource("PoolDBESSource",
BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'),
DBParameters = cms.PSet(
messageLevel = cms.untracked.int32(2),
authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb')
),
timetype = cms.untracked.string('runnumber'),
connect = cms.string('sqlite_file:dummy2.db'),
toGet = cms.VPSet(cms.PSet(
record = cms.string('MuScleFitLikelihoodPdfRcd'),
tag = cms.string('MuScleFitLikelihoodPdf_2_1_12')
))
)
process.LikelihoodPdfDBReaderModule = cms.EDAnalyzer(
"LikelihoodPdfDBReader"
)
process.p1 = cms.Path(process.LikelihoodPdfDBReaderModule)
| 1.320313 | 1 |
fast_fine_tuna/fast_fine_tuna.py | vinid/fast_fine_tuna | 0 | 3970 | <reponame>vinid/fast_fine_tuna
from transformers import AutoModel, AutoModelForSequenceClassification, AutoTokenizer, AutoConfig
from sklearn.model_selection import StratifiedKFold
import numpy as np
import torch
from fast_fine_tuna.dataset import MainDatasetDouble, MainDataset
from transformers import AdamW
from torch.utils.data import DataLoader
import os
from tqdm import tqdm
from fast_fine_tuna.models import MiniModel
from torch import nn
class FastFineTuna:
def __init__(self, model_name, tokenizer_name):
self.model_name = model_name
self.tokenizer_name = tokenizer_name
self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
def cross_validate_fit(self, texts, labels, splits=5, epochs=5, batch_size=16, learning_rate=5e-5):
config = AutoConfig.from_pretrained(self.model_name, num_labels=len(set(labels)),
finetuning_task="custom")
tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name)
texts = np.array(texts)
labels = np.array(labels)
skf = StratifiedKFold(n_splits=splits)
original = []
predicted = []
for train_index, test_index in skf.split(texts, labels):
model = AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config)
X_train, X_test = texts[train_index].tolist(), texts[test_index].tolist()
y_train, y_test = labels[train_index].tolist(), labels[test_index].tolist()
# not the smartest way to do this, but faster to code up
tokenized_train = tokenizer(X_train, truncation=True, padding=True)
tokenized_test = tokenizer(X_test, truncation=True, padding=True)
train_dataset = MainDataset(tokenized_train, y_train)
test_dataset = MainDataset(tokenized_test, y_test)
model.to(self.device)
model.train()
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
optim = AdamW(model.parameters(), lr=learning_rate)
pbar = tqdm(total=epochs, position=0, leave=True)
for epoch in range(epochs):
pbar.update(1)
for batch in train_loader:
optim.zero_grad()
input_ids = batch['input_ids'].to(self.device)
attention_mask = batch['attention_mask'].to(self.device)
lab = batch['labels'].to(self.device)
outputs = model(input_ids, attention_mask=attention_mask, labels=lab)
loss = outputs[0]
loss.backward()
optim.step()
pbar.close()
model.eval()
loader = DataLoader(test_dataset, batch_size=batch_size)
original.extend(y_test)
with torch.no_grad():
for batch in loader:
input_ids = batch['input_ids'].to(self.device)
attention_mask = batch['attention_mask'].to(self.device)
outputs = model(input_ids, attention_mask=attention_mask)
predicted.extend(torch.argmax(outputs["logits"], axis=1).cpu().numpy().tolist())
del model
return original, predicted
def train_and_save(self, texts, labels, path, epochs=5, batch_size=16, learning_rate=5e-5):
config = AutoConfig.from_pretrained(self.model_name, num_labels=len(set(labels)),
finetuning_task="custom")
model = AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config)
tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name)
tokenized_train = tokenizer(texts, truncation=True, padding=True)
train_dataset = MainDataset(tokenized_train, labels)
model.to(self.device)
model.train()
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
optim = AdamW(model.parameters(), lr=learning_rate)
pbar = tqdm(total=epochs, position=0, leave=True)
for epoch in range(epochs):
pbar.update(1)
for batch in train_loader:
optim.zero_grad()
input_ids = batch['input_ids'].to(self.device)
attention_mask = batch['attention_mask'].to(self.device)
lab = batch['labels'].to(self.device)
outputs = model(input_ids, attention_mask=attention_mask, labels=lab)
loss = outputs[0]
loss.backward()
optim.step()
pbar.close()
os.makedirs(path)
model.save_pretrained(path)
tokenizer.save_pretrained(path)
class DoubleFastFineTuna:
def __init__(self, model_name, tokenizer_name):
self.model_name = model_name
self.tokenizer_name = tokenizer_name
self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
def cross_validate_fit(self, texts, labels_A, labels_B, splits=5, epochs=5, batch_size=16, learning_rate=5e-5,
):
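        # Same cross-validation loop as FastFineTuna, but with a two-headed model that
        # predicts labels_A and labels_B jointly (the two cross-entropy losses are summed).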
tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name)
texts = np.array(texts)
labels_A = np.array(labels_A)
labels_B = np.array(labels_B)
skf = StratifiedKFold(n_splits=splits)
original_A = []
original_B = []
predicted_A = []
predicted_B = []
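        # NOTE: StratifiedKFold.split() treats its third argument as group labels and ignores
        # them, so the folds below are stratified on labels_A only.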
for train_index, test_index in skf.split(texts, labels_A, labels_B):
model = MiniModel(self.model_name, len(set(labels_A)), len(set(labels_B)))
X_train, X_test = texts[train_index].tolist(), texts[test_index].tolist()
y_A_train, y_A_test = labels_A[train_index].tolist(), labels_A[test_index].tolist()
y_B_train, y_B_test = labels_B[train_index].tolist(), labels_B[test_index].tolist()
# not the smartest way to do this, but faster to code up
tokenized_train = tokenizer(X_train, truncation=True, padding=True)
tokenized_test = tokenizer(X_test, truncation=True, padding=True)
train_dataset = MainDatasetDouble(tokenized_train, y_A_train, y_B_train)
test_dataset = MainDatasetDouble(tokenized_test, y_A_test, y_B_test)
model.to(self.device)
model.train()
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
optim = AdamW(model.parameters(), lr=learning_rate)
pbar = tqdm(total=epochs, position=0, leave=True)
for epoch in range(epochs):
pbar.update(1)
for batch in train_loader:
optim.zero_grad()
input_ids = batch['input_ids'].to(self.device)
attention_mask = batch['attention_mask'].to(self.device)
lab_A = batch['labels_A'].to(self.device)
lab_B = batch['labels_B'].to(self.device)
outputs = model(input_ids, attention_mask=attention_mask)
loss = nn.CrossEntropyLoss()
loss_A = loss(outputs[0], lab_A)
loss_B = loss(outputs[1], lab_B)
loss = loss_A + loss_B
loss.backward()
optim.step()
pbar.close()
model.eval()
loader = DataLoader(test_dataset, batch_size=batch_size)
original_A.extend(y_A_test)
original_B.extend(y_B_test)
with torch.no_grad():
for batch in loader:
input_ids = batch['input_ids'].to(self.device)
attention_mask = batch['attention_mask'].to(self.device)
outputs = model(input_ids, attention_mask=attention_mask)
predicted_A.extend(torch.argmax(outputs[0], axis=1).cpu().numpy().tolist())
predicted_B.extend(torch.argmax(outputs[1], axis=1).cpu().numpy().tolist())
del model
return original_A, original_B, predicted_A, predicted_B
def train_and_save(self, texts, labels, path, epochs=5, batch_size=16, learning_rate=5e-5):
config = AutoConfig.from_pretrained(self.model_name, num_labels=len(set(labels)),
finetuning_task="custom")
model = AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config)
tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name)
tokenized_train = tokenizer(texts, truncation=True, padding=True)
train_dataset = MainDataset(tokenized_train, labels)
model.to(self.device)
model.train()
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
optim = AdamW(model.parameters(), lr=learning_rate)
pbar = tqdm(total=epochs, position=0, leave=True)
for epoch in range(epochs):
pbar.update(1)
for batch in train_loader:
optim.zero_grad()
input_ids = batch['input_ids'].to(self.device)
attention_mask = batch['attention_mask'].to(self.device)
lab = batch['labels'].to(self.device)
outputs = model(input_ids, attention_mask=attention_mask, labels=lab)
loss = outputs[0]
loss.backward()
optim.step()
pbar.close()
os.makedirs(path)
model.save_pretrained(path)
tokenizer.save_pretrained(path)
| 2.125 | 2 |
Message/Message.py | gauravyeole/KVstoreDB | 1 | 3971 | <filename>Message/Message.py
# Message class Implementation
# @author: <NAME> <<EMAIL>>
class Message:
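    # Skeleton container for the request/response payloads exchanged with the KV store.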
class Request:
def __init__(self, action="", data=None):
self.action = action
self.data = data
    class Response:
def __init__(self):
self.status = False
self.data = None
def __init__(self):
pass
def set_request(self):
pass
def response(self):
pass | 2.96875 | 3 |
wpt/websockets/websock_handlers/open_delay_wsh.py | gsnedders/presto-testo | 0 | 3972 | #!/usr/bin/python
from mod_pywebsocket import msgutil
import time
def web_socket_do_extra_handshake(request):
pass # Always accept.
def web_socket_transfer_data(request):
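    # Wait three seconds, then push a single text frame to the client.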
time.sleep(3)
msgutil.send_message(request, "line")
| 2.0625 | 2 |
airflow/providers/microsoft/psrp/operators/psrp.py | augusto-herrmann/airflow | 4 | 3973 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import TYPE_CHECKING, List, Optional, Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.microsoft.psrp.hooks.psrp import PSRPHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class PSRPOperator(BaseOperator):
"""PowerShell Remoting Protocol operator.
:param psrp_conn_id: connection id
:type psrp_conn_id: str
:param command: command to execute on remote host. (templated)
:type command: str
:param powershell: powershell to execute on remote host. (templated)
:type powershell: str
"""
template_fields: Sequence[str] = (
"command",
"powershell",
)
template_fields_renderers = {"command": "powershell", "powershell": "powershell"}
ui_color = "#901dd2"
def __init__(
self,
*,
psrp_conn_id: str,
command: Optional[str] = None,
powershell: Optional[str] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
if not (command or powershell):
raise ValueError("Must provide either 'command' or 'powershell'")
self.conn_id = psrp_conn_id
self.command = command
self.powershell = powershell
def execute(self, context: "Context") -> List[str]:
with PSRPHook(self.conn_id) as hook:
ps = hook.invoke_powershell(
f"cmd.exe /c @'\n{self.command}\n'@" if self.command else self.powershell
)
if ps.had_errors:
raise AirflowException("Process failed")
return ps.output
| 1.851563 | 2 |
appengine/monorail/services/api_pb2_v1_helpers.py | mithro/chromium-infra | 1 | 3974 | <gh_stars>1-10
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Convert Monorail PB objects to API PB objects"""
import datetime
import logging
import time
from framework import framework_constants
from framework import framework_helpers
from framework import permissions
from framework import timestr
from proto import api_pb2_v1
from proto import project_pb2
from proto import tracker_pb2
from services import issue_svc
from services import project_svc
from services import user_svc
from tracker import tracker_bizobj
from tracker import tracker_helpers
def convert_project(project, config, role):
"""Convert Monorail Project PB to API ProjectWrapper PB."""
return api_pb2_v1.ProjectWrapper(
kind='monorail#project',
name=project.project_name,
externalId=project.project_name,
htmlLink='/p/%s/' % project.project_name,
summary=project.summary,
description=project.description,
role=role,
issuesConfig=convert_project_config(config))
def convert_project_config(config):
"""Convert Monorail ProjectIssueConfig PB to API ProjectIssueConfig PB."""
return api_pb2_v1.ProjectIssueConfig(
kind='monorail#projectIssueConfig',
restrictToKnown=config.restrict_to_known,
defaultColumns=config.default_col_spec.split(),
defaultSorting=config.default_sort_spec.split(),
statuses=[convert_status(s) for s in config.well_known_statuses],
labels=[convert_label(l) for l in config.well_known_labels],
prompts=[convert_template(t) for t in config.templates],
defaultPromptForMembers=config.default_template_for_developers,
defaultPromptForNonMembers=config.default_template_for_users)
def convert_status(status):
"""Convert Monorail StatusDef PB to API Status PB."""
return api_pb2_v1.Status(
status=status.status,
meansOpen=status.means_open,
description=status.status_docstring)
def convert_label(label):
"""Convert Monorail LabelDef PB to API Label PB."""
return api_pb2_v1.Label(
label=label.label,
description=label.label_docstring)
def convert_template(template):
"""Convert Monorail TemplateDef PB to API Prompt PB."""
return api_pb2_v1.Prompt(
name=template.name,
title=template.summary,
description=template.content,
titleMustBeEdited=template.summary_must_be_edited,
status=template.status,
labels=template.labels,
membersOnly=template.members_only,
defaultToMember=template.owner_defaults_to_member,
componentRequired=template.component_required)
def convert_person(user_id, cnxn, services, trap_exception=False):
"""Convert user id to API AtomPerson PB."""
if not user_id:
return None
try:
user = services.user.GetUser(cnxn, user_id)
except user_svc.NoSuchUserException as ex:
if trap_exception:
logging.warning(str(ex))
return None
else:
raise ex
days_ago = None
if user.last_visit_timestamp:
secs_ago = int(time.time()) - user.last_visit_timestamp
days_ago = secs_ago / framework_constants.SECS_PER_DAY
return api_pb2_v1.AtomPerson(
kind='monorail#issuePerson',
name=user.email,
htmlLink='https://%s/u/%d' % (framework_helpers.GetHostPort(), user_id),
last_visit_days_ago=days_ago,
email_bouncing=bool(user.email_bounce_timestamp),
vacation_message=user.vacation_message)
def convert_issue_ids(issue_ids, mar, services):
"""Convert global issue ids to API IssueRef PB."""
# missed issue ids are filtered out.
issues = services.issue.GetIssues(mar.cnxn, issue_ids)
result = []
for issue in issues:
issue_ref = api_pb2_v1.IssueRef(
issueId=issue.local_id,
projectId=issue.project_name,
kind='monorail#issueRef')
result.append(issue_ref)
return result
def convert_issueref_pbs(issueref_pbs, mar, services):
"""Convert API IssueRef PBs to global issue ids."""
if issueref_pbs:
result = []
for ir in issueref_pbs:
project_id = mar.project_id
if ir.projectId:
project = services.project.GetProjectByName(
mar.cnxn, ir.projectId)
if project:
project_id = project.project_id
try:
issue = services.issue.GetIssueByLocalID(
mar.cnxn, project_id, ir.issueId)
result.append(issue.issue_id)
except issue_svc.NoSuchIssueException:
logging.warning(
'Issue (%s:%d) does not exist.' % (ir.projectId, ir.issueId))
return result
else:
return None
def convert_issue(cls, issue, mar, services):
"""Convert Monorail Issue PB to API IssuesGetInsertResponse."""
config = services.config.GetProjectConfig(mar.cnxn, issue.project_id)
granted_perms = tracker_bizobj.GetGrantedPerms(
issue, mar.auth.effective_ids, config)
issue_project = services.project.GetProject(mar.cnxn, issue.project_id)
component_list = []
for cd in config.component_defs:
cid = cd.component_id
if cid in issue.component_ids:
component_list.append(cd.path)
cc_list = [convert_person(p, mar.cnxn, services) for p in issue.cc_ids]
cc_list = [p for p in cc_list if p is not None]
field_values_list = []
field_id_dict = {
fd.field_id: fd.field_name for fd in config.field_defs}
for fv in issue.field_values:
field_name = field_id_dict.get(fv.field_id)
if not field_name:
logging.warning('Custom field %d of project %s does not exist',
fv.field_id, issue_project.project_name)
continue
val = None
if fv.user_id:
val = _get_user_email(
services.user, mar.cnxn, fv.user_id)
elif fv.str_value:
val = fv.str_value
elif fv.int_value:
val = str(fv.int_value)
new_fv = api_pb2_v1.FieldValue(
fieldName=field_name,
fieldValue=val,
derived=fv.derived)
field_values_list.append(new_fv)
resp = cls(
kind='monorail#issue',
id=issue.local_id,
title=issue.summary,
summary=issue.summary,
projectId=issue_project.project_name,
stars=issue.star_count,
starred=services.issue_star.IsItemStarredBy(
mar.cnxn, issue.issue_id, mar.auth.user_id),
status=issue.status,
state=(api_pb2_v1.IssueState.open if
tracker_helpers.MeansOpenInProject(
tracker_bizobj.GetStatus(issue), config)
else api_pb2_v1.IssueState.closed),
labels=issue.labels,
components=component_list,
author=convert_person(issue.reporter_id, mar.cnxn, services),
owner=convert_person(issue.owner_id, mar.cnxn, services),
cc=cc_list,
updated=datetime.datetime.fromtimestamp(issue.modified_timestamp),
published=datetime.datetime.fromtimestamp(issue.opened_timestamp),
blockedOn=convert_issue_ids(issue.blocked_on_iids, mar, services),
blocking=convert_issue_ids(issue.blocking_iids, mar, services),
canComment=permissions.CanCommentIssue(
mar.auth.effective_ids, mar.perms, issue_project, issue,
granted_perms=granted_perms),
canEdit=permissions.CanEditIssue(
mar.auth.effective_ids, mar.perms, issue_project, issue,
granted_perms=granted_perms),
fieldValues=field_values_list)
if issue.closed_timestamp > 0:
resp.closed = datetime.datetime.fromtimestamp(issue.closed_timestamp)
if issue.merged_into:
resp.mergedInto=convert_issue_ids([issue.merged_into], mar, services)[0]
if issue.owner_modified_timestamp:
resp.owner_modified = datetime.datetime.fromtimestamp(
issue.owner_modified_timestamp)
if issue.status_modified_timestamp:
resp.status_modified = datetime.datetime.fromtimestamp(
issue.status_modified_timestamp)
if issue.component_modified_timestamp:
resp.component_modified = datetime.datetime.fromtimestamp(
issue.component_modified_timestamp)
return resp
def convert_comment(issue, comment, mar, services, granted_perms):
"""Convert Monorail IssueComment PB to API IssueCommentWrapper."""
can_delete = permissions.CanDelete(
mar.auth.user_id, mar.auth.effective_ids, mar.perms,
comment.deleted_by, comment.user_id, mar.project,
permissions.GetRestrictions(issue), granted_perms=granted_perms)
return api_pb2_v1.IssueCommentWrapper(
attachments=[convert_attachment(a) for a in comment.attachments],
author=convert_person(comment.user_id, mar.cnxn, services,
trap_exception=True),
canDelete=can_delete,
content=comment.content,
deletedBy=convert_person(comment.deleted_by, mar.cnxn, services,
trap_exception=True),
id=comment.sequence,
published=datetime.datetime.fromtimestamp(comment.timestamp),
updates=convert_amendments(issue, comment.amendments, mar, services),
kind='monorail#issueComment')
def convert_attachment(attachment):
"""Convert Monorail Attachment PB to API Attachment."""
return api_pb2_v1.Attachment(
attachmentId=attachment.attachment_id,
fileName=attachment.filename,
fileSize=attachment.filesize,
mimetype=attachment.mimetype,
isDeleted=attachment.deleted)
def convert_amendments(issue, amendments, mar, services):
"""Convert a list of Monorail Amendment PBs to API Update."""
result = api_pb2_v1.Update(kind='monorail#issueCommentUpdate')
for amendment in amendments:
if amendment.field == tracker_pb2.FieldID.SUMMARY:
result.summary = amendment.newvalue
elif amendment.field == tracker_pb2.FieldID.STATUS:
result.status = amendment.newvalue
elif amendment.field == tracker_pb2.FieldID.OWNER:
if len(amendment.added_user_ids) == 0:
result.owner = framework_constants.NO_USER_NAME
else:
result.owner = _get_user_email(
services.user, mar.cnxn, amendment.added_user_ids[0])
elif amendment.field == tracker_pb2.FieldID.LABELS:
result.labels = amendment.newvalue.split()
elif amendment.field == tracker_pb2.FieldID.CC:
for user_id in amendment.added_user_ids:
user_email = _get_user_email(
services.user, mar.cnxn, user_id)
result.cc.append(user_email)
for user_id in amendment.removed_user_ids:
user_email = _get_user_email(
services.user, mar.cnxn, user_id)
result.cc.append('-%s' % user_email)
elif amendment.field == tracker_pb2.FieldID.BLOCKEDON:
result.blockedOn = _append_project(
amendment.newvalue, issue.project_name)
elif amendment.field == tracker_pb2.FieldID.BLOCKING:
result.blocking = _append_project(
amendment.newvalue, issue.project_name)
elif amendment.field == tracker_pb2.FieldID.MERGEDINTO:
result.mergedInto = amendment.newvalue
elif amendment.field == tracker_pb2.FieldID.COMPONENTS:
result.components = amendment.newvalue.split()
elif amendment.field == tracker_pb2.FieldID.CUSTOM:
fv = api_pb2_v1.FieldValue()
fv.fieldName = amendment.custom_field_name
fv.fieldValue = amendment.newvalue
result.fieldValues.append(fv)
return result
def _get_user_email(user_service, cnxn, user_id):
"""Get user email."""
try:
user_email = user_service.LookupUserEmail(
cnxn, user_id)
if not user_email:
user_email = framework_constants.DELETED_USER_NAME
except user_svc.NoSuchUserException:
user_email = framework_constants.DELETED_USER_NAME
return user_email
def _append_project(issue_ids, project_name):
"""Append project name to convert <id> to <project>:<id> format."""
result = []
id_list = issue_ids.split()
for id_str in id_list:
if ':' in id_str:
result.append(id_str)
# '-' means this issue is being removed
elif id_str.startswith('-'):
result.append('-%s:%s' % (project_name, id_str[1:]))
else:
result.append('%s:%s' % (project_name, id_str))
return result
def split_remove_add(item_list):
"""Split one list of items into two: items to add and items to remove."""
list_to_add = []
list_to_remove = []
for item in item_list:
if item.startswith('-'):
list_to_remove.append(item[1:])
else:
list_to_add.append(item)
return list_to_add, list_to_remove
# TODO(sheyang): batch the SQL queries to fetch projects/issues.
def issue_global_ids(project_local_id_pairs, project_id, mar, services):
"""Find global issues ids given <project_name>:<issue_local_id> pairs."""
result = []
for pair in project_local_id_pairs:
issue_project_id = None
local_id = None
if ':' in pair:
pair_ary = pair.split(':')
project_name = pair_ary[0]
local_id = int(pair_ary[1])
project = services.project.GetProjectByName(mar.cnxn, project_name)
if not project:
raise project_svc.NoSuchProjectException(
'Project %s does not exist' % project_name)
issue_project_id = project.project_id
else:
issue_project_id = project_id
local_id = int(pair)
result.append(
services.issue.LookupIssueID(mar.cnxn, issue_project_id, local_id))
return result
def convert_group_settings(group_name, setting):
"""Convert UserGroupSettings to UserGroupSettingsWrapper."""
return api_pb2_v1.UserGroupSettingsWrapper(
groupName=group_name,
who_can_view_members=setting.who_can_view_members,
ext_group_type=setting.ext_group_type,
last_sync_time=setting.last_sync_time)
def convert_component_def(cd, mar, services):
"""Convert ComponentDef PB to Component PB."""
project_name = services.project.LookupProjectNames(
mar.cnxn, [cd.project_id])[cd.project_id]
user_ids = set()
user_ids.update(
cd.admin_ids + cd.cc_ids + [cd.creator_id] + [cd.modifier_id])
user_names_dict = services.user.LookupUserEmails(mar.cnxn, list(user_ids))
component = api_pb2_v1.Component(
componentId=cd.component_id,
projectName=project_name,
componentPath=cd.path,
description=cd.docstring,
admin=sorted([user_names_dict[uid] for uid in cd.admin_ids]),
cc=sorted([user_names_dict[uid] for uid in cd.cc_ids]),
deprecated=cd.deprecated)
if cd.created:
component.created = datetime.datetime.fromtimestamp(cd.created)
component.creator = user_names_dict[cd.creator_id]
if cd.modified:
component.modified = datetime.datetime.fromtimestamp(cd.modified)
component.modifier = user_names_dict[cd.modifier_id]
return component
def convert_component_ids(config, component_names):
"""Convert a list of component names to ids."""
component_names_lower = [name.lower() for name in component_names]
result = []
for cd in config.component_defs:
cpath = cd.path
if cpath.lower() in component_names_lower:
result.append(cd.component_id)
return result
def convert_field_values(field_values, mar, services):
"""Convert user passed in field value list to FieldValue PB, or labels."""
fv_list_add = []
fv_list_remove = []
fv_list_clear = []
label_list_add = []
label_list_remove = []
field_name_dict = {
fd.field_name: fd for fd in mar.config.field_defs}
for fv in field_values:
field_def = field_name_dict.get(fv.fieldName)
if not field_def:
      logging.warning('Custom field %s does not exist', fv.fieldName)
continue
if fv.operator == api_pb2_v1.FieldValueOperator.clear:
fv_list_clear.append(field_def.field_id)
continue
# Enum fields are stored as labels
if field_def.field_type == tracker_pb2.FieldTypes.ENUM_TYPE:
raw_val = '%s-%s' % (fv.fieldName, fv.fieldValue)
if fv.operator == api_pb2_v1.FieldValueOperator.remove:
label_list_remove.append(raw_val)
elif fv.operator == api_pb2_v1.FieldValueOperator.add:
label_list_add.append(raw_val)
else:
        logging.warning('Unsupported field value operator %s', fv.operator)
else:
new_fv = tracker_pb2.FieldValue(
field_id=field_def.field_id)
if field_def.field_type == tracker_pb2.FieldTypes.USER_TYPE:
try:
new_fv.user_id = services.user.LookupUserID(mar.cnxn, fv.fieldValue)
except user_svc.NoSuchUserException:
new_fv.user_id = 0
elif field_def.field_type == tracker_pb2.FieldTypes.STR_TYPE:
new_fv.str_value = fv.fieldValue
elif field_def.field_type == tracker_pb2.FieldTypes.INT_TYPE:
new_fv.int_value = int(fv.fieldValue)
else:
logging.warning(
'Unsupported field value type %s', field_def.field_type)
if fv.operator == api_pb2_v1.FieldValueOperator.remove:
fv_list_remove.append(new_fv)
elif fv.operator == api_pb2_v1.FieldValueOperator.add:
fv_list_add.append(new_fv)
else:
        logging.warning('Unsupported field value operator %s', fv.operator)
return (fv_list_add, fv_list_remove, fv_list_clear,
label_list_add, label_list_remove)
| 1.921875 | 2 |
excut/feedback/rulebased_deduction/deduction_engine_extended.py | mhmgad/ExCut | 5 | 3975 | """
This module contains the rule-based inference (rulebased_deduction) engine.
"""
import itertools
from collections import defaultdict
from itertools import chain
from excut.explanations_mining.descriptions import dump_explanations_to_file
from excut.explanations_mining.descriptions_new import Description2, Atom, load_from_file
from excut.explanations_mining.explaining_engines_extended import PathBasedClustersExplainerExtended
from excut.explanations_mining.simple_miner.description_miner_extended import DescriptionMinerExtended, ExplanationStructure
from excut.kg.kg_query_interface_extended import EndPointKGQueryInterfaceExtended, KGQueryInterfaceExtended
from excut.kg.kg_indexing import Indexer
from excut.kg.utils.data_formating import n3_repr
from excut.utils.logging import logger
from excut.kg.utils.Constants import DEFUALT_AUX_RELATION
from excut.clustering import target_entities as tes
class Prediction:
"""
An object to represent the prediction of the rules
:ivar triple: the predicted triple
:ivar all_sources: all rules that predicted the same triple
"""
# def __init__(self, triple: tuple, source_description=Description(), all_sources=None):
def __init__(self, triple=None, sources=None):
self.triple = triple
        # self.source_description = source_description
self.all_sources = sources if sources else list() # sources if sources else {source_description}
def get_subject(self):
return self.triple[0]
def get_object(self):
return self.triple[2]
def get_quality(self, measure='x_coverage', method=max):
# return self.source_description.get_quality(measure)
return method([source.get_quality(measure) for source in self.all_sources])
def get_main_description(self, measure='x_coverage', method=max):
return method(self.all_sources, key=lambda d: d.get_quality(measure))
def __str__(self):
return str(self.triple) + '<<' + str(self.get_main_description())
def __repr__(self):
return "%s\t(\t%s,%s)" % (self.__class__.__name__, repr(self.triple), repr(self.all_sources))
def __eq__(self, other):
return other.triple == self.triple
def __hash__(self):
return hash(self.triple)
class DeductionEngine():
"""
Abstract rulebased_deduction/inference engine.
"""
def __init__(self, **kwargs):
pass
def infer(self, descriptions, recursive=False, topk=-1):
pass
class SparqlBasedDeductionEngineExtended(DeductionEngine):
"""
    Deduction engine that converts the rules to SPARQL and fires them over the KG.
    The deduction engine takes care of consolidating similar predictions.
"""
def __init__(self, kg_query_interface: KGQueryInterfaceExtended, relation=DEFUALT_AUX_RELATION, quality='x_coverage', quality_aggregation=max):
"""
:param kg_query_interface: interface for the KG.
:param relation: the relation used in the predicted triple (optional)
        :param quality: objective quality measure for ranking the predictions (optional); by default
                the exclusive coverage of the rules is used
        :param quality_aggregation: the method used for aggregating the score if multiple rules infer the same fact
                (optional); by default max is used.
"""
super(SparqlBasedDeductionEngineExtended, self).__init__()
self.relation = relation
self.query_executer = kg_query_interface
self.quality = quality
self.quality_aggregation = quality_aggregation
self.labels_indexer=Indexer(store=kg_query_interface.type,
endpoint=kg_query_interface.endpoint,
graph= kg_query_interface.labels_graph,
identifier=kg_query_interface.labels_identifier)
def infer(self, descriptions_list, target_entities=None, min_quality=0, topk=-1, output_filepath=None,
clear_target_entities=True):
"""
Infer new facts for a giving set of descriptions
        :param descriptions_list: list of explanation/description rules
:param target_entities: entities and their labels for which predictions are generated
:param min_quality: minimum aggregated quality for the predictions
:param topk: k *distinct* highest quality predictions per entity,
:param output_filepath: predictions output file.
:param clear_target_entities: clear indexed target entities after done inference
:return: dictionary of predicted entity-clusters assignments
"""
if isinstance(descriptions_list,dict):
descriptions_list=list(itertools.chain.from_iterable(descriptions_list.values()))
if target_entities:
self.labels_indexer.index_triples(target_entities)
self.relation=target_entities.get_relation()
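        # Fire every rule as a SPARQL query, then merge predictions that agree on the same triple.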
predictions = list(map(self._infer_single, descriptions_list))
per_entity_predictions = self.consolidate(predictions)
per_entity_predictions = self._merge_and_sort_cut(per_entity_predictions, min_quality, topk=topk)
if output_filepath:
dump_predictions_map(per_entity_predictions, output_filepath, triple_format=True, topk=topk, with_weight=True,
with_description=False, quality=self.quality)
if target_entities and clear_target_entities:
self.labels_indexer.drop()
return per_entity_predictions
def consolidate(self, predictions):
"""
Combine predictions from different rules
:param predictions: list of generated predictions
:return: combined single prediction with several sources for equivalent predictions
:rtype: dict
"""
# per_var_predictions = defaultdict(lambda: defaultdict(list))
# for p in chain.from_iterable(predictions):
# per_var_predictions[p.get_subject()][p.get_object()].append(p)
per_entity_predictions = defaultdict(lambda: defaultdict(Prediction))
for p in list(chain.from_iterable(predictions)):
cons_pred = per_entity_predictions[p.get_subject()][p.get_object()]
cons_pred.triple = p.triple
cons_pred.all_sources += p.all_sources
return per_entity_predictions
def _merge_and_sort_cut(self, per_entity_prediction, threshold=0, topk=-1):
"""
        Merge the inferred facts in case of functional predicates.
:param per_entity_prediction:
:return:
"""
def quality_method(p):
return p.get_quality(self.quality, self.quality_aggregation)
per_entity_prediction_filtered = defaultdict(list)
for sub, per_obj_predictions in per_entity_prediction.items():
# print([(k, p.triple[2], qaulity_method(p)) for k, p in per_obj_predictions.items()])
merged_predictions = list(
filter(lambda p: quality_method(p) > threshold, list(per_obj_predictions.values())))
merged_predictions.sort(key=quality_method, reverse=True)
include = topk if topk > 0 else len(merged_predictions)
per_entity_prediction_filtered[sub] = merged_predictions[:include]
return per_entity_prediction_filtered
def _infer_single(self, description: Description2):
"""
Infer new facts for the given Description
:param description:
:return:
"""
bindings = self.query_executer.get_arguments_bindings(description,
restriction_pattern=Description2(body=[Atom('?x',
self.relation,
'?z')]))
head = description.head
# only supports p(?x,CONSTANT)
predictions = [Prediction((b, head.predicate, head.object), [description]) for b in bindings]
return predictions
def dump_predictions_map(per_var_predictions, out_filepath, triple_format=True, topk=-1, with_weight=True,
with_description=False, quality='x_coverage'):
"""
    Writes the predictions to two files: the first is human readable, and the other (with the .parsable extension)
    can be parsed in python.
:param per_var_predictions:
:param out_filepath:
:param triple_format:
:param topk:
:param with_weight:
:param with_description:
:return:
"""
out_file_parsable = out_filepath + '.parsable'
out_filepath_with_type = out_filepath + ('.%s' % quality if len(quality) > 0 else '')
with open(out_filepath_with_type, 'w') as out_file:
for var, predictions in per_var_predictions.items():
if topk > 0:
predictions = predictions[:topk]
for p in predictions:
if triple_format:
# I only output normalized_coverage
out_str = n3_repr(p.triple) + ('\t%f' % p.get_quality(quality) if with_weight else '') + (
                        '\t%s' % p.get_main_description() if with_description else '')
else:
out_str = str(p)
out_file.write(out_str)
out_file.write('\n')
with open(out_file_parsable + ('.%s' % quality if len(quality) > 0 else ''), 'w') as out_file:
out_file.write('\n'.join(
map(str, chain.from_iterable(map(lambda l: l[:topk] if topk > 0 else l, per_var_predictions.values())))))
return out_filepath_with_type
if __name__ == '__main__':
target_entities=tes.load_from_file('/scratch/GW/pool0/gadelrab/ExDEC/data/yago/yago_art_3_4k.tsv')
vos_executer = EndPointKGQueryInterfaceExtended('http://halimede:8890/sparql',
['http://yago-expr.org', 'http://yago-expr.org.types'],
labels_identifier='http://yago-expr.org.labels')
explainer=PathBasedClustersExplainerExtended(vos_executer, language_bias={'max_length': 4, 'structure': ExplanationStructure.TREE})
explans=explainer.explain(target_entities,
output_file='/scratch/GW/pool0/gadelrab/ExDEC/tmp/explanations_tree.txt')
ded = SparqlBasedDeductionEngineExtended(vos_executer)
per_var_predictions = ded.infer(explans, target_entities,
output_filepath='/scratch/GW/pool0/gadelrab/ExDEC/tmp/predictions_tree.tsv')
logger.info("Total variables with predictions subjects: %i", len(per_var_predictions))
| 2.234375 | 2 |
dataloader/viperlist_train.py | urasakikeisuke/rigidmask | 138 | 3976 | <reponame>urasakikeisuke/rigidmask<gh_stars>100-1000
import torch.utils.data as data
from PIL import Image
import os
import os.path
import numpy as np
import pdb
import glob
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def dataloader(filepath):
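    # Pair each frame with its successor and the matching flow map from flow_occ/,
    # keeping only pairs whose flow ground truth file exists.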
left_fold = 'image_2/'
train = glob.glob(filepath+left_fold+'/0*.jpg')
train = sorted(train)
l0_train = []
l1_train = []
flow_train = []
for img in train:
img1 = ('%s_%s.jpg'%(img.rsplit('_',1)[0],'%05d'%(1+int(img.split('.')[0].split('_')[-1])) ))
flowp = img.replace('.jpg', '.png').replace('image_2','flow_occ')
if (img1 in train and len(glob.glob(flowp))>0 and ('01000' not in img)):
l0_train.append(img)
l1_train.append(img1)
flow_train.append(flowp)
return l0_train, l1_train, flow_train
| 2.234375 | 2 |
floodcomparison/__init__.py | jsosa/floodcomparison | 0 | 3977 | <filename>floodcomparison/__init__.py
from floodcomparison.core import floodcomparison
| 1.195313 | 1 |
weaver/wps_restapi/quotation/quotes.py | crim-ca/weaver | 16 | 3978 | import logging
import random
from datetime import timedelta
from typing import TYPE_CHECKING
from duration import to_iso8601
from pyramid.httpexceptions import HTTPBadRequest, HTTPCreated, HTTPNotFound, HTTPOk
from weaver import sort
from weaver.config import WEAVER_CONFIGURATION_ADES, WEAVER_CONFIGURATION_EMS, get_weaver_configuration
from weaver.database import get_db
from weaver.datatype import Bill, Quote
from weaver.exceptions import ProcessNotFound, QuoteNotFound, log_unhandled_exceptions
from weaver.formats import OUTPUT_FORMAT_JSON
from weaver.processes.types import PROCESS_APPLICATION, PROCESS_WORKFLOW
from weaver.processes.wps_package import get_package_workflow_steps, get_process_location
from weaver.store.base import StoreBills, StoreQuotes
from weaver.utils import get_settings, get_weaver_url
from weaver.wps_restapi import swagger_definitions as sd
from weaver.wps_restapi.processes.processes import submit_local_job
if TYPE_CHECKING:
from weaver.datatype import Process
from weaver.typedefs import JSON
LOGGER = logging.getLogger(__name__)
def process_quote_estimator(process): # noqa: E811
# type: (Process) -> JSON
"""
Simulate quote parameters for the process execution.
:param process: instance of :class:`weaver.datatype.Process` for which to evaluate the quote.
:return: dict of {price, currency, estimatedTime} values for the process quote.
"""
# TODO: replace by some fancy ml technique or something?
price = random.uniform(0, 10) # nosec
currency = "CAD"
estimated_time = to_iso8601(timedelta(minutes=random.uniform(5, 60))) # nosec
return {"price": price, "currency": currency, "estimatedTime": estimated_time}
@sd.process_quotes_service.post(tags=[sd.TAG_BILL_QUOTE, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,
schema=sd.PostProcessQuoteRequestEndpoint(), response_schemas=sd.post_quotes_responses)
@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)
def request_quote(request):
"""
Request a quotation for a process.
"""
settings = get_settings(request)
weaver_config = get_weaver_configuration(settings)
if weaver_config not in [WEAVER_CONFIGURATION_ADES, WEAVER_CONFIGURATION_EMS]:
raise HTTPBadRequest("Unsupported request for configuration '{}'.".format(weaver_config))
process_id = request.matchdict.get("process_id")
process_store = get_db(request).get_store("processes")
try:
process = process_store.fetch_by_id(process_id)
except ProcessNotFound:
raise HTTPNotFound("Could not find process with specified 'process_id'.")
store = get_db(request).get_store(StoreQuotes)
process_url = get_process_location(process_id, data_source=get_weaver_url(settings))
process_type = process.type
process_params = dict()
for param in ["inputs", "outputs", "mode", "response"]:
if param in request.json:
process_params[param] = request.json.pop(param)
process_quote_info = process_quote_estimator(process)
process_quote_info.update({
"process": process_id,
"processParameters": process_params,
"location": process_url,
"user": str(request.authenticated_userid)
})
# loop workflow sub-process steps to get individual quotes
if process_type == PROCESS_WORKFLOW and weaver_config == WEAVER_CONFIGURATION_EMS:
workflow_quotes = list()
for step in get_package_workflow_steps(process_url):
# retrieve quote from provider ADES
# TODO: data source mapping
process_step_url = get_process_location(step["reference"])
process_quote_url = "{}/quotations".format(process_step_url)
subreq = request.copy()
subreq.path_info = process_quote_url
resp_json = request.invoke_subrequest(subreq).json()
quote_json = resp_json["quote"]
quote = store.save_quote(Quote(**quote_json))
workflow_quotes.append(quote.id)
process_quote_info.update({"steps": workflow_quotes})
quote = store.save_quote(Quote(**process_quote_info))
return HTTPCreated(json={"quote": quote.json()})
# single application quotes (ADES or EMS)
elif process_type == PROCESS_APPLICATION:
quote = store.save_quote(Quote(**process_quote_info))
quote_json = quote.json()
quote_json.pop("steps", None)
return HTTPCreated(json={"quote": quote_json})
# error if not handled up to this point
raise HTTPBadRequest("Unsupported quoting process type '{0}' on '{1}'.".format(process_type, weaver_config))
@sd.process_quotes_service.get(tags=[sd.TAG_BILL_QUOTE, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,
schema=sd.ProcessQuotesEndpoint(), response_schemas=sd.get_quote_list_responses)
@sd.quotes_service.get(tags=[sd.TAG_BILL_QUOTE], renderer=OUTPUT_FORMAT_JSON,
schema=sd.QuotesEndpoint(), response_schemas=sd.get_quote_list_responses)
@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)
def get_quote_list(request):
"""
Get list of quotes IDs.
"""
page = int(request.params.get("page", "0"))
limit = int(request.params.get("limit", "10"))
filters = {
"process_id": request.params.get("process", None) or request.matchdict.get("process_id", None),
"page": page,
"limit": limit,
"sort": request.params.get("sort", sort.SORT_CREATED),
}
store = get_db(request).get_store(StoreQuotes)
items, count = store.find_quotes(**filters)
return HTTPOk(json={
"count": count,
"page": page,
"limit": limit,
"quotes": [quote.id for quote in items]
})
@sd.process_quote_service.get(tags=[sd.TAG_BILL_QUOTE, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,
schema=sd.ProcessQuoteEndpoint(), response_schemas=sd.get_quote_responses)
@sd.quote_service.get(tags=[sd.TAG_BILL_QUOTE], renderer=OUTPUT_FORMAT_JSON,
schema=sd.QuoteEndpoint(), response_schemas=sd.get_quote_responses)
@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)
def get_quote_info(request):
"""
Get quote information.
"""
quote_id = request.matchdict.get("quote_id")
store = get_db(request).get_store(StoreQuotes)
try:
quote = store.fetch_by_id(quote_id)
except QuoteNotFound:
raise HTTPNotFound("Could not find quote with specified 'quote_id'.")
return HTTPOk(json={"quote": quote.json()})
@sd.process_quote_service.post(tags=[sd.TAG_BILL_QUOTE, sd.TAG_EXECUTE, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,
schema=sd.PostProcessQuote(), response_schemas=sd.post_quote_responses)
@sd.quote_service.post(tags=[sd.TAG_BILL_QUOTE, sd.TAG_EXECUTE], renderer=OUTPUT_FORMAT_JSON,
schema=sd.PostQuote(), response_schemas=sd.post_quote_responses)
@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)
def execute_quote(request):
"""
Execute a quoted process.
"""
quote_info = get_quote_info(request).json["quote"]
quote_bill_info = {
"quote": quote_info.get("id"),
"price": quote_info.get("price"),
"currency": quote_info.get("currency")
}
job_resp = submit_local_job(request)
job_json = job_resp.json
job_id = job_json.get("jobID")
user_id = str(request.authenticated_userid)
store = get_db(request).get_store(StoreBills)
bill = store.save_bill(Bill(user=user_id, job=job_id, **quote_bill_info))
job_json.update({"bill": bill.id})
return HTTPCreated(json=job_json)
| 2.015625 | 2 |
strava.py | AartGoossens/streamlit-activity-viewer | 4 | 3979 | <reponame>AartGoossens/streamlit-activity-viewer
import base64
import os
import arrow
import httpx
import streamlit as st
import sweat
from bokeh.models.widgets import Div
APP_URL = os.environ["APP_URL"]
STRAVA_CLIENT_ID = os.environ["STRAVA_CLIENT_ID"]
STRAVA_CLIENT_SECRET = os.environ["STRAVA_CLIENT_SECRET"]
STRAVA_AUTHORIZATION_URL = "https://www.strava.com/oauth/authorize"
STRAVA_API_BASE_URL = "https://www.strava.com/api/v3"
DEFAULT_ACTIVITY_LABEL = "NO_ACTIVITY_SELECTED"
STRAVA_ORANGE = "#fc4c02"
@st.cache(show_spinner=False)
def load_image_as_base64(image_path):
with open(image_path, "rb") as f:
contents = f.read()
return base64.b64encode(contents).decode("utf-8")
def powered_by_strava_logo():
base64_image = load_image_as_base64("./static/api_logo_pwrdBy_strava_horiz_light.png")
st.markdown(
f'<img src="data:image/png;base64,{base64_image}" width="100%" alt="powered by strava">',
unsafe_allow_html=True,
)
def authorization_url():
request = httpx.Request(
method="GET",
url=STRAVA_AUTHORIZATION_URL,
params={
"client_id": STRAVA_CLIENT_ID,
"redirect_uri": APP_URL,
"response_type": "code",
"approval_prompt": "auto",
"scope": "activity:read_all"
}
)
return request.url
def login_header(header=None):
strava_authorization_url = authorization_url()
if header is None:
base = st
else:
col1, _, _, button = header
base = button
with col1:
powered_by_strava_logo()
base64_image = load_image_as_base64("./static/[email protected]")
base.markdown(
(
f"<a href=\"{strava_authorization_url}\">"
f" <img alt=\"strava login\" src=\"data:image/png;base64,{base64_image}\" width=\"100%\">"
f"</a>"
),
unsafe_allow_html=True,
)
def logout_header(header=None):
if header is None:
base = st
else:
_, col2, _, button = header
base = button
with col2:
powered_by_strava_logo()
if base.button("Log out"):
js = f"window.location.href = '{APP_URL}'"
html = f"<img src onerror=\"{js}\">"
div = Div(text=html)
st.bokeh_chart(div)
def logged_in_title(strava_auth, header=None):
if header is None:
base = st
else:
col, _, _, _ = header
base = col
first_name = strava_auth["athlete"]["firstname"]
last_name = strava_auth["athlete"]["lastname"]
col.markdown(f"*Welcome, {first_name} {last_name}!*")
@st.cache(show_spinner=False, suppress_st_warning=True)
def exchange_authorization_code(authorization_code):
response = httpx.post(
url="https://www.strava.com/oauth/token",
json={
"client_id": STRAVA_CLIENT_ID,
"client_secret": STRAVA_CLIENT_SECRET,
"code": authorization_code,
"grant_type": "authorization_code",
}
)
try:
response.raise_for_status()
except httpx.HTTPStatusError:
st.error("Something went wrong while authenticating with Strava. Please reload and try again")
st.experimental_set_query_params()
st.stop()
return
strava_auth = response.json()
return strava_auth
def authenticate(header=None, stop_if_unauthenticated=True):
query_params = st.experimental_get_query_params()
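    # Strava redirects back with ?code=...; after a successful token exchange the same
    # value is kept in the URL as ?session=... so page refreshes stay logged in.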
authorization_code = query_params.get("code", [None])[0]
if authorization_code is None:
authorization_code = query_params.get("session", [None])[0]
if authorization_code is None:
login_header(header=header)
if stop_if_unauthenticated:
st.stop()
return
else:
logout_header(header=header)
strava_auth = exchange_authorization_code(authorization_code)
logged_in_title(strava_auth, header)
st.experimental_set_query_params(session=authorization_code)
return strava_auth
def header():
col1, col2, col3 = st.beta_columns(3)
with col3:
strava_button = st.empty()
return col1, col2, col3, strava_button
@st.cache(show_spinner=False)
def get_activities(auth, page=1):
access_token = auth["access_token"]
response = httpx.get(
url=f"{STRAVA_API_BASE_URL}/athlete/activities",
params={
"page": page,
},
headers={
"Authorization": f"Bearer {access_token}",
},
)
return response.json()
def activity_label(activity):
if activity["name"] == DEFAULT_ACTIVITY_LABEL:
return ""
start_date = arrow.get(activity["start_date_local"])
human_readable_date = start_date.humanize(granularity=["day"])
date_string = start_date.format("YYYY-MM-DD")
return f"{activity['name']} - {date_string} ({human_readable_date})"
def select_strava_activity(auth):
col1, col2 = st.beta_columns([1, 3])
with col1:
page = st.number_input(
label="Activities page",
min_value=1,
help="The Strava API returns your activities in chunks of 30. Increment this field to go to the next page.",
)
with col2:
activities = get_activities(auth=auth, page=page)
if not activities:
st.info("This Strava account has no activities or you ran out of pages.")
st.stop()
default_activity = {"name": DEFAULT_ACTIVITY_LABEL, "start_date_local": ""}
activity = st.selectbox(
label="Select an activity",
options=[default_activity] + activities,
format_func=activity_label,
)
if activity["name"] == DEFAULT_ACTIVITY_LABEL:
st.write("No activity selected")
st.stop()
return
activity_url = f"https://www.strava.com/activities/{activity['id']}"
st.markdown(
f"<a href=\"{activity_url}\" style=\"color:{STRAVA_ORANGE};\">View on Strava</a>",
unsafe_allow_html=True
)
return activity
@st.cache(show_spinner=False, max_entries=30, allow_output_mutation=True)
def download_activity(activity, strava_auth):
with st.spinner(f"Downloading activity \"{activity['name']}\"..."):
return sweat.read_strava(activity["id"], strava_auth["access_token"])
| 2.75 | 3 |
appliance/src/ufw_interface.py | reap3r/nmfta-bouncer | 1 | 3980 | <filename>appliance/src/ufw_interface.py
#!/usr/bin/env python
# shamelessly stolen from: https://gitlab.com/dhj/easyufw
# A thin wrapper over the thin wrapper that is ufw
# Usage:
# import easyufw as ufw
# ufw.disable() # disable firewall
# ufw.enable() # enable firewall
# ufw.allow() # default allow -- allow all
# ufw.allow(22) # allow port 22, any protocol
# ufw.allow(22,'tcp') # allow port 22, tcp protocol
# ufw.allow('22/tcp') # allow port 22, tcp protocol
# ufw.allow(53,'udp') # allow port 53, udp protocol
# ufw.allow(53,'udp') # allow port 53, udp protocol
# ufw.deny() # default deny -- deny all
# ufw.deny(22,'tcp') # deny port 22, tcp protocol
# ufw.delete(22) # delete rules referencing port 22
# ufw.reset() # restore defaults
# ufw.status() # return status string (default verbose=True)
# ufw.run("allow 22") # directly run command as if from command line
import ufw.frontend
import ufw.common
import gettext
progName = ufw.common.programName
gettext.install(progName)#, unicode=True) # for i18n; fixes '_' not defined
ui = ufw.frontend.UFWFrontend(False) # no dryrun -- do it live
backend = ui.backend
parse_command = ufw.frontend.parse_command
def _parse(actionstr):
# parse commands like "allow 22", "reset", "default allow"
argv = [progName]
argv.extend(actionstr.split(' ')) # generate bogus argv to parse
pr = parse_command(argv)
return pr
def run(actionstr, force=False):
# run command with an explicit force argument
pr = _parse(actionstr)
rule = pr.data.get('rule','') # commands like reset don't have a rule
iptype = pr.data.get('iptype','')
return ui.do_action(pr.action,rule,iptype,force)
def reset(force=True):
run('reset',force=force)
def enable():
ui.set_enabled(True)
def disable():
ui.set_enabled(False)
def allow(port=None, protocol=None):
# port int; protocol str ['tcp','udp']
pp = None
if port is not None:
pp = "" # port and protocol string
pp += str(port)
if protocol is not None:
pp += '/' + protocol
_allow(pp)
def _allow(pp=None):
# pp = port and protocol string ['22','22/tcp','53/udp']
# port without protocol includes all protocols
if pp is None:
run('default allow')
else:
run('allow ' + pp)
def deny(port=None, protocol=None):
# port int; protocol str ['tcp','udp']
pp = None
if port is not None:
pp = "" # port and protocol string
pp += str(port)
if protocol is not None:
pp += '/' + protocol
_deny(pp)
def _deny(pp=None):
# pp = port and protocol string
if pp is None:
run('default deny')
else:
run('deny ' + pp)
def delete(port):
# delete all rules by destination port
while _delete(port): pass # while ports deleted re-enumerate and continue
def _delete(port):
for i,rule in enumerate(backend.get_rules()):
rule_port = None
try:
rule_port = int(rule.dport)
except:
rule_port = None
if rule_port is not None and port == rule_port:
run("delete " + str(i+1), force=True)
return True # delete one rule; enumeration changes after delete
return False
def status(verbose=True):
cmd = 'status'
if verbose:
cmd += ' verbose'
return run(cmd)
| 2.078125 | 2 |
test/libsalt/test_vehicle.py | etri-city-traffic-brain/traffic-simulator | 8 | 3981 | <reponame>etri-city-traffic-brain/traffic-simulator
import libsalt
def test(salt_scenario):
libsalt.start(salt_scenario)
libsalt.setCurrentStep(25200)
step = libsalt.getCurrentStep()
while step <= 36000:
if (step % 100 == 0):
print("Simulation Step: ", step)
test_funcs()
libsalt.simulationStep()
step = libsalt.getCurrentStep()
libsalt.close()
print("Python: Simulation End!!!")
def test_funcs():
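    # Query the standby/running vehicle lists from libsalt and report how many are running.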
standbys = libsalt.vehicle.getStandbyVehicles()
runnings = libsalt.vehicle.getRunningVehicles()
print("#Running Vehicles: ", len(runnings))
#for vehicle in runnings:
# print("\t", vehicle.toString())
#for vehicle in standbys:
# print("\t", vehicle.toString())
# for vehicle in runnings:
# print("Running Vehicle)", vehicle.id, ":", libsalt.vehicle.getRoute(vehicle.id).toString())
# print("Running Vehicle)", vehicle.id, ":", vehicle.toString())
#print("#Standby Vehicles: ", len(standbys))
#for vehicle in standbys:
# print("Standby Vehicle)", vehicle.id, ":", libsalt.vehicle.getRouteString(vehicle.id))
#print("Standby Vehicle)", vehicle.id, ":", vehicle.toString())
if __name__ == "__main__":
salt_scenario = r"/home/mclee/project/traffic-simulator/data/dj_sample_data/2020-dj_sample.json"
test(salt_scenario)
| 2.4375 | 2 |
Masters/Copy Layer to Layer.py | davidtahim/Glyphs-Scripts | 1 | 3982 | #MenuTitle: Copy Layer to Layer
# -*- coding: utf-8 -*-
__doc__="""
Copies one master to another master in selected glyphs.
"""
import GlyphsApp
import vanilla
import math
def getComponentScaleX_scaleY_rotation( self ):
a = self.transform[0]
b = self.transform[1]
c = self.transform[2]
d = self.transform[3]
scale_x = math.sqrt(math.pow(a,2)+math.pow(b,2))
scale_y = math.sqrt(math.pow(c,2)+math.pow(d,2))
if (b<0 and c<0):
scale_y = scale_y * -1
rotation = math.atan2(b, a) * (180/math.pi)
return [scale_x, scale_y, rotation]
class MasterFiller( object ):
def __init__( self ):
# Window 'self.w':
windowWidth = 280
windowHeight = 155
windowWidthResize = 120 # user can resize width by this value
windowHeightResize = 0 # user can resize height by this value
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ), # default window size
"Copy layer to layer", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
autosaveName = "com.mekkablue.MasterFiller.mainwindow" # stores last window position and size
)
self.w.text_1 = vanilla.TextBox((15, 12+2, 120, 14), "Copy paths from", sizeStyle='small')
self.w.master_from = vanilla.PopUpButton((120, 12, -15, 17), self.GetMasterNames(), sizeStyle='small', callback=self.MasterChangeCallback)
self.w.text_2 = vanilla.TextBox((15, 32+2, 120, 14), "into selection of", sizeStyle='small')
self.w.master_into = vanilla.PopUpButton((120, 32, -15, 17), self.GetMasterNames(), sizeStyle='small', callback=self.MasterChangeCallback)
self.w.include_components = vanilla.CheckBox((15, 52+2, -100, 20), "Include components", sizeStyle='small', callback=self.SavePreferences, value=True)
self.w.include_anchors = vanilla.CheckBox((15, 52+20, -100, 20), "Include anchors", sizeStyle='small', callback=self.SavePreferences, value=True)
self.w.include_metrics = vanilla.CheckBox((15, 52+38, -100, 20), "Include metrics", sizeStyle='small', callback=self.SavePreferences, value=True)
self.w.keep_window_open = vanilla.CheckBox((15, 52+56, -100, 20), "Keep window open", sizeStyle='small', callback=self.SavePreferences, value=True)
self.w.copybutton = vanilla.Button((-80, -30, -15, -10), "Copy", sizeStyle='small', callback=self.buttonCallback)
self.w.setDefaultButton( self.w.copybutton )
# Load Settings:
if not self.LoadPreferences():
print "Note: 'Copy Layer to Layer' could not load preferences. Will resort to defaults."
self.w.open()
self.w.makeKey()
self.w.master_into.set(1)
def SavePreferences( self, sender ):
try:
Glyphs.defaults["com.mekkablue.MasterFiller.include_components"] = self.w.include_components.get()
Glyphs.defaults["com.mekkablue.MasterFiller.include_anchors"] = self.w.include_anchors.get()
Glyphs.defaults["com.mekkablue.MasterFiller.include_metrics"] = self.w.include_metrics.get()
Glyphs.defaults["com.mekkablue.MasterFiller.keep_window_open"] = self.w.keep_window_open.get()
except:
return False
return True
def LoadPreferences( self ):
try:
NSUserDefaults.standardUserDefaults().registerDefaults_(
{
"com.mekkablue.MasterFiller.include_components" : "1",
"com.mekkablue.MasterFiller.include_anchors" : "1",
"com.mekkablue.MasterFiller.include_metrics" : "1",
"com.mekkablue.MasterFiller.keep_window_open" : "1"
}
)
self.w.include_components.set( Glyphs.defaults["com.mekkablue.MasterFiller.include_components"] )
self.w.include_anchors.set( Glyphs.defaults["com.mekkablue.MasterFiller.include_anchors"] )
self.w.include_metrics.set( Glyphs.defaults["com.mekkablue.MasterFiller.include_metrics"] )
self.w.keep_window_open.set( Glyphs.defaults["com.mekkablue.MasterFiller.keep_window_open"] )
except:
return False
return True
def GetMasterNames( self ):
myMasterList = []
for i in range( len( Glyphs.currentDocument.font.masters ) ):
x = Glyphs.currentDocument.font.masters[i]
myMasterList.append( '%i: %s' % (i, x.name) )
return myMasterList
def MasterChangeCallback( self, sender ):
if self.w.master_from.get() == self.w.master_into.get():
self.w.copybutton.enable( False )
else:
self.w.copybutton.enable( True )
def copyPathsFromLayerToLayer( self, sourceLayer, targetLayer ):
"""Copies all paths from sourceLayer to targetLayer"""
num_from = len( sourceLayer.paths )
num_into = len( targetLayer.paths )
if num_into != 0:
print "- Cleaning out paths in target layer"
for i in range( num_into )[::-1]:
del targetLayer.paths[i]
if num_from > 0:
print "- Copying paths"
for thisPath in sourceLayer.paths:
newPath = GSPath()
for n in thisPath.nodes:
newNode = GSNode()
newNode.type = n.type
newNode.connection = n.connection
newNode.setPosition_( (n.x, n.y) )
newPath.addNode_( newNode )
newPath.closed = thisPath.closed
targetLayer.paths.append( newPath )
def copyComponentsFromLayerToLayer( self, sourceLayer, targetLayer ):
"""Copies all components from sourceLayer to targetLayer."""
comp_from = len( sourceLayer.components )
comp_into = len( targetLayer.components )
if comp_into != 0:
print "- Cleaning out components in target layer"
for i in range( comp_into )[::-1]:
del targetLayer.components[i]
if comp_from > 0:
print "- Copying components:"
for thisComp in sourceLayer.components:
compName = str( thisComp.componentName ) # str() probably not necessary anymore, but once fixed a problem
newComp = GSComponent( compName )
newComp.setPosition_( (thisComp.x, thisComp.y) )
ScaleX_scaleY_rotation = getComponentScaleX_scaleY_rotation(thisComp)
newComp.setScaleX_scaleY_rotation_(ScaleX_scaleY_rotation[0],ScaleX_scaleY_rotation[1],ScaleX_scaleY_rotation[2])
print "-- Component: %s" % ( compName )
targetLayer.components.append( newComp )
def copyAnchorsFromLayerToLayer( self, sourceLayer, targetLayer ):
"""Copies all anchors from sourceLayer to targetLayer."""
anch_from = len( sourceLayer.anchors )
anch_into = len( targetLayer.anchors )
if anch_into != 0:
print "- Cleaning out anchors in target layer"
			targetLayer.setAnchors_( None )
if anch_from > 0:
print "- Copying anchors from source layer:"
for thisAnchor in sourceLayer.anchors:
anchorName = thisAnchor.name
anchorPosition = NSPoint( thisAnchor.x, thisAnchor.y )
newAnchor = GSAnchor( anchorName, anchorPosition )
print "-- %s (%i, %i)" % ( anchorName, anchorPosition.x, anchorPosition.y )
targetLayer.addAnchor_( newAnchor )
def copyMetricsFromLayerToLayer( self, sourceLayer, targetLayer ):
"""Copies width of sourceLayer to targetLayer."""
sourceWidth = sourceLayer.width
if targetLayer.width != sourceWidth:
targetLayer.width = sourceWidth
print "- Copying width (%.1f)" % sourceWidth
else:
print "- Width not changed (already was %.1f)" % sourceWidth
def buttonCallback( self, sender ):
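		# Read the UI settings, then copy paths (plus optional components, anchors and metrics)
		# from the source master layer into the target master layer of every selected glyph.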
Glyphs.clearLog()
Glyphs.showMacroWindow()
print "Copy Layer to Layer Protocol:"
Font = Glyphs.font
Doc = Glyphs.currentDocument
selectedGlyphs = [ x.parent for x in Font.selectedLayers ]
index_from = self.w.master_from.get()
index_into = self.w.master_into.get()
compYesNo = self.w.include_components.get()
anchYesNo = self.w.include_anchors.get()
metrYesNo = self.w.include_metrics.get()
for thisGlyph in selectedGlyphs:
try:
print "\nProcessing", thisGlyph.name
sourcelayer = thisGlyph.layers[ index_from ]
targetlayer = thisGlyph.layers[ index_into ]
Font.disableUpdateInterface()
# copy paths:
self.copyPathsFromLayerToLayer( sourcelayer, targetlayer )
# copy components:
if compYesNo:
self.copyComponentsFromLayerToLayer( sourcelayer, targetlayer )
# copy anchors:
if anchYesNo:
self.copyAnchorsFromLayerToLayer( sourcelayer, targetlayer )
# copy metrics:
if metrYesNo:
self.copyMetricsFromLayerToLayer( sourcelayer, targetlayer )
Font.enableUpdateInterface()
except Exception, e:
print e
if not self.w.keep_window_open.get():
self.w.close()
MasterFiller()
| 2.609375 | 3 |
vunit/test/unit/test_tokenizer.py | bjacobs1/vunit | 1 | 3983 | <reponame>bjacobs1/vunit
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014-2018, <NAME> <EMAIL>
"""
Test of the general tokenizer
"""
from unittest import TestCase
from vunit.parsing.tokenizer import describe_location
from vunit.test.mock_2or3 import mock
class TestTokenizer(TestCase):
"""
Test of the general tokenizer
"""
def test_describes_single_char_location(self):
self.assertEqual(
_describe_location("""\
S
"""), """\
at filename0 line 1:
S
~""")
def test_describes_single_char_location_within(self):
self.assertEqual(
_describe_location("""\
S
"""), """\
at filename0 line 1:
S
~""")
def test_describes_multi_char_location(self):
self.assertEqual(
_describe_location("""\
S E
"""), """\
at filename0 line 1:
S E
~~~""")
def test_describes_multi_char_location_within(self):
self.assertEqual(
_describe_location("""\
S E
"""), """\
at filename0 line 1:
S E
~~~""")
def test_describes_multi_line_location(self):
self.assertEqual(
_describe_location("""\
S____
E
"""), """\
at filename0 line 1:
S____
~~~~~""")
def test_describes_multi_file_location(self):
self.assertEqual(
_describe_location("""\
S__E""", """\
SE"""), """\
from filename0 line 2:
S__E
~~~~
at filename1 line 3:
SE
~~""")
def test_describe_location_none(self):
self.assertEqual(describe_location(None),
"Unknown location")
def test_describe_missing_location(self):
self.assertEqual(describe_location((("missing.svh", (0, 0)), None)),
"Unknown location in missing.svh")
def test_describe_none_filename_location(self):
self.assertEqual(describe_location(((None, (0, 0)), None)),
"Unknown Python string")
def _describe_location(*codes):
"""
Helper to test describe_location
"""
contents = {}
location = None
for idx, code in enumerate(codes):
filename = "filename%i" % idx
contents[filename] = code
start = code.index("S")
if "E" in code:
end = code.index("E")
else:
end = start
location = ((filename, (start, end)), location)
with mock.patch("vunit.parsing.tokenizer.read_file", autospec=True) as mock_read_file:
with mock.patch("vunit.parsing.tokenizer.file_exists", autospec=True) as mock_file_exists:
def file_exists_side_effect(filename):
return filename in contents
def read_file_side_effect(filename):
return contents[filename]
mock_file_exists.side_effect = file_exists_side_effect
mock_read_file.side_effect = read_file_side_effect
retval = describe_location(location=location)
return retval
| 2.53125 | 3 |
modules/star_se_SP.py | tbersez/Allmine | 5 | 3984 | <filename>modules/star_se_SP.py
# STAR aligner single end mode, second pass
#
# This module runs the second pass of the STAR aligner two-pass
# strategy. The goal is to align reads taking into account the splice
# junctions found in the first pass.
#
# Inputs:
# - sample_trim.fastq.gz
# - splicing junction files (.tab)
#
# Output:
# - aligned reads
# - logs for follow up and debuging if needed
#
# Parameters:
# No fancy parameters needed, only the number of threads is specified.
rule star_se_SP:
input:
# fake input
flag = ancient(config["REF"] + "REindexing_done.txt"),
R1 = config["TRIMMED"] + "{samples}_trim.fastq.gz",
genomeDir = ancient(config["REF"])
output:
bam = config["MAP"] + "{samples}_sorted.bam.gz"
params:
prefix = config["MAP"] + "{samples}.",
tmp = config["MAP"] + "SP/" + "{samples}_sp_STAR_TMP",
bind = config["BIND"],
cont = config["CONT"]
benchmark:
"benchmarks/star_SP/{samples}.tsv"
message : "Running STAR second pass with {input.R1}. \n"
shell:
"""
singularity exec -B {params.bind} {params.cont} \
STAR \
--runThreadN 10 \
--genomeDir {input.genomeDir} \
--readFilesIn {input.R1} \
--outSAMtype BAM SortedByCoordinate \
--outFileNamePrefix {params.prefix} \
--outStd BAM_SortedByCoordinate \
--outTmpDir {params.tmp} \
--scoreGap 0 \
--scoreGapNoncan -8 \
--scoreGapGCAG -4 \
--scoreGapATAC -8 \
--scoreGenomicLengthLog2scale -0.25 \
--scoreDelOpen -2 \
--scoreDelBase -2 \
--scoreInsOpen -2 \
--scoreInsBase -2 \
--scoreStitchSJshift 1 \
--readFilesCommand zcat | gzip --stdout > {output.bam}
"""
| 2.375 | 2 |
Udemy/REST-Django-VueJS/C3-practice/03-demo/job_board/jobs/models.py | runzezhang/MOOCs | 3 | 3985 | from django.db import models
class JobOffer(models.Model):
company_name = models.CharField(max_length=50)
company_email = models.EmailField()
job_title = models.CharField(max_length=60)
job_description = models.TextField()
salary = models.PositiveIntegerField()
city = models.CharField(max_length=35)
state = models.CharField(max_length=35)
created_at = models.DateField(auto_now_add=True)
available = models.BooleanField(default=True)
def __str__(self):
return self.company_name
| 2.296875 | 2 |
memeapp/views.py | barbaramootian/Memes-app | 0 | 3986 | from django.shortcuts import render,redirect
from django.contrib.auth.models import User
from django.contrib import messages
from .forms import PictureUploadForm,CommentForm
from .models import Image,Profile,Likes,Comments
from django.contrib.auth.decorators import login_required
from django.contrib .auth import authenticate,login,logout
from django.contrib.auth.forms import UserCreationForm
from datetime import datetime
def index(request):
images=Image.objects.all()
context={'images':images}
return render(request,'memeapp/index.html',context)
def registerPage(request):
form=UserCreationForm()
if request.method == "POST":
form_results=UserCreationForm(request.POST)
if form_results.is_valid():
user =form_results.save(commit=False)
user.username=user.username.lower()
user.save()
login(request,user)
return redirect('index')
else:
            messages.error(request, 'Error occurred during registration')
context = {'reg_form':form}
return render(request, 'memeapp/auth.html',context)
def loginPage(request):
page='login'
if request.user.is_authenticated:
return redirect('index')
if request.method == "POST":
username=request.POST.get('username').lower()
password=request.POST.get('password')
try:
user=User.objects.get(username=username)
except:
messages.error(request, 'User does not exist')
user=authenticate(request,username=username,password=password)
if user is not None:
login(request,user)
return redirect('index')
else:
messages.error(request, 'Username OR Password does not exist')
context={'page':page}
return render(request, 'memeapp/auth.html', context)
def logoutUser(request):
logout(request)
return redirect('index')
@login_required(login_url='login')
def uploadPicture(request):
form = PictureUploadForm()
if request.method == "POST":
form_results = PictureUploadForm(request.POST,request.FILES)
if form_results.is_valid():
form_results.save()
return redirect('index')
context = {"form": form}
return render(request, 'memeapp/upload_picture.html', context)
@login_required(login_url='login')
def my_images(request):
current_user = request.user
    profile = Profile.objects.filter(user_id=current_user.id).first()
    images = Image.objects.filter(user_id=current_user.id)
    return render(request, 'memeapp/profile.html', {"profile": profile, "images": images})
@login_required(login_url='login')
def each_image(request, id):
image = Image.objects.get(id=id)
return render(request, 'memeapp/image_details.html', {'image': image})
@login_required(login_url='login')
def like_picture(request, id):
    likes = Likes.objects.filter(image_id=id, user_id=request.user.id).first()
if Likes.objects.filter(image_id=id, user_id=request.user.id).exists():
likes.delete()
image = Image.objects.get(id=id)
if image.likes_number == 0:
image.likes_number = 0
image.save()
else:
image.likes_number -= 1
image.save()
return redirect('/')
else:
likes = Likes(image_id=id, user_id=request.user.id)
likes.save()
image = Image.objects.get(id=id)
image.likes_number = image.likes_number + 1
image.save()
return redirect('/')
@login_required(login_url='login')
def comment(request,pk):
profile = Image.objects.get(pk=pk)
form_results = CommentForm(request.POST,instance=profile)
if request.method == "POST":
if form_results.is_valid():
user = request.user
comment= form_results.cleaned_data['comment']
comment_content = Comments(user=user, image=profile, comment=comment, created_on=datetime.now())
comment_content.save()
profile.comments_number = profile.comments_number + 1
profile.save()
return redirect('index')
else:
print('form is invalid')
else:
form_results = CommentForm
context = {'form':form_results,'image':profile}
return render(request,'memeapp/comments.html',context)
def search(request):
title = "Search"
if 'search_query' in request.GET and request.GET["search_query"]:
search_term = request.GET.get("search_query").lower()
searched_results = Image.search_image(search_term)
message = f"{search_term}"
context = {'message': message, 'results': searched_results, 'title': title}
return render(request, 'memeapp/search.html', context)
else:
messages.error(request, "You haven't searched for any term")
message = "You haven't searched for any term"
return render(request, 'memeapp/search.html', {"message": message})
| 2.140625 | 2 |
sparv/modules/hist/diapivot.py | spraakbanken/sparv-pipeline | 17 | 3987 | """Create diapivot annotation."""
import logging
import pickle
import xml.etree.ElementTree as etree
import sparv.util as util
from sparv import Annotation, Model, ModelOutput, Output, annotator, modelbuilder
log = logging.getLogger(__name__)
PART_DELIM1 = "^1"
# @annotator("Diapivot annotation", language=["swe-1800"])
def diapivot_annotate(out: Output = Output("<token>:hist.diapivot", description="SALDO IDs corresponding to lemgrams"),
lemgram: Annotation = Annotation("<token>:saldo.lemgram"),
model: Model = Model("hist/diapivot.pickle")):
"""Annotate each lemgram with its corresponding saldo_id according to model.
Args:
out (str, optional): Resulting annotation file.
Defaults to Output("<token>:hist.diapivot", description="SALDO IDs corresponding to lemgrams").
lemgram (str, optional): Existing lemgram annotation. Defaults to Annotation("<token>:saldo.lemgram").
model (str, optional): Crosslink model. Defaults to Model("hist/diapivot.pickle").
"""
lexicon = PivotLexicon(model)
lemgram_annotation = list(lemgram.read())
out_annotation = []
for lemgrams in lemgram_annotation:
saldo_ids = []
for lemgram in lemgrams.split(util.DELIM):
s_i = lexicon.get_exactMatch(lemgram)
if s_i:
saldo_ids += [s_i]
out_annotation.append(util.AFFIX + util.DELIM.join(set(saldo_ids)) + util.AFFIX if saldo_ids else util.AFFIX)
out.write(out_annotation)
# @modelbuilder("Diapivot model", language=["swe"])
def build_diapivot(out: ModelOutput = ModelOutput("hist/diapivot.pickle")):
"""Download diapivot XML dictionary and save as a pickle file."""
# Download diapivot.xml
xml_model = Model("hist/diapivot.xml")
xml_model.download("https://svn.spraakdata.gu.se/sb-arkiv/pub/lmf/diapivot/diapivot.xml")
# Create pickle file
xml_lexicon = read_xml(xml_model.path)
log.info("Saving cross lexicon in Pickle format")
picklex = {}
for lem in xml_lexicon:
lemgrams = []
for saldo, match in list(xml_lexicon[lem].items()):
lemgrams.append(PART_DELIM1.join([saldo, match]))
picklex[lem] = sorted(lemgrams)
out.write_pickle(picklex)
# Clean up
xml_model.remove()
################################################################################
# Auxiliaries
################################################################################
class PivotLexicon:
"""A lexicon for old swedish SALDO lookups.
It is initialized from a pickled file.
"""
def __init__(self, crossfile, verbose=True):
"""Read pickled lexicon."""
if verbose:
log.info("Reading cross lexicon: %s", crossfile)
with open(crossfile, "rb") as F:
self.lexicon = pickle.load(F)
if verbose:
log.info("OK, read %d words", len(self.lexicon))
def lookup(self, lem):
"""Lookup a word in the lexicon."""
if lem.lower() == lem:
annotation_tag_pairs = self.lexicon.get(lem, [])
else:
annotation_tag_pairs = self.lexicon.get(lem, []) + self.lexicon.get(lem.lower(), [])
return list(map(_split_val, annotation_tag_pairs))
def get_exactMatch(self, word):
"""Get only exact matches from lexicon."""
s = self.lookup(word)
if s and s[0] == "exactMatch":
return s[1]
def _split_val(key_val):
return key_val.rsplit(PART_DELIM1)[1]
def read_xml(xml):
"""Read the XML version of crosslinked lexicon."""
log.info("Reading XML lexicon")
lexicon = {}
context = etree.iterparse(xml, events=("start", "end")) # "start" needed to save reference to root element
context = iter(context)
_event, root = next(context)
for event, elem in context:
if event == "end":
if elem.tag == 'LexicalEntry':
lemma = elem.find("Lemma")
dalin, saldo = [], ''
for form in lemma.findall("FormRepresentation"):
cat = _findval(form, "category")
lem = _findval(form, "lemgram")
if cat == "modern":
saldo = lem
else:
match = _findval(form, "match")
dalin += [(lem, match)]
[lexicon.update({d: {'saldo': saldo, 'match': m}}) for (d, m) in dalin]
# Done parsing section. Clear tree to save memory
if elem.tag in ['LexicalEntry', 'frame', 'resFrame']:
root.clear()
testwords = ["tigerhjerta..nn.1",
"lågland..nn.1",
"gud..nn.1"]
util.test_lexicon(lexicon, testwords)
log.info("OK, read")
return lexicon
def _findval(elems, key):
for form in elems:
att = form.get("att", "")
if att == key:
return form.get("val")
return ""
| 2.546875 | 3 |
src/xbot/util/path.py | xinyang178/xbot | 77 | 3988 | import os
def get_root_path():
current_path = os.path.abspath(os.path.dirname(__file__))
root_path = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.dirname(current_path)))
)
return os.path.join(root_path, "xbot")
def get_config_path():
config_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../config"))
return config_path
def get_data_path():
data_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../../../data/")
)
return data_path
| 3.03125 | 3 |
home/website/wagtail_hooks.py | HackSoftware/hackconf.bg | 12 | 3989 | from django.utils.html import format_html
from wagtail.wagtailcore import hooks
@hooks.register('insert_editor_js')
def enable_source():
return format_html(
"""
<script>
registerHalloPlugin('hallohtml');
</script>
"""
)
| 1.585938 | 2 |
src/reporter/tests/test_api.py | msgis/ngsi-timeseries-api | 0 | 3990 | from conftest import QL_URL
import requests
def test_api():
api_url = "{}/".format(QL_URL)
r = requests.get('{}'.format(api_url))
assert r.status_code == 200, r.text
assert r.json() == {
"notify_url": "/v2/notify",
"subscriptions_url": "/v2/subscriptions",
"entities_url": "/v2/entities",
"types_url": "/v2/types",
"attributes_url": "/v2/attrs"
}
| 2.671875 | 3 |
zorg/buildbot/conditions/FileConditions.py | dyung/llvm-zorg | 27 | 3991 | from buildbot.process.remotecommand import RemoteCommand
from buildbot.interfaces import WorkerTooOldError
import stat
class FileExists(object):
"""I check a file existence on the worker. I return True if the file
with the given name exists, False if the file does not exist or that is
a directory.
Use me with doStepIf to make a build step conditional to existence of some
file. For example
doStepIf=FileExists('build/configure')
"""
def __init__(self, filename):
self.filename = filename
def __call__(self, step):
step.checkWorkerHasCommand('stat')
cmd = RemoteCommand('stat', {'file': self.filename})
d = step.runCommand(cmd)
d.addCallback(lambda res: self.commandComplete(cmd))
return d
def commandComplete(self, cmd):
if cmd.didFail():
return False
s = cmd.updates["stat"][-1]
filemode = s[stat.ST_MODE]
if stat.S_ISREG(filemode) or stat.S_ISLNK(filemode):
# True only if this is a file or a link and not any other file
# system object.
return True
else:
return False
class FileDoesNotExist(object):
"""I check a file existence on the worker. I return False if
the file with the given name exists or that is a directory, True if the
file does not exist.
Use me with doStepIf to make a build step conditional to nonexistence
of some file. For example
doStepIf=FileDoesNotExist('build/configure')
"""
def __init__(self, filename):
self.filename = filename
def __call__(self, step):
step.checkWorkerHasCommand('stat')
cmd = RemoteCommand('stat', {'file': self.filename})
d = step.runCommand(cmd)
d.addCallback(lambda res: self.commandComplete(cmd))
return d
def commandComplete(self, cmd):
# False if any filesystem object with the given name exists.
return cmd.didFail()
| 2.90625 | 3 |
gym_combat/gym_combat/envs/main.py | refaev/combat_gym | 0 | 3992 | <filename>gym_combat/gym_combat/envs/main.py
from matplotlib import style
from tqdm import tqdm
style.use("ggplot")
from gym_combat.envs.Arena.CState import State
from gym_combat.envs.Arena.Entity import Entity
from gym_combat.envs.Arena.Environment import Environment, Episode
from gym_combat.envs.Common.constants import *
from gym_combat.envs.Qtable import Qtable_DecisionMaker
from gym_combat.envs.DQN import DQNAgent_keras
from gym_combat.envs.Greedy import Greedy_player
import matplotlib.pyplot as plt
def print_start_of_game_info(blue_decision_maker, red_decision_maker):
print("Starting tournament!")
print("Blue player type: ", Agent_type_str[blue_decision_maker.type()])
if blue_decision_maker.path_model_to_load==None:
print("Blue player starting with no model")
else:
print("Blue player starting tournament with trained model: " , blue_decision_maker.path_model_to_load)
print("Red player type: ", Agent_type_str[red_decision_maker.type()])
if red_decision_maker.path_model_to_load==None:
print("Red player starting with no model")
else:
print("Red player starting tournament with trained model: " , red_decision_maker.path_model_to_load)
print("Number of rounds: ", NUM_OF_EPISODES)
print("~~~ GO! ~~~\n\n")
def evaluate(episode_number):
#if episode_number % EVALUATE_PLAYERS_EVERY == 0:
a = episode_number % EVALUATE_PLAYERS_EVERY
if a>=0 and a<EVALUATE_BATCH_SIZE:
EVALUATE = True
else:
EVALUATE = False
return EVALUATE
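    # Worked example (assumed values, for illustration only): with EVALUATE_PLAYERS_EVERY = 100
    # and EVALUATE_BATCH_SIZE = 10, any episode whose number modulo 100 is below 10
    # (e.g. 100-109, 200-209, ...) is treated as an evaluation episode.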
def print_states(observation_for_blue_s0, observation_for_blue_s1):
import matplotlib.pyplot as plt
plt.matshow(observation_for_blue_s0.img)
plt.show()
plt.matshow(observation_for_blue_s1.img)
plt.show()
if __name__ == '__main__':
env = Environment(IS_TRAINING)
print("Starting Blue player")
blue_decision_maker = DQNAgent_keras.DQNAgent_keras()
#blue_decision_maker = DQNAgent_keras.DQNAgent_keras(UPDATE_CONTEXT=True, path_model_to_load='conv1(6_6_1_256)_conv2(4_4_256_128)_conv3(3_3_128_128)_flatten_fc__blue_202001_ 0.95max_ -0.04avg_ -3.10min__1620558885.model')
print("Starting red player")
### Red Decision Maker
red_decision_maker = Greedy_player.Greedy_player()
env.blue_player = Entity(blue_decision_maker)
env.red_player = Entity(red_decision_maker)
print_start_of_game_info(blue_decision_maker, red_decision_maker)
NUM_OF_EPISODES = env.NUMBER_OF_EPISODES
for episode in tqdm(range(1, NUM_OF_EPISODES + 1), ascii=True, unit='episodes'):
EVALUATE = evaluate(episode)
current_episode = Episode(episode, EVALUATE, show_always=False if IS_TRAINING else True)
# set new start position for the players
env.reset_game(episode)
# get observation
observation_for_blue_s0: State = env.get_observation_for_blue()
action_blue = -1
# initialize the decision_makers for the players
blue_decision_maker.set_initial_state(observation_for_blue_s0, episode)
#red_decision_maker.set_initial_state(observation_for_red_s0, episode) # for non-greedy players
blue_won_the_game = False
red_won_the_game = False
for steps_current_game in range(1, MAX_STEPS_PER_EPISODE + 1):
##### Blue's turn! #####
observation_for_blue_s0: State = env.get_observation_for_blue()
current_episode.print_episode(env, steps_current_game)
action_blue: AgentAction = blue_decision_maker.get_action(observation_for_blue_s0, EVALUATE)
env.take_action(Color.Blue, action_blue) # take the action!
current_episode.print_episode(env, steps_current_game)
current_episode.is_terminal = (env.compute_terminal(whos_turn=Color.Blue) is not WinEnum.NoWin)
if current_episode.is_terminal:# Blue won the game!
blue_won_the_game=True
else:
##### Red's turn! #####
observation_for_red_s0: State = env.get_observation_for_red()
action_red: AgentAction = red_decision_maker.get_action(observation_for_red_s0, EVALUATE)
env.take_action(Color.Red, action_red) # take the action!
current_episode.is_terminal = (env.compute_terminal(whos_turn=Color.Red) is not WinEnum.NoWin)
                if current_episode.is_terminal:  # Red won the game!
red_won_the_game = True
current_episode.print_episode(env, steps_current_game)
reward_step_blue, reward_step_red = env.handle_reward(steps_current_game)
current_episode.episode_reward_red += reward_step_red
current_episode.episode_reward_blue += reward_step_blue
observation_for_blue_s1: State = env.get_observation_for_blue()
blue_decision_maker.update_context(observation_for_blue_s0, action_blue, reward_step_blue, observation_for_blue_s1,
current_episode.is_terminal, EVALUATE)
if steps_current_game == MAX_STEPS_PER_EPISODE:
# if we exited the loop because we reached MAX_STEPS_PER_EPISODE
current_episode.is_terminal = True
if blue_won_the_game or red_won_the_game:
break
# for statistics
env.update_win_counters(steps_current_game)
env.data_for_statistics(current_episode.episode_reward_blue, current_episode.episode_reward_red, steps_current_game, blue_decision_maker.get_epsolon())
env.evaluate_info(EVALUATE, episode, steps_current_game, blue_decision_maker.get_epsolon())
if current_episode.episode_number % SAVE_STATS_EVERY == 0:
if False:#blue_decision_maker.type()== AgentType.DQN_keras or blue_decision_maker.type() == AgentType.DQN_basic:
blue_decision_maker._decision_maker.print_model(observation_for_blue_s0, episode, "conv")#env.save_folder_path)
# print info of episode:
current_episode.print_info_of_episode(env, steps_current_game, blue_decision_maker.get_epsolon(), episode)
env.end_run()
if blue_decision_maker.type() == AgentType.DQN_keras or blue_decision_maker.type() == AgentType.DQN_basic:
blue_decision_maker._decision_maker.print_model(observation_for_blue_s0, episode, env.save_folder_path)
| 2.28125 | 2 |
libqif/core/hyper.py | ramongonze/libqif | 2 | 3993 | """Hyper-distributions."""
from libqif.core.secrets import Secrets
from libqif.core.channel import Channel
from numpy import array, arange, zeros
from numpy import delete as npdelete
class Hyper:
def __init__(self, channel):
"""Hyper-distribution. To create an instance of this class it is
class it is necessary to have an instance of :py:class:`.Channel`
class. Once created an instance of :py:class:`.Hyper`, the constructor
generates the joint, outer and inner distributions.
Attributes
----------
channel : core.Channel
Channel object.
joint : numpy.ndarray
Matrix of joint distribution.
outer : numpy.ndarray
Outer distribution.
inners : numpy.ndarray
Matrix of inner distributions.
num_posteriors : int
            Number of posterior distributions that result from reducing the
            hyper-distribution, i.e., removing columns that contain only
            zeros and merging columns where one of them is a linear combination
            of the other.
Parameters
----------
channel : core.Channel
Channel object.
"""
self._check_types(channel)
self.channel = channel
self.joint = self._generate_joint_distribution()
self.outer, self.inners = self._generate_posteriors()
self._reduce_hyper()
self.num_posteriors = len(self.outer)
def update_prior(self, prior):
"""Update the prior distribution on set of secrets.
The number of secrets must match the current number of rows of the channel.
Parameters
----------
prior : list, numpy.ndarray
Prior distribution on the set of secrets. prior[i] is the
probability of secret named labels[i] beeing the real secret.
"""
self.channel.update_prior(prior)
self.joint = self._generate_joint_distribution()
self.outer, self.inners = self._generate_posteriors()
self._reduce_hyper()
self.num_posteriors = len(self.outer)
def _check_types(self, channel):
        if not isinstance(channel, Channel):
raise TypeError('The parameter \'channel\' must be a core.channel.Channel object')
def _generate_joint_distribution(self):
joint = []
channel_t = self.channel.matrix.T
for i in arange(self.channel.num_outputs):
joint.append(self.channel.secrets.prior * channel_t[i])
return array(joint).T
def _generate_posteriors(self):
joint_t = self.joint.T.copy()
outer = []
for i in arange(self.channel.num_outputs):
outer.append(joint_t[i].sum())
if outer[i] > 0:
joint_t[i] = joint_t[i]/outer[i]
return array(outer), joint_t.T
def _reduce_hyper(self):
"""Given the hyper-distribution generated by _generate_posteriors
remove columns with zeros and merge columns that are a linear
        combination of others. This algorithm has time complexity O(n*m^2),
        where n is the number of secrets and m is the number of outputs of
        the channel.
"""
epsilon = 10**(-6)
# Delete inners that have 0 probability of occuring
zero_prob = self.outer < epsilon
self.outer = npdelete(self.outer, zero_prob, 0)
self.inners = npdelete(self.inners, zero_prob, 1)
delete_inner = [False] * len(self.outer)
for i in arange(self.inners.shape[1]):
for j in arange(i+1, self.inners.shape[1]):
# Check if inner i is equal to inner j
if (abs(self.inners[:,i] - self.inners[:,j]) < epsilon).sum() == self.channel.secrets.num_secrets:
delete_inner[j] = True # Delete inner j
self.outer[i] += self.outer[j] # Merge inner j into inner i
self.outer = npdelete(self.outer, delete_inner, 0)
self.inners = npdelete(self.inners, delete_inner, 1)
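
# Minimal usage sketch (added for illustration): the secret labels, prior and
# channel matrix below are assumed example values, not part of the library;
# they mirror the dummy Channel constructed in _check_types above.
if __name__ == "__main__":
    example_secrets = Secrets(['x1', 'x2'], [0.5, 0.5])
    example_channel = Channel(example_secrets, ['y1', 'y2'], array([[1, 0], [0, 1]]))
    example_hyper = Hyper(example_channel)
    print(example_hyper.outer)   # outer distribution over the posteriors
    print(example_hyper.inners)  # one inner (posterior) distribution per column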
| 3.28125 | 3 |
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_system_virtual_wan_link.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 17 | 3994 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_virtual_wan_link
short_description: Configure redundant internet connections using SD-WAN (formerly virtual WAN link) in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and virtual_wan_link category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- <NAME> (@mamunozgonzalez)
- <NAME> (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
system_virtual_wan_link:
description:
- Configure redundant internet connections using SD-WAN (formerly virtual WAN link).
default: null
type: dict
suboptions:
fail_alert_interfaces:
description:
- Physical interfaces that will be alerted.
type: list
suboptions:
name:
description:
- Physical interface name. Source system.interface.name.
required: true
type: str
fail_detect:
description:
- Enable/disable SD-WAN Internet connection status checking (failure detection).
type: str
choices:
- enable
- disable
health_check:
description:
- SD-WAN status checking or health checking. Identify a server on the Internet and determine how SD-WAN verifies that the FortiGate can
communicate with it.
type: list
suboptions:
addr_mode:
description:
- Address mode (IPv4 or IPv6).
type: str
choices:
- ipv4
- ipv6
failtime:
description:
- Number of failures before server is considered lost (1 - 3600).
type: int
http_agent:
description:
- String in the http-agent field in the HTTP header.
type: str
http_get:
description:
- URL used to communicate with the server if the protocol if the protocol is HTTP.
type: str
http_match:
description:
- Response string expected from the server if the protocol is HTTP.
type: str
interval:
description:
- Status check interval, or the time between attempting to connect to the server (1 - 3600 sec).
type: int
members:
description:
- Member sequence number list.
type: list
suboptions:
seq_num:
description:
- Member sequence number. Source system.virtual-wan-link.members.seq-num.
type: int
name:
description:
- Status check or health check name.
required: true
type: str
packet_size:
description:
- Packet size of a twamp test session,
type: int
password:
description:
- Twamp controller password in authentication mode
type: str
port:
description:
- Port number used to communicate with the server over the selected protocol.
type: int
protocol:
description:
- Protocol used to determine if the FortiGate can communicate with the server.
type: str
choices:
- ping
- tcp-echo
- udp-echo
- http
- twamp
- ping6
recoverytime:
description:
- Number of successful responses received before server is considered recovered (1 - 3600).
type: int
security_mode:
description:
- Twamp controller security mode.
type: str
choices:
- none
- authentication
server:
description:
- IP address or FQDN name of the server.
type: str
sla:
description:
- Service level agreement (SLA).
type: list
suboptions:
id:
description:
- SLA ID.
required: true
type: int
jitter_threshold:
description:
- Jitter for SLA to make decision in milliseconds. (0 - 10000000).
type: int
latency_threshold:
description:
- Latency for SLA to make decision in milliseconds. (0 - 10000000).
type: int
link_cost_factor:
description:
- Criteria on which to base link selection.
type: str
choices:
- latency
- jitter
- packet-loss
packetloss_threshold:
description:
- Packet loss for SLA to make decision in percentage. (0 - 100).
type: int
threshold_alert_jitter:
description:
- Alert threshold for jitter (ms).
type: int
threshold_alert_latency:
description:
- Alert threshold for latency (ms).
type: int
threshold_alert_packetloss:
description:
- Alert threshold for packet loss (percentage).
type: int
threshold_warning_jitter:
description:
- Warning threshold for jitter (ms).
type: int
threshold_warning_latency:
description:
- Warning threshold for latency (ms).
type: int
threshold_warning_packetloss:
description:
- Warning threshold for packet loss (percentage).
type: int
update_cascade_interface:
description:
- Enable/disable update cascade interface.
type: str
choices:
- enable
- disable
update_static_route:
description:
- Enable/disable updating the static route.
type: str
choices:
- enable
- disable
load_balance_mode:
description:
- Algorithm or mode to use for load balancing Internet traffic to SD-WAN members.
type: str
choices:
- source-ip-based
- weight-based
- usage-based
- source-dest-ip-based
- measured-volume-based
members:
description:
- Physical FortiGate interfaces added to the virtual-wan-link.
type: list
suboptions:
comment:
description:
- Comments.
type: str
gateway:
description:
- The default gateway for this interface. Usually the default gateway of the Internet service provider that this interface is
connected to.
type: str
gateway6:
description:
- IPv6 gateway.
type: str
ingress_spillover_threshold:
description:
- Ingress spillover threshold for this interface (0 - 16776000 kbit/s). When this traffic volume threshold is reached, new
sessions spill over to other interfaces in the SD-WAN.
type: int
interface:
description:
- Interface name. Source system.interface.name.
type: str
priority:
description:
- Priority of the interface (0 - 4294967295). Used for SD-WAN rules or priority rules.
type: int
seq_num:
description:
- Sequence number(1-255).
type: int
source:
description:
- Source IP address used in the health-check packet to the server.
type: str
source6:
description:
- Source IPv6 address used in the health-check packet to the server.
type: str
spillover_threshold:
description:
- Egress spillover threshold for this interface (0 - 16776000 kbit/s). When this traffic volume threshold is reached, new sessions
spill over to other interfaces in the SD-WAN.
type: int
status:
description:
- Enable/disable this interface in the SD-WAN.
type: str
choices:
- disable
- enable
volume_ratio:
description:
- Measured volume ratio (this value / sum of all values = percentage of link volume, 0 - 255).
type: int
weight:
description:
- Weight of this interface for weighted load balancing. (0 - 255) More traffic is directed to interfaces with higher weights.
type: int
service:
description:
- Create SD-WAN rules or priority rules (also called services) to control how sessions are distributed to physical interfaces in the
SD-WAN.
type: list
suboptions:
addr_mode:
description:
- Address mode (IPv4 or IPv6).
type: str
choices:
- ipv4
- ipv6
bandwidth_weight:
description:
- Coefficient of reciprocal of available bidirectional bandwidth in the formula of custom-profile-1.
type: int
default:
description:
- Enable/disable use of SD-WAN as default service.
type: str
choices:
- enable
- disable
dscp_forward:
description:
- Enable/disable forward traffic DSCP tag.
type: str
choices:
- enable
- disable
dscp_forward_tag:
description:
- Forward traffic DSCP tag.
type: str
dscp_reverse:
description:
- Enable/disable reverse traffic DSCP tag.
type: str
choices:
- enable
- disable
dscp_reverse_tag:
description:
- Reverse traffic DSCP tag.
type: str
dst:
description:
- Destination address name.
type: list
suboptions:
name:
description:
- Address or address group name. Source firewall.address.name firewall.addrgrp.name.
required: true
type: str
dst_negate:
description:
- Enable/disable negation of destination address match.
type: str
choices:
- enable
- disable
dst6:
description:
- Destination address6 name.
type: list
suboptions:
name:
description:
- Address6 or address6 group name. Source firewall.address6.name firewall.addrgrp6.name.
required: true
type: str
end_port:
description:
- End destination port number.
type: int
gateway:
description:
- Enable/disable SD-WAN service gateway.
type: str
choices:
- enable
- disable
groups:
description:
- User groups.
type: list
suboptions:
name:
description:
- Group name. Source user.group.name.
required: true
type: str
health_check:
description:
- Health check. Source system.virtual-wan-link.health-check.name.
type: str
hold_down_time:
description:
- Waiting period in seconds when switching from the back-up member to the primary member (0 - 10000000).
type: int
id:
description:
- Priority rule ID (1 - 4000).
required: true
type: int
input_device:
description:
- Source interface name.
type: list
suboptions:
name:
description:
- Interface name. Source system.interface.name.
required: true
type: str
internet_service:
description:
- Enable/disable use of Internet service for application-based load balancing.
type: str
choices:
- enable
- disable
internet_service_ctrl:
description:
- Control-based Internet Service ID list.
type: list
suboptions:
id:
description:
- Control-based Internet Service ID.
required: true
type: int
internet_service_ctrl_group:
description:
- Control-based Internet Service group list.
type: list
suboptions:
name:
description:
- Control-based Internet Service group name. Source application.group.name.
required: true
type: str
internet_service_custom:
description:
- Custom Internet service name list.
type: list
suboptions:
name:
description:
- Custom Internet service name. Source firewall.internet-service-custom.name.
required: true
type: str
internet_service_custom_group:
description:
- Custom Internet Service group list.
type: list
suboptions:
name:
description:
- Custom Internet Service group name. Source firewall.internet-service-custom-group.name.
required: true
type: str
internet_service_group:
description:
- Internet Service group list.
type: list
suboptions:
name:
description:
- Internet Service group name. Source firewall.internet-service-group.name.
required: true
type: str
internet_service_id:
description:
- Internet service ID list.
type: list
suboptions:
id:
description:
- Internet service ID. Source firewall.internet-service.id.
required: true
type: int
jitter_weight:
description:
- Coefficient of jitter in the formula of custom-profile-1.
type: int
latency_weight:
description:
- Coefficient of latency in the formula of custom-profile-1.
type: int
link_cost_factor:
description:
- Link cost factor.
type: str
choices:
- latency
- jitter
- packet-loss
- inbandwidth
- outbandwidth
- bibandwidth
- custom-profile-1
link_cost_threshold:
description:
- Percentage threshold change of link cost values that will result in policy route regeneration (0 - 10000000).
type: int
member:
description:
- Member sequence number.
type: int
mode:
description:
- Control how the priority rule sets the priority of interfaces in the SD-WAN.
type: str
choices:
- auto
- manual
- priority
- sla
name:
description:
- Priority rule name.
type: str
packet_loss_weight:
description:
- Coefficient of packet-loss in the formula of custom-profile-1.
type: int
priority_members:
description:
- Member sequence number list.
type: list
suboptions:
seq_num:
description:
- Member sequence number. Source system.virtual-wan-link.members.seq-num.
type: int
protocol:
description:
- Protocol number.
type: int
quality_link:
description:
- Quality grade.
type: int
route_tag:
description:
- IPv4 route map route-tag.
type: int
sla:
description:
- Service level agreement (SLA).
type: list
suboptions:
health_check:
description:
- Virtual WAN Link health-check. Source system.virtual-wan-link.health-check.name.
type: str
id:
description:
- SLA ID.
type: int
src:
description:
- Source address name.
type: list
suboptions:
name:
description:
- Address or address group name. Source firewall.address.name firewall.addrgrp.name.
required: true
type: str
src_negate:
description:
- Enable/disable negation of source address match.
type: str
choices:
- enable
- disable
src6:
description:
- Source address6 name.
type: list
suboptions:
name:
description:
- Address6 or address6 group name. Source firewall.address6.name firewall.addrgrp6.name.
required: true
type: str
start_port:
description:
- Start destination port number.
type: int
status:
description:
- Enable/disable SD-WAN service.
type: str
choices:
- enable
- disable
tos:
description:
- Type of service bit pattern.
type: str
tos_mask:
description:
- Type of service evaluated bits.
type: str
users:
description:
- User name.
type: list
suboptions:
name:
description:
- User name. Source user.local.name.
required: true
type: str
status:
description:
- Enable/disable SD-WAN.
type: str
choices:
- disable
- enable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure redundant internet connections using SD-WAN (formerly virtual WAN link).
fortios_system_virtual_wan_link:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
system_virtual_wan_link:
fail_alert_interfaces:
-
name: "default_name_4 (source system.interface.name)"
fail_detect: "enable"
health_check:
-
addr_mode: "ipv4"
failtime: "8"
http_agent: "<your_own_value>"
http_get: "<your_own_value>"
http_match: "<your_own_value>"
interval: "12"
members:
-
seq_num: "14 (source system.virtual-wan-link.members.seq-num)"
name: "default_name_15"
packet_size: "16"
password: "<<PASSWORD>>"
port: "18"
protocol: "ping"
recoverytime: "20"
security_mode: "none"
server: "192.168.100.40"
sla:
-
id: "24"
jitter_threshold: "25"
latency_threshold: "26"
link_cost_factor: "latency"
packetloss_threshold: "28"
threshold_alert_jitter: "29"
threshold_alert_latency: "30"
threshold_alert_packetloss: "31"
threshold_warning_jitter: "32"
threshold_warning_latency: "33"
threshold_warning_packetloss: "34"
update_cascade_interface: "enable"
update_static_route: "enable"
load_balance_mode: "source-ip-based"
members:
-
comment: "Comments."
gateway: "<your_own_value>"
gateway6: "<your_own_value>"
ingress_spillover_threshold: "42"
interface: "<your_own_value> (source system.interface.name)"
priority: "44"
seq_num: "45"
source: "<your_own_value>"
source6: "<your_own_value>"
spillover_threshold: "48"
status: "disable"
volume_ratio: "50"
weight: "51"
service:
-
addr_mode: "ipv4"
bandwidth_weight: "54"
default: "enable"
dscp_forward: "enable"
dscp_forward_tag: "<your_own_value>"
dscp_reverse: "enable"
dscp_reverse_tag: "<your_own_value>"
dst:
-
name: "default_name_61 (source firewall.address.name firewall.addrgrp.name)"
dst_negate: "enable"
dst6:
-
name: "default_name_64 (source firewall.address6.name firewall.addrgrp6.name)"
end_port: "65"
gateway: "enable"
groups:
-
name: "default_name_68 (source user.group.name)"
health_check: "<your_own_value> (source system.virtual-wan-link.health-check.name)"
hold_down_time: "70"
id: "71"
input_device:
-
name: "default_name_73 (source system.interface.name)"
internet_service: "enable"
internet_service_ctrl:
-
id: "76"
internet_service_ctrl_group:
-
name: "default_name_78 (source application.group.name)"
internet_service_custom:
-
name: "default_name_80 (source firewall.internet-service-custom.name)"
internet_service_custom_group:
-
name: "default_name_82 (source firewall.internet-service-custom-group.name)"
internet_service_group:
-
name: "default_name_84 (source firewall.internet-service-group.name)"
internet_service_id:
-
id: "86 (source firewall.internet-service.id)"
jitter_weight: "87"
latency_weight: "88"
link_cost_factor: "latency"
link_cost_threshold: "90"
member: "91"
mode: "auto"
name: "default_name_93"
packet_loss_weight: "94"
priority_members:
-
seq_num: "96 (source system.virtual-wan-link.members.seq-num)"
protocol: "97"
quality_link: "98"
route_tag: "99"
sla:
-
health_check: "<your_own_value> (source system.virtual-wan-link.health-check.name)"
id: "102"
src:
-
name: "default_name_104 (source firewall.address.name firewall.addrgrp.name)"
src_negate: "enable"
src6:
-
name: "default_name_107 (source firewall.address6.name firewall.addrgrp6.name)"
start_port: "108"
status: "enable"
tos: "<your_own_value>"
tos_mask: "<your_own_value>"
users:
-
name: "default_name_113 (source user.local.name)"
status: "disable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_system_virtual_wan_link_data(json):
option_list = ['fail_alert_interfaces', 'fail_detect', 'health_check',
'load_balance_mode', 'members', 'service',
'status']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def system_virtual_wan_link(data, fos):
vdom = data['vdom']
system_virtual_wan_link_data = data['system_virtual_wan_link']
filtered_data = underscore_to_hyphen(filter_system_virtual_wan_link_data(system_virtual_wan_link_data))
return fos.set('system',
'virtual-wan-link',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
if data['system_virtual_wan_link']:
resp = system_virtual_wan_link(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"system_virtual_wan_link": {
"required": False, "type": "dict", "default": None,
"options": {
"fail_alert_interfaces": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"fail_detect": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"health_check": {"required": False, "type": "list",
"options": {
"addr_mode": {"required": False, "type": "str",
"choices": ["ipv4", "ipv6"]},
"failtime": {"required": False, "type": "int"},
"http_agent": {"required": False, "type": "str"},
"http_get": {"required": False, "type": "str"},
"http_match": {"required": False, "type": "str"},
"interval": {"required": False, "type": "int"},
"members": {"required": False, "type": "list",
"options": {
"seq_num": {"required": False, "type": "int"}
}},
"name": {"required": True, "type": "str"},
"packet_size": {"required": False, "type": "int"},
"password": {"required": False, "type": "str"},
"port": {"required": False, "type": "int"},
"protocol": {"required": False, "type": "str",
"choices": ["ping", "tcp-echo", "udp-echo",
"http", "twamp", "ping6"]},
"recoverytime": {"required": False, "type": "int"},
"security_mode": {"required": False, "type": "str",
"choices": ["none", "authentication"]},
"server": {"required": False, "type": "str"},
"sla": {"required": False, "type": "list",
"options": {
"id": {"required": True, "type": "int"},
"jitter_threshold": {"required": False, "type": "int"},
"latency_threshold": {"required": False, "type": "int"},
"link_cost_factor": {"required": False, "type": "str",
"choices": ["latency", "jitter", "packet-loss"]},
"packetloss_threshold": {"required": False, "type": "int"}
}},
"threshold_alert_jitter": {"required": False, "type": "int"},
"threshold_alert_latency": {"required": False, "type": "int"},
"threshold_alert_packetloss": {"required": False, "type": "int"},
"threshold_warning_jitter": {"required": False, "type": "int"},
"threshold_warning_latency": {"required": False, "type": "int"},
"threshold_warning_packetloss": {"required": False, "type": "int"},
"update_cascade_interface": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"update_static_route": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"load_balance_mode": {"required": False, "type": "str",
"choices": ["source-ip-based", "weight-based", "usage-based",
"source-dest-ip-based", "measured-volume-based"]},
"members": {"required": False, "type": "list",
"options": {
"comment": {"required": False, "type": "str"},
"gateway": {"required": False, "type": "str"},
"gateway6": {"required": False, "type": "str"},
"ingress_spillover_threshold": {"required": False, "type": "int"},
"interface": {"required": False, "type": "str"},
"priority": {"required": False, "type": "int"},
"seq_num": {"required": False, "type": "int"},
"source": {"required": False, "type": "str"},
"source6": {"required": False, "type": "str"},
"spillover_threshold": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"volume_ratio": {"required": False, "type": "int"},
"weight": {"required": False, "type": "int"}
}},
"service": {"required": False, "type": "list",
"options": {
"addr_mode": {"required": False, "type": "str",
"choices": ["ipv4", "ipv6"]},
"bandwidth_weight": {"required": False, "type": "int"},
"default": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dscp_forward": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dscp_forward_tag": {"required": False, "type": "str"},
"dscp_reverse": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dscp_reverse_tag": {"required": False, "type": "str"},
"dst": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"dst_negate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dst6": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"end_port": {"required": False, "type": "int"},
"gateway": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"groups": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"health_check": {"required": False, "type": "str"},
"hold_down_time": {"required": False, "type": "int"},
"id": {"required": True, "type": "int"},
"input_device": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"internet_service": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"internet_service_ctrl": {"required": False, "type": "list",
"options": {
"id": {"required": True, "type": "int"}
}},
"internet_service_ctrl_group": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"internet_service_custom": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"internet_service_custom_group": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"internet_service_group": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"internet_service_id": {"required": False, "type": "list",
"options": {
"id": {"required": True, "type": "int"}
}},
"jitter_weight": {"required": False, "type": "int"},
"latency_weight": {"required": False, "type": "int"},
"link_cost_factor": {"required": False, "type": "str",
"choices": ["latency", "jitter", "packet-loss",
"inbandwidth", "outbandwidth", "bibandwidth",
"custom-profile-1"]},
"link_cost_threshold": {"required": False, "type": "int"},
"member": {"required": False, "type": "int"},
"mode": {"required": False, "type": "str",
"choices": ["auto", "manual", "priority",
"sla"]},
"name": {"required": False, "type": "str"},
"packet_loss_weight": {"required": False, "type": "int"},
"priority_members": {"required": False, "type": "list",
"options": {
"seq_num": {"required": False, "type": "int"}
}},
"protocol": {"required": False, "type": "int"},
"quality_link": {"required": False, "type": "int"},
"route_tag": {"required": False, "type": "int"},
"sla": {"required": False, "type": "list",
"options": {
"health_check": {"required": False, "type": "str"},
"id": {"required": False, "type": "int"}
}},
"src": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"src_negate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"src6": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"start_port": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"tos": {"required": False, "type": "str"},
"tos_mask": {"required": False, "type": "str"},
"users": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}}
}},
"status": {"required": False, "type": "str",
"choices": ["disable", "enable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_system(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_system(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| 1.671875 | 2 |
src/Puerta.py | victorlujan/Dise-odeSoftwarePatrones | 0 | 3995 | <filename>src/Puerta.py
from ElementoMapa import ElementoMapa
class Puerta (ElementoMapa):
def __init__(self):
self.abierta= True
self.lado2=None
self.lado1=None
def get_abierta(self):
return self.abierta
def print_cosas(self):
print("hola")
def set_abierta(self, value):
self.abierta = value
def get_lado1(self):
return self.lado1
def set_lado1(self, value):
self.lado1 = value
def get_lado2(self):
return self.lado2
def set_lado2(self, value):
self.lado2 = value
def espuerta(self):
return True
def abrir(self):
self.abierta=True
def entrar(self,habitacion):
if self.abierta==True and (self.lado1.id == habitacion.id or self.lado2.id == habitacion.id):
print("Ahora estas en la habitacion", habitacion.id)
if habitacion.hijos[0] == None:
pass
else:
if habitacion.hijos[0].activa == True:
print("La bomba ha estallado")
if self.abierta==False:
print("La puerta esta cerrada")
| 3.171875 | 3 |
pong.py | Teenahshe/ponggame | 0 | 3996 | """
# Step 1 - Create the App
# Step 2 - Create the Game
# Step 3 - Build the Game
# Step 4 - Run the App
"""
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty
from kivy.vector import Vector
from kivy.clock import Clock
from random import randint
class PongPaddle(Widget):
score = NumericProperty(0)
def bounce_ball(self, ball):
if self.collide_widget(ball):
ball.velocity_x *= -1
print('hello world')
class PongBall(Widget):
velocity_x = NumericProperty(0)
velocity_y = NumericProperty(0)
velocity = ReferenceListProperty(velocity_x, velocity_y)
# Latest Position of the Ball = Current Velocity + Current Position
def move(self):
self.pos = Vector(*self.velocity) + self.pos
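        # Worked example (assumed values, for illustration): with velocity == (4, 0)
        # and pos == (100, 100), the new pos becomes (104, 100).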
# update() - moves the ball by calling its move function, and handles bouncing and scoring
# on_touch_down() - when our finger/mouse touches the screen
# on touch_up() - when we lift our finger off the screen after touching it
# on_touch_move() - when we drag our finger on the screen
class PongGame(Widget):
ball = ObjectProperty(None)
player1 = ObjectProperty(None)
player2 = ObjectProperty(None)
def serve_ball(self):
self.ball.velocity = Vector(4, 0).rotate(randint(0, 360))
def update(self, dt):
self.ball.move()
# Bounce off top and bottom Y
if (self.ball.y < 0) or (self.ball.y > self.height - 50):
self.ball.velocity_y *= -1.1
        # Bounce off left and increase the score
if self.ball.x < 0:
self.ball.velocity_x *= -1
self.player1.score += 1
# Bounce off right and increase the score
if self.ball.x > self.width - 50:
self.ball.velocity_x *= -1
self.player2.score += 1
self.player1.bounce_ball(self.ball)
self.player2.bounce_ball(self.ball)
def on_touch_move(self, touch):
        if touch.x < self.width / 4:
self.player1.center_y = touch.y
if touch.x > self.width * 3 / 4:
self.player2.center_y = touch.y
class PongApp(App):
def build(self):
game = PongGame()
game.serve_ball()
Clock.schedule_interval(game.update, 1.0 / 60.0)
return game
PongApp().run()
| 3.984375 | 4 |
get_block_data/relation.py | cyclone923/blocks-world | 1 | 3997 | class SceneRelation:
def __init__(self):
self.on_ground = set()
self.on_block = {}
self.clear = set()
def print_relation(self):
print(self.on_ground)
print(self.on_block)
print(self.clear) | 2.453125 | 2 |
bridge_RL_agent_v16.py | EricZLou/BridgeRLAgent | 0 | 3998 | <filename>bridge_RL_agent_v16.py
"""
CS 238 Final Project: Bridge RL Agent
<NAME> & <NAME>
"""
import copy
import datetime
import numpy as np
import random
from collections import namedtuple
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
REPRESENTATIONS OF BRIDGE
Representing a "Card" as an integer:
Cards 0 -> 12 are Club 2 -> Club 14
Cards 13 -> 25 are Diamond 2 -> Diamond 14
Cards 26 -> 38 are Heart 2 -> Heart 14
Cards 39 -> 51 are Spade 2 -> Spade 14
Jack is 11
Queen is 12
King is 13
Ace is 14
Representing a "Suit" as an integer:
n/a is -1 <-- used in a "State" where no cards have been played yet.
Clubs is 0
Diamonds is 1
Hearts is 2
Spades is 3
Representing a "State" as an opening suit and frozenset of up to 3 "Card"-s:
state = State(1, frozenset(23, 0))
We have a Diamond 12 and Club 2 with an opening suit of Diamonds.
The agent is 3rd to play a card and must play a Diamond if it has one.
Representing the MDP with a Map from a "State" to an array of length-52:
We call this Map "weights". And the array of length-52 represets the
proportion with which the agent should play each of the 52 cards given
that it is at that state.
    In this example, with state = State(1, frozenset({23, 0}), -1), weights[state] will
likely have very large values at indices 24 and 25 since a
Diamond 13 and Diamond 14 will beat the Diamond 12.
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
State = namedtuple('State', ['opening_suit', 'cards_played', 'partners_card'])
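# Illustrative decoding helpers (added for clarity; not part of the original agent).
# They unpack the integer card encoding described above: card // 13 is the suit index
# and card % 13 + 2 is the rank. The names below are hypothetical additions.
SUIT_NAMES = ["Clubs", "Diamonds", "Hearts", "Spades"]
def card_suit(card):
    """Return the suit index (0-3) of an encoded card."""
    return card // 13
def card_rank(card):
    """Return the rank (2-14) of an encoded card, where 11=J, 12=Q, 13=K, 14=A."""
    return card % 13 + 2
# Example: card 23 -> card_suit(23) == 1 (Diamonds) and card_rank(23) == 12 (Queen),
# i.e. the "Diamond 12" used in the examples below; a matching state would be
# State(1, frozenset({23, 0}), -1).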
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" DEFINE SOME CONSTANTS
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
NUM_ACTIONS = 52 # Agent can choose any card to play (only some are valid).
NUM_GAMES_TRAIN = 10000
NUM_GAMES_TEST = 10000
STATS_PER = 1000
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" RL AGENT
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
class BridgeAgent:
def __init__(self):
# We initialize all weights to 1 such that every card has an equal chance of being chosen.
self.weights = {}
self.weights[State(-1, frozenset(), -1)] = np.full(NUM_ACTIONS, 1.0)
for opening_suit in range(4):
for card_1 in range(52):
for card_2 in range(card_1, 52):
for card_3 in range(card_2, 52):
for card_partner in [-1, card_1, card_2, card_3]:
state = State(
opening_suit,
frozenset([card_1, card_2, card_3]),
card_partner)
self.weights[state] = np.full(NUM_ACTIONS, 1.0)
# self.alpha = 0.997 # 1,000
# self.alpha = 0.9995 # 10,000
# self.alpha = 0.99995 # 100,000
self.alpha = 0.999995 # 1,000,000
# self.alpha = 0.9999995 # 5,000,000
self.game_num = 1
"""
EXAMPLE
    state = State(1, frozenset({23, 0}), -1) # Diamond 12, Club 2 <-- first 2 cards in round
    card_played = 24 # Diamond 13 <-- 3rd card in round
    If the 4th card is not 25, then the agent wins. We want to increase the proportion
with which we play 24.
ba.add_win(state, card_played)
"""
def add_win(self, state, card_played):
self.weights[state][card_played] *= (1 + 0.1 * self.alpha ** self.game_num)
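        # Added note: with the alpha used here (0.999995), the factor
        # (1 + 0.1 * alpha ** game_num) starts near 1.1 in game 1 and decays toward 1.0
        # as game_num grows, so early games adjust the weights far more than later ones;
        # add_loss divides by the same factor, keeping wins and losses symmetric on a log scale.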
"""
EXAMPLE
    state = State(1, frozenset({23, 0}), -1)
    card_played = 24
    If the 4th card is 25 (Diamond 14), then the agent loses. We want to decrease the
proportion with which we play 24.
ba.add_loss(state, card_played)
"""
def add_loss(self, state, card_played):
self.weights[state][card_played] /= (1 + 0.1 * self.alpha ** self.game_num)
"""
EXAMPLE
    state = State(1, frozenset({23, 0}), -1)
    cards_in_hand = {0, 1, 4, 8, 11, 20, 24, 38}
    The agent chooses to play whichever remaining card has the highest weight.
    The agent must play a Diamond if it has Diamonds. In this example, the agent
will most likely play 24, which beats 23 <-- hopefully 24 has the highest
weight.
card_played = ba.play_card(state, cards_in_hand)
"""
def play_card(self, state, cards_in_hand):
# Following the EXAMPLE:
# suit = 1
suit = state.opening_suit
# valid_cards = [20, 24]
valid_cards = np.array([i for i in range(suit * 13, (suit + 1) * 13) if i in cards_in_hand])
if len(valid_cards) == 0:
valid_cards = cards_in_hand
# Choose the valid card with highest weight.
# index_into_valid_counts = 1 since 20 has a smaller weight than 24.
# index_into_valid_cards = np.argmax(self.weights[state][valid_cards])
index_into_valid_cards = np.random.choice(np.flatnonzero(self.weights[state][valid_cards] == self.weights[state][valid_cards].max()))
# returns valid_cards[1] = 24
return valid_cards[index_into_valid_cards]
"""
    This function writes the policy at the end of the training phase.
"""
def write_policy(self, cards_in_hand, policy, filename, states_accessed):
        count = 0
        # Open both output files together so the last-game file is still open while
        # iterating over the states (both files receive the same formatted lines).
        with open(filename + "_Last_Game.txt", 'w') as g, open(filename + ".txt", 'w') as f:
            g.write("Cards in Hand: {}\n\n".format(cards_in_hand))
            for state in self.weights:
                line = "State: suit {} | cards played {} | partner's card {}\nBest Card To Play: {}\n\n".format(
                    state.opening_suit, state.cards_played, state.partners_card, policy[count])
                f.write(line)
                if state in states_accessed:
                    g.write(line)
                count += 1
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" UTILITY FUNCTIONS
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
"""
This function shuffles the deck and deals four random 13-card hands.
"""
deck = list(range(52))
def shuffle_cards():
random.shuffle(deck)
return [deck[0:13], deck[13:26], deck[26:39], deck[39:52]]
"""
This function is used by non-agents who play randomly.
"""
def play_random_card(suit, cards_in_hand):
if suit == -1:
return random.choice(cards_in_hand)
valid_cards = [i for i in range(suit * 13, (suit + 1) * 13) if i in cards_in_hand]
if len(valid_cards) == 0:
return random.choice(cards_in_hand)
return random.choice(valid_cards)
"""
This function determines the winner of the round.
"""
def determine_round_winner(suit, cards_played):
max_idx = -1
max_val = -1
for idx, card in enumerate(cards_played):
if suit * 13 <= card < (suit + 1) * 13 and card > max_val:
max_val, max_idx = card, idx
return max_idx
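# Worked example (added): determine_round_winner(1, [23, 0, 24, 40]) returns 2 because the
# Diamond 13 (card 24) is the highest card that follows the diamond suit; the spade (card 40)
# cannot win. Note the return value is the position within cards_played, not the card itself.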
"""
This function determines the declarer based on partnership with the most points.
Return: (agent_is_declarer, declarer_idx)
"""
def agent_declarer(hands):
points = count_points(hands) # determines the number of points in each hand
# agent's partnership has more points and agent is declarer
if points[0] + points[2] > points[1] + points[3] and points[2] > points[0]:
return True, 2
# agent is not declarer and agent should start the play
return False, -1
"""
This function counts the points in each hand.
Note: Ace is 12, 25, 38, 51
"""
def count_points(hands):
points = []
for hand in hands:
p = 0
for card in hand:
if card % 13 == 12:
p += 4
elif card % 13 == 11:
p += 3
elif card % 13 == 10:
p += 2
elif card % 13 == 9:
p += 1
points.append(p)
return points
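# Worked example (added): a hand containing cards 12, 25, and 11 scores 4 + 4 + 3 = 11
# high-card points (Club Ace, Diamond Ace, Club King), following the 4/3/2/1 scale above.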
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" TRACKS PERFORMANCE OF BRIDGE AGENT
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
class BridgeAgentRedFlags:
def __init__(self):
self.RED_FLAG_VIOLATIONS = np.zeros(3)
self.RED_FLAG_TOTAL_COUNT = np.zeros(3)
self.ALL_RED_FLAG_VIOLATIONS = np.zeros(3) # Cumulative
self.ALL_RED_FLAG_TOTAL_COUNT = np.zeros(3) # Cumulative
def clear_red_flags(self):
self.RED_FLAG_VIOLATIONS = np.zeros(3)
self.RED_FLAG_TOTAL_COUNT = np.zeros(3)
"""
This function checks if the agent plays their highest card even though the
highest card already played is higher than theirs.
"""
def highest_card(self, valid_cards, agent_valid_cards, card):
if len(agent_valid_cards) > 1 and max(valid_cards) > max(agent_valid_cards):
self.RED_FLAG_TOTAL_COUNT[0] += 1
self.ALL_RED_FLAG_TOTAL_COUNT[0] += 1
if card == max(agent_valid_cards):
self.RED_FLAG_VIOLATIONS[0] += 1
self.ALL_RED_FLAG_VIOLATIONS[0] += 1
"""
    This function checks whether the agent wins the round when three cards have already been
    played and the agent holds at least one card higher than anything played so far.
"""
def higher_card(self, valid_cards, agent_valid_cards, card, cards_played, partners_cards):
if (len(cards_played) == 3 and len(agent_valid_cards) > 1 and
max(agent_valid_cards) > max(valid_cards) and
max(valid_cards) not in partners_cards
):
self.RED_FLAG_TOTAL_COUNT[1] += 1
self.ALL_RED_FLAG_TOTAL_COUNT[1] += 1
if card < max(valid_cards):
self.RED_FLAG_VIOLATIONS[1] += 1
self.ALL_RED_FLAG_VIOLATIONS[1] += 1
"""
This function checks if the agent plays a higher card even though their partner is guaranteed to win.
"""
def partner_win(self, valid_cards, agent_valid_cards, card, cards_played, partners_cards):
if (len(cards_played) == 3 and len(agent_valid_cards) > 1 and
max(valid_cards) in partners_cards
):
self.RED_FLAG_TOTAL_COUNT[2] += 1
self.ALL_RED_FLAG_TOTAL_COUNT[2] += 1
if card > max(valid_cards):
self.RED_FLAG_VIOLATIONS[2] += 1
self.ALL_RED_FLAG_VIOLATIONS[2] += 1
"""
This function checks for any red flags based on what the agent played.
"""
def assess_card_played(self, hands, card, suit, cards_played, player_idx, partners_cards):
all_valid_cards = list(range(suit * 13, (suit + 1) * 13))
valid_cards = np.array([i for i in all_valid_cards if i in cards_played])
agent_valid_cards = np.array([i for i in all_valid_cards if i in hands[player_idx]])
if suit == -1:
return
# highest card played so far is higher than agent's highest card
self.highest_card(valid_cards, agent_valid_cards, card)
# 3 cards played and agent has higher cards, does it play highest card or highest necessary card?
self.higher_card(valid_cards, agent_valid_cards, card, cards_played, partners_cards)
# 3 cards played + partner has played highest card, does agent play lowest card? do they beat their partner?
self.partner_win(valid_cards, agent_valid_cards, card, cards_played, partners_cards)
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" PLAY A SINGLE GAME OF BRIDGE
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
"""
This function plays 13 rounds (tricks) of 1 NT bridge and returns the number of rounds won by the agent's partnership (N-S).
"""
def play_game(game, hands, train=False, ba=None, barf=None):
partners_cards = copy.copy(hands[0])
agents_cards = copy.copy(hands[2])
declarer, d = agent_declarer(hands)
"""
hands[0] = North's cards
hands[1] = East's cards
hands[2] = Agent's cards
hands[3] = West's cards
"""
round_winner = (d + 1) % 4 # the person to the right of the declarer starts the game
NS_Wins = 0 # used to count total wins in agent partnership
states_accessed = [] # records which states have been updated for this game
# For each round
for _ in range(13):
cards_played = []
agent_card_played = [-1, -1]
agent_state = None
agent_state_2 = None
opening_suit = -1
# Each player plays a card in order starting from round_winner
for player in range(4):
card = None
player_idx = (round_winner + player) % 4
if player_idx == 2: # Agent plays
if ba:
agent_state = State(opening_suit, frozenset(cards_played), agent_card_played[1])
states_accessed.append(agent_state)
card = ba.play_card(agent_state, hands[player_idx])
else:
card = play_random_card(opening_suit, hands[player_idx])
agent_card_played[0] = card
barf.assess_card_played(hands, card, opening_suit, cards_played, player_idx, partners_cards)
elif player_idx == 0: # if agent is declarer, they play their partner's cards
if ba and declarer:
agent_state_2 = State(opening_suit, frozenset(cards_played), agent_card_played[0])
states_accessed.append(agent_state_2)
card = ba.play_card(agent_state_2, hands[player_idx])
barf.assess_card_played(hands, card, opening_suit, cards_played, player_idx, partners_cards)
else:
card = play_random_card(opening_suit, hands[player_idx])
agent_card_played[1] = card
else: # Random bot plays
card = play_random_card(opening_suit, hands[player_idx])
# Keep track of the opening suit.
if player == 0:
opening_suit = card // 13
hands[player_idx].remove(card)
cards_played.append(card)
# Get the winning card.
round_winner = (determine_round_winner(opening_suit, cards_played) + round_winner) % 4
# Adjust the BridgeAgent weights.
# If the BridgeAgent or N wins.
if round_winner == 0 or round_winner == 2:
if ba and train:
ba.add_win(agent_state, agent_card_played[0])
if declarer:
ba.add_win(agent_state_2, agent_card_played[1])
NS_Wins += 1
else:
if ba and train:
ba.add_loss(agent_state, agent_card_played[0])
if declarer:
ba.add_loss(agent_state_2, agent_card_played[1])
# for the last game, determine and write out policy
if ba and game == (NUM_GAMES_TRAIN - 1):
policy = []
count = 0
for x in ba.weights:
y = copy.deepcopy(ba.weights[x])
            # Choose the highest-weighted card that has not already been played in this state.
            best_card = np.argmax(y)
            while best_card in x.cards_played:
                y[best_card] = -1
                best_card = np.argmax(y)
            policy.append(best_card)
count += 1
game_file = "Bridge_" + str(game + 1)
ba.write_policy(agents_cards, policy, game_file, states_accessed)
return NS_Wins
def game_summary(ba, t, iterations=NUM_GAMES_TRAIN):
with open(str(NUM_GAMES_TRAIN) + "_Game_Data_Train-" + str(t) + ".csv", 'w') as k:
k.write("game,"
"agent_wins,random_wins,diff_wins,"
"agent_rfv_a,agent_rftc_a,"
"agent_rfv_b,agent_rftc_b,"
"agent_rfv_c,agent_rftc_c,"
"random_rfv_a,random_rftc_a,"
"random_rfv_b,random_rftc_b,"
"random_rfv_c,random_rftc_c\n")
barf = BridgeAgentRedFlags()
barf_random = BridgeAgentRedFlags()
NS_Wins = [0]
NS_Wins_random = [0]
for game in range(iterations):
hands = shuffle_cards()
        NS_Wins[-1] += play_game(game=game, hands=copy.deepcopy(hands), train=t, ba=ba, barf=barf)  # weights only update when t (training) is True
NS_Wins_random[-1] += play_game(game=game, hands=hands, ba=None, barf=barf_random)
ba.game_num += 1
if (game + 1) % STATS_PER == 0:
print(f"{game + 1} / ", end="", flush=True)
rfv = barf.RED_FLAG_VIOLATIONS
rfv_random = barf_random.RED_FLAG_VIOLATIONS
rftc = barf.RED_FLAG_TOTAL_COUNT
rftc_random = barf_random.RED_FLAG_TOTAL_COUNT
with open(str(NUM_GAMES_TRAIN) + "_Game_Data_Train-" + str(t) + ".csv", 'a') as k:
k.write(
f"{game + 1},"
f"{NS_Wins[-1]},{NS_Wins_random[-1]},{NS_Wins[-1] - NS_Wins_random[-1]},"
f"{rfv[0]},{rftc[0]},"
f"{rfv[1]},{rftc[1]},"
f"{rfv[2]},{rftc[2]},"
f"{rfv_random[0]},{rftc_random[0]},"
f"{rfv_random[1]},{rftc_random[1]},"
f"{rfv_random[2]},{rftc_random[2]},"
f"\n")
            # Reset the per-window red-flag counters (cumulative totals are kept in ALL_RED_FLAG_*).
barf.clear_red_flags()
barf_random.clear_red_flags()
NS_Wins.append(0)
NS_Wins_random.append(0)
average_win_delta = (sum(NS_Wins)-sum(NS_Wins_random)) / ((len(NS_Wins) - 1) * STATS_PER)
average_rf_ratios_agent = np.divide(barf.ALL_RED_FLAG_VIOLATIONS, barf.ALL_RED_FLAG_TOTAL_COUNT)
average_rf_ratios_random = np.divide(barf_random.ALL_RED_FLAG_VIOLATIONS, barf_random.ALL_RED_FLAG_TOTAL_COUNT)
print(f"Average Win Delta (want this to be positive): {average_win_delta}")
print(f"Average Red Flag Ratios - Agent: {average_rf_ratios_agent}")
print(f"Average Red Flag Ratios - Random: {average_rf_ratios_random}")
with open(str(NUM_GAMES_TRAIN) + "_Game_Data_Avg_Train-" + str(t) + ".csv", 'w') as m:
m.write(f"avg_win_delta,avg_rf_agent,avg_rf_random\n"
f"{average_win_delta},{average_rf_ratios_agent},{average_rf_ratios_random}\n")
return ba
def main():
start_time = datetime.datetime.now()
hands = []
# TRAINING
print(f"TRAINING on {NUM_GAMES_TRAIN} games")
ba = BridgeAgent()
ba = game_summary(ba, True)
# TESTING -- we don't change the weights here
print(f"TESTING on {NUM_GAMES_TEST} games")
game_summary(ba, False, iterations=NUM_GAMES_TEST)
end_time = datetime.datetime.now()
print("Runtime: ", end_time - start_time) # runtime
if __name__ == "__main__":
main()
| 3.046875 | 3 |
tests/hacsbase/test_hacsbase_data.py | chbonkie/hacs | 2 | 3999 | """Data Test Suite."""
from aiogithubapi.objects import repository
import pytest
import os
from homeassistant.core import HomeAssistant
from custom_components.hacs.hacsbase.data import HacsData
from custom_components.hacs.helpers.classes.repository import HacsRepository
from custom_components.hacs.hacsbase.configuration import Configuration
from custom_components.hacs.share import get_hacs
from tests.dummy_repository import dummy_repository_base
@pytest.mark.asyncio
async def test_hacs_data_async_write1(tmpdir):
data = HacsData()
hacs = get_hacs()
repository = dummy_repository_base()
repository.data.installed = True
repository.data.installed_version = "1"
hacs.repositories = [repository]
hacs.hass = HomeAssistant()
hacs.hass.config.config_dir = tmpdir
hacs.configuration = Configuration()
await data.async_write()
@pytest.mark.asyncio
async def test_hacs_data_async_write2(tmpdir):
data = HacsData()
hacs = get_hacs()
hacs.hass = HomeAssistant()
hacs.hass.config.config_dir = tmpdir
hacs.configuration = Configuration()
hacs.system.status.background_task = False
hacs.system.disabled = False
await data.async_write()
@pytest.mark.asyncio
async def test_hacs_data_restore(tmpdir):
data = HacsData()
hacs = get_hacs()
hacs.hass = HomeAssistant()
hacs.hass.config.config_dir = tmpdir
await data.restore()
| 1.953125 | 2 |