| text (string, 4 to 1.02M chars) | meta (dict) |
|---|---|
def get_prime_array(high):
"""Gets all primes lower than high from the pre-generated primes to
improve efficiency.
Parameters
==========
high : Integer
The number below which primes will be returned
Returns
=======
int array : The primes less than high
"""
# Array of pre-generated primes less than high
primes = []
with open("../pre_generated_primes/primes-to-100k.txt") as f:
for line in f:
hundred = [int(i) for i in line.split()]
primes.extend(hundred)
if (high > 100000):
with open("../pre_generated_primes/primes-to-200k.txt") as f2:
for line in f2:
two_hundred = [int(i) for i in line.split()]
primes.extend(two_hundred)
if (high > 200000):
with open("../pre_generated_primes/primes-to-300k.txt") as f:
for line in f:
three_hundred = [int(i) for i in line.split()]
primes.extend(three_hundred)
if (high > 300000):
with open("../pre_generated_primes/primes-to-400k.txt") as f:
for line in f:
four_hundred = [int(i) for i in line.split()]
primes.extend(four_hundred)
if (high > 400000):
with open("../pre_generated_primes/primes-to-500k.txt") as f:
for line in f:
five_hundred = [int(i) for i in line.split()]
primes.extend(five_hundred)
for x in reversed(range(0, len(primes))):
if primes[x] > high:
primes.pop(x)
else:
break
return primes
| {
"content_hash": "9373e9d89bf02836786ccd27e289e227",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 71,
"avg_line_length": 30.60377358490566,
"alnum_prop": 0.5419235511713933,
"repo_name": "edschembor/PrimalPy",
"id": "3cdc864d3ae70bc1b093c00af170e275cda39e6c",
"size": "1697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "get_prime_array.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11815"
}
],
"symlink_target": ""
} |
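A minimal usage sketch for the function above, assuming the pre-generated prime files exist at the relative paths it expects (as in the repository layout):

```python
# Hypothetical usage; relies on ../pre_generated_primes/*.txt being present
# relative to the working directory, as the function above expects.
primes = get_prime_array(150000)
print("primes below 150000:", len(primes))
print("largest prime found:", primes[-1])
```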
import sys, os
import tornado.ioloop
from tornado import gen
sys.path.append(os.path.dirname(sys.path[0]))
import time
from custor.decorators import run_with_thread_future
@run_with_thread_future(None)
def thread_sleep(self, args):
time.sleep(5)
@gen.coroutine
def sleep_coroutine():
yield thread_sleep(None, None)
print('sleep finish.')
sleep_coroutine()
print('continue other work.')
tornado.ioloop.IOLoop.instance().start()
| {
"content_hash": "9a1a8506ca833bd0dd52aeec4345ed4e",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 52,
"avg_line_length": 19.391304347826086,
"alnum_prop": 0.742152466367713,
"repo_name": "jmpews/torweb",
"id": "bd52d3bfd2cc4af5154b9b294ff4955f85df716d",
"size": "462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_thread_future.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30407375"
}
],
"symlink_target": ""
} |
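`run_with_thread_future` comes from the project's own `custor.decorators` module, which is not shown here. As a rough sketch of the idea (an assumption about its behavior, not the project's actual implementation), a decorator like this can push the blocking call onto a thread pool and hand back a future that a Tornado coroutine can yield:

```python
# Sketch only: Tornado 4+ coroutines can yield concurrent.futures.Future objects.
from concurrent.futures import ThreadPoolExecutor

_executor = ThreadPoolExecutor(max_workers=4)

def run_with_thread_future_sketch(executor=None):
    pool = executor or _executor
    def decorator(func):
        def wrapper(*args, **kwargs):
            # Run the blocking function on a worker thread; the returned
            # future can be yielded from inside a gen.coroutine.
            return pool.submit(func, *args, **kwargs)
        return wrapper
    return decorator
```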
from peer import *
from multiViewer import MultiViewer
from viewer import Viewer
from editor import Editor
class MultiViewEditor(Peer):
Description = ("Routing between editor elements and the query document of a multi-view element (like a table)",
'{ "dstRoom": null, "pillow": "MultiViewer.In.Refresh" }')
Receiving = [
MultiViewer.Out.QueryDocument,
Editor.Out.FieldChanged
]
Sending = [
Editor.In.Enable
]
Routings = [
(MultiViewer.Out.QueryDocument, Viewer.In.Document),
(Editor.Out.FieldChanged, Viewer.In.Refresh)
]
def __init__(self, room, dstRoom=None, pillow="MultiViewer.In.Refresh"):
Peer.__init__(self, room)
self._dstRoom = dstRoom
self._pillow = pillow
self._catch(MultiViewer.Out.QueryDocument, self._enable)
if dstRoom is not None:
self._catch(Editor.Out.FieldChanged, self._fieldChanged)
def _fieldChanged(self, pillow, feathers):
if self._dstRoom:
self._throw(self._dstRoom + ":" + self._pillow, None)
def _enable(self, pillow, feathers):
self._throw(Editor.In.Enable, True)
def initialize(self):
pass
| {
"content_hash": "ff8f1ec37efff053b6ec6840f31ebefc",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 115,
"avg_line_length": 26.29787234042553,
"alnum_prop": 0.6229773462783171,
"repo_name": "FreshXOpenSource/wallaby-base",
"id": "7dde7c132db309969c390a50c8914ea0f72738f4",
"size": "1317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wallaby/pf/peer/multiViewEditor.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "248711"
}
],
"symlink_target": ""
} |
"""
"""
import tornado
from tornado.web import HTTPError
from tornado import httputil
import pyrestful
import pyrestful.rest
class BaseHandler(pyrestful.rest.RestHandler):
def send_error(self, status_code=500, **kwargs):
"""
Generates the custom HTTP error and always returns a 200 code.
"""
reason = None
if 'exc_info' in kwargs:
exception = kwargs['exc_info'][1]
if isinstance(exception, HTTPError) and exception.reason:
reason = exception.reason
try:
msg = reason if reason else httputil.responses[status_code]
except KeyError:
msg = "unkown error"
result = {"status_code":status_code, "reason": msg}
self.clear()
self.set_header("Content-Type", "application/json")
self.set_status(200)
self.write(tornado.escape.json_encode(result))
self.finish()
@property
def db(self):
return self.application.db
@property
def log(self):
return self.application.log
def check_args(self):
d = None
try:
d = tornado.escape.json_decode(self.request.body)
except ValueError as e:
_ = 'decode track data error. e=%s' % e
self._gen_response(status_txt='decode json error', log_message=_)
return
return d
### private help funcs
def _gen_response(self, status_code=500, status_txt=None,log_message=None):
r = {}
self.log.error(log_message)
r['status_code'] = status_code
r['status_txt'] = status_txt
self.write(tornado.escape.json_encode(r))
self.finish()
| {
"content_hash": "7a3386d4267a51399c6576e069c42a91",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 79,
"avg_line_length": 26.03076923076923,
"alnum_prop": 0.5892434988179669,
"repo_name": "zhkzyth/tornado-async-rest-api",
"id": "9554b0b8428d41e2cdcb34285e53a8143dfc0aed",
"size": "1738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/base_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44785"
}
],
"symlink_target": ""
} |
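A hypothetical subclass sketch showing how `check_args` and the JSON error handling above are intended to be used; the handler name is made up and the pyrestful URL registration boilerplate is omitted:

```python
# Sketch only; routing/registration via pyrestful is not shown.
class EchoHandler(BaseHandler):
    def post_echo(self):
        data = self.check_args()   # decodes the JSON body or sends an error response
        if data is None:
            return
        self.set_header("Content-Type", "application/json")
        self.write(tornado.escape.json_encode({"status_code": 200, "echo": data}))
        self.finish()
```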
import os
import sys
from bcbio.rnaseq import (featureCounts, cufflinks, oncofuse, count, dexseq,
express, variation, stringtie, sailfish)
from bcbio.ngsalign import bowtie2, alignprep
from bcbio.variation import vardict
import bcbio.pipeline.datadict as dd
from bcbio.utils import filter_missing, flatten
from bcbio.log import logger
def fast_rnaseq(samples, run_parallel):
# samples = run_parallel("run_rapmap_pseudoalign", samples)
samples = run_parallel("run_salmon_reads", samples)
samples = sailfish.combine_sailfish(samples)
# samples = run_parallel("run_salmon_bam", samples)
return samples
def singlecell_rnaseq(samples, run_parallel):
samples = run_parallel("run_umi_transform", samples)
samples = run_parallel("run_barcode_histogram", samples)
samples = run_parallel("run_filter_barcodes", samples)
samples = run_parallel("run_rapmap_align", samples)
samples = run_parallel("run_tagcount", samples)
return samples
def rnaseq_variant_calling(samples, run_parallel):
"""
run RNA-seq variant calling using GATK
"""
samples = run_parallel("run_rnaseq_variant_calling", samples)
samples = run_parallel("run_rnaseq_joint_genotyping", [samples])
return samples
def run_rnaseq_variant_calling(data):
variantcaller = dd.get_variantcaller(data)
if isinstance(variantcaller, list) and len(variantcaller) > 1:
logger.error("Only one variantcaller can be run for RNA-seq at "
"this time. Post an issue here "
"(https://github.com/chapmanb/bcbio-nextgen/issues) "
"if this is something you need to do.")
sys.exit(1)
if variantcaller and "gatk" in variantcaller:
data = variation.rnaseq_gatk_variant_calling(data)
if vardict.get_vardict_command(data):
data = variation.rnaseq_vardict_variant_calling(data)
return [[data]]
def run_rnaseq_joint_genotyping(*samples):
data = samples[0][0]
variantcaller = dd.get_variantcaller(data)
if not variantcaller:
return samples
if "gatk" not in variantcaller:
return samples
ref_file = dd.get_ref_file(data)
out_file = os.path.join(dd.get_work_dir(data, "."), "variation", "combined.vcf")
if variantcaller and "gatk" in variantcaller:
vrn_files = [dd.get_vrn_file(d) for d in dd.sample_data_iterator(samples)]
out_file = variation.gatk_joint_calling(data, vrn_files, ref_file, out_file)
updated_samples = []
for data in dd.sample_data_iterator(samples):
data = dd.set_square_vcf(data, out_file)
updated_samples.append([data])
return updated_samples
return samples
def quantitate_expression_parallel(samples, run_parallel):
"""
quantitate expression; all programs run here should be multithreaded to
take advantage of the threaded run_parallel environment
"""
data = samples[0][0]
samples = run_parallel("generate_transcript_counts", samples)
samples = run_parallel("run_sailfish", samples)
samples = sailfish.combine_sailfish(samples)
if "cufflinks" in dd.get_expression_caller(data):
samples = run_parallel("run_cufflinks", samples)
if "stringtie" in dd.get_expression_caller(data):
samples = run_parallel("run_stringtie_expression", samples)
return samples
def quantitate_expression_noparallel(samples, run_parallel):
"""
run transcript quantitation for algorithms that don't run in parallel
"""
data = samples[0][0]
if "express" in dd.get_expression_caller(data):
samples = run_parallel("run_express", samples)
samples = run_parallel("run_dexseq", samples)
return samples
def generate_transcript_counts(data):
"""Generate counts per transcript and per exon from an alignment"""
data["count_file"] = featureCounts.count(data)
if dd.get_fusion_mode(data, False):
oncofuse_file = oncofuse.run(data)
if oncofuse_file:
data = dd.set_oncofuse_file(data, oncofuse_file)
if dd.get_transcriptome_align(data):
# to create a disambiguated transcriptome file realign with bowtie2
if dd.get_disambiguate(data):
logger.info("Aligning to the transcriptome with bowtie2 using the "
"disambiguated reads.")
bam_path = data["work_bam"]
fastq_paths = alignprep._bgzip_from_bam(bam_path, data["dirs"], data["config"], is_retry=False, output_infix='-transcriptome')
if len(fastq_paths) == 2:
file1, file2 = fastq_paths
else:
file1, file2 = fastq_paths[0], None
ref_file = dd.get_ref_file(data)
data = bowtie2.align_transcriptome(file1, file2, ref_file, data)
else:
file1, file2 = dd.get_input_sequence_files(data)
if not dd.get_transcriptome_bam(data):
ref_file = dd.get_ref_file(data)
logger.info("Transcriptome alignment was flagged to run, but the "
"transcriptome BAM file was not found. Aligning to the "
"transcriptome with bowtie2.")
data = bowtie2.align_transcriptome(file1, file2, ref_file, data)
return [[data]]
def run_stringtie_expression(data):
"""Calculate transcript and gene level FPKM with Stringtie"""
data = stringtie.run_stringtie_expression(data)
return [[data]]
def run_dexseq(data):
"""Quantitate exon-level counts with DEXSeq"""
if dd.get_dexseq_gff(data, None):
data = dexseq.bcbio_run(data)
return [[data]]
def run_express(data):
"""Quantitative isoform expression by eXpress"""
data = express.run(data)
return [[data]]
def combine_express(samples, combined):
"""Combine tpm, effective counts and fpkm from express results"""
to_combine = [dd.get_express_counts(x) for x in
dd.sample_data_iterator(samples) if dd.get_express_counts(x)]
gtf_file = dd.get_gtf_file(samples[0][0])
isoform_to_gene_file = os.path.join(os.path.dirname(combined), "isoform_to_gene.txt")
isoform_to_gene_file = express.isoform_to_gene_name(gtf_file, isoform_to_gene_file)
if len(to_combine) > 0:
eff_counts_combined_file = os.path.splitext(combined)[0] + ".isoform.express_counts"
eff_counts_combined = count.combine_count_files(to_combine, eff_counts_combined_file, ext=".counts")
to_combine = [dd.get_express_tpm(x) for x in
dd.sample_data_iterator(samples) if dd.get_express_tpm(x)]
tpm_counts_combined_file = os.path.splitext(combined)[0] + ".isoform.express_tpm"
tpm_counts_combined = count.combine_count_files(to_combine, tpm_counts_combined_file)
to_combine = [dd.get_express_fpkm(x) for x in dd.sample_data_iterator(samples)
if dd.get_express_fpkm(x)]
fpkm_counts_combined_file = os.path.splitext(combined)[0] + ".isoform.express_fpkm"
fpkm_counts_combined = count.combine_count_files(to_combine, fpkm_counts_combined_file, ext=".fpkm")
return {'counts': eff_counts_combined, 'tpm': tpm_counts_combined,
'fpkm': fpkm_counts_combined, 'isoform_to_gene': isoform_to_gene_file}
return {}
def run_cufflinks(data):
"""Quantitate transcript expression with Cufflinks"""
if "cufflinks" in dd.get_tools_off(data):
return [[data]]
work_bam = dd.get_work_bam(data)
ref_file = dd.get_sam_ref(data)
out_dir, fpkm_file, fpkm_isoform_file = cufflinks.run(work_bam, ref_file, data)
data = dd.set_cufflinks_dir(data, out_dir)
data = dd.set_fpkm(data, fpkm_file)
data = dd.set_fpkm_isoform(data, fpkm_isoform_file)
return [[data]]
def cufflinks_assemble(data):
bam_file = dd.get_work_bam(data)
ref_file = dd.get_sam_ref(data)
out_dir = os.path.join(dd.get_work_dir(data), "assembly")
num_cores = dd.get_num_cores(data)
assembled_gtf = cufflinks.assemble(bam_file, ref_file, num_cores, out_dir, data)
dd.get_assembled_gtf(data).append(assembled_gtf)
return [[data]]
def cufflinks_merge(*samples):
to_merge = filter_missing(flatten([dd.get_assembled_gtf(data) for data in
dd.sample_data_iterator(samples)]))
data = samples[0][0]
ref_file = dd.get_sam_ref(data)
gtf_file = dd.get_gtf_file(data)
num_cores = dd.get_num_cores(data)
merged_gtf = cufflinks.merge(to_merge, ref_file, gtf_file, num_cores,
samples[0][0])
updated_samples = []
for data in dd.sample_data_iterator(samples):
data = dd.set_merged_gtf(data, merged_gtf)
updated_samples.append([data])
return updated_samples
def stringtie_merge(*samples):
to_merge = filter_missing(flatten([dd.get_assembled_gtf(data) for data in
dd.sample_data_iterator(samples)]))
data = samples[0][0]
ref_file = dd.get_sam_ref(data)
gtf_file = dd.get_gtf_file(data)
num_cores = dd.get_num_cores(data)
merged_gtf = stringtie.merge(to_merge, ref_file, gtf_file, num_cores, data)
updated_samples = []
for data in dd.sample_data_iterator(samples):
data = dd.set_merged_gtf(data, merged_gtf)
updated_samples.append([data])
return updated_samples
def assemble_transcripts(run_parallel, samples):
"""
assembly strategy rationale implemented as suggested in
http://www.nature.com/nprot/journal/v7/n3/full/nprot.2012.016.html
run Cufflinks without a reference GTF for each individual sample
merge the assemblies with Cuffmerge using a reference GTF
"""
assembler = dd.get_in_samples(samples, dd.get_transcript_assembler)
data = samples[0][0]
if assembler:
if "cufflinks" in assembler:
samples = run_parallel("cufflinks_assemble", samples)
if "stringtie" in assembler:
samples = run_parallel("run_stringtie_expression", samples)
if "stringtie" in assembler and stringtie.supports_merge(data):
samples = run_parallel("stringtie_merge", [samples])
else:
samples = run_parallel("cufflinks_merge", [samples])
return samples
def combine_files(samples):
"""
after quantitation, combine the counts/FPKM/TPM/etc into a single table with
all samples
"""
gtf_file = dd.get_gtf_file(samples[0][0], None)
dexseq_gff = dd.get_dexseq_gff(samples[0][0])
# combine featureCount files
count_files = filter_missing([dd.get_count_file(x[0]) for x in samples])
combined = count.combine_count_files(count_files, ext=".counts")
annotated = count.annotate_combined_count_file(combined, gtf_file)
# combine eXpress files
express_counts_combined = combine_express(samples, combined)
# combine Cufflinks files
fpkm_combined_file = os.path.splitext(combined)[0] + ".fpkm"
fpkm_files = filter_missing([dd.get_fpkm(x[0]) for x in samples])
if fpkm_files:
fpkm_combined = count.combine_count_files(fpkm_files, fpkm_combined_file)
else:
fpkm_combined = None
fpkm_isoform_combined_file = os.path.splitext(combined)[0] + ".isoform.fpkm"
isoform_files = filter_missing([dd.get_fpkm_isoform(x[0]) for x in samples])
if isoform_files:
fpkm_isoform_combined = count.combine_count_files(isoform_files,
fpkm_isoform_combined_file,
".isoform.fpkm")
else:
fpkm_isoform_combined = None
# combine DEXseq files
dexseq_combined_file = os.path.splitext(combined)[0] + ".dexseq"
to_combine_dexseq = filter_missing([dd.get_dexseq_counts(data[0]) for data in samples])
if to_combine_dexseq:
dexseq_combined = count.combine_count_files(to_combine_dexseq,
dexseq_combined_file, ".dexseq")
dexseq.create_dexseq_annotation(dexseq_gff, dexseq_combined)
else:
dexseq_combined = None
updated_samples = []
for data in dd.sample_data_iterator(samples):
data = dd.set_combined_counts(data, combined)
if annotated:
data = dd.set_annotated_combined_counts(data, annotated)
if fpkm_combined:
data = dd.set_combined_fpkm(data, fpkm_combined)
if fpkm_isoform_combined:
data = dd.set_combined_fpkm_isoform(data, fpkm_isoform_combined)
if express_counts_combined:
data = dd.set_express_counts(data, express_counts_combined['counts'])
data = dd.set_express_tpm(data, express_counts_combined['tpm'])
data = dd.set_express_fpkm(data, express_counts_combined['fpkm'])
data = dd.set_isoform_to_gene(data, express_counts_combined['isoform_to_gene'])
if dexseq_combined:
data = dd.set_dexseq_counts(data, dexseq_combined_file)
updated_samples.append([data])
return updated_samples
| {
"content_hash": "0cce525e3fe7d55bb2dccc9ca299f17a",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 138,
"avg_line_length": 44.75257731958763,
"alnum_prop": 0.6496199032480995,
"repo_name": "mjafin/bcbio-nextgen",
"id": "759a082cb9f1171f18f9fe9aa67440d818064f0c",
"size": "13023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bcbio/pipeline/rnaseq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1767655"
},
{
"name": "Ruby",
"bytes": "624"
},
{
"name": "Shell",
"bytes": "14377"
}
],
"symlink_target": ""
} |
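All of the pipeline entry points above share the same shape: they take `samples` (a list of one-element lists of sample dictionaries) plus a `run_parallel` dispatcher and return updated samples. A toy sketch of that calling convention, using stand-ins rather than bcbio's real dispatcher or sample objects:

```python
# Stand-in dispatcher: bcbio's run_parallel fans the named function out over
# local cores or a cluster; this toy version just logs and returns the
# samples unchanged.
def toy_run_parallel(fn_name, samples):
    print("would dispatch %s over %d item(s)" % (fn_name, len(samples)))
    return samples

toy_samples = [[{"description": "sample1"}], [{"description": "sample2"}]]
toy_samples = toy_run_parallel("run_salmon_reads", toy_samples)
```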
from corehq.apps.sms.models import SMS
from pillowtop.dao.exceptions import DocumentNotFoundError
from pillowtop.dao.interface import ReadOnlyDocumentStore
class ReadonlySMSDocumentStore(ReadOnlyDocumentStore):
def get_document(self, doc_id):
try:
sms = SMS.objects.get(couch_id=doc_id)
except SMS.DoesNotExist as e:
raise DocumentNotFoundError(e)
return sms.to_json()
| {
"content_hash": "d3d259e7cd390dcc3f08c655170f4a0f",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 58,
"avg_line_length": 30.357142857142858,
"alnum_prop": 0.7223529411764706,
"repo_name": "qedsoftware/commcare-hq",
"id": "e55fc37291f6f81d38c6329cc582ecd33aeef2f7",
"size": "425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/sms/document_stores.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
} |
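A minimal usage sketch with a made-up document id; it assumes the base `ReadOnlyDocumentStore` interface needs no constructor arguments, and callers are expected to handle `DocumentNotFoundError` when no SMS row matches:

```python
# Hypothetical usage; "abc123" is a placeholder couch_id.
store = ReadonlySMSDocumentStore()
try:
    doc = store.get_document("abc123")
except DocumentNotFoundError:
    doc = None
```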
import os.path
import uuid
import pytest
from ..context import dnsimple
@pytest.fixture
def client():
return dnsimple.Client(
sandbox = True,
credentials_search_paths = [os.path.dirname(__file__)]
)
unregistered_domain_name = 'example-{0}.com'.format(uuid.uuid4())
# Ensure no domains exist
for domain in client().domains():
domain.delete()
# Ensure no contacts exist
for contact in client().contacts():
contact.delete()
| {
"content_hash": "21ac2b103213ebd20570b1b24d8952f6",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 65,
"avg_line_length": 20.695652173913043,
"alnum_prop": 0.6617647058823529,
"repo_name": "vigetlabs/dnsimple",
"id": "519f8ca02682aae73c02bfcc277dbcdef96de636",
"size": "476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "332"
},
{
"name": "Python",
"bytes": "103200"
}
],
"symlink_target": ""
} |
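A minimal test sketch using the `client` fixture defined above, placed in a test module that can see it; it only relies on the `domains()` call already shown in the cleanup loop:

```python
# Hypothetical test; assumes the sandbox account was emptied by the setup above.
def test_no_domains_registered(client):
    assert list(client.domains()) == []
```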
"""Test harness for diff_match_patch.py
Copyright 2006 Google Inc.
http://code.google.com/p/google-diff-match-patch/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import time
import unittest
import diff_match_patch as dmp_module
# Force a module reload. Allows one to edit the DMP module and rerun the tests
# without leaving the Python interpreter.
reload(dmp_module)
class DiffMatchPatchTest(unittest.TestCase):
def setUp(self):
"Test harness for dmp_module."
self.dmp = dmp_module.diff_match_patch()
def diff_rebuildtexts(self, diffs):
# Construct the two texts which made up the diff originally.
text1 = ""
text2 = ""
for x in xrange(0, len(diffs)):
if diffs[x][0] != dmp_module.diff_match_patch.DIFF_INSERT:
text1 += diffs[x][1]
if diffs[x][0] != dmp_module.diff_match_patch.DIFF_DELETE:
text2 += diffs[x][1]
return (text1, text2)
class DiffTest(DiffMatchPatchTest):
"""DIFF TEST FUNCTIONS"""
def testDiffCommonPrefix(self):
# Detect any common prefix.
# Null case.
self.assertEquals(0, self.dmp.diff_commonPrefix("abc", "xyz"))
# Non-null case.
self.assertEquals(4, self.dmp.diff_commonPrefix("1234abcdef", "1234xyz"))
# Whole case.
self.assertEquals(4, self.dmp.diff_commonPrefix("1234", "1234xyz"))
def testDiffCommonSuffix(self):
# Detect any common suffix.
# Null case.
self.assertEquals(0, self.dmp.diff_commonSuffix("abc", "xyz"))
# Non-null case.
self.assertEquals(4, self.dmp.diff_commonSuffix("abcdef1234", "xyz1234"))
# Whole case.
self.assertEquals(4, self.dmp.diff_commonSuffix("1234", "xyz1234"))
def testDiffCommonOverlap(self):
# Null case.
self.assertEquals(0, self.dmp.diff_commonOverlap("", "abcd"))
# Whole case.
self.assertEquals(3, self.dmp.diff_commonOverlap("abc", "abcd"))
# No overlap.
self.assertEquals(0, self.dmp.diff_commonOverlap("123456", "abcd"))
# Overlap.
self.assertEquals(3, self.dmp.diff_commonOverlap("123456xxx", "xxxabcd"))
def testDiffHalfMatch(self):
# Detect a halfmatch.
self.dmp.Diff_Timeout = 1
# No match.
self.assertEquals(None, self.dmp.diff_halfMatch("1234567890", "abcdef"))
self.assertEquals(None, self.dmp.diff_halfMatch("12345", "23"))
# Single Match.
self.assertEquals(("12", "90", "a", "z", "345678"), self.dmp.diff_halfMatch("1234567890", "a345678z"))
self.assertEquals(("a", "z", "12", "90", "345678"), self.dmp.diff_halfMatch("a345678z", "1234567890"))
self.assertEquals(("abc", "z", "1234", "0", "56789"), self.dmp.diff_halfMatch("abc56789z", "1234567890"))
self.assertEquals(("a", "xyz", "1", "7890", "23456"), self.dmp.diff_halfMatch("a23456xyz", "1234567890"))
# Multiple Matches.
self.assertEquals(("12123", "123121", "a", "z", "1234123451234"), self.dmp.diff_halfMatch("121231234123451234123121", "a1234123451234z"))
self.assertEquals(("", "-=-=-=-=-=", "x", "", "x-=-=-=-=-=-=-="), self.dmp.diff_halfMatch("x-=-=-=-=-=-=-=-=-=-=-=-=", "xx-=-=-=-=-=-=-="))
self.assertEquals(("-=-=-=-=-=", "", "", "y", "-=-=-=-=-=-=-=y"), self.dmp.diff_halfMatch("-=-=-=-=-=-=-=-=-=-=-=-=y", "-=-=-=-=-=-=-=yy"))
# Non-optimal halfmatch.
# Optimal diff would be -q+x=H-i+e=lloHe+Hu=llo-Hew+y not -qHillo+x=HelloHe-w+Hulloy
self.assertEquals(("qHillo", "w", "x", "Hulloy", "HelloHe"), self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))
# Optimal no halfmatch.
self.dmp.Diff_Timeout = 0
self.assertEquals(None, self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))
def testDiffLinesToChars(self):
# Convert lines down to characters.
self.assertEquals(("\x01\x02\x01", "\x02\x01\x02", ["", "alpha\n", "beta\n"]), self.dmp.diff_linesToChars("alpha\nbeta\nalpha\n", "beta\nalpha\nbeta\n"))
self.assertEquals(("", "\x01\x02\x03\x03", ["", "alpha\r\n", "beta\r\n", "\r\n"]), self.dmp.diff_linesToChars("", "alpha\r\nbeta\r\n\r\n\r\n"))
self.assertEquals(("\x01", "\x02", ["", "a", "b"]), self.dmp.diff_linesToChars("a", "b"))
# More than 256 to reveal any 8-bit limitations.
n = 300
lineList = []
charList = []
for x in range(1, n + 1):
lineList.append(str(x) + "\n")
charList.append(unichr(x))
self.assertEquals(n, len(lineList))
lines = "".join(lineList)
chars = "".join(charList)
self.assertEquals(n, len(chars))
lineList.insert(0, "")
self.assertEquals((chars, "", lineList), self.dmp.diff_linesToChars(lines, ""))
def testDiffCharsToLines(self):
# Convert chars up to lines.
diffs = [(self.dmp.DIFF_EQUAL, "\x01\x02\x01"), (self.dmp.DIFF_INSERT, "\x02\x01\x02")]
self.dmp.diff_charsToLines(diffs, ["", "alpha\n", "beta\n"])
self.assertEquals([(self.dmp.DIFF_EQUAL, "alpha\nbeta\nalpha\n"), (self.dmp.DIFF_INSERT, "beta\nalpha\nbeta\n")], diffs)
# More than 256 to reveal any 8-bit limitations.
n = 300
lineList = []
charList = []
for x in range(1, n + 1):
lineList.append(str(x) + "\n")
charList.append(unichr(x))
self.assertEquals(n, len(lineList))
lines = "".join(lineList)
chars = "".join(charList)
self.assertEquals(n, len(chars))
lineList.insert(0, "")
diffs = [(self.dmp.DIFF_DELETE, chars)]
self.dmp.diff_charsToLines(diffs, lineList)
self.assertEquals([(self.dmp.DIFF_DELETE, lines)], diffs)
def testDiffCleanupMerge(self):
# Cleanup a messy diff.
# Null case.
diffs = []
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([], diffs)
# No change case.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")], diffs)
# Merge equalities.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_EQUAL, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "abc")], diffs)
# Merge deletions.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_DELETE, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc")], diffs)
# Merge insertions.
diffs = [(self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_INSERT, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_INSERT, "abc")], diffs)
# Merge interweave.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "d"), (self.dmp.DIFF_EQUAL, "e"), (self.dmp.DIFF_EQUAL, "f")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_INSERT, "bd"), (self.dmp.DIFF_EQUAL, "ef")], diffs)
# Prefix and suffix detection.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "c")], diffs)
# Prefix and suffix detection with equalities.
diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc"), (self.dmp.DIFF_EQUAL, "y")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "cy")], diffs)
# Slide edit left.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "ba"), (self.dmp.DIFF_EQUAL, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "ac")], diffs)
# Slide edit right.
diffs = [(self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "a")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "ca"), (self.dmp.DIFF_INSERT, "ba")], diffs)
# Slide edit left recursive.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_EQUAL, "x")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "acx")], diffs)
# Slide edit right recursive.
diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "ca"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "a")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "xca"), (self.dmp.DIFF_DELETE, "cba")], diffs)
def testDiffCleanupSemanticLossless(self):
# Slide diffs to match logical boundaries.
# Null case.
diffs = []
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([], diffs)
# Blank lines.
diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\n\r\nBBB"), (self.dmp.DIFF_INSERT, "\r\nDDD\r\n\r\nBBB"), (self.dmp.DIFF_EQUAL, "\r\nEEE")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "AAA\r\n\r\n"), (self.dmp.DIFF_INSERT, "BBB\r\nDDD\r\n\r\n"), (self.dmp.DIFF_EQUAL, "BBB\r\nEEE")], diffs)
# Line boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\nBBB"), (self.dmp.DIFF_INSERT, " DDD\r\nBBB"), (self.dmp.DIFF_EQUAL, " EEE")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "AAA\r\n"), (self.dmp.DIFF_INSERT, "BBB DDD\r\n"), (self.dmp.DIFF_EQUAL, "BBB EEE")], diffs)
# Word boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_INSERT, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_INSERT, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
# Alphanumeric boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The-c"), (self.dmp.DIFF_INSERT, "ow-and-the-c"), (self.dmp.DIFF_EQUAL, "at.")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The-"), (self.dmp.DIFF_INSERT, "cow-and-the-"), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
# Hitting the start.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "ax")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "aax")], diffs)
# Hitting the end.
diffs = [(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "a")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "xaa"), (self.dmp.DIFF_DELETE, "a")], diffs)
def testDiffCleanupSemantic(self):
# Cleanup semantically trivial equalities.
# Null case.
diffs = []
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([], diffs)
# No elimination #1.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")], diffs)
# No elimination #2.
diffs = [(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")], diffs)
# Simple elimination.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "c")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "b")], diffs)
# Backpass elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_EQUAL, "cd"), (self.dmp.DIFF_DELETE, "e"), (self.dmp.DIFF_EQUAL, "f"), (self.dmp.DIFF_INSERT, "g")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abcdef"), (self.dmp.DIFF_INSERT, "cdfg")], diffs)
# Multiple eliminations.
diffs = [(self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2"), (self.dmp.DIFF_EQUAL, "_"), (self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "AB_AB"), (self.dmp.DIFF_INSERT, "1A2_1A2")], diffs)
# Word boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_DELETE, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_DELETE, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
# Overlap elimination #1.
diffs = [(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xx"), (self.dmp.DIFF_INSERT, "def")], diffs)
# Overlap elimination #2.
diffs = [(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef"), (self.dmp.DIFF_DELETE, "ABCXX"), (self.dmp.DIFF_INSERT, "XXDEF")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xx"), (self.dmp.DIFF_INSERT, "def"), (self.dmp.DIFF_DELETE, "ABC"), (self.dmp.DIFF_EQUAL, "XX"), (self.dmp.DIFF_INSERT, "DEF")], diffs)
def testDiffCleanupEfficiency(self):
# Cleanup operationally trivial equalities.
self.dmp.Diff_EditCost = 4
# Null case.
diffs = []
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([], diffs)
# No elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")], diffs)
# Four-edit elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xyz34")], diffs)
# Three-edit elimination.
diffs = [(self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "xcd"), (self.dmp.DIFF_INSERT, "12x34")], diffs)
# Backpass elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xy"), (self.dmp.DIFF_INSERT, "34"), (self.dmp.DIFF_EQUAL, "z"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "56")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xy34z56")], diffs)
# High cost elimination.
self.dmp.Diff_EditCost = 5
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abwxyzcd"), (self.dmp.DIFF_INSERT, "12wxyz34")], diffs)
self.dmp.Diff_EditCost = 4
def testDiffPrettyHtml(self):
# Pretty print.
diffs = [(self.dmp.DIFF_EQUAL, "a\n"), (self.dmp.DIFF_DELETE, "<B>b</B>"), (self.dmp.DIFF_INSERT, "c&d")]
self.assertEquals("<span>a¶<br></span><del style=\"background:#ffe6e6;\"><B>b</B></del><ins style=\"background:#e6ffe6;\">c&d</ins>", self.dmp.diff_prettyHtml(diffs))
def testDiffText(self):
# Compute the source and destination texts.
diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy")]
self.assertEquals("jumps over the lazy", self.dmp.diff_text1(diffs))
self.assertEquals("jumped over a lazy", self.dmp.diff_text2(diffs))
def testDiffDelta(self):
# Convert a diff into delta string.
diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy"), (self.dmp.DIFF_INSERT, "old dog")]
text1 = self.dmp.diff_text1(diffs)
self.assertEquals("jumps over the lazy", text1)
delta = self.dmp.diff_toDelta(diffs)
self.assertEquals("=4\t-1\t+ed\t=6\t-3\t+a\t=5\t+old dog", delta)
# Convert delta string into a diff.
self.assertEquals(diffs, self.dmp.diff_fromDelta(text1, delta))
# Generates error (19 != 20).
try:
self.dmp.diff_fromDelta(text1 + "x", delta)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Generates error (19 != 18).
try:
self.dmp.diff_fromDelta(text1[1:], delta)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Generates error (%c3%xy invalid Unicode).
try:
self.dmp.diff_fromDelta("", "+%c3xy")
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Test deltas with special characters.
diffs = [(self.dmp.DIFF_EQUAL, u"\u0680 \x00 \t %"), (self.dmp.DIFF_DELETE, u"\u0681 \x01 \n ^"), (self.dmp.DIFF_INSERT, u"\u0682 \x02 \\ |")]
text1 = self.dmp.diff_text1(diffs)
self.assertEquals(u"\u0680 \x00 \t %\u0681 \x01 \n ^", text1)
delta = self.dmp.diff_toDelta(diffs)
self.assertEquals("=7\t-7\t+%DA%82 %02 %5C %7C", delta)
# Convert delta string into a diff.
self.assertEquals(diffs, self.dmp.diff_fromDelta(text1, delta))
# Verify pool of unchanged characters.
diffs = [(self.dmp.DIFF_INSERT, "A-Z a-z 0-9 - _ . ! ~ * ' ( ) ; / ? : @ & = + $ , # ")]
text2 = self.dmp.diff_text2(diffs)
self.assertEquals("A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", text2)
delta = self.dmp.diff_toDelta(diffs)
self.assertEquals("+A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", delta)
# Convert delta string into a diff.
self.assertEquals(diffs, self.dmp.diff_fromDelta("", delta))
def testDiffXIndex(self):
# Translate a location in text1 to text2.
self.assertEquals(5, self.dmp.diff_xIndex([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 2))
# Translation on deletion.
self.assertEquals(1, self.dmp.diff_xIndex([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 3))
def testDiffLevenshtein(self):
# Levenshtein with trailing equality.
self.assertEquals(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")]))
# Levenshtein with leading equality.
self.assertEquals(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234")]))
# Levenshtein with middle equality.
self.assertEquals(7, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_INSERT, "1234")]))
def testDiffBisect(self):
# Normal.
a = "cat"
b = "map"
# Since the resulting diff hasn't been normalized, it would be ok if
# the insertion and deletion pairs are swapped.
# If the order changes, tweak this test as required.
self.assertEquals([(self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "m"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "t"), (self.dmp.DIFF_INSERT, "p")], self.dmp.diff_bisect(a, b, sys.maxint))
# Timeout.
self.assertEquals([(self.dmp.DIFF_DELETE, "cat"), (self.dmp.DIFF_INSERT, "map")], self.dmp.diff_bisect(a, b, 0))
def testDiffMain(self):
# Perform a trivial diff.
# Null case.
self.assertEquals([], self.dmp.diff_main("", "", False))
# Equality.
self.assertEquals([(self.dmp.DIFF_EQUAL, "abc")], self.dmp.diff_main("abc", "abc", False))
# Simple insertion.
self.assertEquals([(self.dmp.DIFF_EQUAL, "ab"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "ab123c", False))
# Simple deletion.
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "bc")], self.dmp.diff_main("a123bc", "abc", False))
# Two insertions.
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_INSERT, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "a123b456c", False))
# Two deletions.
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("a123b456c", "abc", False))
# Perform a real diff.
# Switch off the timeout.
self.dmp.Diff_Timeout = 0
# Simple cases.
self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b")], self.dmp.diff_main("a", "b", False))
self.assertEquals([(self.dmp.DIFF_DELETE, "Apple"), (self.dmp.DIFF_INSERT, "Banana"), (self.dmp.DIFF_EQUAL, "s are a"), (self.dmp.DIFF_INSERT, "lso"), (self.dmp.DIFF_EQUAL, " fruit.")], self.dmp.diff_main("Apples are a fruit.", "Bananas are also fruit.", False))
self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, u"\u0680"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "\t"), (self.dmp.DIFF_INSERT, u"\x00")], self.dmp.diff_main("ax\t", u"\u0680x\x00", False))
# Overlaps.
self.assertEquals([(self.dmp.DIFF_DELETE, "1"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "y"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "2"), (self.dmp.DIFF_INSERT, "xab")], self.dmp.diff_main("1ayb2", "abxab", False))
self.assertEquals([(self.dmp.DIFF_INSERT, "xaxcx"), (self.dmp.DIFF_EQUAL, "abc"), (self.dmp.DIFF_DELETE, "y")], self.dmp.diff_main("abcy", "xaxcxabc", False))
self.assertEquals([(self.dmp.DIFF_DELETE, "ABCD"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "bcd"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "efghijklmnopqrs"), (self.dmp.DIFF_DELETE, "EFGHIJKLMNOefg")], self.dmp.diff_main("ABCDa=bcd=efghijklmnopqrsEFGHIJKLMNOefg", "a-bcd-efghijklmnopqrs", False))
# Large equality.
self.assertEquals([(self.dmp.DIFF_INSERT, " "), (self.dmp.DIFF_EQUAL,"a"), (self.dmp.DIFF_INSERT,"nd"), (self.dmp.DIFF_EQUAL," [[Pennsylvania]]"), (self.dmp.DIFF_DELETE," and [[New")], self.dmp.diff_main("a [[Pennsylvania]] and [[New", " and [[Pennsylvania]]", False))
# Timeout.
self.dmp.Diff_Timeout = 0.1 # 100ms
a = "`Twas brillig, and the slithy toves\nDid gyre and gimble in the wabe:\nAll mimsy were the borogoves,\nAnd the mome raths outgrabe.\n"
b = "I am the very model of a modern major general,\nI've information vegetable, animal, and mineral,\nI know the kings of England, and I quote the fights historical,\nFrom Marathon to Waterloo, in order categorical.\n"
# Increase the text lengths by 1024 times to ensure a timeout.
for x in xrange(10):
a = a + a
b = b + b
startTime = time.time()
self.dmp.diff_main(a, b)
endTime = time.time()
# Test that we took at least the timeout period.
self.assertTrue(self.dmp.Diff_Timeout <= endTime - startTime)
# Test that we didn't take forever (be forgiving).
# Theoretically this test could fail very occasionally if the
# OS task swaps or locks up for a second at the wrong moment.
self.assertTrue(self.dmp.Diff_Timeout * 2 > endTime - startTime)
self.dmp.Diff_Timeout = 0
# Test the linemode speedup.
# Must be long to pass the 100 char cutoff.
# Simple line-mode.
a = "1234567890\n" * 13
b = "abcdefghij\n" * 13
self.assertEquals(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
# Single line-mode.
a = "1234567890" * 13
b = "abcdefghij" * 13
self.assertEquals(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
# Overlap line-mode.
a = "1234567890\n" * 13
b = "abcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n"
texts_linemode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, True))
texts_textmode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, False))
self.assertEquals(texts_textmode, texts_linemode)
# Test null inputs.
try:
self.dmp.diff_main(None, None)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
class MatchTest(DiffMatchPatchTest):
"""MATCH TEST FUNCTIONS"""
def testMatchAlphabet(self):
# Initialise the bitmasks for Bitap.
self.assertEquals({"a":4, "b":2, "c":1}, self.dmp.match_alphabet("abc"))
self.assertEquals({"a":37, "b":18, "c":8}, self.dmp.match_alphabet("abcaba"))
def testMatchBitap(self):
self.dmp.Match_Distance = 100
self.dmp.Match_Threshold = 0.5
# Exact matches.
self.assertEquals(5, self.dmp.match_bitap("abcdefghijk", "fgh", 5))
self.assertEquals(5, self.dmp.match_bitap("abcdefghijk", "fgh", 0))
# Fuzzy matches.
self.assertEquals(4, self.dmp.match_bitap("abcdefghijk", "efxhi", 0))
self.assertEquals(2, self.dmp.match_bitap("abcdefghijk", "cdefxyhijk", 5))
self.assertEquals(-1, self.dmp.match_bitap("abcdefghijk", "bxy", 1))
# Overflow.
self.assertEquals(2, self.dmp.match_bitap("123456789xx0", "3456789x0", 2))
self.assertEquals(0, self.dmp.match_bitap("abcdef", "xxabc", 4))
self.assertEquals(3, self.dmp.match_bitap("abcdef", "defyy", 4))
self.assertEquals(0, self.dmp.match_bitap("abcdef", "xabcdefy", 0))
# Threshold test.
self.dmp.Match_Threshold = 0.4
self.assertEquals(4, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
self.dmp.Match_Threshold = 0.3
self.assertEquals(-1, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
self.dmp.Match_Threshold = 0.0
self.assertEquals(1, self.dmp.match_bitap("abcdefghijk", "bcdef", 1))
self.dmp.Match_Threshold = 0.5
# Multiple select.
self.assertEquals(0, self.dmp.match_bitap("abcdexyzabcde", "abccde", 3))
self.assertEquals(8, self.dmp.match_bitap("abcdexyzabcde", "abccde", 5))
# Distance test.
self.dmp.Match_Distance = 10 # Strict location.
self.assertEquals(-1, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))
self.assertEquals(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdxxefg", 1))
self.dmp.Match_Distance = 1000 # Loose location.
self.assertEquals(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))
def testMatchMain(self):
# Full match.
# Shortcut matches.
self.assertEquals(0, self.dmp.match_main("abcdef", "abcdef", 1000))
self.assertEquals(-1, self.dmp.match_main("", "abcdef", 1))
self.assertEquals(3, self.dmp.match_main("abcdef", "", 3))
self.assertEquals(3, self.dmp.match_main("abcdef", "de", 3))
self.assertEquals(3, self.dmp.match_main("abcdef", "defy", 4))
self.assertEquals(0, self.dmp.match_main("abcdef", "abcdefy", 0))
# Complex match.
self.dmp.Match_Threshold = 0.7
self.assertEquals(4, self.dmp.match_main("I am the very model of a modern major general.", " that berry ", 5))
self.dmp.Match_Threshold = 0.5
# Test null inputs.
try:
self.dmp.match_main(None, None, 0)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
class PatchTest(DiffMatchPatchTest):
"""PATCH TEST FUNCTIONS"""
def testPatchObj(self):
# Patch Object.
p = dmp_module.patch_obj()
p.start1 = 20
p.start2 = 21
p.length1 = 18
p.length2 = 17
p.diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, "\nlaz")]
strp = str(p)
self.assertEquals("@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n", strp)
def testPatchFromText(self):
self.assertEquals([], self.dmp.patch_fromText(""))
strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n"
self.assertEquals(strp, str(self.dmp.patch_fromText(strp)[0]))
self.assertEquals("@@ -1 +1 @@\n-a\n+b\n", str(self.dmp.patch_fromText("@@ -1 +1 @@\n-a\n+b\n")[0]))
self.assertEquals("@@ -1,3 +0,0 @@\n-abc\n", str(self.dmp.patch_fromText("@@ -1,3 +0,0 @@\n-abc\n")[0]))
self.assertEquals("@@ -0,0 +1,3 @@\n+abc\n", str(self.dmp.patch_fromText("@@ -0,0 +1,3 @@\n+abc\n")[0]))
# Generates error.
try:
self.dmp.patch_fromText("Bad\nPatch\n")
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
def testPatchToText(self):
strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
p = self.dmp.patch_fromText(strp)
self.assertEquals(strp, self.dmp.patch_toText(p))
strp = "@@ -1,9 +1,9 @@\n-f\n+F\n oo+fooba\n@@ -7,9 +7,9 @@\n obar\n-,\n+.\n tes\n"
p = self.dmp.patch_fromText(strp)
self.assertEquals(strp, self.dmp.patch_toText(p))
def testPatchAddContext(self):
self.dmp.Patch_Margin = 4
p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps over the lazy dog.")
self.assertEquals("@@ -17,12 +17,18 @@\n fox \n-jump\n+somersault\n s ov\n", str(p))
# Same, but not enough trailing context.
p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps.")
self.assertEquals("@@ -17,10 +17,16 @@\n fox \n-jump\n+somersault\n s.\n", str(p))
# Same, but not enough leading context.
p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps.")
self.assertEquals("@@ -1,7 +1,8 @@\n Th\n-e\n+at\n qui\n", str(p))
# Same, but with ambiguity.
p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps. The quick brown fox crashes.")
self.assertEquals("@@ -1,27 +1,28 @@\n Th\n-e\n+at\n quick brown fox jumps. \n", str(p))
def testPatchMake(self):
# Null case.
patches = self.dmp.patch_make("", "")
self.assertEquals("", self.dmp.patch_toText(patches))
text1 = "The quick brown fox jumps over the lazy dog."
text2 = "That quick brown fox jumped over a lazy dog."
# Text2+Text1 inputs.
expectedPatch = "@@ -1,8 +1,7 @@\n Th\n-at\n+e\n qui\n@@ -21,17 +21,18 @@\n jump\n-ed\n+s\n over \n-a\n+the\n laz\n"
# The second patch must be "-21,17 +21,18", not "-22,17 +21,18" due to rolling context.
patches = self.dmp.patch_make(text2, text1)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Text2 inputs.
expectedPatch = "@@ -1,11 +1,12 @@\n Th\n-e\n+at\n quick b\n@@ -22,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
patches = self.dmp.patch_make(text1, text2)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Diff input.
diffs = self.dmp.diff_main(text1, text2, False)
patches = self.dmp.patch_make(diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Diff inputs.
patches = self.dmp.patch_make(text1, diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Text2+Diff inputs (deprecated).
patches = self.dmp.patch_make(text1, text2, diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Character encoding.
patches = self.dmp.patch_make("`1234567890-=[]\\;',./", "~!@#$%^&*()_+{}|:\"<>?")
self.assertEquals("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n", self.dmp.patch_toText(patches))
# Character decoding.
diffs = [(self.dmp.DIFF_DELETE, "`1234567890-=[]\\;',./"), (self.dmp.DIFF_INSERT, "~!@#$%^&*()_+{}|:\"<>?")]
self.assertEquals(diffs, self.dmp.patch_fromText("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n")[0].diffs)
# Long string with repeats.
text1 = ""
for x in range(100):
text1 += "abcdef"
text2 = text1 + "123"
expectedPatch = "@@ -573,28 +573,31 @@\n cdefabcdefabcdefabcdefabcdef\n+123\n"
patches = self.dmp.patch_make(text1, text2)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Test null inputs.
try:
self.dmp.patch_make(None, None)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
def testPatchSplitMax(self):
# Assumes that Match_MaxBits is 32.
patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz01234567890", "XabXcdXefXghXijXklXmnXopXqrXstXuvXwxXyzX01X23X45X67X89X0")
self.dmp.patch_splitMax(patches)
self.assertEquals("@@ -1,32 +1,46 @@\n+X\n ab\n+X\n cd\n+X\n ef\n+X\n gh\n+X\n ij\n+X\n kl\n+X\n mn\n+X\n op\n+X\n qr\n+X\n st\n+X\n uv\n+X\n wx\n+X\n yz\n+X\n 012345\n@@ -25,13 +39,18 @@\n zX01\n+X\n 23\n+X\n 45\n+X\n 67\n+X\n 89\n+X\n 0\n", self.dmp.patch_toText(patches))
patches = self.dmp.patch_make("abcdef1234567890123456789012345678901234567890123456789012345678901234567890uvwxyz", "abcdefuvwxyz")
oldToText = self.dmp.patch_toText(patches)
self.dmp.patch_splitMax(patches)
self.assertEquals(oldToText, self.dmp.patch_toText(patches))
patches = self.dmp.patch_make("1234567890123456789012345678901234567890123456789012345678901234567890", "abc")
self.dmp.patch_splitMax(patches)
self.assertEquals("@@ -1,32 +1,4 @@\n-1234567890123456789012345678\n 9012\n@@ -29,32 +1,4 @@\n-9012345678901234567890123456\n 7890\n@@ -57,14 +1,3 @@\n-78901234567890\n+abc\n", self.dmp.patch_toText(patches))
patches = self.dmp.patch_make("abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1", "abcdefghij , h : 1 , t : 1 abcdefghij , h : 1 , t : 1 abcdefghij , h : 0 , t : 1")
self.dmp.patch_splitMax(patches)
self.assertEquals("@@ -2,32 +2,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n@@ -29,32 +29,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n", self.dmp.patch_toText(patches))
def testPatchAddPadding(self):
# Both edges full.
patches = self.dmp.patch_make("", "test")
self.assertEquals("@@ -0,0 +1,4 @@\n+test\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEquals("@@ -1,8 +1,12 @@\n %01%02%03%04\n+test\n %01%02%03%04\n", self.dmp.patch_toText(patches))
# Both edges partial.
patches = self.dmp.patch_make("XY", "XtestY")
self.assertEquals("@@ -1,2 +1,6 @@\n X\n+test\n Y\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEquals("@@ -2,8 +2,12 @@\n %02%03%04X\n+test\n Y%01%02%03\n", self.dmp.patch_toText(patches))
# Both edges none.
patches = self.dmp.patch_make("XXXXYYYY", "XXXXtestYYYY")
self.assertEquals("@@ -1,8 +1,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEquals("@@ -5,8 +5,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))
def testPatchApply(self):
self.dmp.Match_Distance = 1000
self.dmp.Match_Threshold = 0.5
self.dmp.Patch_DeleteThreshold = 0.5
# Null case.
patches = self.dmp.patch_make("", "")
results = self.dmp.patch_apply(patches, "Hello world.")
self.assertEquals(("Hello world.", []), results)
# Exact match.
patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "That quick brown fox jumped over a lazy dog.")
results = self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
self.assertEquals(("That quick brown fox jumped over a lazy dog.", [True, True]), results)
# Partial match.
results = self.dmp.patch_apply(patches, "The quick red rabbit jumps over the tired tiger.")
self.assertEquals(("That quick red rabbit jumped over a tired tiger.", [True, True]), results)
# Failed match.
results = self.dmp.patch_apply(patches, "I am the very model of a modern major general.")
self.assertEquals(("I am the very model of a modern major general.", [False, False]), results)
# Big delete, small change.
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x123456789012345678901234567890-----++++++++++-----123456789012345678901234567890y")
self.assertEquals(("xabcy", [True, True]), results)
# Big delete, big change 1.
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
self.assertEquals(("xabc12345678901234567890---------------++++++++++---------------12345678901234567890y", [False, True]), results)
# Big delete, big change 2.
self.dmp.Patch_DeleteThreshold = 0.6
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
self.assertEquals(("xabcy", [True, True]), results)
self.dmp.Patch_DeleteThreshold = 0.5
# Compensate for failed patch.
self.dmp.Match_Threshold = 0.0
self.dmp.Match_Distance = 0
patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz--------------------1234567890", "abcXXXXXXXXXXdefghijklmnopqrstuvwxyz--------------------1234567YYYYYYYYYY890")
results = self.dmp.patch_apply(patches, "ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567890")
self.assertEquals(("ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567YYYYYYYYYY890", [False, True]), results)
self.dmp.Match_Threshold = 0.5
self.dmp.Match_Distance = 1000
# No side effects.
patches = self.dmp.patch_make("", "test")
patchstr = self.dmp.patch_toText(patches)
results = self.dmp.patch_apply(patches, "")
self.assertEquals(patchstr, self.dmp.patch_toText(patches))
# No side effects with major delete.
patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "Woof")
patchstr = self.dmp.patch_toText(patches)
self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
self.assertEquals(patchstr, self.dmp.patch_toText(patches))
# Edge exact match.
patches = self.dmp.patch_make("", "test")
self.dmp.patch_apply(patches, "")
self.assertEquals(("test", [True]), results)
# Near edge exact match.
patches = self.dmp.patch_make("XY", "XtestY")
results = self.dmp.patch_apply(patches, "XY")
self.assertEquals(("XtestY", [True]), results)
# Edge partial match.
patches = self.dmp.patch_make("y", "y123")
results = self.dmp.patch_apply(patches, "x")
self.assertEquals(("x123", [True]), results)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "028cc7d63c733d9cee2cf0ce320544eb",
"timestamp": "",
"source": "github",
"line_count": 847,
"max_line_length": 408,
"avg_line_length": 47.91027154663518,
"alnum_prop": 0.6453918186298669,
"repo_name": "lemonad/methodiki",
"id": "11a01f23c7820a07f99d94c97a09c6389c8de47f",
"size": "40602",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "methodiki/third_party/google_diff_match_patch/diff_match_patch_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "124489"
},
{
"name": "Python",
"bytes": "370142"
},
{
"name": "Shell",
"bytes": "138"
}
],
"symlink_target": ""
} |
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_param_EtherTap', [dirname(__file__)])
except ImportError:
import _param_EtherTap
return _param_EtherTap
if fp is not None:
try:
_mod = imp.load_module('_param_EtherTap', fp, pathname, description)
finally:
fp.close()
return _mod
_param_EtherTap = swig_import_helper()
del swig_import_helper
else:
import _param_EtherTap
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static) or hasattr(self,name):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
import param_EtherDump
import param_SimObject
import param_EtherObject
class EtherTap(param_EtherObject.EtherObject):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
EtherTap_swigregister = _param_EtherTap.EtherTap_swigregister
EtherTap_swigregister(EtherTap)
class EtherTapParams(param_EtherObject.EtherObjectParams):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def create(self): return _param_EtherTap.EtherTapParams_create(self)
bufsz = _swig_property(_param_EtherTap.EtherTapParams_bufsz_get, _param_EtherTap.EtherTapParams_bufsz_set)
dump = _swig_property(_param_EtherTap.EtherTapParams_dump_get, _param_EtherTap.EtherTapParams_dump_set)
port = _swig_property(_param_EtherTap.EtherTapParams_port_get, _param_EtherTap.EtherTapParams_port_set)
def __init__(self):
this = _param_EtherTap.new_EtherTapParams()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _param_EtherTap.delete_EtherTapParams
__del__ = lambda self : None;
EtherTapParams_swigregister = _param_EtherTap.EtherTapParams_swigregister
EtherTapParams_swigregister(EtherTapParams)
| {
"content_hash": "33a7d8a27cdba9aa46a9cb52b07df220",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 110,
"avg_line_length": 37.05050505050505,
"alnum_prop": 0.6523991275899673,
"repo_name": "silkyar/570_Big_Little",
"id": "13f648f8de785058c2178b03426156bcc4488fa7",
"size": "3872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/ARM/python/m5/internal/param_EtherTap.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "232078"
},
{
"name": "C",
"bytes": "887097"
},
{
"name": "C++",
"bytes": "52497889"
},
{
"name": "D",
"bytes": "13736198"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "Java",
"bytes": "3096"
},
{
"name": "JavaScript",
"bytes": "78818"
},
{
"name": "Perl",
"bytes": "13199821"
},
{
"name": "Prolog",
"bytes": "977139"
},
{
"name": "Python",
"bytes": "3831426"
},
{
"name": "Ruby",
"bytes": "19404"
},
{
"name": "Scilab",
"bytes": "14370"
},
{
"name": "Shell",
"bytes": "16704"
},
{
"name": "Visual Basic",
"bytes": "2884"
},
{
"name": "XML",
"bytes": "16048"
}
],
"symlink_target": ""
} |
import re
import falcon
from monasca_common.simport import simport
from monasca_common.validation import metrics as metric_validation
from oslo_config import cfg
from oslo_log import log
from oslo_utils import encodeutils
import pyparsing
import six
from monasca_api.api import alarm_definitions_api_v2
from monasca_api.common.repositories import exceptions
import monasca_api.expression_parser.alarm_expr_parser
from monasca_api.v2.common.exceptions import HTTPBadRequestError
from monasca_api.v2.common.exceptions import HTTPUnprocessableEntityError
from monasca_api.v2.common.schemas import (
alarm_definition_request_body_schema as schema_alarms)
from monasca_api.v2.common import validation
from monasca_api.v2.reference import alarming
from monasca_api.v2.reference import helpers
from monasca_api.v2.reference import resource
LOG = log.getLogger(__name__)
class AlarmDefinitions(alarm_definitions_api_v2.AlarmDefinitionsV2API,
alarming.Alarming):
def __init__(self):
try:
super(AlarmDefinitions, self).__init__()
self._region = cfg.CONF.region
self._alarm_definitions_repo = simport.load(
cfg.CONF.repositories.alarm_definitions_driver)()
except Exception as ex:
LOG.exception(ex)
raise exceptions.RepositoryException(ex)
@resource.resource_try_catch_block
def on_post(self, req, res):
helpers.validate_authorization(req, ['api:alarms:definition:post'])
alarm_definition = helpers.from_json(req)
self._validate_alarm_definition(alarm_definition)
name = get_query_alarm_definition_name(alarm_definition)
expression = get_query_alarm_definition_expression(alarm_definition)
description = get_query_alarm_definition_description(alarm_definition)
severity = get_query_alarm_definition_severity(alarm_definition)
match_by = get_query_alarm_definition_match_by(alarm_definition)
alarm_actions = get_query_alarm_definition_alarm_actions(
alarm_definition)
undetermined_actions = get_query_alarm_definition_undetermined_actions(
alarm_definition)
ok_actions = get_query_ok_actions(alarm_definition)
result = self._alarm_definition_create(req.project_id, name, expression,
description, severity, match_by,
alarm_actions,
undetermined_actions,
ok_actions)
helpers.add_links_to_resource(result, req.uri)
res.body = helpers.to_json(result)
res.status = falcon.HTTP_201
@resource.resource_try_catch_block
def on_get(self, req, res, alarm_definition_id=None):
helpers.validate_authorization(req, ['api:alarms:definition:get'])
if alarm_definition_id is None:
name = helpers.get_query_name(req)
dimensions = helpers.get_query_dimensions(req)
severity = helpers.get_query_param(req, "severity", default_val=None)
if severity is not None:
validation.validate_severity_query(severity)
severity = severity.upper()
sort_by = helpers.get_query_param(req, 'sort_by', default_val=None)
if sort_by is not None:
if isinstance(sort_by, six.string_types):
sort_by = sort_by.split(',')
allowed_sort_by = {'id', 'name', 'severity',
'updated_at', 'created_at'}
validation.validate_sort_by(sort_by, allowed_sort_by)
offset = helpers.get_query_param(req, 'offset')
if offset is not None and not isinstance(offset, int):
try:
offset = int(offset)
except Exception:
raise HTTPUnprocessableEntityError(
'Unprocessable Entity',
'Offset value {} must be an integer'.format(offset))
result = self._alarm_definition_list(req.project_id, name,
dimensions, severity,
req.uri, sort_by,
offset, req.limit)
else:
result = self._alarm_definition_show(req.project_id,
alarm_definition_id)
helpers.add_links_to_resource(result,
re.sub('/' + alarm_definition_id, '',
req.uri))
res.body = helpers.to_json(result)
res.status = falcon.HTTP_200
@resource.resource_try_catch_block
def on_put(self, req, res, alarm_definition_id=None):
if not alarm_definition_id:
raise HTTPBadRequestError('Bad Request', 'Alarm definition ID not provided')
helpers.validate_authorization(req, ['api:alarms:definition:put'])
alarm_definition = helpers.from_json(req)
self._validate_alarm_definition(alarm_definition, require_all=True)
name = get_query_alarm_definition_name(alarm_definition)
expression = get_query_alarm_definition_expression(alarm_definition)
actions_enabled = (
get_query_alarm_definition_actions_enabled(alarm_definition))
description = get_query_alarm_definition_description(alarm_definition)
alarm_actions = get_query_alarm_definition_alarm_actions(alarm_definition)
ok_actions = get_query_ok_actions(alarm_definition)
undetermined_actions = get_query_alarm_definition_undetermined_actions(
alarm_definition)
match_by = get_query_alarm_definition_match_by(alarm_definition)
severity = get_query_alarm_definition_severity(alarm_definition)
result = self._alarm_definition_update_or_patch(req.project_id,
alarm_definition_id,
name,
expression,
actions_enabled,
description,
alarm_actions,
ok_actions,
undetermined_actions,
match_by,
severity,
patch=False)
helpers.add_links_to_resource(
result, re.sub('/' + alarm_definition_id, '', req.uri))
res.body = helpers.to_json(result)
res.status = falcon.HTTP_200
@resource.resource_try_catch_block
def on_patch(self, req, res, alarm_definition_id=None):
if not alarm_definition_id:
raise HTTPBadRequestError('Bad Request', 'Alarm definition ID not provided')
helpers.validate_authorization(req, ['api:alarms:definition:patch'])
alarm_definition = helpers.from_json(req)
# Optional args
name = get_query_alarm_definition_name(alarm_definition,
return_none=True)
expression = get_query_alarm_definition_expression(alarm_definition,
return_none=True)
actions_enabled = (
get_query_alarm_definition_actions_enabled(alarm_definition,
return_none=True))
description = get_query_alarm_definition_description(alarm_definition,
return_none=True)
alarm_actions = get_query_alarm_definition_alarm_actions(
alarm_definition, return_none=True)
ok_actions = get_query_ok_actions(alarm_definition, return_none=True)
undetermined_actions = get_query_alarm_definition_undetermined_actions(
alarm_definition, return_none=True)
match_by = get_query_alarm_definition_match_by(alarm_definition,
return_none=True)
severity = get_query_alarm_definition_severity(alarm_definition,
return_none=True)
result = self._alarm_definition_update_or_patch(req.project_id,
alarm_definition_id,
name,
expression,
actions_enabled,
description,
alarm_actions,
ok_actions,
undetermined_actions,
match_by,
severity,
patch=True)
helpers.add_links_to_resource(
result, re.sub('/' + alarm_definition_id, '', req.uri))
res.body = helpers.to_json(result)
res.status = falcon.HTTP_200
@resource.resource_try_catch_block
def on_delete(self, req, res, alarm_definition_id=None):
if not alarm_definition_id:
raise HTTPBadRequestError('Bad Request', 'Alarm definition ID not provided')
helpers.validate_authorization(req, ['api:alarms:definition:delete'])
self._alarm_definition_delete(req.project_id, alarm_definition_id)
res.status = falcon.HTTP_204
def _validate_name_not_conflicting(self, tenant_id, name, expected_id=None):
definitions = self._alarm_definitions_repo.get_alarm_definitions(tenant_id=tenant_id,
name=name,
dimensions=None,
severity=None,
sort_by=None,
offset=None,
limit=0)
if definitions:
if not expected_id:
LOG.warning(
"Found existing definition for {} with tenant_id {}".format(name, tenant_id))
raise exceptions.AlreadyExistsException(
"An alarm definition with the name {} already exists" .format(name))
found_definition_id = definitions[0]['id']
if found_definition_id != expected_id:
LOG.warning(
"Found existing alarm definition for {} with tenant_id {} with unexpected id {}"
.format(name, tenant_id, found_definition_id))
raise exceptions.AlreadyExistsException(
"An alarm definition with the name {} already exists with id {}"
.format(name, found_definition_id))
def _alarm_definition_show(self, tenant_id, id):
alarm_definition_row = (
self._alarm_definitions_repo.get_alarm_definition(tenant_id, id))
return self._build_alarm_definition_show_result(alarm_definition_row)
def _build_alarm_definition_show_result(self, alarm_definition_row):
match_by = get_comma_separated_str_as_list(
alarm_definition_row['match_by'])
alarm_actions_list = get_comma_separated_str_as_list(
alarm_definition_row['alarm_actions'])
ok_actions_list = get_comma_separated_str_as_list(
alarm_definition_row['ok_actions'])
undetermined_actions_list = get_comma_separated_str_as_list(
alarm_definition_row['undetermined_actions'])
description = (alarm_definition_row['description']
if alarm_definition_row['description'] is not None else None)
expression = alarm_definition_row['expression']
is_deterministic = is_definition_deterministic(expression)
result = {
u'actions_enabled': alarm_definition_row['actions_enabled'] == 1,
u'alarm_actions': alarm_actions_list,
u'undetermined_actions': undetermined_actions_list,
u'ok_actions': ok_actions_list,
u'description': description,
u'expression': expression,
u'deterministic': is_deterministic,
u'id': alarm_definition_row['id'],
u'match_by': match_by,
u'name': alarm_definition_row['name'],
u'severity': alarm_definition_row['severity'].upper()
}
return result
def _alarm_definition_delete(self, tenant_id, id):
sub_alarm_definition_rows = (
self._alarm_definitions_repo.get_sub_alarm_definitions(id))
alarm_metric_rows = self._alarm_definitions_repo.get_alarm_metrics(
tenant_id, id)
sub_alarm_rows = self._alarm_definitions_repo.get_sub_alarms(
tenant_id, id)
if not self._alarm_definitions_repo.delete_alarm_definition(
tenant_id, id):
raise falcon.HTTPNotFound
self._send_alarm_definition_deleted_event(id,
sub_alarm_definition_rows)
self._send_alarm_event(u'alarm-deleted', tenant_id, id,
alarm_metric_rows, sub_alarm_rows, None, None)
def _alarm_definition_list(self, tenant_id, name, dimensions, severity, req_uri, sort_by,
offset, limit):
alarm_definition_rows = (
self._alarm_definitions_repo.get_alarm_definitions(tenant_id, name,
dimensions, severity, sort_by,
offset, limit))
result = []
for alarm_definition_row in alarm_definition_rows:
match_by = get_comma_separated_str_as_list(
alarm_definition_row['match_by'])
alarm_actions_list = get_comma_separated_str_as_list(
alarm_definition_row['alarm_actions'])
ok_actions_list = get_comma_separated_str_as_list(
alarm_definition_row['ok_actions'])
undetermined_actions_list = get_comma_separated_str_as_list(
alarm_definition_row['undetermined_actions'])
expression = alarm_definition_row['expression']
is_deterministic = is_definition_deterministic(expression)
ad = {u'id': alarm_definition_row['id'],
u'name': alarm_definition_row['name'],
u'description': alarm_definition_row['description'] if (
alarm_definition_row['description']) else u'',
u'expression': alarm_definition_row['expression'],
u'deterministic': is_deterministic,
u'match_by': match_by,
u'severity': alarm_definition_row['severity'].upper(),
u'actions_enabled':
alarm_definition_row['actions_enabled'] == 1,
u'alarm_actions': alarm_actions_list,
u'ok_actions': ok_actions_list,
u'undetermined_actions': undetermined_actions_list}
helpers.add_links_to_resource(ad, req_uri)
result.append(ad)
result = helpers.paginate_alarming(result, req_uri, limit)
return result
def _validate_alarm_definition(self, alarm_definition, require_all=False):
try:
schema_alarms.validate(alarm_definition, require_all=require_all)
if 'match_by' in alarm_definition:
for name in alarm_definition['match_by']:
metric_validation.validate_dimension_key(name)
except Exception as ex:
LOG.debug(ex)
raise HTTPUnprocessableEntityError('Unprocessable Entity', str(ex))
def _alarm_definition_update_or_patch(self, tenant_id,
definition_id,
name,
expression,
actions_enabled,
description,
alarm_actions,
ok_actions,
undetermined_actions,
match_by,
severity,
patch):
if expression:
try:
sub_expr_list = (
monasca_api.expression_parser.alarm_expr_parser.
AlarmExprParser(expression).sub_expr_list)
except (pyparsing.ParseException,
pyparsing.ParseFatalException) as ex:
LOG.exception(ex)
title = "Invalid alarm expression"
msg = "parser failed on expression '{}' at column {}: {}".format(
expression.encode('utf8'), str(ex.column).encode('utf8'),
ex.msg.encode('utf8'))
raise HTTPUnprocessableEntityError(title, msg)
else:
sub_expr_list = None
if name:
self._validate_name_not_conflicting(tenant_id, name, expected_id=definition_id)
alarm_def_row, sub_alarm_def_dicts = (
self._alarm_definitions_repo.update_or_patch_alarm_definition(
tenant_id,
definition_id,
name,
expression,
sub_expr_list,
actions_enabled,
description,
alarm_actions,
ok_actions,
undetermined_actions,
match_by,
severity,
patch))
old_sub_alarm_def_event_dict = (
self._build_sub_alarm_def_update_dict(
sub_alarm_def_dicts['old']))
new_sub_alarm_def_event_dict = (
self._build_sub_alarm_def_update_dict(sub_alarm_def_dicts[
'new']))
changed_sub_alarm_def_event_dict = (
self._build_sub_alarm_def_update_dict(sub_alarm_def_dicts[
'changed']))
unchanged_sub_alarm_def_event_dict = (
self._build_sub_alarm_def_update_dict(sub_alarm_def_dicts[
'unchanged']))
result = self._build_alarm_definition_show_result(alarm_def_row)
# Not all of the passed-in parameters will be set if this is called
# from on_patch vs. on_update. The alarm-definition-updated event
# MUST have all of the fields set, so use the dict built from the
# data returned from the database
alarm_def_event_dict = (
{u'tenantId': tenant_id,
u'alarmDefinitionId': definition_id,
u'alarmName': result['name'],
u'alarmDescription': result['description'],
u'alarmExpression': result['expression'],
u'severity': result['severity'],
u'matchBy': result['match_by'],
u'alarmActionsEnabled': result['actions_enabled'],
u'oldAlarmSubExpressions': old_sub_alarm_def_event_dict,
u'changedSubExpressions': changed_sub_alarm_def_event_dict,
u'unchangedSubExpressions': unchanged_sub_alarm_def_event_dict,
u'newAlarmSubExpressions': new_sub_alarm_def_event_dict})
alarm_definition_updated_event = (
{u'alarm-definition-updated': alarm_def_event_dict})
self.send_event(self.events_message_queue,
alarm_definition_updated_event)
return result
def _build_sub_alarm_def_update_dict(self, sub_alarm_def_dict):
sub_alarm_def_update_dict = {}
for id, sub_alarm_def in sub_alarm_def_dict.items():
dimensions = {}
for name, value in sub_alarm_def.dimensions.items():
dimensions[name] = value
sub_alarm_def_update_dict[sub_alarm_def.id] = {}
sub_alarm_def_update_dict[sub_alarm_def.id][u'function'] = (
sub_alarm_def.function)
sub_alarm_def_update_dict[sub_alarm_def.id][
u'metricDefinition'] = (
{u'name': sub_alarm_def.metric_name,
u'dimensions': dimensions})
sub_alarm_def_update_dict[sub_alarm_def.id][u'operator'] = (
sub_alarm_def.operator)
sub_alarm_def_update_dict[sub_alarm_def.id][u'threshold'] = (
sub_alarm_def.threshold)
sub_alarm_def_update_dict[sub_alarm_def.id][u'period'] = (
sub_alarm_def.period)
sub_alarm_def_update_dict[sub_alarm_def.id][u'periods'] = (
sub_alarm_def.periods)
sub_alarm_def_update_dict[sub_alarm_def.id][u'expression'] = (
sub_alarm_def.expression)
return sub_alarm_def_update_dict
def _alarm_definition_create(self, tenant_id, name, expression,
description, severity, match_by,
alarm_actions, undetermined_actions,
ok_actions):
try:
sub_expr_list = (
monasca_api.expression_parser.alarm_expr_parser.
AlarmExprParser(expression).sub_expr_list)
except (pyparsing.ParseException,
pyparsing.ParseFatalException) as ex:
LOG.exception(ex)
title = u"Invalid alarm expression"
msg = u"parser failed on expression '{}' at column {}: {}".format(
encodeutils.safe_decode(expression, 'utf-8'),
encodeutils.safe_decode(str(ex.column), 'utf-8'),
encodeutils.safe_decode(ex.msg, 'utf-8'))
raise HTTPUnprocessableEntityError(title, msg)
self._validate_name_not_conflicting(tenant_id, name)
alarm_definition_id = (
self._alarm_definitions_repo.
create_alarm_definition(tenant_id,
name,
expression,
sub_expr_list,
description,
severity,
match_by,
alarm_actions,
undetermined_actions,
ok_actions))
self._send_alarm_definition_created_event(tenant_id,
alarm_definition_id,
name, expression,
sub_expr_list,
description, match_by)
result = (
{u'alarm_actions': alarm_actions, u'ok_actions': ok_actions,
u'description': description, u'match_by': match_by,
u'severity': severity, u'actions_enabled': True,
u'undetermined_actions': undetermined_actions,
u'expression': expression, u'id': alarm_definition_id,
u'deterministic': is_definition_deterministic(expression),
u'name': name})
return result
def _send_alarm_definition_deleted_event(self, alarm_definition_id,
sub_alarm_definition_rows):
sub_alarm_definition_deleted_event_msg = {}
alarm_definition_deleted_event_msg = {u"alarm-definition-deleted": {
u"alarmDefinitionId": alarm_definition_id,
u'subAlarmMetricDefinitions':
sub_alarm_definition_deleted_event_msg}}
for sub_alarm_definition in sub_alarm_definition_rows:
sub_alarm_definition_deleted_event_msg[
sub_alarm_definition['id']] = {
u'name': sub_alarm_definition['metric_name']}
dimensions = {}
sub_alarm_definition_deleted_event_msg[sub_alarm_definition['id']][
u'dimensions'] = dimensions
if sub_alarm_definition['dimensions']:
for dimension in sub_alarm_definition['dimensions'].split(','):
parsed_dimension = dimension.split('=')
dimensions[parsed_dimension[0]] = parsed_dimension[1]
self.send_event(self.events_message_queue,
alarm_definition_deleted_event_msg)
def _send_alarm_definition_created_event(self, tenant_id,
alarm_definition_id, name,
expression, sub_expr_list,
description, match_by):
alarm_definition_created_event_msg = {
u'alarm-definition-created': {u'tenantId': tenant_id,
u'alarmDefinitionId':
alarm_definition_id,
u'alarmName': name,
u'alarmDescription': description,
u'alarmExpression': expression,
u'matchBy': match_by}}
sub_expr_event_msg = {}
for sub_expr in sub_expr_list:
sub_expr_event_msg[sub_expr.id] = {
u'function': sub_expr.normalized_func}
metric_definition = {u'name': sub_expr.metric_name}
sub_expr_event_msg[sub_expr.id][
u'metricDefinition'] = metric_definition
dimensions = {}
for dimension in sub_expr.dimensions_as_list:
parsed_dimension = dimension.split("=")
dimensions[parsed_dimension[0]] = parsed_dimension[1]
metric_definition[u'dimensions'] = dimensions
sub_expr_event_msg[sub_expr.id][
u'operator'] = sub_expr.normalized_operator
sub_expr_event_msg[sub_expr.id][u'threshold'] = sub_expr.threshold
sub_expr_event_msg[sub_expr.id][u'period'] = sub_expr.period
sub_expr_event_msg[sub_expr.id][u'periods'] = sub_expr.periods
sub_expr_event_msg[sub_expr.id][
u'expression'] = sub_expr.fmtd_sub_expr_str
alarm_definition_created_event_msg[u'alarm-definition-created'][
u'alarmSubExpressions'] = sub_expr_event_msg
self.send_event(self.events_message_queue,
alarm_definition_created_event_msg)
def get_query_alarm_definition_name(alarm_definition, return_none=False):
try:
if 'name' in alarm_definition:
name = alarm_definition['name']
return name
else:
if return_none:
return None
else:
raise Exception("Missing name")
except Exception as ex:
LOG.debug(ex)
raise HTTPUnprocessableEntityError('Unprocessable Entity', str(ex))
def get_query_alarm_definition_expression(alarm_definition,
return_none=False):
try:
if 'expression' in alarm_definition:
expression = alarm_definition['expression']
return expression
else:
if return_none:
return None
else:
raise Exception("Missing expression")
except Exception as ex:
LOG.debug(ex)
raise HTTPUnprocessableEntityError('Unprocessable Entity', str(ex))
def get_query_alarm_definition_description(alarm_definition,
return_none=False):
if 'description' in alarm_definition:
return alarm_definition['description']
else:
if return_none:
return None
else:
return ''
def get_query_alarm_definition_severity(alarm_definition, return_none=False):
if 'severity' in alarm_definition:
severity = encodeutils.safe_decode(alarm_definition['severity'], 'utf-8').upper()
if severity not in ['LOW', 'MEDIUM', 'HIGH', 'CRITICAL']:
raise HTTPUnprocessableEntityError('Unprocessable Entity', 'Invalid severity')
return severity
else:
if return_none:
return None
else:
return 'LOW'
def get_query_alarm_definition_match_by(alarm_definition, return_none=False):
if 'match_by' in alarm_definition:
match_by = alarm_definition['match_by']
return match_by
else:
if return_none:
return None
else:
return []
def get_query_alarm_definition_alarm_actions(alarm_definition,
return_none=False):
if 'alarm_actions' in alarm_definition:
alarm_actions = alarm_definition['alarm_actions']
return alarm_actions
else:
if return_none:
return None
else:
return []
def get_query_alarm_definition_undetermined_actions(alarm_definition,
return_none=False):
if 'undetermined_actions' in alarm_definition:
undetermined_actions = alarm_definition['undetermined_actions']
return undetermined_actions
else:
if return_none:
return None
else:
return []
def get_query_ok_actions(alarm_definition, return_none=False):
if 'ok_actions' in alarm_definition:
ok_actions = alarm_definition['ok_actions']
return ok_actions
else:
if return_none:
return None
else:
return []
def get_query_alarm_definition_actions_enabled(alarm_definition,
required=False,
return_none=False):
try:
if 'actions_enabled' in alarm_definition:
enabled_actions = alarm_definition['actions_enabled']
return enabled_actions
else:
if return_none:
return None
elif required:
raise Exception("Missing actions-enabled")
else:
return ''
except Exception as ex:
LOG.debug(ex)
raise HTTPUnprocessableEntityError('Unprocessable Entity', str(ex))
def get_comma_separated_str_as_list(comma_separated_str):
if not comma_separated_str:
return []
else:
return encodeutils.safe_decode(comma_separated_str, 'utf-8').split(',')
def is_definition_deterministic(expression):
"""Evaluates if found expression is deterministic or not.
In order to do that expression is parsed into sub expressions.
Each sub expression needs to be deterministic in order for
entity expression to be such.
Otherwise expression is non-deterministic.
:param str expression: expression to be evaluated
:return: true/false
:rtype: bool
"""
expr_parser = (monasca_api.expression_parser
.alarm_expr_parser.AlarmExprParser(expression))
sub_expressions = expr_parser.sub_expr_list
for sub_expr in sub_expressions:
if not sub_expr.deterministic:
return False
return True
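# Illustrative helper, not part of the original module: a minimal sketch of how
# is_definition_deterministic is meant to be used. The sample expression below
# is an assumption of typical Monasca alarm-expression syntax, chosen only for
# the demonstration; the authoritative grammar lives in AlarmExprParser.
def _example_is_definition_deterministic():
    expression = u'avg(cpu.user_perc{hostname=devstack}) > 10'
    # A plain threshold sub-expression carries no "deterministic" modifier,
    # so this is expected to evaluate to False.
    return is_definition_deterministic(expression)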
| {
"content_hash": "3bebd87cc9d5124ab5ff825b611aa9ce",
"timestamp": "",
"source": "github",
"line_count": 741,
"max_line_length": 100,
"avg_line_length": 43.18488529014845,
"alnum_prop": 0.53371875,
"repo_name": "stackforge/monasca-api",
"id": "a2913d27d042432dcaa2e03f4805addfb60978e9",
"size": "32635",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "monasca_api/v2/reference/alarm_definitions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "764767"
},
{
"name": "PLpgSQL",
"bytes": "4289"
},
{
"name": "Python",
"bytes": "710072"
},
{
"name": "Ruby",
"bytes": "3774"
},
{
"name": "Shell",
"bytes": "53573"
}
],
"symlink_target": ""
} |
'''
slurm2json.py: convert a slurm.conf to a machines.json input file
Copyright (c) 2017-2019 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import argparse
import json
import sys
import os
import re
def get_parser():
parser = argparse.ArgumentParser(
description="convert slurm.conf to machines.json")
parser.add_argument("--input", dest='input',
help='''one or more slurm config files, separated by commas.''',
type=str, default=None)
parser.add_argument("--update", dest='update',
help='''Update an already existing machines.json (or other)''',
default=False, action='store_true')
parser.add_argument("--disclude-part", dest='disclude_part',
help='''Partitions to disclude, separated by commas''',
type=str, default=None)
parser.add_argument("--print", dest='print',
help="print to screen instead of saving to machines.json",
default=False, action='store_true')
parser.add_argument("--quiet", dest='quiet',
help='''Suppress all output (other than print)''',
default=False, action='store_true')
parser.add_argument("--force", dest='force',
help="Force overwrite of the output file, if it exists.",
default=False, action='store_true')
# Two images, for similarity function
parser.add_argument("--outfile", dest='outfile',
help="output json file. Default is machines.json",
type=str, default='machines.json')
return parser
def main():
parser = get_parser()
try:
args = parser.parse_args()
except:
sys.exit(0)
# User must specify a slurm.conf
if args.input == None:
parser.print_help()
message('Please supply a slurm.conf with --input.', args.quiet)
sys.exit(1)
if os.path.exists(args.outfile) and args.force is False and args.update is False and args.print is False:
message("%s already exists! Use --force to force overwrite." % args.outfile)
sys.exit(1)
print("Parsing %s, please wait!" % args.input)
# If the user wants an update, the output file must exist
if args.update is True:
if not os.path.exists(args.outfile):
message("Cannot find %s. Did you specify the right path?" % args.outfile)
sys.exit(1)
message("Found %s to update." % args.outfile, args.quiet)
machines = read_json(args.outfile)
else:
machines = dict()
# Does the user want to disclude partitions?
disclude_part = None
if args.disclude_part is not None:
disclude_part = args.disclude_part.split(',')
message("%s will not be included." %', '.join(disclude_part),args.quiet)
else:
message("All partitions will be included.",args.quiet)
input_files = args.input.split(',')
for input_file in input_files:
if not os.path.exists(input_file):
message("Cannot find %s. Did you specify the right path?" % input_file)
sys.exit(1)
cluster = parse_config(config_file=input_file)
cluster_names = ",".join(list(cluster.keys()))
message('Adding cluster %s' %(cluster_names),args.quiet)
if disclude_part is not None:
cluster = disclude_partitions(cluster,disclude_part)
machines.update(cluster)
cluster_names = ",".join(list(machines.keys()))
message('Compiling clusters %s' %(cluster_names),args.quiet)
if args.print is True:
message(json.dumps(machines, indent=4, sort_keys=True))
else:
write_json(machines,args.outfile)
################################################################################
# Utils
################################################################################
def message(text,quiet=False):
if not quiet:
print(text)
def unpack_data(data):
config = data['config']
nodes = data['nodes']
partitions = data['partitions']
return config,nodes,partitions
def pack_data(config,nodes,partitions):
return {'config':config,
'nodes':nodes,
'partitions':partitions}
def combine_clusters(clusters):
machines = dict()
for cluster in clusters:
for cluster_name,metadata in cluster:
machines[cluster_name] = metadata
return machines
def read_file(file_name, clean=True, join=False):
'''read in a file, with optional "clean up" to remove
comments (lines starting with #) and empty lines
'''
with open(file_name,'r') as filey:
content = filey.readlines()
if clean is True:
content = [c.strip('\n')
for c in content
if not c.startswith('#')
and len(c.strip('\n')) > 0]
if join:
content = ''.join(content)
return content
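# Illustrative helper, not part of the original script: shows the clean-up
# behaviour of read_file on a small, made-up file written to a temp path.
def _example_read_file():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.conf', delete=False) as tmp:
        tmp.write("# a comment\n\nClusterName=demo\n")
        path = tmp.name
    # Comment lines and blank lines are dropped and newlines are stripped,
    # so only ['ClusterName=demo'] survives.
    return read_file(path)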
def write_json(json_obj,filename,mode="w"):
with open(filename,mode) as filey:
filey.writelines(json.dumps(json_obj, indent=4, separators=(',', ': ')))
return filename
def read_json(filename,mode='r'):
with open(filename,mode) as filey:
data = json.load(filey)
return data
def remove_comments(line):
return line.rsplit('#',1)[0].strip()
def parse_line_multi(line,keepers=None):
'''parse_line_multiple will return a dictionary with keys and
values for a line from a slurm conf, with variables expected to be separated
by spaces. If keepers is not defined, all is kept'''
parsed = dict()
lines = line.strip().split('#')[0].split(' ')
for line in lines:
if len(line) > 0:
params = line.split('=')
key = params[0]
key = "%s%s" %(key[0].capitalize(),key[1:])
value = params[-1].strip()
if keepers is not None:
if key in keepers:
parsed[key] = value
else:
if key != "\\":
parsed[key] = value
return parsed
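# Illustrative helper, not part of the original script: shows how a
# space-separated "Key=Value" line is turned into a dict. The sample line and
# keeper list are made up for the demonstration.
def _example_parse_line_multi():
    line = "NodeName=gpu-1 RealMemory=64000 Gres=gpu:4"
    # Without keepers, every Key=Value pair is returned:
    # {'NodeName': 'gpu-1', 'RealMemory': '64000', 'Gres': 'gpu:4'}
    everything = parse_line_multi(line)
    # With keepers, the result is restricted to the listed keys:
    # {'RealMemory': '64000'}
    only_memory = parse_line_multi(line, keepers=["RealMemory"])
    return everything, only_memory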
################################################################################
# Nodes
################################################################################
def get_node_variables():
return ["RealMemory",
"Gres",
"Weight",
"Feature",
"Default"]
def break_range_expressions(node_name):
parts = list(node_name)
current = ''
finished = []
opened = False
for c in range(len(parts)):
part = parts[c]
if part == '{':
if len(current) > 0:
finished.append(current)
opened = True
current='{'
elif part == '}':
if len(current) > 0:
finished.append("%s}" %current)
current=''
opened = False
else:
current = "%s%s" %(current,part)
if opened:
current = "%s}" %(current)
if current not in finished and len(current)>0:
finished.append(current)
return finished
def parse_single_node(node_name):
'''Parse a single string that describes a group of nodes,
e.g. gpu-27-{21,35}
'''
parts = break_range_expressions(node_name)
options = []
for part in parts:
node_options = []
if not re.search("^{|}$",part):
options.append([part])
else:
node_ranges = re.findall("[0-9]+-[0-9]+",part)
node_lists = re.findall("[0-9]+,[0-9]+",part)
for node_range in node_ranges:
start,end = [int(x) for x in node_range.split('-')]
node_options += [int(x) for x in range(start,end+1)]
for node_list in node_lists:
node_options += [int(x) for x in node_list.split(',')]
options.append(node_options)
final_options = options.pop(0)
while len(options) > 0:
option_set = options.pop(0)
new_options = []
for final_option in final_options:
for option in option_set:
new_options.append("%s%s" %(final_option,option))
final_options = new_options
return final_options
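# Illustrative helper, not part of the original script: the concrete node names
# below are made up and follow the brace notation described in the docstring.
def _example_parse_single_node():
    # A brace-enclosed list expands into one name per listed value:
    # ['gpu-27-21', 'gpu-27-35']
    listed = parse_single_node("gpu-27-{21,35}")
    # A brace-enclosed range expands into one name per value in the range:
    # ['node-1', 'node-2', 'node-3']
    ranged = parse_single_node("node-{1-3}")
    return listed, ranged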
def parse_node_names(line):
'''parse_node_names will take a whole list of nodes (multiple with
ranges and lists in brackets) and return a list of unique,
complete names.
'''
new_nodes = []
#nodelist = re.sub("\\\\| ","",line).split('=')[-1]
nodelist = re.sub("\\\\| ","", line)
nodelist = nodelist.replace('[','{').replace(']','}')
nodelist = re.split(',\s*(?![^{}]*\})', nodelist)
for node_name in nodelist:
contenders = [x for x in parse_single_node(node_name) if x not in new_nodes]
new_nodes = new_nodes + contenders
return list(set(new_nodes))
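# Illustrative helper, not part of the original script: expands slurm-style
# bracket notation as it appears in "NodeName=" or "Nodes=" values. The node
# list below is an assumption for the demonstration.
def _example_parse_node_names():
    names = parse_node_names("gpu-[1-3],cpu-5")
    # Order is not guaranteed (a set is used for de-duplication), so sort
    # before comparing: ['cpu-5', 'gpu-1', 'gpu-2', 'gpu-3']
    return sorted(names)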
def keep_going(name):
'''A function to filter a string to determine if
the calling function should continue. Returns False
if the string being checked contains any flag variables.
'''
skip_these = ['DEFAULT','test']
go_on = True
for skip in skip_these:
if name.startswith(skip):
go_on = False
return go_on
def parse_node_block(data):
'''line should be the first line popped that has 'NodeName'
and config is the entire config following that. The new node
entry is added to the global nodes.
'''
config, nodes, partitions = unpack_data(data)
line = parse_line_multi(config.pop(0))
if "NodeName" not in line:
return pack_data(config, nodes, partitions)
node = line['NodeName']
if not keep_going(node):
return pack_data(config,nodes,partitions)
# Get all variables for node group
keepers = get_node_variables()
node_names = parse_node_names(node)
del line['NodeName']
node_settings = line
done = False
while not done:
line = remove_comments(config.pop(0))
if not line.endswith('\\'):
done = True
updates = parse_line_multi(line, keepers)
node_settings.update(updates)
for node in node_names:
if node not in nodes:
nodes[node] = node_settings
nodes[node]['partitions'] = []
else:
nodes[node].update(node_settings)
return pack_data(config,nodes,partitions)
################################################################################
# Features and Defaults
################################################################################
def parse_features(data):
config,nodes,partitions = unpack_data(data)
features = dict()
for node_name,attributes in nodes.items():
if 'Feature' in attributes:
feature_list = attributes['Feature'].strip('"').split(',')
for partition in attributes['partitions']:
if partition not in features:
features[partition] = feature_list
else:
new_features = [x for x in feature_list
if x not in features[partition]]
features[partition] = features[partition] + new_features
return features
def find_defaults(data):
defaults = dict()
for key, datum in data.items():
if datum:
defaults[key] = []
for name, attributes in datum.items():
if "Default" in attributes:
defaults[key].append(name)
return defaults
################################################################################
# Partitions
################################################################################
def get_partition_variables():
return ["DefaultTime",
"Default",
"DefMemPerCPU",
"MaxMemPerCPU",
"AllowQos",
"Nodes"]
def disclude_partitions(cluster,disclude_parts):
for cluster_name, attributes in cluster.items():
if "partitions" in attributes:
for disclude_part in disclude_parts:
if disclude_part in attributes['partitions']:
del cluster[cluster_name]['partitions'][disclude_part]
if "nodes" in attributes:
for node_name, node_attributes in attributes['nodes'].items():
if "partitions" in node_attributes:
update = [x for x in cluster[cluster_name]['nodes'][node_name]['partitions']
if x not in disclude_parts]
cluster[cluster_name]['nodes'][node_name]['partitions'] = update
if "features" in attributes:
for disclude_part in disclude_parts:
if disclude_part in attributes['features']:
del cluster[cluster_name]['features'][disclude_part]
return cluster
def parse_partition_block(data):
'''line should be the first line popped that has 'PartitionName'
and config is the entire config following that. The new partition
entry is added to the global partitions. The next line (non partition)
is returned.
'''
config, nodes, partitions = unpack_data(data)
line = parse_line_multi(config.pop(0))
if "PartitionName" not in line:
return pack_data(config, nodes, partitions)
partition_name = line['PartitionName']
keepers = get_partition_variables()
if not keep_going(partition_name):
return pack_data(config,nodes,partitions)
# Get all variables for node group
new_partition = line
done = False
while not done:
line = remove_comments(config.pop(0))
if not line.endswith('\\'):
done = True
updates = parse_line_multi(line,keepers)
if "Nodes" in updates:
parts = parse_node_names(updates['Nodes'])
for node in parts:
if node in nodes:
if partition_name not in nodes[node]['partitions']:
nodes[node]['partitions'].append(partition_name)
else:
nodes[node] = {'partitions':[partition_name]}
# Note, we don't add an exhaustive list of nodes to each partition.
# But if we needed to, we could do that here.
updates['maxNodes'] = len(parts)
del updates['Nodes']
new_partition.update(updates)
partitions[partition_name] = new_partition
return pack_data(config,nodes,partitions)
################################################################################
# Main Parser
################################################################################
def parse_config(config_file):
'''parse a config file to return a complete list of machines
'''
machines = dict()
config = read_file(config_file)
data = {'partitions':{},
'nodes': {},
'config':config}
# If the configuration file doesn't have a ClusterName, fall back to "default"
cluster = "default"
while data['config']:
line = data['config'][0]
if line.startswith('ClusterName'):
line = data['config'].pop(0)
cluster = parse_line_multi(line)['ClusterName']
elif line.startswith('PartitionName'):
data = parse_partition_block(data)
elif line.startswith('NodeName'):
data = parse_node_block(data)
else:
data['config'].pop(0)
# Calculate features for each partition
machines[cluster] = dict()
machines[cluster]['features'] = parse_features(data)
# Find Defaults
machines[cluster]['defaults'] = find_defaults(data)
del data['config']
machines[cluster].update(data)
return machines
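# Illustrative helper, not part of the original script: writes a tiny, made-up
# slurm.conf to a temporary file and runs it through parse_config. The names
# and the trailing-backslash continuation lines are assumptions chosen for the
# demonstration.
def _example_parse_config():
    import tempfile
    conf = (
        "ClusterName=demo\n"
        "NodeName=node-[1-2] \\\n"
        "RealMemory=64000\n"
        "PartitionName=normal \\\n"
        "Nodes=node-[1-2] Default=YES\n"
    )
    with tempfile.NamedTemporaryFile('w', suffix='.conf', delete=False) as tmp:
        tmp.write(conf)
        path = tmp.name
    machines = parse_config(path)
    # machines['demo'] now holds 'nodes', 'partitions', 'features' and 'defaults'.
    return machines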
if __name__ == '__main__':
main()
| {
"content_hash": "e8c593bd8b4684e0c6b02374ddd8c299",
"timestamp": "",
"source": "github",
"line_count": 506,
"max_line_length": 109,
"avg_line_length": 33.32411067193676,
"alnum_prop": 0.5620922784960266,
"repo_name": "researchapps/job-maker",
"id": "430d7a18d0a8fa08ff810b9e899a9b1600a69c61",
"size": "16885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helpers/slurm2json.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2213"
},
{
"name": "Dockerfile",
"bytes": "320"
},
{
"name": "HTML",
"bytes": "10938"
},
{
"name": "JavaScript",
"bytes": "8718"
},
{
"name": "Python",
"bytes": "16885"
},
{
"name": "Shell",
"bytes": "2590"
}
],
"symlink_target": ""
} |
from keystone.contrib.extensions.admin.extension import BaseExtensionHandler
from keystone.controllers.services import ServicesController
from keystone.controllers.roles import RolesController
class ExtensionHandler(BaseExtensionHandler):
def map_extension_methods(self, mapper, options):
# Services
services_controller = ServicesController(options)
mapper.connect("/OS-KSADM/services",
controller=services_controller,
action="get_services",
conditions=dict(method=["GET"]))
mapper.connect("/OS-KSADM/services",
controller=services_controller,
action="create_service",
conditions=dict(method=["POST"]))
mapper.connect("/OS-KSADM/services/{service_id}",
controller=services_controller,
action="delete_service",
conditions=dict(method=["DELETE"]))
mapper.connect("/OS-KSADM/services/{service_id}",
controller=services_controller,
action="get_service",
conditions=dict(method=["GET"]))
#Roles
roles_controller = RolesController(options)
mapper.connect("/OS-KSADM/roles", controller=roles_controller,
action="create_role", conditions=dict(method=["POST"]))
mapper.connect("/OS-KSADM/roles", controller=roles_controller,
action="get_roles", conditions=dict(method=["GET"]))
mapper.connect("/OS-KSADM/roles/{role_id}",
controller=roles_controller, action="get_role",
conditions=dict(method=["GET"]))
mapper.connect("/OS-KSADM/roles/{role_id}",
controller=roles_controller, action="delete_role",
conditions=dict(method=["DELETE"]))
#User Roles
#Add/Delete Global role.
mapper.connect("/users/{user_id}/roles/OS-KSADM/{role_id}",
controller=roles_controller, action="add_role_to_user",
conditions=dict(method=["POST"]))
mapper.connect("/users/{user_id}/roles/OS-KSADM/{role_id}",
controller=roles_controller, action="delete_role_from_user",
conditions=dict(method=["DELETE"]))
#Add/Delete Tenant specific role.
mapper.connect(
"/tenants/{tenant_id}/users/{user_id}/roles/OS-KSADM/{role_id}",
controller=roles_controller, action="add_role_to_user",
conditions=dict(method=["POST"]))
mapper.connect(
"/tenants/{tenant_id}/users/{user_id}/roles/OS-KSADM/{role_id}",
controller=roles_controller, action="delete_role_from_user",
conditions=dict(method=["DELETE"]))
mapper.connect("/users/{user_id}/roleRefs",
controller=roles_controller, action="get_role_refs",
conditions=dict(method=["GET"]))
| {
"content_hash": "74347b94a4cfce043421c5f9950f2d68",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 76,
"avg_line_length": 48.78333333333333,
"alnum_prop": 0.6006149641270926,
"repo_name": "pvo/keystone",
"id": "5b34c18876c67c1f0ceedf0204e6888a1631025f",
"size": "3575",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystone/contrib/extensions/admin/osksadm/extension_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "67937"
},
{
"name": "Python",
"bytes": "639506"
},
{
"name": "Shell",
"bytes": "4547"
}
],
"symlink_target": ""
} |
from sqlalchemy import Column, String, Integer, DATETIME, Boolean
from datetime import datetime
from .utils import Base
from .abstract import QkouBase
from static import LEC_INFO_ID_TEMPLATE, LEC_INFO_TEMPLATE
class Info(Base, QkouBase):
"""
Model for lecture information. This model relates to the Subject model.
"""
__tablename__ = 'lec_info'
id = Column(Integer, primary_key=True, autoincrement=True)
title = Column(String(length=150))
teacher = Column(String(length=150))
abstract = Column(String(length=40))
detail = Column(String(length=2000))
week = Column(String(length=10))
period = Column(String(length=10))
unique_hash = Column(String(length=255), unique=True)
renew_hash = Column(String(length=255))
first = Column(DATETIME)
updated_date = Column(DATETIME)
created_at = Column(DATETIME, default=datetime.now())
last_confirmed = Column(DATETIME)
is_deleted = Column(Boolean, unique=False, default=False)
def __init__(self, title: str, teacher: str, week: str, period: str, abstract: str,
detail: str, first: str, updated_date: str, created_at: datetime=datetime.now()):
self.week = week
self.period = period
self.title = title
self.teacher = teacher
self.abstract = abstract
self.detail = detail
self.first = self.convert_datetime(first)
self.updated_date = self.convert_datetime(updated_date)
self.unique_hash = self.make_unique_hash(title, teacher, week, period, abstract, first)
self.renew_hash = self.make_unique_hash(detail, updated_date)
self.created_at = created_at
self.last_confirmed = created_at
@property
def table_name(self) -> str:
"""
Return:
table name
"""
return self.__tablename__
@property
def tweet_text(self) -> str:
"""
Return:
tweet text under 140 characters.
"""
unformatted = self.__str__()
num = LEC_INFO_ID_TEMPLATE.format(id=self.id)
return unformatted[0:131] + num if len(unformatted) > 131 else unformatted + num
def __str__(self) -> str:
return LEC_INFO_TEMPLATE.format(subject=self.title, teacher=self.teacher,
week=self.week, period=self.period,
abstract=self.abstract, detail=self.detail)
def __repr__(self) -> str:
return "<Info '{title}' '{week}' '{period}' '{date}'>".format(title=self.title,
week=self.week,
period=self.period,
date=self.first.strftime("%Y/%m/%d"))
| {
"content_hash": "eba329d9e617af5de226780a0aea64c4",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 107,
"avg_line_length": 39.486111111111114,
"alnum_prop": 0.5768554344002814,
"repo_name": "pddg/qkouserver",
"id": "0a1774b769c590818331b6bb6c763254454d5729",
"size": "2843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qkoubot/models/info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "87540"
},
{
"name": "Shell",
"bytes": "3826"
}
],
"symlink_target": ""
} |
"""
deserializers.form_data
~~~~~~~~~~~~~~~~~~~~~~~~~
Deserializer that is compliant with the RFC 2388
multipart/form-data format
It's broken down into 2 parts:
* A parser for spec compliant validations
* A normalizer for converting into a common
format expected by our resources/responders.
WARN: Currently we only accept 1 upload at a time!
An error message will be sent if more than 1
upload is detected.
"""
import cgi
import goldman
import goldman.exceptions as exceptions
import goldman.extensions as extensions
from ..deserializers.base import Deserializer as BaseDeserializer
from goldman.utils.error_helpers import abort
class Deserializer(BaseDeserializer):
""" RFC 2388 compliant deserializer """
MIMETYPE = goldman.FILEUPLOAD_MIMETYPE
def deserialize(self, mimetypes): # pylint: disable=arguments-differ
""" Invoke the deserializer
Upon successful deserialization a dict will be returned
containing the following key/vals:
{
'content': <uploaded object>,
'content-type': <content-type of content>,
'file-ext': <file extension based on content-type>,
'file-name': <file name of content>,
}
:param mimetypes:
allowed mimetypes of the object in the request
payload
:return:
normalized dict
"""
super(Deserializer, self).deserialize()
parts = self.parse(mimetypes)
data = self.normalize(parts)
return data
def normalize(self, parts):
""" Invoke the RFC 2388 spec compliant normalizer
:param parts:
the already vetted & parsed FieldStorage objects
:return:
normalized dict
"""
part = parts.list[0]
return {
'content': part.file.read(),
'content-type': part.type,
'file-ext': extensions.get(part.type),
'file-name': part.filename,
}
def _parse_top_level_content_type(self):
""" Ensure a boundary is present in the Content-Type header
This is the Content-Type header outside of any form-data
& should simply be:
Content-Type: multipart/form-data; boundary=<value>\r\n
This is generated by the client obviously & should not
occur within the uploaded payload.
"""
if not self.req.content_type_params.get('boundary'):
abort(exceptions.InvalidRequestHeader(**{
'detail': 'A boundary param is required in the Content-Type '
'header & cannot be an empty string. The details '
'of its grammar are further outlined in RFC 2046 '
'- section 5.1.1.',
'links': 'tools.ietf.org/html/rfc2388#section-4.1',
}))
def _parse_section_three(self, part, mimetypes):
""" Parse & validate a part according to section #3
The logic applied follows section 3 guidelines from top
to bottom.
"""
link = 'tools.ietf.org/html/rfc2388#section-3'
if part.disposition != 'form-data' or not part.name:
self.fail('Each part of a multipart/form-data requires a '
'Content-Disposition header with a disposition type '
'of "form-data" AND a unique "name" parameter.', link)
elif part.type.lower() not in mimetypes:
allowed = ', '.join(mimetypes)
self.fail('Invalid upload Content-Type. Each part of the '
'multipart/form-data upload MUST be one of: %s. '
'%s is not allowed.' % (allowed, part.type), link)
def _parse_part(self, part, mimetypes):
""" Validate each part of the multipart per RFC 2388 ""
:param part:
a FieldStorage object
"""
self._parse_section_three(part, mimetypes)
def parse(self, mimetypes):
""" Invoke the RFC 2388 spec compliant parser """
self._parse_top_level_content_type()
link = 'tools.ietf.org/html/rfc2388'
parts = cgi.FieldStorage(
fp=self.req.stream,
environ=self.req.env,
)
if not parts:
self.fail('A payload in the body of your request is required '
'& must be encapsulated by the boundary with proper '
'headers according to RFC 2388', link)
elif len(parts) > 1:
self.fail('Currently, only 1 upload at a time is allowed. Please '
'break up your request into %s individual requests & '
'retry' % len(parts), link)
else:
self._parse_part(parts.list[0], mimetypes)
return parts
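# Illustrative, standalone sketch, not used by the class above: shows how a raw
# multipart/form-data body is parsed with cgi.FieldStorage, the same mechanism
# parse() relies on. The boundary, field name and file contents are made up.
def _example_fieldstorage_parse():
    import io

    body = (
        b'--demoboundary\r\n'
        b'Content-Disposition: form-data; name="upload"; filename="hi.txt"\r\n'
        b'Content-Type: text/plain\r\n'
        b'\r\n'
        b'hello world\r\n'
        b'--demoboundary--\r\n'
    )
    environ = {
        'REQUEST_METHOD': 'POST',
        'CONTENT_TYPE': 'multipart/form-data; boundary=demoboundary',
        'CONTENT_LENGTH': str(len(body)),
    }
    parts = cgi.FieldStorage(fp=io.BytesIO(body), environ=environ)
    part = parts.list[0]
    # part.name == 'upload', part.filename == 'hi.txt', part.type == 'text/plain'
    return part.name, part.filename, part.file.read()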
| {
"content_hash": "14a4091e74dfb9dc94e8f7107871700b",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 78,
"avg_line_length": 32.666666666666664,
"alnum_prop": 0.5789795918367346,
"repo_name": "sassoo/goldman",
"id": "efade46a82de7b2fa85fd8d2649618089d5eaa83",
"size": "4900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "goldman/deserializers/form_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "198658"
}
],
"symlink_target": ""
} |
"""
SQLAlchemy models for nova data.
"""
from sqlalchemy import Column, Index, Integer, BigInteger, Enum, String, schema
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
from sqlalchemy.orm import relationship, backref, object_mapper
from oslo.config import cfg
from nova.db.sqlalchemy import types
from nova.openstack.common.db.sqlalchemy import models
from nova.openstack.common import timeutils
CONF = cfg.CONF
BASE = declarative_base()
def MediumText():
return Text().with_variant(MEDIUMTEXT(), 'mysql')
class NovaBase(models.SoftDeleteMixin,
models.TimestampMixin,
models.ModelBase):
metadata = None
class Service(BASE, NovaBase):
"""Represents a running service on a host."""
__tablename__ = 'services'
__table_args__ = (
schema.UniqueConstraint("host", "topic", "deleted",
name="uniq_services0host0topic0deleted"),
schema.UniqueConstraint("host", "binary", "deleted",
name="uniq_services0host0binary0deleted")
)
id = Column(Integer, primary_key=True)
host = Column(String(255)) # , ForeignKey('hosts.id'))
binary = Column(String(255))
topic = Column(String(255))
report_count = Column(Integer, nullable=False, default=0)
disabled = Column(Boolean, default=False)
disabled_reason = Column(String(255))
class ComputeNode(BASE, NovaBase):
"""Represents a running compute service on a host."""
__tablename__ = 'compute_nodes'
__table_args__ = ()
id = Column(Integer, primary_key=True)
service_id = Column(Integer, ForeignKey('services.id'), nullable=False)
service = relationship(Service,
backref=backref('compute_node'),
foreign_keys=service_id,
primaryjoin='and_('
'ComputeNode.service_id == Service.id,'
'ComputeNode.deleted == 0)')
vcpus = Column(Integer, nullable=False)
memory_mb = Column(Integer, nullable=False)
local_gb = Column(Integer, nullable=False)
vcpus_used = Column(Integer, nullable=False)
memory_mb_used = Column(Integer, nullable=False)
local_gb_used = Column(Integer, nullable=False)
hypervisor_type = Column(MediumText(), nullable=False)
hypervisor_version = Column(Integer, nullable=False)
hypervisor_hostname = Column(String(255))
hypervisor_qos = Column(String(255))
# Free Ram, amount of activity (resize, migration, boot, etc) and
# the number of running VM's are a good starting point for what's
# important when making scheduling decisions.
free_ram_mb = Column(Integer)
free_disk_gb = Column(Integer)
current_workload = Column(Integer)
running_vms = Column(Integer)
# Note(masumotok): Expected Strings example:
#
# '{"arch":"x86_64",
# "model":"Nehalem",
# "topology":{"sockets":1, "threads":2, "cores":3},
# "features":["tdtscp", "xtpr"]}'
#
# Points are "json translatable" and it must have all dictionary keys
# above, since it is copied from <cpu> tag of getCapabilities()
# (See libvirt.virtConnection).
cpu_info = Column(MediumText(), nullable=False)
disk_available_least = Column(Integer)
host_ip = Column(types.IPAddress())
supported_instances = Column(Text)
# Note(yongli): json string PCI Stats
# '{"vendor_id":"8086", "product_id":"1234", "count":3 }'
pci_stats = Column(Text)
class ComputeNodeStat(BASE, NovaBase):
"""Stats related to the current workload of a compute host that are
intended to aid in making scheduler decisions.
"""
__tablename__ = 'compute_node_stats'
__table_args__ = (
Index('ix_compute_node_stats_compute_node_id', 'compute_node_id'),
Index('compute_node_stats_node_id_and_deleted_idx',
'compute_node_id', 'deleted')
)
id = Column(Integer, primary_key=True)
key = Column(String(255), nullable=False)
value = Column(String(255))
compute_node_id = Column(Integer, ForeignKey('compute_nodes.id'),
nullable=False)
primary_join = ('and_(ComputeNodeStat.compute_node_id == '
'ComputeNode.id, ComputeNodeStat.deleted == 0)')
stats = relationship("ComputeNode", backref="stats",
primaryjoin=primary_join)
def __str__(self):
return "{%d: %s = %s}" % (self.compute_node_id, self.key, self.value)
class Certificate(BASE, NovaBase):
"""Represents a x509 certificate."""
__tablename__ = 'certificates'
__table_args__ = (
Index('certificates_project_id_deleted_idx', 'project_id', 'deleted'),
Index('certificates_user_id_deleted_idx', 'user_id', 'deleted')
)
id = Column(Integer, primary_key=True)
user_id = Column(String(255))
project_id = Column(String(255))
file_name = Column(String(255))
class Instance(BASE, NovaBase):
"""Represents a guest VM."""
__tablename__ = 'instances'
__table_args__ = (
Index('uuid', 'uuid', unique=True),
Index('project_id', 'project_id'),
Index('instances_host_deleted_idx',
'host', 'deleted'),
Index('instances_reservation_id_idx',
'reservation_id'),
Index('instances_terminated_at_launched_at_idx',
'terminated_at', 'launched_at'),
Index('instances_uuid_deleted_idx',
'uuid', 'deleted'),
Index('instances_task_state_updated_at_idx',
'task_state', 'updated_at'),
Index('instances_host_node_deleted_idx',
'host', 'node', 'deleted'),
Index('instances_host_deleted_cleaned_idx',
'host', 'deleted', 'cleaned'),
)
injected_files = []
id = Column(Integer, primary_key=True, autoincrement=True)
@property
def name(self):
try:
base_name = CONF.instance_name_template % self.id
except TypeError:
# Support templates like "uuid-%(uuid)s", etc.
info = {}
# NOTE(russellb): Don't use self.iteritems() here, as it will
# result in infinite recursion on the name property.
for column in iter(object_mapper(self).columns):
key = column.name
# prevent recursion if someone specifies %(name)s
# %(name)s will not be valid.
if key == 'name':
continue
info[key] = self[key]
try:
base_name = CONF.instance_name_template % info
except KeyError:
base_name = self.uuid
return base_name
def _extra_keys(self):
return ['name']
user_id = Column(String(255))
project_id = Column(String(255))
image_ref = Column(String(255))
kernel_id = Column(String(255))
ramdisk_id = Column(String(255))
hostname = Column(String(255))
launch_index = Column(Integer)
key_name = Column(String(255))
key_data = Column(MediumText())
power_state = Column(Integer)
vm_state = Column(String(255))
task_state = Column(String(255))
memory_mb = Column(Integer)
vcpus = Column(Integer)
root_gb = Column(Integer)
ephemeral_gb = Column(Integer)
# This is not related to hostname, above. It refers
# to the nova node.
host = Column(String(255)) # , ForeignKey('hosts.id'))
    # Identifies the "ComputeNode" on which the instance resides.
    # This equals ComputeNode.hypervisor_hostname.
node = Column(String(255))
# *not* flavorid, this is the internal primary_key
instance_type_id = Column(Integer)
user_data = Column(MediumText())
reservation_id = Column(String(255))
scheduled_at = Column(DateTime)
launched_at = Column(DateTime)
terminated_at = Column(DateTime)
availability_zone = Column(String(255))
# User editable field for display in user-facing UIs
display_name = Column(String(255))
display_description = Column(String(255))
# To remember on which host an instance booted.
# An instance may have moved to another host by live migration.
launched_on = Column(MediumText())
# NOTE(jdillaman): locked deprecated in favor of locked_by,
# to be removed in Icehouse
locked = Column(Boolean)
locked_by = Column(Enum('owner', 'admin'))
os_type = Column(String(255))
architecture = Column(String(255))
vm_mode = Column(String(255))
uuid = Column(String(36))
root_device_name = Column(String(255))
default_ephemeral_device = Column(String(255))
default_swap_device = Column(String(255))
config_drive = Column(String(255))
# User editable field meant to represent what ip should be used
# to connect to the instance
access_ip_v4 = Column(types.IPAddress())
access_ip_v6 = Column(types.IPAddress())
auto_disk_config = Column(Boolean())
progress = Column(Integer)
# EC2 instance_initiated_shutdown_terminate
# True: -> 'terminate'
# False: -> 'stop'
# Note(maoy): currently Nova will always stop instead of terminate
# no matter what the flag says. So we set the default to False.
shutdown_terminate = Column(Boolean(), default=False)
# EC2 disable_api_termination
disable_terminate = Column(Boolean(), default=False)
# OpenStack compute cell name. This will only be set at the top of
# the cells tree and it'll be a full cell name such as 'api!hop1!hop2'
cell_name = Column(String(255))
internal_id = Column(Integer)
# Records whether an instance has been deleted from disk
cleaned = Column(Integer, default=0)
class InstanceInfoCache(BASE, NovaBase):
"""
Represents a cache of information about an instance
"""
__tablename__ = 'instance_info_caches'
__table_args__ = (
schema.UniqueConstraint(
"instance_uuid",
name="uniq_instance_info_caches0instance_uuid"),)
id = Column(Integer, primary_key=True, autoincrement=True)
# text column used for storing a json object of network data for api
network_info = Column(MediumText())
instance_uuid = Column(String(36), ForeignKey('instances.uuid'),
nullable=False)
instance = relationship(Instance,
backref=backref('info_cache', uselist=False),
foreign_keys=instance_uuid,
primaryjoin=instance_uuid == Instance.uuid)
class InstanceTypes(BASE, NovaBase):
"""Represents possible flavors for instances.
Note: instance_type and flavor are synonyms and the term instance_type is
deprecated and in the process of being removed.
"""
__tablename__ = "instance_types"
__table_args__ = (
schema.UniqueConstraint("flavorid", "deleted",
name="uniq_instance_types0flavorid0deleted"),
schema.UniqueConstraint("name", "deleted",
name="uniq_instance_types0name0deleted")
)
# Internal only primary key/id
id = Column(Integer, primary_key=True)
name = Column(String(255))
memory_mb = Column(Integer, nullable=False)
vcpus = Column(Integer, nullable=False)
root_gb = Column(Integer)
ephemeral_gb = Column(Integer)
# Public facing id will be renamed public_id
flavorid = Column(String(255))
swap = Column(Integer, nullable=False, default=0)
rxtx_factor = Column(Float, default=1)
vcpu_weight = Column(Integer)
disabled = Column(Boolean, default=False)
is_public = Column(Boolean, default=True)
class Volume(BASE, NovaBase):
"""Represents a block storage device that can be attached to a VM."""
__tablename__ = 'volumes'
__table_args__ = (
Index('volumes_instance_uuid_idx', 'instance_uuid'),
)
id = Column(String(36), primary_key=True, nullable=False)
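    # The soft-delete marker is overridden below because volume ids are
    # UUID strings rather than integers.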
deleted = Column(String(36), default="")
@property
def name(self):
return CONF.volume_name_template % self.id
ec2_id = Column(String(255))
user_id = Column(String(255))
project_id = Column(String(255))
snapshot_id = Column(String(36))
host = Column(String(255))
size = Column(Integer)
availability_zone = Column(String(255))
instance_uuid = Column(String(36))
mountpoint = Column(String(255))
attach_time = Column(DateTime)
status = Column(String(255)) # TODO(vish): enum?
attach_status = Column(String(255)) # TODO(vish): enum
scheduled_at = Column(DateTime)
launched_at = Column(DateTime)
terminated_at = Column(DateTime)
display_name = Column(String(255))
display_description = Column(String(255))
provider_location = Column(String(256))
provider_auth = Column(String(256))
volume_type_id = Column(Integer)
class Quota(BASE, NovaBase):
"""Represents a single quota override for a project.
If there is no row for a given project id and resource, then the
default for the quota class is used. If there is no row for a
given quota class and resource, then the default for the
deployment is used. If the row is present but the hard limit is
Null, then the resource is unlimited.
"""
__tablename__ = 'quotas'
__table_args__ = (
schema.UniqueConstraint("project_id", "resource", "deleted",
name="uniq_quotas0project_id0resource0deleted"
),
)
id = Column(Integer, primary_key=True)
project_id = Column(String(255))
resource = Column(String(255), nullable=False)
hard_limit = Column(Integer)
class ProjectUserQuota(BASE, NovaBase):
"""Represents a single quota override for a user with in a project."""
__tablename__ = 'project_user_quotas'
uniq_name = "uniq_project_user_quotas0user_id0project_id0resource0deleted"
__table_args__ = (
schema.UniqueConstraint("user_id", "project_id", "resource", "deleted",
name=uniq_name),
Index('project_user_quotas_project_id_deleted_idx',
'project_id', 'deleted'),
Index('project_user_quotas_user_id_deleted_idx',
'user_id', 'deleted')
)
id = Column(Integer, primary_key=True, nullable=False)
project_id = Column(String(255), nullable=False)
user_id = Column(String(255), nullable=False)
resource = Column(String(255), nullable=False)
hard_limit = Column(Integer)
class QuotaClass(BASE, NovaBase):
"""Represents a single quota override for a quota class.
If there is no row for a given quota class and resource, then the
default for the deployment is used. If the row is present but the
hard limit is Null, then the resource is unlimited.
"""
__tablename__ = 'quota_classes'
__table_args__ = (
Index('ix_quota_classes_class_name', 'class_name'),
)
id = Column(Integer, primary_key=True)
class_name = Column(String(255))
resource = Column(String(255))
hard_limit = Column(Integer)
class QuotaUsage(BASE, NovaBase):
"""Represents the current usage for a given resource."""
__tablename__ = 'quota_usages'
__table_args__ = (
Index('ix_quota_usages_project_id', 'project_id'),
)
id = Column(Integer, primary_key=True)
project_id = Column(String(255))
user_id = Column(String(255))
resource = Column(String(255), nullable=False)
in_use = Column(Integer, nullable=False)
reserved = Column(Integer, nullable=False)
@property
def total(self):
return self.in_use + self.reserved
until_refresh = Column(Integer)
class Reservation(BASE, NovaBase):
"""Represents a resource reservation for quotas."""
__tablename__ = 'reservations'
__table_args__ = (
Index('ix_reservations_project_id', 'project_id'),
Index('reservations_uuid_idx', 'uuid'),
)
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36), nullable=False)
usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False)
project_id = Column(String(255))
user_id = Column(String(255))
resource = Column(String(255))
delta = Column(Integer, nullable=False)
expire = Column(DateTime)
usage = relationship(
"QuotaUsage",
foreign_keys=usage_id,
primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,'
'QuotaUsage.deleted == 0)')
class Snapshot(BASE, NovaBase):
"""Represents a block storage device that can be attached to a VM."""
__tablename__ = 'snapshots'
__table_args__ = ()
id = Column(String(36), primary_key=True, nullable=False)
deleted = Column(String(36), default="")
@property
def name(self):
return CONF.snapshot_name_template % self.id
@property
def volume_name(self):
return CONF.volume_name_template % self.volume_id
user_id = Column(String(255))
project_id = Column(String(255))
volume_id = Column(String(36), nullable=False)
status = Column(String(255))
progress = Column(String(255))
volume_size = Column(Integer)
scheduled_at = Column(DateTime)
display_name = Column(String(255))
display_description = Column(String(255))
class BlockDeviceMapping(BASE, NovaBase):
"""Represents block device mapping that is defined by EC2."""
__tablename__ = "block_device_mapping"
__table_args__ = (
Index('snapshot_id', 'snapshot_id'),
Index('volume_id', 'volume_id'),
Index('block_device_mapping_instance_uuid_device_name_idx',
'instance_uuid', 'device_name'),
Index('block_device_mapping_instance_uuid_volume_id_idx',
'instance_uuid', 'volume_id'),
Index('block_device_mapping_instance_uuid_idx', 'instance_uuid'),
        #TODO(sshturm) Should be dropped. `virtual_name` was dropped in the
        #186 migration; this duplicates the
        #`block_device_mapping_instance_uuid_device_name_idx` index.
Index("block_device_mapping_instance_uuid_virtual_name"
"_device_name_idx", 'instance_uuid', 'device_name'),
)
id = Column(Integer, primary_key=True, autoincrement=True)
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
instance = relationship(Instance,
backref=backref('block_device_mapping'),
foreign_keys=instance_uuid,
primaryjoin='and_(BlockDeviceMapping.'
'instance_uuid=='
'Instance.uuid,'
'BlockDeviceMapping.deleted=='
'0)')
source_type = Column(String(255))
destination_type = Column(String(255))
guest_format = Column(String(255))
device_type = Column(String(255))
disk_bus = Column(String(255))
boot_index = Column(Integer)
device_name = Column(String(255))
    # default=False for compatibility with the existing code.
    # With the EC2 API, the default is True for a device specified by the
    # AMI and False for devices created at other times.
#TODO(sshturm) add default in db
delete_on_termination = Column(Boolean, default=False)
snapshot_id = Column(String(36))
volume_id = Column(String(36))
volume_size = Column(Integer)
image_id = Column(String(36))
    # no_device is set to suppress a device in the block device mapping.
no_device = Column(Boolean)
connection_info = Column(MediumText())
class IscsiTarget(BASE, NovaBase):
"""Represents an iscsi target for a given host."""
__tablename__ = 'iscsi_targets'
__table_args__ = (
Index('iscsi_targets_volume_id_fkey', 'volume_id'),
Index('iscsi_targets_host_idx', 'host'),
Index('iscsi_targets_host_volume_id_deleted_idx', 'host', 'volume_id',
'deleted')
)
id = Column(Integer, primary_key=True, nullable=False)
target_num = Column(Integer)
host = Column(String(255))
volume_id = Column(String(36), ForeignKey('volumes.id'))
volume = relationship(Volume,
backref=backref('iscsi_target', uselist=False),
foreign_keys=volume_id,
primaryjoin='and_(IscsiTarget.volume_id==Volume.id,'
'IscsiTarget.deleted==0)')
class SecurityGroupInstanceAssociation(BASE, NovaBase):
__tablename__ = 'security_group_instance_association'
__table_args__ = (
Index('security_group_instance_association_instance_uuid_idx',
'instance_uuid'),
)
id = Column(Integer, primary_key=True, nullable=False)
security_group_id = Column(Integer, ForeignKey('security_groups.id'))
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
class SecurityGroup(BASE, NovaBase):
"""Represents a security group."""
__tablename__ = 'security_groups'
__table_args__ = (
Index('uniq_security_groups0project_id0name0deleted', 'project_id',
'name', 'deleted'),
)
id = Column(Integer, primary_key=True)
name = Column(String(255))
description = Column(String(255))
user_id = Column(String(255))
project_id = Column(String(255))
instances = relationship(Instance,
secondary="security_group_instance_association",
primaryjoin='and_('
'SecurityGroup.id == '
'SecurityGroupInstanceAssociation.security_group_id,'
'SecurityGroupInstanceAssociation.deleted == 0,'
'SecurityGroup.deleted == 0)',
secondaryjoin='and_('
'SecurityGroupInstanceAssociation.instance_uuid == Instance.uuid,'
# (anthony) the condition below shouldn't be necessary now that the
# association is being marked as deleted. However, removing this
# may cause existing deployments to choke, so I'm leaving it
'Instance.deleted == 0)',
backref='security_groups')
class SecurityGroupIngressRule(BASE, NovaBase):
"""Represents a rule in a security group."""
__tablename__ = 'security_group_rules'
__table_args__ = ()
id = Column(Integer, primary_key=True)
parent_group_id = Column(Integer, ForeignKey('security_groups.id'))
parent_group = relationship("SecurityGroup", backref="rules",
foreign_keys=parent_group_id,
primaryjoin='and_('
'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,'
'SecurityGroupIngressRule.deleted == 0)')
protocol = Column(String(255))
from_port = Column(Integer)
to_port = Column(Integer)
cidr = Column(types.CIDR())
    # Note: This is not the parent SecurityGroup. It is the SecurityGroup
    # we are granting access to.
group_id = Column(Integer, ForeignKey('security_groups.id'))
grantee_group = relationship("SecurityGroup",
foreign_keys=group_id,
primaryjoin='and_('
'SecurityGroupIngressRule.group_id == SecurityGroup.id,'
'SecurityGroupIngressRule.deleted == 0)')
class SecurityGroupIngressDefaultRule(BASE, NovaBase):
__tablename__ = 'security_group_default_rules'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False)
protocol = Column(String(5)) # "tcp", "udp" or "icmp"
from_port = Column(Integer)
to_port = Column(Integer)
cidr = Column(types.CIDR())
class ProviderFirewallRule(BASE, NovaBase):
"""Represents a rule in a security group."""
__tablename__ = 'provider_fw_rules'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False)
protocol = Column(String(5)) # "tcp", "udp", or "icmp"
from_port = Column(Integer)
to_port = Column(Integer)
cidr = Column(types.CIDR())
class KeyPair(BASE, NovaBase):
"""Represents a public key pair for ssh."""
__tablename__ = 'key_pairs'
__table_args__ = (
schema.UniqueConstraint("user_id", "name", "deleted",
name="uniq_key_pairs0user_id0name0deleted"),
)
id = Column(Integer, primary_key=True, nullable=False)
name = Column(String(255))
user_id = Column(String(255))
fingerprint = Column(String(255))
public_key = Column(MediumText())
class Migration(BASE, NovaBase):
"""Represents a running host-to-host migration."""
__tablename__ = 'migrations'
__table_args__ = (
Index('migrations_instance_uuid_and_status_idx', 'instance_uuid',
'status'),
Index('migrations_by_host_nodes_and_status_idx', 'deleted',
'source_compute', 'dest_compute', 'source_node', 'dest_node',
'status'),
)
id = Column(Integer, primary_key=True, nullable=False)
# NOTE(tr3buchet): the ____compute variables are instance['host']
source_compute = Column(String(255))
dest_compute = Column(String(255))
    # nodes are equivalent to a compute node's 'hypervisor_hostname'
source_node = Column(String(255))
dest_node = Column(String(255))
# NOTE(tr3buchet): dest_host, btw, is an ip address
dest_host = Column(String(255))
old_instance_type_id = Column(Integer())
new_instance_type_id = Column(Integer())
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
#TODO(_cerberus_): enum
status = Column(String(255))
instance = relationship("Instance", foreign_keys=instance_uuid,
primaryjoin='and_(Migration.instance_uuid == '
'Instance.uuid, Instance.deleted == '
'0)')
class Network(BASE, NovaBase):
"""Represents a network."""
__tablename__ = 'networks'
__table_args__ = (
schema.UniqueConstraint("vlan", "deleted",
name="uniq_networks0vlan0deleted"),
Index('networks_bridge_deleted_idx', 'bridge', 'deleted'),
Index('networks_host_idx', 'host'),
Index('networks_project_id_deleted_idx', 'project_id', 'deleted'),
Index('networks_uuid_project_id_deleted_idx', 'uuid',
'project_id', 'deleted'),
Index('networks_vlan_deleted_idx', 'vlan', 'deleted'),
Index('networks_cidr_v6_idx', 'cidr_v6')
)
id = Column(Integer, primary_key=True, nullable=False)
label = Column(String(255))
injected = Column(Boolean, default=False)
cidr = Column(types.CIDR())
cidr_v6 = Column(types.CIDR())
multi_host = Column(Boolean, default=False)
gateway_v6 = Column(types.IPAddress())
netmask_v6 = Column(types.IPAddress())
netmask = Column(types.IPAddress())
bridge = Column(String(255))
bridge_interface = Column(String(255))
gateway = Column(types.IPAddress())
broadcast = Column(types.IPAddress())
dns1 = Column(types.IPAddress())
dns2 = Column(types.IPAddress())
vlan = Column(Integer)
vpn_public_address = Column(types.IPAddress())
vpn_public_port = Column(Integer)
vpn_private_address = Column(types.IPAddress())
dhcp_start = Column(types.IPAddress())
rxtx_base = Column(Integer)
project_id = Column(String(255))
priority = Column(Integer)
host = Column(String(255)) # , ForeignKey('hosts.id'))
uuid = Column(String(36))
class VirtualInterface(BASE, NovaBase):
"""Represents a virtual interface on an instance."""
__tablename__ = 'virtual_interfaces'
__table_args__ = (
schema.UniqueConstraint("address", "deleted",
name="uniq_virtual_interfaces0address0deleted"),
Index('network_id', 'network_id'),
Index('virtual_interfaces_instance_uuid_fkey', 'instance_uuid'),
)
id = Column(Integer, primary_key=True, nullable=False)
address = Column(String(255))
network_id = Column(Integer)
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
uuid = Column(String(36))
# TODO(vish): can these both come from the same baseclass?
class FixedIp(BASE, NovaBase):
"""Represents a fixed ip for an instance."""
__tablename__ = 'fixed_ips'
__table_args__ = (
schema.UniqueConstraint(
"address", "deleted", name="uniq_fixed_ips0address0deleted"),
Index('fixed_ips_virtual_interface_id_fkey', 'virtual_interface_id'),
Index('network_id', 'network_id'),
Index('address', 'address'),
Index('fixed_ips_instance_uuid_fkey', 'instance_uuid'),
Index('fixed_ips_host_idx', 'host'),
Index('fixed_ips_network_id_host_deleted_idx', 'network_id', 'host',
'deleted'),
Index('fixed_ips_address_reserved_network_id_deleted_idx',
'address', 'reserved', 'network_id', 'deleted'),
Index('fixed_ips_deleted_allocated_idx', 'address', 'deleted',
'allocated')
)
id = Column(Integer, primary_key=True)
address = Column(types.IPAddress())
network_id = Column(Integer)
virtual_interface_id = Column(Integer)
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    # associated means that a fixed_ip has its instance_uuid column set
# allocated means that a fixed_ip has its virtual_interface_id column set
#TODO(sshturm) add default in db
allocated = Column(Boolean, default=False)
# leased means dhcp bridge has leased the ip
#TODO(sshturm) add default in db
leased = Column(Boolean, default=False)
#TODO(sshturm) add default in db
reserved = Column(Boolean, default=False)
host = Column(String(255))
network = relationship(Network,
backref=backref('fixed_ips'),
foreign_keys=network_id,
primaryjoin='and_('
'FixedIp.network_id == Network.id,'
'FixedIp.deleted == 0,'
'Network.deleted == 0)')
instance = relationship(Instance,
foreign_keys=instance_uuid,
primaryjoin='and_('
'FixedIp.instance_uuid == Instance.uuid,'
'FixedIp.deleted == 0,'
'Instance.deleted == 0)')
class FloatingIp(BASE, NovaBase):
"""Represents a floating ip that dynamically forwards to a fixed ip."""
__tablename__ = 'floating_ips'
__table_args__ = (
schema.UniqueConstraint("address", "deleted",
name="uniq_floating_ips0address0deleted"),
Index('fixed_ip_id', 'fixed_ip_id'),
Index('floating_ips_host_idx', 'host'),
Index('floating_ips_project_id_idx', 'project_id'),
Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
'pool', 'deleted', 'fixed_ip_id', 'project_id')
)
id = Column(Integer, primary_key=True)
address = Column(types.IPAddress())
fixed_ip_id = Column(Integer)
project_id = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
auto_assigned = Column(Boolean, default=False)
#TODO(sshturm) add default in db
pool = Column(String(255))
interface = Column(String(255))
fixed_ip = relationship(FixedIp,
backref=backref('floating_ips'),
foreign_keys=fixed_ip_id,
primaryjoin='and_('
'FloatingIp.fixed_ip_id == FixedIp.id,'
'FloatingIp.deleted == 0,'
'FixedIp.deleted == 0)')
class DNSDomain(BASE, NovaBase):
"""Represents a DNS domain with availability zone or project info."""
__tablename__ = 'dns_domains'
__table_args__ = (
Index('project_id', 'project_id'),
Index('dns_domains_domain_deleted_idx', 'domain', 'deleted'),
)
deleted = Column(Boolean, default=False)
domain = Column(String(255), primary_key=True)
scope = Column(String(255))
availability_zone = Column(String(255))
project_id = Column(String(255))
class ConsolePool(BASE, NovaBase):
"""Represents pool of consoles on the same physical node."""
__tablename__ = 'console_pools'
__table_args__ = (
schema.UniqueConstraint(
"host", "console_type", "compute_host", "deleted",
name="uniq_console_pools0host0console_type0compute_host0deleted"),
)
id = Column(Integer, primary_key=True)
address = Column(types.IPAddress())
username = Column(String(255))
password = Column(String(255))
console_type = Column(String(255))
public_hostname = Column(String(255))
host = Column(String(255))
compute_host = Column(String(255))
class Console(BASE, NovaBase):
"""Represents a console session for an instance."""
__tablename__ = 'consoles'
__table_args__ = (
Index('consoles_instance_uuid_idx', 'instance_uuid'),
)
id = Column(Integer, primary_key=True)
instance_name = Column(String(255))
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
password = Column(String(255))
port = Column(Integer)
pool_id = Column(Integer, ForeignKey('console_pools.id'))
pool = relationship(ConsolePool, backref=backref('consoles'))
class InstanceMetadata(BASE, NovaBase):
"""Represents a user-provided metadata key/value pair for an instance."""
__tablename__ = 'instance_metadata'
__table_args__ = (
Index('instance_metadata_instance_uuid_idx', 'instance_uuid'),
)
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
instance = relationship(Instance, backref="metadata",
foreign_keys=instance_uuid,
primaryjoin='and_('
'InstanceMetadata.instance_uuid == '
'Instance.uuid,'
'InstanceMetadata.deleted == 0)')
class InstanceSystemMetadata(BASE, NovaBase):
"""Represents a system-owned metadata key/value pair for an instance."""
__tablename__ = 'instance_system_metadata'
__table_args__ = ()
id = Column(Integer, primary_key=True)
key = Column(String(255), nullable=False)
value = Column(String(255))
instance_uuid = Column(String(36),
ForeignKey('instances.uuid'),
nullable=False)
primary_join = ('and_(InstanceSystemMetadata.instance_uuid == '
'Instance.uuid, InstanceSystemMetadata.deleted == 0)')
instance = relationship(Instance, backref="system_metadata",
foreign_keys=instance_uuid,
primaryjoin=primary_join)
class InstanceTypeProjects(BASE, NovaBase):
"""Represent projects associated instance_types."""
__tablename__ = "instance_type_projects"
__table_args__ = (schema.UniqueConstraint(
"instance_type_id", "project_id", "deleted",
name="uniq_instance_type_projects0instance_type_id0project_id0deleted"
),
)
id = Column(Integer, primary_key=True)
instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
nullable=False)
project_id = Column(String(255))
instance_type = relationship(InstanceTypes, backref="projects",
foreign_keys=instance_type_id,
primaryjoin='and_('
'InstanceTypeProjects.instance_type_id == InstanceTypes.id,'
'InstanceTypeProjects.deleted == 0)')
class InstanceTypeExtraSpecs(BASE, NovaBase):
"""Represents additional specs as key/value pairs for an instance_type."""
__tablename__ = 'instance_type_extra_specs'
__table_args__ = (
Index('instance_type_extra_specs_instance_type_id_key_idx',
'instance_type_id', 'key'),
schema.UniqueConstraint(
"instance_type_id", "key", "deleted",
name=("uniq_instance_type_extra_specs0"
"instance_type_id0key0deleted")
),
)
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
nullable=False)
instance_type = relationship(InstanceTypes, backref="extra_specs",
foreign_keys=instance_type_id,
primaryjoin='and_('
'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,'
'InstanceTypeExtraSpecs.deleted == 0)')
class Cell(BASE, NovaBase):
"""Represents parent and child cells of this cell. Cells can
have multiple parents and children, so there could be any number
of entries with is_parent=True or False
"""
__tablename__ = 'cells'
__table_args__ = (schema.UniqueConstraint(
"name", "deleted", name="uniq_cells0name0deleted"
),
)
id = Column(Integer, primary_key=True)
# Name here is the 'short name' of a cell. For instance: 'child1'
name = Column(String(255))
api_url = Column(String(255))
transport_url = Column(String(255), nullable=False)
weight_offset = Column(Float(), default=0.0)
weight_scale = Column(Float(), default=1.0)
is_parent = Column(Boolean())
class AggregateHost(BASE, NovaBase):
"""Represents a host that is member of an aggregate."""
__tablename__ = 'aggregate_hosts'
__table_args__ = (schema.UniqueConstraint(
"host", "aggregate_id", "deleted",
name="uniq_aggregate_hosts0host0aggregate_id0deleted"
),
)
id = Column(Integer, primary_key=True, autoincrement=True)
host = Column(String(255))
aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class AggregateMetadata(BASE, NovaBase):
"""Represents a metadata key/value pair for an aggregate."""
__tablename__ = 'aggregate_metadata'
__table_args__ = (
schema.UniqueConstraint("aggregate_id", "key", "deleted",
name="uniq_aggregate_metadata0aggregate_id0key0deleted"
),
Index('aggregate_metadata_key_idx', 'key'),
)
id = Column(Integer, primary_key=True)
key = Column(String(255), nullable=False)
value = Column(String(255), nullable=False)
aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class Aggregate(BASE, NovaBase):
"""Represents a cluster of hosts that exists in this zone."""
__tablename__ = 'aggregates'
__table_args__ = ()
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(255))
_hosts = relationship(AggregateHost,
primaryjoin='and_('
'Aggregate.id == AggregateHost.aggregate_id,'
'AggregateHost.deleted == 0,'
'Aggregate.deleted == 0)')
_metadata = relationship(AggregateMetadata,
primaryjoin='and_('
'Aggregate.id == AggregateMetadata.aggregate_id,'
'AggregateMetadata.deleted == 0,'
'Aggregate.deleted == 0)')
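    # _extra_keys exposes the derived properties below as regular keys
    # when the model is iterated or converted to a dict.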
def _extra_keys(self):
return ['hosts', 'metadetails', 'availability_zone']
@property
def hosts(self):
return [h.host for h in self._hosts]
@property
def metadetails(self):
return dict([(m.key, m.value) for m in self._metadata])
@property
def availability_zone(self):
if 'availability_zone' not in self.metadetails:
return None
return self.metadetails['availability_zone']
class AgentBuild(BASE, NovaBase):
"""Represents an agent build."""
__tablename__ = 'agent_builds'
__table_args__ = (
Index('agent_builds_hypervisor_os_arch_idx', 'hypervisor', 'os',
'architecture'),
schema.UniqueConstraint("hypervisor", "os", "architecture", "deleted",
name="uniq_agent_builds0hypervisor0os0architecture0deleted"),
)
id = Column(Integer, primary_key=True)
hypervisor = Column(String(255))
os = Column(String(255))
architecture = Column(String(255))
version = Column(String(255))
url = Column(String(255))
md5hash = Column(String(255))
class BandwidthUsage(BASE, NovaBase):
"""Cache for instance bandwidth usage data pulled from the hypervisor."""
__tablename__ = 'bw_usage_cache'
__table_args__ = (
Index('bw_usage_cache_uuid_start_period_idx', 'uuid',
'start_period'),
)
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36))
mac = Column(String(255))
start_period = Column(DateTime, nullable=False)
last_refreshed = Column(DateTime)
bw_in = Column(BigInteger)
bw_out = Column(BigInteger)
last_ctr_in = Column(BigInteger)
last_ctr_out = Column(BigInteger)
class VolumeUsage(BASE, NovaBase):
"""Cache for volume usage data pulled from the hypervisor."""
__tablename__ = 'volume_usage_cache'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False)
volume_id = Column(String(36), nullable=False)
instance_uuid = Column(String(36))
project_id = Column(String(36))
user_id = Column(String(36))
availability_zone = Column(String(255))
tot_last_refreshed = Column(DateTime)
tot_reads = Column(BigInteger, default=0)
tot_read_bytes = Column(BigInteger, default=0)
tot_writes = Column(BigInteger, default=0)
tot_write_bytes = Column(BigInteger, default=0)
curr_last_refreshed = Column(DateTime)
curr_reads = Column(BigInteger, default=0)
curr_read_bytes = Column(BigInteger, default=0)
curr_writes = Column(BigInteger, default=0)
curr_write_bytes = Column(BigInteger, default=0)
class S3Image(BASE, NovaBase):
"""Compatibility layer for the S3 image service talking to Glance."""
__tablename__ = 's3_images'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class VolumeIdMapping(BASE, NovaBase):
"""Compatibility layer for the EC2 volume service."""
__tablename__ = 'volume_id_mappings'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class SnapshotIdMapping(BASE, NovaBase):
"""Compatibility layer for the EC2 snapshot service."""
__tablename__ = 'snapshot_id_mappings'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class InstanceFault(BASE, NovaBase):
__tablename__ = 'instance_faults'
__table_args__ = (
Index('instance_faults_host_idx', 'host'),
Index('instance_faults_instance_uuid_deleted_created_at_idx',
'instance_uuid', 'deleted', 'created_at')
)
id = Column(Integer, primary_key=True, nullable=False)
instance_uuid = Column(String(36),
ForeignKey('instances.uuid'))
code = Column(Integer(), nullable=False)
message = Column(String(255))
details = Column(MediumText())
host = Column(String(255))
class InstanceAction(BASE, NovaBase):
"""Track client actions on an instance.
The intention is that there will only be one of these per user request. A
lookup by (instance_uuid, request_id) should always return a single result.
"""
__tablename__ = 'instance_actions'
__table_args__ = (
Index('instance_uuid_idx', 'instance_uuid'),
Index('request_id_idx', 'request_id')
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
action = Column(String(255))
instance_uuid = Column(String(36),
ForeignKey('instances.uuid'))
request_id = Column(String(255))
user_id = Column(String(255))
project_id = Column(String(255))
start_time = Column(DateTime, default=timeutils.utcnow)
finish_time = Column(DateTime)
message = Column(String(255))
class InstanceActionEvent(BASE, NovaBase):
"""Track events that occur during an InstanceAction."""
__tablename__ = 'instance_actions_events'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
event = Column(String(255))
action_id = Column(Integer, ForeignKey('instance_actions.id'))
start_time = Column(DateTime, default=timeutils.utcnow)
finish_time = Column(DateTime)
result = Column(String(255))
traceback = Column(Text)
class InstanceIdMapping(BASE, NovaBase):
"""Compatibility layer for the EC2 instance service."""
__tablename__ = 'instance_id_mappings'
__table_args__ = (
Index('ix_instance_id_mappings_uuid', 'uuid'),
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class TaskLog(BASE, NovaBase):
"""Audit log for background periodic tasks."""
__tablename__ = 'task_log'
__table_args__ = (
schema.UniqueConstraint(
'task_name', 'host', 'period_beginning', 'period_ending',
name="uniq_task_log0task_name0host0period_beginning0period_ending"
),
Index('ix_task_log_period_beginning', 'period_beginning'),
Index('ix_task_log_host', 'host'),
Index('ix_task_log_period_ending', 'period_ending'),
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
task_name = Column(String(255), nullable=False)
state = Column(String(255), nullable=False)
host = Column(String(255), nullable=False)
period_beginning = Column(DateTime, default=timeutils.utcnow,
nullable=False)
period_ending = Column(DateTime, default=timeutils.utcnow,
nullable=False)
message = Column(String(255), nullable=False)
task_items = Column(Integer(), default=0)
errors = Column(Integer(), default=0)
class InstanceGroupMember(BASE, NovaBase):
"""Represents the members for an instance group."""
__tablename__ = 'instance_group_member'
__table_args__ = (
Index('instance_group_member_instance_idx', 'instance_id'),
)
id = Column(Integer, primary_key=True, nullable=False)
instance_id = Column(String(255))
group_id = Column(Integer, ForeignKey('instance_groups.id'),
nullable=False)
class InstanceGroupPolicy(BASE, NovaBase):
"""Represents the policy type for an instance group."""
__tablename__ = 'instance_group_policy'
__table_args__ = (
Index('instance_group_policy_policy_idx', 'policy'),
)
id = Column(Integer, primary_key=True, nullable=False)
policy = Column(String(255))
group_id = Column(Integer, ForeignKey('instance_groups.id'),
nullable=False)
class InstanceGroupMetadata(BASE, NovaBase):
"""Represents a key/value pair for an instance group."""
__tablename__ = 'instance_group_metadata'
__table_args__ = (
Index('instance_group_metadata_key_idx', 'key'),
)
id = Column(Integer, primary_key=True, nullable=False)
key = Column(String(255))
value = Column(String(255))
group_id = Column(Integer, ForeignKey('instance_groups.id'),
nullable=False)
class InstanceGroup(BASE, NovaBase):
"""Represents an instance group.
A group will maintain a collection of instances and the relationship
between them.
"""
__tablename__ = 'instance_groups'
__table_args__ = (
schema.UniqueConstraint("uuid", "deleted",
name="uniq_instance_groups0uuid0deleted"),
)
id = Column(Integer, primary_key=True, autoincrement=True)
user_id = Column(String(255))
project_id = Column(String(255))
uuid = Column(String(36), nullable=False)
name = Column(String(255))
_policies = relationship(InstanceGroupPolicy, primaryjoin='and_('
'InstanceGroup.id == InstanceGroupPolicy.group_id,'
'InstanceGroupPolicy.deleted == 0,'
'InstanceGroup.deleted == 0)')
_metadata = relationship(InstanceGroupMetadata, primaryjoin='and_('
'InstanceGroup.id == InstanceGroupMetadata.group_id,'
'InstanceGroupMetadata.deleted == 0,'
'InstanceGroup.deleted == 0)')
_members = relationship(InstanceGroupMember, primaryjoin='and_('
'InstanceGroup.id == InstanceGroupMember.group_id,'
'InstanceGroupMember.deleted == 0,'
'InstanceGroup.deleted == 0)')
@property
def policies(self):
return [p.policy for p in self._policies]
@property
def metadetails(self):
return dict((m.key, m.value) for m in self._metadata)
@property
def members(self):
return [m.instance_id for m in self._members]
class PciDevice(BASE, NovaBase):
"""
Represents a PCI host device that can be passed through to instances.
"""
__tablename__ = 'pci_devices'
__table_args__ = (
Index('ix_pci_devices_compute_node_id_deleted',
'compute_node_id', 'deleted'),
Index('ix_pci_devices_instance_uuid_deleted',
'instance_uuid', 'deleted'),
schema.UniqueConstraint(
"compute_node_id", "address", "deleted",
name="uniq_pci_devices0compute_node_id0address0deleted")
)
id = Column(Integer, primary_key=True)
compute_node_id = Column(Integer, ForeignKey('compute_nodes.id'),
nullable=False)
# physical address of device domain:bus:slot.func (0000:09:01.1)
address = Column(String(12), nullable=False)
vendor_id = Column(String(4), nullable=False)
product_id = Column(String(4), nullable=False)
dev_type = Column(String(8), nullable=False)
dev_id = Column(String(255))
    # label is an abstract device name used to unify devices that provide
    # the same functionality but have different addresses or hosts.
label = Column(String(255), nullable=False)
status = Column(String(36), nullable=False)
extra_info = Column(Text)
instance_uuid = Column(String(36))
instance = relationship(Instance, backref="pci_devices",
foreign_keys=instance_uuid,
primaryjoin='and_('
'PciDevice.instance_uuid == Instance.uuid,'
'PciDevice.deleted == 0)')
| {
"content_hash": "f68bce785070573eff89bbfce63a32a3",
"timestamp": "",
"source": "github",
"line_count": 1388,
"max_line_length": 79,
"avg_line_length": 36.757925072046106,
"alnum_prop": 0.6198549588396707,
"repo_name": "Yuriy-Leonov/nova",
"id": "3f44850caaae3a4e033e1906619a7e26dec72c57",
"size": "51904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/db/sqlalchemy/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13206133"
},
{
"name": "Shell",
"bytes": "17194"
}
],
"symlink_target": ""
} |
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, in_data, out_attributes, user_options, num_cores,
outfile):
import os
from genomicode import filelib
from genomicode import parallel
from genomicode import config
signal_node = in_data
signal_file = signal_node.identifier
assert os.path.exists(signal_file)
slice_matrix = filelib.which_assert(config.slice_matrix)
sq = parallel.quote
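        # Build the slice_matrix command that converts the counts matrix
        # to counts-per-million (CPM); output is redirected to outfile below.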
cmd = [
sq(slice_matrix),
"--cpm",
signal_file,
]
cmd = " ".join(cmd)
cmd = "%s >& %s" % (cmd, outfile)
parallel.sshell(cmd)
filelib.assert_exists_nz(outfile)
def name_outfile(self, antecedents, user_options):
return "signal.cpm"
| {
"content_hash": "c37ec5326f27753d1cd2b3517e6daed0",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 72,
"avg_line_length": 24.44736842105263,
"alnum_prop": 0.5683530678148547,
"repo_name": "jefftc/changlab",
"id": "2344f027fc7dfc86f51131ef3bce7cbbf6b8e81a",
"size": "929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Betsy/Betsy/modules/convert_counts_to_cpm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "116953"
},
{
"name": "CSS",
"bytes": "75418"
},
{
"name": "Groff",
"bytes": "10237"
},
{
"name": "HTML",
"bytes": "200459"
},
{
"name": "JavaScript",
"bytes": "159618"
},
{
"name": "Makefile",
"bytes": "11719"
},
{
"name": "Python",
"bytes": "9300228"
},
{
"name": "R",
"bytes": "94670"
},
{
"name": "Shell",
"bytes": "63514"
},
{
"name": "TeX",
"bytes": "64"
}
],
"symlink_target": ""
} |
"""
Le premier programme en Python
* utilisation des arguments de la lignne de commande
* les listes et la fonction map
* les threads
* le logger
@author Dragos STOICA
@version 0.3
@date 16.feb.2014
"""
import sys, threading, logging
class Bonjour(threading.Thread):
def __init__(self, personne):
threading.Thread.__init__(self)
self.personne = personne
def run(self):
        # Polite function - greet a person
print "Bonjour %(personne)s !\n" % \
{"personne":self.personne}
        logging.info("From %(thread_name)s\n" % {"thread_name": self.getName()})
        # alert messages, errors
def utilisation():
    # Display the usage message
print """
Le programme doit etre appelle avec minimum 1 argument:
python bonjour_listes.py Dragos
"""
def main(argv=None):
    # The main loop
if argv is None:
argv = sys.argv
if len(argv) == 1:
utilisation()
else:
        # Say hello to each person in the list
        for nom in argv[1:]:
monThread = Bonjour(nom)
monThread.start()
print "Programme principal execution terminee.\n"
return 0
if __name__ == "__main__":
    # Simplify the logic of the main function
sys.exit(main())
| {
"content_hash": "5aacd91f4b0bbb123f43db140b1102f8",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 78,
"avg_line_length": 26.44,
"alnum_prop": 0.6180030257186082,
"repo_name": "UPB-FILS/SE",
"id": "72a5ebbd1fd77e165e2502a2f564a96b9efe10de",
"size": "1322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TD2/bonjour_threads.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "8039"
},
{
"name": "Python",
"bytes": "36897"
}
],
"symlink_target": ""
} |
from app import baza
from models import Pytanie, Odpowiedz
import os
def pobierz_dane(plikcsv):
"""Funkcja zwraca tuplę tupli zawierających dane pobrane z pliku csv."""
dane = []
if os.path.isfile(plikcsv):
with open(plikcsv, "r") as sCsv:
for line in sCsv:
                line = line.replace("\n", "")  # strip end-of-line characters
                line = line.decode("utf-8")  # character encoding format
dane.append(tuple(line.split("#")))
else:
print "Plik z danymi", plikcsv, "nie istnieje!"
return tuple(dane)
def dodaj_pytania(dane):
"""Funkcja dodaje pytania i odpowiedzi przekazane w tupli do bazy."""
for pytanie, odpowiedzi, odpok in dane:
pyt = Pytanie(pytanie=pytanie, odpok=odpok)
baza.session.add(pyt)
baza.session.commit()
for o in odpowiedzi.split(","):
odp = Odpowiedz(pnr=pyt.id, odpowiedz=o.strip())
baza.session.add(odp)
baza.session.commit()
print "Dodano przykładowe pytania"
| {
"content_hash": "871aeed7ed5d79556b4b8ba56c403bba",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 76,
"avg_line_length": 31.606060606060606,
"alnum_prop": 0.6116970278044104,
"repo_name": "koduj-z-klasa/python101",
"id": "e3140b9cffae652c1f6f17cc3755e3248f9b030b",
"size": "1091",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quiz2/quiz2_sa/dane.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2740"
},
{
"name": "HTML",
"bytes": "18056"
},
{
"name": "Python",
"bytes": "157924"
}
],
"symlink_target": ""
} |
import gnupg
import os.path
class GpgEncrypt:
def __init__(self):
self.encrypted_file = ''
self.try_decoded_file = ''
self.exported_private_key_file = os.path.dirname(os.path.abspath(__file__)) + '/exported_key.private'
self.exported_public_key_file = os.path.dirname(os.path.abspath(__file__)) + '/exported_key.public'
gnupg_path = os.path.dirname(os.path.abspath(__file__)) + '/.gnupg'
self.gpg = gnupg.GPG(gnupghome=gnupg_path)
self.gpg.encoding = 'utf-8'
        # import the public key if an exported copy already exists
if os.path.exists(self.exported_public_key_file):
print 'exist'
with open(self.exported_public_key_file, 'rb') as key_file:
key_data = key_file.read()
import_result = self.gpg.import_keys(key_data)
self.fp = import_result.fingerprints
print('Imported public key fingerprint:[%s]' % self.fp)
        # first run: generate a new key pair and export it
else:
print 'the first time'
key = self.gpg.gen_key(self.gpg.gen_key_input(key_type="RSA", key_length=1024))
self.fp = key.fingerprint
ascii_armored_public_keys = self.gpg.export_keys(self.fp)
ascii_armored_private_keys = self.gpg.export_keys(self.fp, True)
with open(self.exported_public_key_file, 'w') as the_public_key:
the_public_key.write(ascii_armored_public_keys)
print('Exported public key:[%s]' % self.exported_public_key_file)
with open(self.exported_private_key_file, 'w') as the_key:
the_key.write(ascii_armored_private_keys)
print('Exported private key:[%s]' % self.exported_private_key_file)
    # store both the encrypted file and a locally decrypted copy
def do_encrypt(self, file_path):
self.encrypted_file = file_path + '.encoded'
self.try_decoded_file = file_path + '.decoded'
# encryption
with open(file_path, 'rb') as content_file:
data = content_file.read()
encrypted_ascii_data = str(self.gpg.encrypt(data, self.fp, output=self.encrypted_file))
print('Encrypted_data=[%s]' % self.encrypted_file)
# decryption
self.gpg.decrypt(encrypted_ascii_data, output=self.try_decoded_file)
print('Decrypted_data=[%s]' % self.try_decoded_file)
return self.encrypted_file
def delete_key(self):
print('Delete GPG private key:[%s]' % str(self.gpg.delete_keys(self.fp, True)))
print('Delete GPG public key:[%s]' % str(self.gpg.delete_keys(self.fp)))
# data = raw_input("Enter full path of file to encrypt:")
# rkeys = raw_input("Enter key IDs seperated by spaces:")
# savefile = data+".asc"
# afile = open(data, "rb")
# encrypted_ascii_data = gpg.encrypt_file(afile, rkeys.split(), always_trust=True, output=savefile)
# afile.close()
if __name__ == '__main__':
encryptor = GpgEncrypt()
encryptor.do_encrypt('/home/alice/Music/kite.mp3')
encryptor.delete_key()
| {
"content_hash": "902c193e81d8dd337de1d49718f26407",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 109,
"avg_line_length": 46.71212121212121,
"alnum_prop": 0.611417450535193,
"repo_name": "boisde/Greed_Island",
"id": "23dbf9fde761dbb2d2a376154dbc218347939bd0",
"size": "3083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snippets/gpgEncrypt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "631"
},
{
"name": "CSS",
"bytes": "39566"
},
{
"name": "HTML",
"bytes": "46586"
},
{
"name": "JavaScript",
"bytes": "112239"
},
{
"name": "Python",
"bytes": "265001"
},
{
"name": "Shell",
"bytes": "4885"
}
],
"symlink_target": ""
} |
"""
Django settings for vanguard project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import base64
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# 32-byte top-secret key used for encryption/decryption
KEY = base64.urlsafe_b64encode('LWjpEnEY377N7qtpIy_CslXvBopesr0=')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm%9&l0&c2-@3b+kjo*3+u^7c$b$(wxsgrbd6dmoj$p60&#@gi2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'vanguard',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'vanguard.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'vanguard.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
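# Send DEBUG-and-above records from the 'vanguard' logger to vanguard.log
# using the 'simple' formatter.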
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '[%(asctime)s] %(levelname)s %(message)s',
'datefmt': "%d/%b/%Y %H:%M:%S"
},
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'vanguard.log',
'formatter': 'simple'
},
},
'loggers': {
'vanguard': {
'handlers': ['file'],
'level': 'DEBUG',
},
}
}
| {
"content_hash": "d3dab0a585f557b0af6b02b59096d8f3",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 91,
"avg_line_length": 26.789115646258505,
"alnum_prop": 0.6368715083798883,
"repo_name": "svalleru/vanguard",
"id": "ea9ee5fd8c58da177ac8865a24903591d942659a",
"size": "3938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vanguard/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16598"
}
],
"symlink_target": ""
} |
from cloudify.workflows import ctx
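# Execute the 'interface.op' operation on the first node instance found in
# the workflow context.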
instance = next(ctx.node_instances)
instance.execute_operation('interface.op')
| {
"content_hash": "3c01369c73071c9d2677cc3591be2333",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 42,
"avg_line_length": 28.75,
"alnum_prop": 0.808695652173913,
"repo_name": "dankilman/clash",
"id": "12d2528e582f644f52bf58804c01157b1439dcdb",
"size": "115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clash/tests/resources/blueprints/envpath/blueprint_workflows/workflow3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "86047"
},
{
"name": "Shell",
"bytes": "65"
}
],
"symlink_target": ""
} |
import stringcase
def normalize_acronyms(s: str) -> str:
"""Replaces variations of acronyms when converting various words.
Specifically when considering how to generate a CONST_CASE constant,
strings such as WiFi should not be WI_FI but rather WIFI
"""
return s.replace('WiFi', 'Wifi').replace('WI_FI', 'WIFI')
def RegisterCommonFilters(filtermap):
"""
Register filters that are NOT considered platform-generator specific.
Codegen often needs standardized names, like "method names are CamelCase"
or "command names need-to-be-spinal-case" so these filters are often
generally registered on all generators.
"""
# General casing for output naming
filtermap['camelcase'] = stringcase.camelcase
filtermap['capitalcase'] = stringcase.capitalcase
filtermap['constcase'] = stringcase.constcase
filtermap['pascalcase'] = stringcase.pascalcase
filtermap['snakecase'] = stringcase.snakecase
filtermap['spinalcase'] = stringcase.spinalcase
filtermap['normalize_acronyms'] = normalize_acronyms
| {
"content_hash": "8160eca217704c8afb51c4b323af4a89",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 77,
"avg_line_length": 35.733333333333334,
"alnum_prop": 0.7276119402985075,
"repo_name": "project-chip/connectedhomeip",
"id": "594bcff7a2ecb70b13dcb138be5b96b124d20933",
"size": "1659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/idl/generators/filters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1759301"
},
{
"name": "C++",
"bytes": "19104548"
},
{
"name": "CMake",
"bytes": "140510"
},
{
"name": "Dockerfile",
"bytes": "50353"
},
{
"name": "Emacs Lisp",
"bytes": "1042"
},
{
"name": "Java",
"bytes": "167719"
},
{
"name": "JavaScript",
"bytes": "2106"
},
{
"name": "Jinja",
"bytes": "22322"
},
{
"name": "Objective-C",
"bytes": "930838"
},
{
"name": "Objective-C++",
"bytes": "435348"
},
{
"name": "Python",
"bytes": "1931007"
},
{
"name": "Shell",
"bytes": "195843"
},
{
"name": "Tcl",
"bytes": "311"
},
{
"name": "ZAP",
"bytes": "584219"
}
],
"symlink_target": ""
} |
"""Websocket API for Z-Wave JS."""
from __future__ import annotations
from collections.abc import Callable
import dataclasses
from functools import partial, wraps
from typing import Any
from aiohttp import web, web_exceptions, web_request
import voluptuous as vol
from zwave_js_server.client import Client
from zwave_js_server.const import (
CommandClass,
InclusionStrategy,
LogLevel,
Protocols,
QRCodeVersion,
SecurityClass,
ZwaveFeature,
)
from zwave_js_server.exceptions import (
BaseZwaveJSServerError,
FailedCommand,
InvalidNewValue,
NotFoundError,
SetValueFailed,
)
from zwave_js_server.firmware import begin_firmware_update
from zwave_js_server.model.controller import (
ControllerStatistics,
InclusionGrant,
ProvisioningEntry,
QRProvisioningInformation,
)
from zwave_js_server.model.firmware import (
FirmwareUpdateFinished,
FirmwareUpdateProgress,
)
from zwave_js_server.model.log_config import LogConfig
from zwave_js_server.model.log_message import LogMessage
from zwave_js_server.model.node import Node, NodeStatistics
from zwave_js_server.model.utils import async_parse_qr_code_string
from zwave_js_server.util.node import async_set_config_parameter
from homeassistant.components import websocket_api
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.components.websocket_api.connection import ActiveConnection
from homeassistant.components.websocket_api.const import (
ERR_INVALID_FORMAT,
ERR_NOT_FOUND,
ERR_NOT_SUPPORTED,
ERR_UNKNOWN_ERROR,
)
from homeassistant.config_entries import ConfigEntry, ConfigEntryState
from homeassistant.const import CONF_URL
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import Unauthorized
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.device_registry import DeviceEntry
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .config_validation import BITMASK_SCHEMA
from .const import (
CONF_DATA_COLLECTION_OPTED_IN,
DATA_CLIENT,
DOMAIN,
EVENT_DEVICE_ADDED_TO_REGISTRY,
LOGGER,
)
from .helpers import async_enable_statistics, update_data_collection_preference
from .migrate import (
ZWaveMigrationData,
async_get_migration_data,
async_map_legacy_zwave_values,
async_migrate_legacy_zwave,
)
DATA_UNSUBSCRIBE = "unsubs"
# general API constants
ID = "id"
ENTRY_ID = "entry_id"
ERR_NOT_LOADED = "not_loaded"
NODE_ID = "node_id"
COMMAND_CLASS_ID = "command_class_id"
TYPE = "type"
PROPERTY = "property"
PROPERTY_KEY = "property_key"
VALUE = "value"
# constants for log config commands
CONFIG = "config"
LEVEL = "level"
LOG_TO_FILE = "log_to_file"
FILENAME = "filename"
ENABLED = "enabled"
FORCE_CONSOLE = "force_console"
# constants for setting config parameters
VALUE_ID = "value_id"
STATUS = "status"
# constants for data collection
ENABLED = "enabled"
OPTED_IN = "opted_in"
# constants for granting security classes
SECURITY_CLASSES = "security_classes"
CLIENT_SIDE_AUTH = "client_side_auth"
# constants for migration
DRY_RUN = "dry_run"
# constants for inclusion
INCLUSION_STRATEGY = "inclusion_strategy"
PIN = "pin"
FORCE_SECURITY = "force_security"
PLANNED_PROVISIONING_ENTRY = "planned_provisioning_entry"
QR_PROVISIONING_INFORMATION = "qr_provisioning_information"
QR_CODE_STRING = "qr_code_string"
DSK = "dsk"
VERSION = "version"
GENERIC_DEVICE_CLASS = "generic_device_class"
SPECIFIC_DEVICE_CLASS = "specific_device_class"
INSTALLER_ICON_TYPE = "installer_icon_type"
MANUFACTURER_ID = "manufacturer_id"
PRODUCT_TYPE = "product_type"
PRODUCT_ID = "product_id"
APPLICATION_VERSION = "application_version"
MAX_INCLUSION_REQUEST_INTERVAL = "max_inclusion_request_interval"
UUID = "uuid"
SUPPORTED_PROTOCOLS = "supported_protocols"
ADDITIONAL_PROPERTIES = "additional_properties"
FEATURE = "feature"
UNPROVISION = "unprovision"
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/core/src/security/QR.ts#L41
MINIMUM_QR_STRING_LENGTH = 52
def convert_planned_provisioning_entry(info: dict) -> ProvisioningEntry:
"""Handle provisioning entry dict to ProvisioningEntry."""
info = ProvisioningEntry(
dsk=info[DSK],
security_classes=[SecurityClass(sec_cls) for sec_cls in info[SECURITY_CLASSES]],
additional_properties={
k: v for k, v in info.items() if k not in (DSK, SECURITY_CLASSES)
},
)
return info
def convert_qr_provisioning_information(info: dict) -> QRProvisioningInformation:
"""Convert QR provisioning information dict to QRProvisioningInformation."""
protocols = [Protocols(proto) for proto in info.get(SUPPORTED_PROTOCOLS, [])]
info = QRProvisioningInformation(
version=QRCodeVersion(info[VERSION]),
security_classes=[SecurityClass(sec_cls) for sec_cls in info[SECURITY_CLASSES]],
dsk=info[DSK],
generic_device_class=info[GENERIC_DEVICE_CLASS],
specific_device_class=info[SPECIFIC_DEVICE_CLASS],
installer_icon_type=info[INSTALLER_ICON_TYPE],
manufacturer_id=info[MANUFACTURER_ID],
product_type=info[PRODUCT_TYPE],
product_id=info[PRODUCT_ID],
application_version=info[APPLICATION_VERSION],
max_inclusion_request_interval=info.get(MAX_INCLUSION_REQUEST_INTERVAL),
uuid=info.get(UUID),
supported_protocols=protocols if protocols else None,
additional_properties=info.get(ADDITIONAL_PROPERTIES, {}),
)
return info
# Helper schemas
PLANNED_PROVISIONING_ENTRY_SCHEMA = vol.All(
vol.Schema(
{
vol.Required(DSK): str,
vol.Required(SECURITY_CLASSES): vol.All(
cv.ensure_list,
[vol.Coerce(SecurityClass)],
),
},
# Provisioning entries can have extra keys for SmartStart
extra=vol.ALLOW_EXTRA,
),
convert_planned_provisioning_entry,
)
QR_PROVISIONING_INFORMATION_SCHEMA = vol.All(
vol.Schema(
{
vol.Required(VERSION): vol.Coerce(QRCodeVersion),
vol.Required(SECURITY_CLASSES): vol.All(
cv.ensure_list,
[vol.Coerce(SecurityClass)],
),
vol.Required(DSK): str,
vol.Required(GENERIC_DEVICE_CLASS): int,
vol.Required(SPECIFIC_DEVICE_CLASS): int,
vol.Required(INSTALLER_ICON_TYPE): int,
vol.Required(MANUFACTURER_ID): int,
vol.Required(PRODUCT_TYPE): int,
vol.Required(PRODUCT_ID): int,
vol.Required(APPLICATION_VERSION): str,
vol.Optional(MAX_INCLUSION_REQUEST_INTERVAL): vol.Any(int, None),
vol.Optional(UUID): vol.Any(str, None),
vol.Optional(SUPPORTED_PROTOCOLS): vol.All(
cv.ensure_list,
[vol.Coerce(Protocols)],
),
vol.Optional(ADDITIONAL_PROPERTIES): dict,
}
),
convert_qr_provisioning_information,
)
QR_CODE_STRING_SCHEMA = vol.All(str, vol.Length(min=MINIMUM_QR_STRING_LENGTH))
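# Illustrative example with placeholder values only: the vol.All schemas above
# both validate and convert, so a hypothetical message fragment such as
#
#     {"dsk": "12345-67890-...", "security_classes": [1], "status": 0}
#
# run through PLANNED_PROVISIONING_ENTRY_SCHEMA becomes a ProvisioningEntry
# whose additional_properties carries every key other than dsk and
# security_classes (here {"status": 0}); the DSK and security class shown are
# placeholders, not real provisioning data.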
def async_get_entry(orig_func: Callable) -> Callable:
"""Decorate async function to get entry."""
@wraps(orig_func)
async def async_get_entry_func(
hass: HomeAssistant, connection: ActiveConnection, msg: dict
) -> None:
"""Provide user specific data and store to function."""
entry_id = msg[ENTRY_ID]
entry = hass.config_entries.async_get_entry(entry_id)
if entry is None:
connection.send_error(
msg[ID], ERR_NOT_FOUND, f"Config entry {entry_id} not found"
)
return
if entry.state is not ConfigEntryState.LOADED:
connection.send_error(
msg[ID], ERR_NOT_LOADED, f"Config entry {entry_id} not loaded"
)
return
client = hass.data[DOMAIN][entry_id][DATA_CLIENT]
await orig_func(hass, connection, msg, entry, client)
return async_get_entry_func
def async_get_node(orig_func: Callable) -> Callable:
"""Decorate async function to get node."""
@async_get_entry
@wraps(orig_func)
async def async_get_node_func(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Provide user specific data and store to function."""
node_id = msg[NODE_ID]
node = client.driver.controller.nodes.get(node_id)
if node is None:
connection.send_error(msg[ID], ERR_NOT_FOUND, f"Node {node_id} not found")
return
await orig_func(hass, connection, msg, node)
return async_get_node_func
def async_handle_failed_command(orig_func: Callable) -> Callable:
"""Decorate async function to handle FailedCommand and send relevant error."""
@wraps(orig_func)
async def async_handle_failed_command_func(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
*args: Any,
**kwargs: Any,
) -> None:
"""Handle FailedCommand within function and send relevant error."""
try:
await orig_func(hass, connection, msg, *args, **kwargs)
except FailedCommand as err:
# Unsubscribe to callbacks
if unsubs := msg.get(DATA_UNSUBSCRIBE):
for unsub in unsubs:
unsub()
connection.send_error(msg[ID], err.error_code, err.args[0])
return async_handle_failed_command_func
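# The decorators above are stacked under the websocket_api decorators: the one
# listed closest to the handler (@async_get_entry or @async_get_node) resolves
# the config entry/client or node from the message and injects them as extra
# arguments, while @async_handle_failed_command wraps that lookup plus the
# handler so any FailedCommand raised while executing a driver command is
# translated into a websocket error response and pending subscriptions are
# cleaned up. The handlers registered below follow this pattern.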
@callback
def async_register_api(hass: HomeAssistant) -> None:
"""Register all of our api endpoints."""
websocket_api.async_register_command(hass, websocket_network_status)
websocket_api.async_register_command(hass, websocket_node_status)
websocket_api.async_register_command(hass, websocket_node_metadata)
websocket_api.async_register_command(hass, websocket_ping_node)
websocket_api.async_register_command(hass, websocket_add_node)
websocket_api.async_register_command(hass, websocket_grant_security_classes)
websocket_api.async_register_command(hass, websocket_validate_dsk_and_enter_pin)
websocket_api.async_register_command(hass, websocket_provision_smart_start_node)
websocket_api.async_register_command(hass, websocket_unprovision_smart_start_node)
websocket_api.async_register_command(hass, websocket_get_provisioning_entries)
websocket_api.async_register_command(hass, websocket_parse_qr_code_string)
websocket_api.async_register_command(hass, websocket_supports_feature)
websocket_api.async_register_command(hass, websocket_stop_inclusion)
websocket_api.async_register_command(hass, websocket_stop_exclusion)
websocket_api.async_register_command(hass, websocket_remove_node)
websocket_api.async_register_command(hass, websocket_remove_failed_node)
websocket_api.async_register_command(hass, websocket_replace_failed_node)
websocket_api.async_register_command(hass, websocket_begin_healing_network)
websocket_api.async_register_command(
hass, websocket_subscribe_heal_network_progress
)
websocket_api.async_register_command(hass, websocket_stop_healing_network)
websocket_api.async_register_command(hass, websocket_refresh_node_info)
websocket_api.async_register_command(hass, websocket_refresh_node_values)
websocket_api.async_register_command(hass, websocket_refresh_node_cc_values)
websocket_api.async_register_command(hass, websocket_heal_node)
websocket_api.async_register_command(hass, websocket_set_config_parameter)
websocket_api.async_register_command(hass, websocket_get_config_parameters)
websocket_api.async_register_command(hass, websocket_subscribe_log_updates)
websocket_api.async_register_command(hass, websocket_update_log_config)
websocket_api.async_register_command(hass, websocket_get_log_config)
websocket_api.async_register_command(
hass, websocket_update_data_collection_preference
)
websocket_api.async_register_command(hass, websocket_data_collection_status)
websocket_api.async_register_command(hass, websocket_abort_firmware_update)
websocket_api.async_register_command(
hass, websocket_subscribe_firmware_update_status
)
websocket_api.async_register_command(hass, websocket_check_for_config_updates)
websocket_api.async_register_command(hass, websocket_install_config_update)
websocket_api.async_register_command(
hass, websocket_subscribe_controller_statistics
)
websocket_api.async_register_command(hass, websocket_subscribe_node_statistics)
websocket_api.async_register_command(hass, websocket_node_ready)
websocket_api.async_register_command(hass, websocket_migrate_zwave)
hass.http.register_view(FirmwareUploadView())
@websocket_api.require_admin
@websocket_api.websocket_command(
{vol.Required(TYPE): "zwave_js/network_status", vol.Required(ENTRY_ID): str}
)
@websocket_api.async_response
@async_get_entry
async def websocket_network_status(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Get the status of the Z-Wave JS network."""
controller = client.driver.controller
await controller.async_get_state()
data = {
"client": {
"ws_server_url": client.ws_server_url,
"state": "connected" if client.connected else "disconnected",
"driver_version": client.version.driver_version,
"server_version": client.version.server_version,
},
"controller": {
"home_id": controller.home_id,
"library_version": controller.library_version,
"type": controller.controller_type,
"own_node_id": controller.own_node_id,
"is_secondary": controller.is_secondary,
"is_using_home_id_from_other_network": controller.is_using_home_id_from_other_network,
"is_sis_present": controller.is_SIS_present,
"was_real_primary": controller.was_real_primary,
"is_static_update_controller": controller.is_static_update_controller,
"is_slave": controller.is_slave,
"serial_api_version": controller.serial_api_version,
"manufacturer_id": controller.manufacturer_id,
"product_id": controller.product_id,
"product_type": controller.product_type,
"supported_function_types": controller.supported_function_types,
"suc_node_id": controller.suc_node_id,
"supports_timers": controller.supports_timers,
"is_heal_network_active": controller.is_heal_network_active,
"inclusion_state": controller.inclusion_state,
"nodes": list(client.driver.controller.nodes),
},
}
connection.send_result(
msg[ID],
data,
)
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/node_ready",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
}
)
@websocket_api.async_response
@async_get_node
async def websocket_node_ready(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Subscribe to the node ready event of a Z-Wave JS node."""
@callback
def forward_event(event: dict) -> None:
"""Forward the event."""
connection.send_message(
websocket_api.event_message(msg[ID], {"event": event["event"]})
)
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
connection.subscriptions[msg["id"]] = async_cleanup
msg[DATA_UNSUBSCRIBE] = unsubs = [node.on("ready", forward_event)]
connection.send_result(msg[ID])
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/node_status",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
}
)
@websocket_api.async_response
@async_get_node
async def websocket_node_status(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Get the status of a Z-Wave JS node."""
data = {
"node_id": node.node_id,
"is_routing": node.is_routing,
"status": node.status,
"is_secure": node.is_secure,
"ready": node.ready,
"zwave_plus_version": node.zwave_plus_version,
"highest_security_class": node.highest_security_class,
"is_controller_node": node.is_controller_node,
}
connection.send_result(
msg[ID],
data,
)
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/node_metadata",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
}
)
@websocket_api.async_response
@async_get_node
async def websocket_node_metadata(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Get the metadata of a Z-Wave JS node."""
data = {
"node_id": node.node_id,
"exclusion": node.device_config.metadata.exclusion,
"inclusion": node.device_config.metadata.inclusion,
"manual": node.device_config.metadata.manual,
"wakeup": node.device_config.metadata.wakeup,
"reset": node.device_config.metadata.reset,
"device_database_url": node.device_database_url,
}
connection.send_result(
msg[ID],
data,
)
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/ping_node",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_node
async def websocket_ping_node(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Ping a Z-Wave JS node."""
result = await node.async_ping()
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/add_node",
vol.Required(ENTRY_ID): str,
vol.Optional(INCLUSION_STRATEGY, default=InclusionStrategy.DEFAULT): vol.All(
vol.Coerce(int),
vol.In(
[
strategy.value
for strategy in InclusionStrategy
if strategy != InclusionStrategy.SMART_START
]
),
),
vol.Optional(FORCE_SECURITY): bool,
vol.Exclusive(
PLANNED_PROVISIONING_ENTRY, "options"
): PLANNED_PROVISIONING_ENTRY_SCHEMA,
vol.Exclusive(
QR_PROVISIONING_INFORMATION, "options"
): QR_PROVISIONING_INFORMATION_SCHEMA,
vol.Exclusive(QR_CODE_STRING, "options"): QR_CODE_STRING_SCHEMA,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_add_node(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Add a node to the Z-Wave network."""
controller = client.driver.controller
inclusion_strategy = InclusionStrategy(msg[INCLUSION_STRATEGY])
force_security = msg.get(FORCE_SECURITY)
provisioning = (
msg.get(PLANNED_PROVISIONING_ENTRY)
or msg.get(QR_PROVISIONING_INFORMATION)
or msg.get(QR_CODE_STRING)
)
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
@callback
def forward_event(event: dict) -> None:
connection.send_message(
websocket_api.event_message(msg[ID], {"event": event["event"]})
)
@callback
def forward_dsk(event: dict) -> None:
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": event["event"], "dsk": event["dsk"]}
)
)
@callback
def forward_requested_grant(event: dict) -> None:
connection.send_message(
websocket_api.event_message(
msg[ID],
{
"event": event["event"],
"requested_grant": event["requested_grant"].to_dict(),
},
)
)
@callback
def forward_stage(event: dict) -> None:
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": event["event"], "stage": event["stageName"]}
)
)
@callback
def node_added(event: dict) -> None:
node = event["node"]
interview_unsubs = [
node.on("interview started", forward_event),
node.on("interview completed", forward_event),
node.on("interview stage completed", forward_stage),
node.on("interview failed", forward_event),
]
unsubs.extend(interview_unsubs)
node_details = {
"node_id": node.node_id,
"status": node.status,
"ready": node.ready,
"low_security": event["result"].get("lowSecurity", False),
}
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": "node added", "node": node_details}
)
)
@callback
def device_registered(device: DeviceEntry) -> None:
device_details = {
"name": device.name,
"id": device.id,
"manufacturer": device.manufacturer,
"model": device.model,
}
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": "device registered", "device": device_details}
)
)
connection.subscriptions[msg["id"]] = async_cleanup
msg[DATA_UNSUBSCRIBE] = unsubs = [
controller.on("inclusion started", forward_event),
controller.on("inclusion failed", forward_event),
controller.on("inclusion stopped", forward_event),
controller.on("validate dsk and enter pin", forward_dsk),
controller.on("grant security classes", forward_requested_grant),
controller.on("node added", node_added),
async_dispatcher_connect(
hass, EVENT_DEVICE_ADDED_TO_REGISTRY, device_registered
),
]
try:
result = await controller.async_begin_inclusion(
inclusion_strategy, force_security=force_security, provisioning=provisioning
)
except ValueError as err:
connection.send_error(
msg[ID],
ERR_INVALID_FORMAT,
err.args[0],
)
return
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/grant_security_classes",
vol.Required(ENTRY_ID): str,
vol.Required(SECURITY_CLASSES): vol.All(
cv.ensure_list,
[vol.Coerce(SecurityClass)],
),
vol.Optional(CLIENT_SIDE_AUTH, default=False): bool,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_grant_security_classes(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Choose SecurityClass grants as part of S2 inclusion process."""
inclusion_grant = InclusionGrant(
[SecurityClass(sec_cls) for sec_cls in msg[SECURITY_CLASSES]],
msg[CLIENT_SIDE_AUTH],
)
await client.driver.controller.async_grant_security_classes(inclusion_grant)
connection.send_result(msg[ID])
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/validate_dsk_and_enter_pin",
vol.Required(ENTRY_ID): str,
vol.Required(PIN): str,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_validate_dsk_and_enter_pin(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Validate DSK and enter PIN as part of S2 inclusion process."""
await client.driver.controller.async_validate_dsk_and_enter_pin(msg[PIN])
connection.send_result(msg[ID])
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/provision_smart_start_node",
vol.Required(ENTRY_ID): str,
vol.Exclusive(
PLANNED_PROVISIONING_ENTRY, "options"
): PLANNED_PROVISIONING_ENTRY_SCHEMA,
vol.Exclusive(
QR_PROVISIONING_INFORMATION, "options"
): QR_PROVISIONING_INFORMATION_SCHEMA,
vol.Exclusive(QR_CODE_STRING, "options"): QR_CODE_STRING_SCHEMA,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_provision_smart_start_node(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Pre-provision a smart start node."""
try:
cv.has_at_least_one_key(
PLANNED_PROVISIONING_ENTRY, QR_PROVISIONING_INFORMATION, QR_CODE_STRING
)(msg)
except vol.Invalid as err:
connection.send_error(
msg[ID],
ERR_INVALID_FORMAT,
err.args[0],
)
return
provisioning_info = (
msg.get(PLANNED_PROVISIONING_ENTRY)
or msg.get(QR_PROVISIONING_INFORMATION)
or msg[QR_CODE_STRING]
)
if (
QR_PROVISIONING_INFORMATION in msg
and provisioning_info.version == QRCodeVersion.S2
):
connection.send_error(
msg[ID],
ERR_INVALID_FORMAT,
"QR code version S2 is not supported for this command",
)
return
await client.driver.controller.async_provision_smart_start_node(provisioning_info)
connection.send_result(msg[ID])
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/unprovision_smart_start_node",
vol.Required(ENTRY_ID): str,
vol.Exclusive(DSK, "input"): str,
vol.Exclusive(NODE_ID, "input"): int,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_unprovision_smart_start_node(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Unprovision a smart start node."""
try:
cv.has_at_least_one_key(DSK, NODE_ID)(msg)
except vol.Invalid as err:
connection.send_error(
msg[ID],
ERR_INVALID_FORMAT,
err.args[0],
)
return
dsk_or_node_id = msg.get(DSK) or msg[NODE_ID]
await client.driver.controller.async_unprovision_smart_start_node(dsk_or_node_id)
connection.send_result(msg[ID])
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/get_provisioning_entries",
vol.Required(ENTRY_ID): str,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_get_provisioning_entries(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Get provisioning entries (entries that have been pre-provisioned)."""
provisioning_entries = (
await client.driver.controller.async_get_provisioning_entries()
)
connection.send_result(
msg[ID], [dataclasses.asdict(entry) for entry in provisioning_entries]
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/parse_qr_code_string",
vol.Required(ENTRY_ID): str,
vol.Required(QR_CODE_STRING): QR_CODE_STRING_SCHEMA,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_parse_qr_code_string(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Parse a QR Code String and return QRProvisioningInformation dict."""
qr_provisioning_information = await async_parse_qr_code_string(
client, msg[QR_CODE_STRING]
)
connection.send_result(msg[ID], dataclasses.asdict(qr_provisioning_information))
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/supports_feature",
vol.Required(ENTRY_ID): str,
vol.Required(FEATURE): vol.Coerce(ZwaveFeature),
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_supports_feature(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Check if controller supports a particular feature."""
supported = await client.driver.controller.async_supports_feature(msg[FEATURE])
connection.send_result(
msg[ID],
{"supported": supported},
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/stop_inclusion",
vol.Required(ENTRY_ID): str,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_stop_inclusion(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Cancel adding a node to the Z-Wave network."""
controller = client.driver.controller
result = await controller.async_stop_inclusion()
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/stop_exclusion",
vol.Required(ENTRY_ID): str,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_stop_exclusion(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Cancel removing a node from the Z-Wave network."""
controller = client.driver.controller
result = await controller.async_stop_exclusion()
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/remove_node",
vol.Required(ENTRY_ID): str,
vol.Optional(UNPROVISION): bool,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_remove_node(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Remove a node from the Z-Wave network."""
controller = client.driver.controller
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
@callback
def forward_event(event: dict) -> None:
connection.send_message(
websocket_api.event_message(msg[ID], {"event": event["event"]})
)
@callback
def node_removed(event: dict) -> None:
node = event["node"]
node_details = {
"node_id": node.node_id,
}
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": "node removed", "node": node_details}
)
)
connection.subscriptions[msg["id"]] = async_cleanup
msg[DATA_UNSUBSCRIBE] = unsubs = [
controller.on("exclusion started", forward_event),
controller.on("exclusion failed", forward_event),
controller.on("exclusion stopped", forward_event),
controller.on("node removed", node_removed),
]
result = await controller.async_begin_exclusion(msg.get(UNPROVISION))
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/replace_failed_node",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
vol.Optional(INCLUSION_STRATEGY, default=InclusionStrategy.DEFAULT): vol.All(
vol.Coerce(int),
vol.In(
[
strategy.value
for strategy in InclusionStrategy
if strategy != InclusionStrategy.SMART_START
]
),
),
vol.Optional(FORCE_SECURITY): bool,
vol.Exclusive(
PLANNED_PROVISIONING_ENTRY, "options"
): PLANNED_PROVISIONING_ENTRY_SCHEMA,
vol.Exclusive(
QR_PROVISIONING_INFORMATION, "options"
): QR_PROVISIONING_INFORMATION_SCHEMA,
vol.Exclusive(QR_CODE_STRING, "options"): QR_CODE_STRING_SCHEMA,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_replace_failed_node(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Replace a failed node with a new node."""
controller = client.driver.controller
node_id = msg[NODE_ID]
inclusion_strategy = InclusionStrategy(msg[INCLUSION_STRATEGY])
force_security = msg.get(FORCE_SECURITY)
provisioning = (
msg.get(PLANNED_PROVISIONING_ENTRY)
or msg.get(QR_PROVISIONING_INFORMATION)
or msg.get(QR_CODE_STRING)
)
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
@callback
def forward_event(event: dict) -> None:
connection.send_message(
websocket_api.event_message(msg[ID], {"event": event["event"]})
)
@callback
def forward_dsk(event: dict) -> None:
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": event["event"], "dsk": event["dsk"]}
)
)
@callback
def forward_requested_grant(event: dict) -> None:
connection.send_message(
websocket_api.event_message(
msg[ID],
{
"event": event["event"],
"requested_grant": event["requested_grant"].to_dict(),
},
)
)
@callback
def forward_stage(event: dict) -> None:
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": event["event"], "stage": event["stageName"]}
)
)
@callback
def node_added(event: dict) -> None:
node = event["node"]
interview_unsubs = [
node.on("interview started", forward_event),
node.on("interview completed", forward_event),
node.on("interview stage completed", forward_stage),
node.on("interview failed", forward_event),
]
unsubs.extend(interview_unsubs)
node_details = {
"node_id": node.node_id,
"status": node.status,
"ready": node.ready,
}
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": "node added", "node": node_details}
)
)
@callback
def node_removed(event: dict) -> None:
node = event["node"]
node_details = {
"node_id": node.node_id,
}
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": "node removed", "node": node_details}
)
)
@callback
def device_registered(device: DeviceEntry) -> None:
device_details = {
"name": device.name,
"id": device.id,
"manufacturer": device.manufacturer,
"model": device.model,
}
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": "device registered", "device": device_details}
)
)
connection.subscriptions[msg["id"]] = async_cleanup
msg[DATA_UNSUBSCRIBE] = unsubs = [
controller.on("inclusion started", forward_event),
controller.on("inclusion failed", forward_event),
controller.on("inclusion stopped", forward_event),
controller.on("validate dsk and enter pin", forward_dsk),
controller.on("grant security classes", forward_requested_grant),
controller.on("node removed", node_removed),
controller.on("node added", node_added),
async_dispatcher_connect(
hass, EVENT_DEVICE_ADDED_TO_REGISTRY, device_registered
),
]
try:
result = await controller.async_replace_failed_node(
node_id,
inclusion_strategy,
force_security=force_security,
provisioning=provisioning,
)
except ValueError as err:
connection.send_error(
msg[ID],
ERR_INVALID_FORMAT,
err.args[0],
)
return
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/remove_failed_node",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_remove_failed_node(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Remove a failed node from the Z-Wave network."""
controller = client.driver.controller
node_id = msg[NODE_ID]
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
@callback
def node_removed(event: dict) -> None:
node = event["node"]
node_details = {
"node_id": node.node_id,
}
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": "node removed", "node": node_details}
)
)
connection.subscriptions[msg["id"]] = async_cleanup
msg[DATA_UNSUBSCRIBE] = unsubs = [controller.on("node removed", node_removed)]
result = await controller.async_remove_failed_node(node_id)
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/begin_healing_network",
vol.Required(ENTRY_ID): str,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_begin_healing_network(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Begin healing the Z-Wave network."""
controller = client.driver.controller
result = await controller.async_begin_healing_network()
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/subscribe_heal_network_progress",
vol.Required(ENTRY_ID): str,
}
)
@websocket_api.async_response
@async_get_entry
async def websocket_subscribe_heal_network_progress(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Subscribe to heal Z-Wave network status updates."""
controller = client.driver.controller
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
@callback
def forward_event(key: str, event: dict) -> None:
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": event["event"], "heal_node_status": event[key]}
)
)
connection.subscriptions[msg["id"]] = async_cleanup
msg[DATA_UNSUBSCRIBE] = unsubs = [
controller.on("heal network progress", partial(forward_event, "progress")),
controller.on("heal network done", partial(forward_event, "result")),
]
connection.send_result(msg[ID], controller.heal_network_progress)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/stop_healing_network",
vol.Required(ENTRY_ID): str,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_stop_healing_network(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Stop healing the Z-Wave network."""
controller = client.driver.controller
result = await controller.async_stop_healing_network()
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/heal_node",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_heal_node(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Heal a node on the Z-Wave network."""
controller = client.driver.controller
node_id = msg[NODE_ID]
result = await controller.async_heal_node(node_id)
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/refresh_node_info",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
},
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_node
async def websocket_refresh_node_info(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Re-interview a node."""
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
@callback
def forward_event(event: dict) -> None:
connection.send_message(
websocket_api.event_message(msg[ID], {"event": event["event"]})
)
@callback
def forward_stage(event: dict) -> None:
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": event["event"], "stage": event["stageName"]}
)
)
connection.subscriptions[msg["id"]] = async_cleanup
msg[DATA_UNSUBSCRIBE] = unsubs = [
node.on("interview started", forward_event),
node.on("interview completed", forward_event),
node.on("interview stage completed", forward_stage),
node.on("interview failed", forward_event),
]
result = await node.async_refresh_info()
connection.send_result(msg[ID], result)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/refresh_node_values",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
},
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_node
async def websocket_refresh_node_values(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Refresh node values."""
await node.async_refresh_values()
connection.send_result(msg[ID])
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/refresh_node_cc_values",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
vol.Required(COMMAND_CLASS_ID): int,
},
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_node
async def websocket_refresh_node_cc_values(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Refresh node values for a particular CommandClass."""
command_class_id = msg[COMMAND_CLASS_ID]
try:
command_class = CommandClass(command_class_id)
except ValueError:
connection.send_error(
msg[ID], ERR_NOT_FOUND, f"Command class {command_class_id} not found"
)
return
await node.async_refresh_cc_values(command_class)
connection.send_result(msg[ID])
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/set_config_parameter",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
vol.Required(PROPERTY): int,
vol.Optional(PROPERTY_KEY): int,
vol.Required(VALUE): vol.Any(int, BITMASK_SCHEMA),
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_node
async def websocket_set_config_parameter(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Set a config parameter value for a Z-Wave node."""
property_ = msg[PROPERTY]
property_key = msg.get(PROPERTY_KEY)
value = msg[VALUE]
try:
zwave_value, cmd_status = await async_set_config_parameter(
node, value, property_, property_key=property_key
)
except (InvalidNewValue, NotFoundError, NotImplementedError, SetValueFailed) as err:
code = ERR_UNKNOWN_ERROR
if isinstance(err, NotFoundError):
code = ERR_NOT_FOUND
elif isinstance(err, (InvalidNewValue, NotImplementedError)):
code = ERR_NOT_SUPPORTED
connection.send_error(
msg[ID],
code,
str(err),
)
return
connection.send_result(
msg[ID],
{
VALUE_ID: zwave_value.value_id,
STATUS: cmd_status,
},
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/get_config_parameters",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
}
)
@websocket_api.async_response
@async_get_node
async def websocket_get_config_parameters(
hass: HomeAssistant, connection: ActiveConnection, msg: dict, node: Node
) -> None:
"""Get a list of configuration parameters for a Z-Wave node."""
values = node.get_configuration_values()
result = {}
for value_id, zwave_value in values.items():
metadata = zwave_value.metadata
result[value_id] = {
"property": zwave_value.property_,
"property_key": zwave_value.property_key,
"configuration_value_type": zwave_value.configuration_value_type.value,
"metadata": {
"description": metadata.description,
"label": metadata.label,
"type": metadata.type,
"min": metadata.min,
"max": metadata.max,
"unit": metadata.unit,
"writeable": metadata.writeable,
"readable": metadata.readable,
},
"value": zwave_value.value,
}
if zwave_value.metadata.states:
result[value_id]["metadata"]["states"] = zwave_value.metadata.states
connection.send_result(
msg[ID],
result,
)
def filename_is_present_if_logging_to_file(obj: dict) -> dict:
"""Validate that filename is provided if log_to_file is True."""
if obj.get(LOG_TO_FILE, False) and FILENAME not in obj:
raise vol.Invalid("`filename` must be provided if logging to file")
return obj
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/subscribe_log_updates",
vol.Required(ENTRY_ID): str,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_subscribe_log_updates(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Subscribe to log message events from the server."""
driver = client.driver
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
hass.async_create_task(driver.async_stop_listening_logs())
for unsub in unsubs:
unsub()
@callback
def log_messages(event: dict) -> None:
log_msg: LogMessage = event["log_message"]
connection.send_message(
websocket_api.event_message(
msg[ID],
{
"type": "log_message",
"log_message": {
"timestamp": log_msg.timestamp,
"level": log_msg.level,
"primary_tags": log_msg.primary_tags,
"message": log_msg.formatted_message,
},
},
)
)
@callback
def log_config_updates(event: dict) -> None:
log_config: LogConfig = event["log_config"]
connection.send_message(
websocket_api.event_message(
msg[ID],
{
"type": "log_config",
"log_config": dataclasses.asdict(log_config),
},
)
)
msg[DATA_UNSUBSCRIBE] = unsubs = [
driver.on("logging", log_messages),
driver.on("log config updated", log_config_updates),
]
connection.subscriptions[msg["id"]] = async_cleanup
await driver.async_start_listening_logs()
connection.send_result(msg[ID])
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/update_log_config",
vol.Required(ENTRY_ID): str,
vol.Required(CONFIG): vol.All(
vol.Schema(
{
vol.Optional(ENABLED): cv.boolean,
vol.Optional(LEVEL): vol.All(
str,
vol.Lower,
vol.Coerce(LogLevel),
),
vol.Optional(LOG_TO_FILE): cv.boolean,
vol.Optional(FILENAME): str,
vol.Optional(FORCE_CONSOLE): cv.boolean,
}
),
cv.has_at_least_one_key(
ENABLED, FILENAME, FORCE_CONSOLE, LEVEL, LOG_TO_FILE
),
filename_is_present_if_logging_to_file,
),
},
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_update_log_config(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Update the driver log config."""
await client.driver.async_update_log_config(LogConfig(**msg[CONFIG]))
connection.send_result(
msg[ID],
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/get_log_config",
vol.Required(ENTRY_ID): str,
},
)
@websocket_api.async_response
@async_get_entry
async def websocket_get_log_config(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Get log configuration for the Z-Wave JS driver."""
connection.send_result(
msg[ID],
dataclasses.asdict(client.driver.log_config),
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/update_data_collection_preference",
vol.Required(ENTRY_ID): str,
vol.Required(OPTED_IN): bool,
},
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_update_data_collection_preference(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Update preference for data collection and enable/disable collection."""
opted_in = msg[OPTED_IN]
update_data_collection_preference(hass, entry, opted_in)
if opted_in:
await async_enable_statistics(client)
else:
await client.driver.async_disable_statistics()
connection.send_result(
msg[ID],
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/data_collection_status",
vol.Required(ENTRY_ID): str,
},
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_data_collection_status(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Return data collection preference and status."""
result = {
OPTED_IN: entry.data.get(CONF_DATA_COLLECTION_OPTED_IN),
ENABLED: await client.driver.async_is_statistics_enabled(),
}
connection.send_result(msg[ID], result)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/abort_firmware_update",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_node
async def websocket_abort_firmware_update(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Abort a firmware update."""
await node.async_abort_firmware_update()
connection.send_result(msg[ID])
def _get_firmware_update_progress_dict(
progress: FirmwareUpdateProgress,
) -> dict[str, int]:
"""Get a dictionary of firmware update progress."""
return {
"sent_fragments": progress.sent_fragments,
"total_fragments": progress.total_fragments,
}
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/subscribe_firmware_update_status",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
}
)
@websocket_api.async_response
@async_get_node
async def websocket_subscribe_firmware_update_status(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Subscribe to the status of a firmware update."""
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
@callback
def forward_progress(event: dict) -> None:
progress: FirmwareUpdateProgress = event["firmware_update_progress"]
connection.send_message(
websocket_api.event_message(
msg[ID],
{
"event": event["event"],
**_get_firmware_update_progress_dict(progress),
},
)
)
@callback
def forward_finished(event: dict) -> None:
finished: FirmwareUpdateFinished = event["firmware_update_finished"]
connection.send_message(
websocket_api.event_message(
msg[ID],
{
"event": event["event"],
"status": finished.status,
"wait_time": finished.wait_time,
},
)
)
msg[DATA_UNSUBSCRIBE] = unsubs = [
node.on("firmware update progress", forward_progress),
node.on("firmware update finished", forward_finished),
]
connection.subscriptions[msg["id"]] = async_cleanup
progress = node.firmware_update_progress
connection.send_result(
msg[ID], _get_firmware_update_progress_dict(progress) if progress else None
)
class FirmwareUploadView(HomeAssistantView):
"""View to upload firmware."""
url = r"/api/zwave_js/firmware/upload/{config_entry_id}/{node_id:\d+}"
name = "api:zwave_js:firmware:upload"
async def post(
self, request: web.Request, config_entry_id: str, node_id: str
) -> web.Response:
"""Handle upload."""
if not request["hass_user"].is_admin:
raise Unauthorized()
hass = request.app["hass"]
if config_entry_id not in hass.data[DOMAIN]:
raise web_exceptions.HTTPBadRequest
entry = hass.config_entries.async_get_entry(config_entry_id)
client: Client = hass.data[DOMAIN][config_entry_id][DATA_CLIENT]
node = client.driver.controller.nodes.get(int(node_id))
if not node:
raise web_exceptions.HTTPNotFound
# Increase max payload
request._client_max_size = 1024 * 1024 * 10 # pylint: disable=protected-access
data = await request.post()
if "file" not in data or not isinstance(data["file"], web_request.FileField):
raise web_exceptions.HTTPBadRequest
uploaded_file: web_request.FileField = data["file"]
try:
await begin_firmware_update(
entry.data[CONF_URL],
node,
uploaded_file.filename,
await hass.async_add_executor_job(uploaded_file.file.read),
async_get_clientsession(hass),
)
except BaseZwaveJSServerError as err:
raise web_exceptions.HTTPBadRequest from err
        return self.json(None)
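# Illustrative usage sketch (entry id, node id, token and filename are
# placeholders): the view above accepts a multipart POST from an admin user,
# for example
#
#   curl -X POST \
#        -H "Authorization: Bearer <long-lived-access-token>" \
#        -F "file=@firmware.otz" \
#        http://<home-assistant-host>:8123/api/zwave_js/firmware/upload/<config_entry_id>/<numeric_node_id>
#
# Non-admin users, unknown config entries, unknown nodes and payloads without
# a "file" form field are rejected, per the checks above.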
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/check_for_config_updates",
vol.Required(ENTRY_ID): str,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_check_for_config_updates(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Check for config updates."""
config_update = await client.driver.async_check_for_config_updates()
connection.send_result(
msg[ID],
{
"update_available": config_update.update_available,
"new_version": config_update.new_version,
},
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/install_config_update",
vol.Required(ENTRY_ID): str,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_install_config_update(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Check for config updates."""
success = await client.driver.async_install_config_update()
connection.send_result(msg[ID], success)
def _get_controller_statistics_dict(
statistics: ControllerStatistics,
) -> dict[str, int]:
"""Get dictionary of controller statistics."""
return {
"messages_tx": statistics.messages_tx,
"messages_rx": statistics.messages_rx,
"messages_dropped_tx": statistics.messages_dropped_tx,
"messages_dropped_rx": statistics.messages_dropped_rx,
"nak": statistics.nak,
"can": statistics.can,
"timeout_ack": statistics.timeout_ack,
"timout_response": statistics.timeout_response,
"timeout_callback": statistics.timeout_callback,
}
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/subscribe_controller_statistics",
vol.Required(ENTRY_ID): str,
}
)
@websocket_api.async_response
@async_get_entry
async def websocket_subscribe_controller_statistics(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Subsribe to the statistics updates for a controller."""
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
@callback
def forward_stats(event: dict) -> None:
statistics: ControllerStatistics = event["statistics_updated"]
connection.send_message(
websocket_api.event_message(
msg[ID],
{
"event": event["event"],
"source": "controller",
**_get_controller_statistics_dict(statistics),
},
)
)
controller = client.driver.controller
msg[DATA_UNSUBSCRIBE] = unsubs = [
controller.on("statistics updated", forward_stats)
]
connection.subscriptions[msg["id"]] = async_cleanup
connection.send_result(
msg[ID], _get_controller_statistics_dict(controller.statistics)
)
def _get_node_statistics_dict(statistics: NodeStatistics) -> dict[str, int]:
"""Get dictionary of node statistics."""
return {
"commands_tx": statistics.commands_tx,
"commands_rx": statistics.commands_rx,
"commands_dropped_tx": statistics.commands_dropped_tx,
"commands_dropped_rx": statistics.commands_dropped_rx,
"timeout_response": statistics.timeout_response,
}
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/subscribe_node_statistics",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
}
)
@websocket_api.async_response
@async_get_node
async def websocket_subscribe_node_statistics(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Subsribe to the statistics updates for a node."""
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
@callback
def forward_stats(event: dict) -> None:
statistics: NodeStatistics = event["statistics_updated"]
connection.send_message(
websocket_api.event_message(
msg[ID],
{
"event": event["event"],
"source": "node",
"node_id": node.node_id,
**_get_node_statistics_dict(statistics),
},
)
)
msg[DATA_UNSUBSCRIBE] = unsubs = [node.on("statistics updated", forward_stats)]
connection.subscriptions[msg["id"]] = async_cleanup
connection.send_result(msg[ID], _get_node_statistics_dict(node.statistics))
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/migrate_zwave",
vol.Required(ENTRY_ID): str,
vol.Optional(DRY_RUN, default=True): bool,
}
)
@websocket_api.async_response
@async_get_entry
async def websocket_migrate_zwave(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Migrate Z-Wave device and entity data to Z-Wave JS integration."""
if "zwave" not in hass.config.components:
connection.send_message(
websocket_api.error_message(
msg["id"], "zwave_not_loaded", "Integration zwave is not loaded"
)
)
return
zwave = hass.components.zwave
zwave_config_entries = hass.config_entries.async_entries("zwave")
zwave_config_entry = zwave_config_entries[0] # zwave only has a single config entry
zwave_data: dict[str, ZWaveMigrationData] = await zwave.async_get_migration_data(
hass, zwave_config_entry
)
LOGGER.debug("Migration zwave data: %s", zwave_data)
zwave_js_config_entry = entry
zwave_js_data = await async_get_migration_data(hass, zwave_js_config_entry)
LOGGER.debug("Migration zwave_js data: %s", zwave_js_data)
migration_map = async_map_legacy_zwave_values(zwave_data, zwave_js_data)
zwave_entity_ids = [entry["entity_id"] for entry in zwave_data.values()]
zwave_js_entity_ids = [entry["entity_id"] for entry in zwave_js_data.values()]
migration_device_map = {
zwave_device_id: zwave_js_device_id
for zwave_js_device_id, zwave_device_id in migration_map.device_entries.items()
}
migration_entity_map = {
zwave_entry["entity_id"]: zwave_js_entity_id
for zwave_js_entity_id, zwave_entry in migration_map.entity_entries.items()
}
LOGGER.debug("Migration entity map: %s", migration_entity_map)
if not msg[DRY_RUN]:
await async_migrate_legacy_zwave(
hass, zwave_config_entry, zwave_js_config_entry, migration_map
)
connection.send_result(
msg[ID],
{
"migration_device_map": migration_device_map,
"zwave_entity_ids": zwave_entity_ids,
"zwave_js_entity_ids": zwave_js_entity_ids,
"migration_entity_map": migration_entity_map,
"migrated": not msg[DRY_RUN],
},
)
| {
"content_hash": "2c29aaa0df6275429c86480f6bad38a1",
"timestamp": "",
"source": "github",
"line_count": 2140,
"max_line_length": 98,
"avg_line_length": 30.445327102803738,
"alnum_prop": 0.6289196199714518,
"repo_name": "GenericStudent/home-assistant",
"id": "0e947de982b5537bfe9e27e74ae3e726b669660f",
"size": "65153",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/zwave_js/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3070"
},
{
"name": "Python",
"bytes": "44491729"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
} |
"""Inventationery URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from __future__ import unicode_literals
from django.conf.urls.static import static
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.views import defaults as default_views
from Inventationery.core.views import pdf_view
urlpatterns = [
# url(r'^admin/', include(admin.site.urls)),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, include(admin.site.urls)),
url(r'^', include('Inventationery.apps.login.urls')),
url(r'^', include('Inventationery.apps.home.urls', namespace='home')),
url(r'^', include('Inventationery.apps.Vendor.urls', namespace='vendor')),
url(r'^', include('Inventationery.apps.PurchOrder.urls', namespace='purch')),
url(r'^', include('Inventationery.apps.Inventory.urls', namespace='inventory')),
url(r'^', include('Inventationery.apps.Payments.urls', namespace='payment')),
url(r'^', include('Inventationery.apps.Company.urls', namespace='company')),
url(r'^', include('Inventationery.apps.Customer.urls', namespace='customer')),
url(r'^', include('Inventationery.apps.SalesOrder.urls', namespace='sales')),
url(r'^quick_guide/$', pdf_view, name='guide'),
url(r'^400/$', default_views.bad_request,
kwargs={'exception': Exception("Bad Request!")}),
url(r'^403/$', default_views.permission_denied,
kwargs={'exception': Exception("Permissin Denied")}),
url(r'^404/$', default_views.page_not_found,
kwargs={'exception': Exception("Page not Found")}),
url(r'^500/$', default_views.server_error),
#(r'^search/', include('haystack.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
admin.site.site_header = 'Inventationery admin'
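# Illustrative only: because the app URLconfs above are included with
# namespaces, views and templates resolve routes through the namespace,
# e.g. reverse('inventory:<url-name>') in Python or {% url 'vendor:<url-name>' %}
# in a template; the concrete <url-name> values are defined in each app's own
# urls.py and are not shown here.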
| {
"content_hash": "d4e75e781acba2316a40360ed597a117",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 84,
"avg_line_length": 48.51020408163265,
"alnum_prop": 0.6924694993689524,
"repo_name": "alexharmenta/Inventationery",
"id": "50e788266a71e38270d6c11385006a99e9701adc",
"size": "2541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "127726"
},
{
"name": "HTML",
"bytes": "170879"
},
{
"name": "JavaScript",
"bytes": "118056"
},
{
"name": "Python",
"bytes": "243110"
}
],
"symlink_target": ""
} |
'''
Elixir package
A declarative layer on top of the `SQLAlchemy library
<http://www.sqlalchemy.org/>`_. It is a fairly thin wrapper, which provides
the ability to create simple Python classes that map directly to relational
database tables (this pattern is often referred to as the Active Record design
pattern), providing many of the benefits of traditional databases
without losing the convenience of Python objects.
Elixir is intended to replace the ActiveMapper SQLAlchemy extension and the
TurboEntity project, but does not intend to replace SQLAlchemy's core features,
and instead focuses on providing a simpler syntax for defining model objects
when you do not need the full expressiveness of SQLAlchemy's manual mapper
definitions.
'''
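# A minimal usage sketch; the Movie/Director models, the in-memory SQLite URL
# and the `from elixir import *` import in user code are assumptions for the
# example, not part of this package:
#
#     from elixir import *
#
#     class Movie(Entity):
#         title = Field(Unicode(30))
#         year = Field(Integer)
#         director = ManyToOne('Director')
#
#     class Director(Entity):
#         name = Field(Unicode(60))
#         movies = OneToMany('Movie')
#
#     metadata.bind = "sqlite://"
#     setup_all(create_tables=True)
#     Director(name=u'Francis Ford Coppola')
#     session.commit()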
try:
set
except NameError:
from sets import Set as set
import sqlalchemy
from sqlalchemy.types import *
from elixir.options import using_options, using_table_options, \
using_mapper_options, options_defaults, \
using_options_defaults
from elixir.entity import Entity, EntityBase, EntityMeta, EntityDescriptor, \
setup_entities, cleanup_entities
from elixir.fields import has_field, Field
from elixir.relationships import belongs_to, has_one, has_many, \
has_and_belongs_to_many, \
ManyToOne, OneToOne, OneToMany, ManyToMany
from elixir.properties import has_property, GenericProperty, ColumnProperty, \
Synonym
from elixir.statements import Statement
from elixir.collection import EntityCollection, GlobalEntityCollection
__version__ = '0.8.0dev'
__all__ = ['Entity', 'EntityBase', 'EntityMeta', 'EntityCollection',
'entities',
'Field', 'has_field',
'has_property', 'GenericProperty', 'ColumnProperty', 'Synonym',
'belongs_to', 'has_one', 'has_many', 'has_and_belongs_to_many',
'ManyToOne', 'OneToOne', 'OneToMany', 'ManyToMany',
'using_options', 'using_table_options', 'using_mapper_options',
'options_defaults', 'using_options_defaults',
'metadata', 'session',
'create_all', 'drop_all',
'setup_all', 'cleanup_all',
'setup_entities', 'cleanup_entities'] + \
sqlalchemy.types.__all__
__doc_all__ = ['create_all', 'drop_all',
'setup_all', 'cleanup_all',
'metadata', 'session']
# default session
session = sqlalchemy.orm.scoped_session(sqlalchemy.orm.sessionmaker())
# default metadata
metadata = sqlalchemy.MetaData()
metadatas = set()
# default entity collection
entities = GlobalEntityCollection()
def create_all(*args, **kwargs):
'''Create the necessary tables for all declared entities'''
for md in metadatas:
md.create_all(*args, **kwargs)
def drop_all(*args, **kwargs):
'''Drop tables for all declared entities'''
for md in metadatas:
md.drop_all(*args, **kwargs)
def setup_all(create_tables=False, *args, **kwargs):
'''Setup the table and mapper of all entities in the default entity
collection.
'''
setup_entities(entities)
# issue the "CREATE" SQL statements
if create_tables:
create_all(*args, **kwargs)
def cleanup_all(drop_tables=False, *args, **kwargs):
'''Clear all mappers, clear the session, and clear all metadatas.
Optionally drops the tables.
'''
session.close()
cleanup_entities(entities)
sqlalchemy.orm.clear_mappers()
entities.clear()
if drop_tables:
drop_all(*args, **kwargs)
for md in metadatas:
md.clear()
metadatas.clear()
| {
"content_hash": "c3844c7fd8ed5c5a210185e227cad0a3",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 78,
"avg_line_length": 32.3421052631579,
"alnum_prop": 0.661513425549227,
"repo_name": "gjhiggins/elixir",
"id": "a242b538a123f5a88c61891de8359942ed5291ff",
"size": "3687",
"binary": false,
"copies": "27",
"ref": "refs/heads/master",
"path": "elixir/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "872"
},
{
"name": "Python",
"bytes": "297582"
}
],
"symlink_target": ""
} |
from django.db import models
from django.conf import settings
from actstream.models import Action
from . import messages
class ActionNotification(models.Model):
action = models.ForeignKey(Action, on_delete=models.CASCADE)
user = models.ForeignKey(settings.AUTH_USER_MODEL, db_index=True, related_name='+', on_delete=models.CASCADE)
is_should_email = models.BooleanField(default=False, db_index=True)
is_should_email_separately = models.BooleanField(default=False)
is_read = models.BooleanField(default=False, db_index=True)
is_emailed = models.BooleanField(default=False, db_index=True)
do_not_send_before = models.DateTimeField(blank=True, null=True)
when_emailed = models.DateTimeField(blank=True, null=True)
created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ('-action__timestamp',)
unique_together = (
('action', 'user',),
)
def __str__(self):
return u'{} ({})'.format(
self.action.__str__(),
'read' if self.is_read else 'unread'
)
def _load_message(self):
if not hasattr(self, '_message'):
self._message = messages.get_message(self.action, self.user)
@property
def message_body(self):
self._load_message()
return self._message[0]
@property
def message_subject(self):
self._load_message()
return self._message[1]
@property
def message_from(self):
self._load_message()
if len(self._message) > 2:
return self._message[2]
return None
@property
def message_locale(self):
self._load_message()
if len(self._message) > 3:
return self._message[3]
return None
@property
def message_attachments(self):
self._load_message()
if len(self._message) > 4:
return self._message[4]
return []
class ActionNotificationPreference(models.Model):
EMAIL_NOTIFICATION_FREQUENCIES = (
('* * * * *', 'Immediately',),
('*/30 * * * *', 'Every 30 minutes',),
('@daily', 'Daily',),
)
action_verb = models.CharField(max_length=255, unique=True)
is_should_notify_actor = models.BooleanField(default=False)
is_should_notify_actor_when_target = models.BooleanField(default=False)
# Email preferences
is_should_email = models.BooleanField(default=False)
is_should_email_separately = models.BooleanField(default=False)
follow_topic = models.CharField(
default='',
blank=True,
max_length=255,
help_text='If a topic is set, only follow relationships with that topic are selected'
)
use_user_preference = models.BooleanField(
default=False,
help_text='Setting this true will cause frequency and is_email_separately to be ignored'
)
email_notification_frequency = models.CharField(
max_length=64,
choices=EMAIL_NOTIFICATION_FREQUENCIES,
default='@daily'
)
def __str__(self):
return u'notification preference for "{}"'.format(self.action_verb)
| {
"content_hash": "48a03585e90526bfc5177c7e5ab584f3",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 113,
"avg_line_length": 30.45631067961165,
"alnum_prop": 0.6305387312719158,
"repo_name": "burnsred/django-action-notifications",
"id": "5648a36311cb087b1ff94f269510b3d3adb506be",
"size": "3137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "action_notifications/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41534"
},
{
"name": "Shell",
"bytes": "262"
}
],
"symlink_target": ""
} |
import json
from dlkit.json_.assessment.sessions import ItemLookupSession
from dlkit.json_.assessment.objects import ItemList
from dlkit.primordium.id.primitives import Id
from ..qti.numeric_response_records import MagicNumericResponseItemLookupSession
from ..qti.inline_choice_records import RandomizedInlineChoiceItemLookupSession
from ...adaptive.multi_choice_questions.randomized_questions import RandomizedMCItemLookupSession
try:
# python 2
from urllib import unquote
except ImportError:
# python 3
from urllib.parse import unquote
class CLIxMagicItemLookupSession(MagicNumericResponseItemLookupSession,
RandomizedMCItemLookupSession,
RandomizedInlineChoiceItemLookupSession,
ItemLookupSession):
""" to federate across multiple item lookup sessions, because otherwise there
is no way to figure out which magic session is needed (since get_item_lookup_session
doesn't have the item_id passed to it)
"""
def __init__(self, *args, **kwargs):
super(CLIxMagicItemLookupSession, self).__init__(*args, **kwargs)
def get_item(self, item_id):
authority = item_id.authority
if authority in ['magic-randomize-choices-question-record',
'magic-randomize-inline-choices-question-record',
'qti-numeric-response']:
# for now, this will not work with aliased IDs...
magic_identifier = unquote(item_id.identifier)
original_identifier = magic_identifier.split('?')[0]
choice_ids = json.loads(magic_identifier.split('?')[-1])
original_item_id = Id(identifier=original_identifier,
namespace=item_id.namespace,
authority=self._catalog.ident.authority)
orig_item = super(CLIxMagicItemLookupSession, self).get_item(original_item_id)
orig_item.set_params(choice_ids)
return orig_item
else:
return super(CLIxMagicItemLookupSession, self).get_item(item_id)
def get_items_by_ids(self, item_ids):
item_list = []
for item_id in item_ids:
item_list.append(super(CLIxMagicItemLookupSession, self).get_item(item_id))
return ItemList(item_list, runtime=self._runtime, proxy=self._proxy)
| {
"content_hash": "6c6f43ee7b48d0e8d9d31562357edf8d",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 97,
"avg_line_length": 45.35849056603774,
"alnum_prop": 0.6568219633943427,
"repo_name": "mitsei/dlkit",
"id": "8808676b6c8157392bb9b5953f6f71c6bf61111b",
"size": "2404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dlkit/records/assessment/clix/magic_item_lookup_sessions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25170465"
},
{
"name": "TeX",
"bytes": "1088"
}
],
"symlink_target": ""
} |
"""
A very basic, ORM-based backend for simple search during tests.
"""
from functools import reduce

from django.conf import settings
from django.db.models import Q
from haystack import connections
from haystack.backends import BaseEngine, BaseSearchBackend, BaseSearchQuery, SearchNode, log_query
from haystack.inputs import PythonData
from haystack.models import SearchResult
if settings.DEBUG:
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
ch = logging.StreamHandler()
ch.setLevel(logging.WARNING)
ch.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger = logging.getLogger('haystack.simple_backend')
logger.setLevel(logging.WARNING)
logger.addHandler(NullHandler())
logger.addHandler(ch)
else:
logger = None
class SimpleSearchBackend(BaseSearchBackend):
def update(self, indexer, iterable, commit=True):
if logger is not None:
logger.warning('update is not implemented in this backend')
def remove(self, obj, commit=True):
if logger is not None:
logger.warning('remove is not implemented in this backend')
def clear(self, models=[], commit=True):
if logger is not None:
logger.warning('clear is not implemented in this backend')
@log_query
def search(self, query_string, **kwargs):
hits = 0
results = []
result_class = SearchResult
if kwargs.get('result_class'):
result_class = kwargs['result_class']
if query_string:
for model in connections[self.connection_alias].get_unified_index().get_indexed_models():
if query_string == '*':
qs = model.objects.all()
else:
for term in query_string.split():
queries = []
for field in model._meta._fields():
if hasattr(field, 'related'):
continue
if not field.get_internal_type() in ('TextField', 'CharField', 'SlugField'):
continue
queries.append(Q(**{'%s__icontains' % field.name: term}))
qs = model.objects.filter(reduce(lambda x, y: x|y, queries))
hits += len(qs)
for match in qs:
                    match.__dict__.pop('score', None)  # plain model instances have no 'score'; avoid a KeyError
result = result_class(match._meta.app_label, match._meta.module_name, match.pk, 0, **match.__dict__)
# For efficiency.
result._model = match.__class__
result._object = match
results.append(result)
return {
'results': results,
'hits': hits,
}
def prep_value(self, db_field, value):
return value
def more_like_this(self, model_instance, additional_query_string=None,
start_offset=0, end_offset=None,
limit_to_registered_models=None, result_class=None, **kwargs):
return {
'results': [],
'hits': 0
}
class SimpleSearchQuery(BaseSearchQuery):
def build_query(self):
if not self.query_filter:
return '*'
return self._build_sub_query(self.query_filter)
def _build_sub_query(self, search_node):
term_list = []
for child in search_node.children:
if isinstance(child, SearchNode):
term_list.append(self._build_sub_query(child))
else:
value = child[1]
if not hasattr(value, 'input_type_name'):
value = PythonData(value)
term_list.append(value.prepare(self))
return (' ').join(map(str, term_list))
class SimpleEngine(BaseEngine):
backend = SimpleSearchBackend
query = SimpleSearchQuery
| {
"content_hash": "3d4933698b588b5c4fa7d87faa14b03f",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 120,
"avg_line_length": 32.04838709677419,
"alnum_prop": 0.5624056366381479,
"repo_name": "josesanch/django-haystack",
"id": "36664da015f26e738bbdf83a9d959257f11e3b27",
"size": "3974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "haystack/backends/simple_backend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "676842"
},
{
"name": "Shell",
"bytes": "1583"
}
],
"symlink_target": ""
} |
import console
class State(object):
def __init__(self):
self.current_stack = None
def update(self):
pass
def draw(self):
pass
class UIState(State):
def __init__(self, ui_element):
super(UIState, self).__init__()
self.ui_element = ui_element
def draw(self):
self.ui_element.draw()
console.console.flush()
def update(self):
self.ui_element.update()
| {
"content_hash": "4086315899bd677c1e31503a0ffab2b1",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 39,
"avg_line_length": 17.72,
"alnum_prop": 0.5643340857787811,
"repo_name": "co/TheLastRogue",
"id": "ae9cd3a9fb749eb6576d6ff9054a65bc20e79e84",
"size": "443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "state.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "696695"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class HoverinfosrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="hoverinfosrc", parent_name="funnel", **kwargs):
super(HoverinfosrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
| {
"content_hash": "5011f9b253ae6aee57f5a04f44af218d",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 83,
"avg_line_length": 37.09090909090909,
"alnum_prop": 0.6323529411764706,
"repo_name": "plotly/plotly.py",
"id": "f7497e22ec2fa7970e2dfdc56f589a3a69a00725",
"size": "408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/funnel/_hoverinfosrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
def generate(model, amount):
def wrap(n):
for i in range(n):
yield model.create()
return [y for x in wrap(amount) for y in x]
| {
"content_hash": "c12bf70740bfdebfc41aa8aca9b31501",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 47,
"avg_line_length": 30.8,
"alnum_prop": 0.5714285714285714,
"repo_name": "ahitrin/carlo",
"id": "601ed329092906363d64faa5ca4d8d25047a5f53",
"size": "154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "carlo/event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "319"
},
{
"name": "Python",
"bytes": "10522"
}
],
"symlink_target": ""
} |
"""
Allows for the computation of the PQ-Gram edit distance of two trees. To calculate the distance,
a Profile object must first be created for each tree, then the edit_distance function can be called.
For more information on the PQ-Gram algorithm, please see the README.
"""
import tree, copy
class Profile(object):
"""
Represents a PQ-Gram Profile, which is a list of PQ-Grams. Each PQ-Gram is represented by a
ShiftRegister. This class relies on both the ShiftRegister and tree.Node classes.
"""
def __init__(self, root, p=2, q=3):
"""
Builds the PQ-Gram Profile of the given tree, using the p and q parameters specified.
The p and q parameters do not need to be specified, however, different values will have
an effect on the distribution of the calculated edit distance. In general, smaller values
of p and q are better, though a value of (1, 1) is not recommended, and anything lower is
invalid.
"""
super(Profile, self).__init__()
ancestors = ShiftRegister(p)
self.list = list()
self.profile(root, p, q, ancestors)
self.sort()
def profile(self, root, p, q, ancestors):
"""
Recursively builds the PQ-Gram profile of the given subtree. This method should not be called
directly and is called from __init__.
"""
ancestors.shift(root.label)
siblings = ShiftRegister(q)
if(len(root.children) == 0):
self.append(ancestors.concatenate(siblings))
else:
for child in root.children:
siblings.shift(child.label)
self.append(ancestors.concatenate(siblings))
self.profile(child, p, q, copy.deepcopy(ancestors))
for i in range(q-1):
siblings.shift("*")
self.append(ancestors.concatenate(siblings))
def edit_distance(self, other):
"""
Computes the edit distance between two PQ-Gram Profiles. This value should always
be between 0.0 and 1.0. This calculation is reliant on the intersection method.
"""
union = len(self) + len(other)
return 1.0 - 2.0*(self.intersection(other)/union)
def intersection(self, other):
"""
Computes the set intersection of two PQ-Gram Profiles and returns the number of
elements in the intersection.
"""
intersect = 0.0
i = j = 0
while i < len(self) and j < len(other):
intersect += self.gram_edit_distance(self[i], other[j])
if self[i] == other[j]:
i += 1
j += 1
elif self[i] < other[j]:
i += 1
else:
j += 1
return intersect
def gram_edit_distance(self, gram1, gram2):
"""
Computes the edit distance between two different PQ-Grams. If the two PQ-Grams are the same
then the distance is 1.0, otherwise the distance is 0.0. Changing this will break the
metrics of the algorithm.
"""
distance = 0.0
if gram1 == gram2:
distance = 1.0
return distance
def sort(self):
"""
Sorts the PQ-Grams by the concatenation of their labels. This step is automatically performed
when a PQ-Gram Profile is created to ensure the intersection algorithm functions properly and
efficiently.
"""
        self.list.sort(key=lambda x: ''.join(x))
def append(self, value):
self.list.append(value)
def __len__(self):
return len(self.list)
def __repr__(self):
return str(self.list)
def __str__(self):
return str(self.list)
def __getitem__(self, key):
return self.list[key]
def __iter__(self):
for x in self.list: yield x
class ShiftRegister(object):
"""
Represents a register which acts as a fixed size queue. There are only two valid
operations on a ShiftRegister: shift and concatenate. Shifting results in a new
    value being pushed onto the end of the list and the value at the beginning of the list being
removed. Note that you cannot recover this value, nor do you need to for generating
PQ-Gram Profiles.
"""
def __init__(self, size):
"""
Creates an internal list of the specified size and fills it with the default value
of "*". Once a ShiftRegister is created you cannot change the size without
concatenating another ShiftRegister.
"""
self.register = list()
for i in range(size):
self.register.append("*")
def concatenate(self, reg):
"""
        Concatenates two ShiftRegisters and returns the combined contents as a plain list.
"""
temp = list(self.register)
temp.extend(reg.register)
return temp
def shift(self, el):
"""
Shift is the primary operation on a ShiftRegister. The new item given is pushed onto
the end of the ShiftRegister, the first value is removed, and all items in between shift
        to accommodate the new value.
"""
self.register.pop(0)
self.register.append(el)
"""
The following methods are provided for visualization of the PQ-Gram Profile structure. They
are NOT intended for other use, and play no role in using the PQ-Gram algorithm.
"""
def build_extended_tree(root, p=1, q=1):
"""
This method will take a normal tree structure and the given values for p and q, returning
a new tree which represents the so-called PQ-Extended-Tree.
To do this, the following algorithm is used:
1) Add p-1 null ancestors to the root
2) Traverse tree, add q-1 null children before the first and
after the last child of every non-leaf node
3) For each leaf node add q null children
"""
original_root = root # store for later
# Step 1
for i in range(p-1):
node = tree.Node(label="*")
node.addkid(root)
root = node
# Steps 2 and 3
list_of_children = original_root.children
if(len(list_of_children) == 0):
q_append_leaf(original_root, q)
else:
q_append_non_leaf(original_root, q)
while(len(list_of_children) > 0):
temp_list = list()
for child in list_of_children:
if(child.label != "*"):
if(len(child.children) == 0):
q_append_leaf(child, q)
else:
q_append_non_leaf(child, q)
temp_list.extend(child.children)
list_of_children = temp_list
return root
##### Extended Tree Functions #####
def q_append_non_leaf(node, q):
"""
This method will append null node children to the given node. (Step 2)
When adding null nodes to a non-leaf node, the null nodes should exist on both side of
the real children. This is why the first of each pair of children added sets the flag
'before=True', ensuring that on the left and right (or start and end) of the list of
children a node is added.
"""
for i in range(q-1):
node.addkid(tree.Node("*"), before=True)
node.addkid(tree.Node("*"))
def q_append_leaf(node, q):
"""
This method will append q null node children to the given node. (Step 3)
"""
for i in range(q): node.addkid(tree.Node("*")) | {
"content_hash": "1e21c7f0d1556a390e0987f3aacdff48",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 105,
"avg_line_length": 36.695238095238096,
"alnum_prop": 0.5853880093433688,
"repo_name": "TylerGoeringer/PyGram",
"id": "6a11acbebe0fe51cbbcb0fc1b8a840397a444e17",
"size": "7706",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/PyGram.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "17915"
}
],
"symlink_target": ""
} |
import argparse
import sys
import logging
import os
import random
DEBUG=True
NotDEBUG=not DEBUG
parser = argparse.ArgumentParser(description="CNV heatmap",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input', action='store', nargs='?', help='Input combined CNV files', required=NotDEBUG)
parser.add_argument('-o', '--output', action='store', nargs='?', help="Output file", required=NotDEBUG)
args = parser.parse_args()
if DEBUG:
args.input = "/scratch/cqs/shengq2/jennifer/20190906_lindsay_exomeseq_3772_hg38/GATK4_CNV_Germline_07_CombineGCNV/result/lindsay_exomeseq_3772.txt"
args.output = "/scratch/cqs/shengq2/jennifer/20190906_lindsay_exomeseq_3772_hg38/GATK4_CNV_Germline_07_CombineGCNV/result/lindsay_exomeseq_3772.heatmap.txt"
logger = logging.getLogger('cnvHeatmap')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')
random.seed(20190930)
logger.info("reading " + args.input + " ...")
with open(args.input, "rt") as fin:
headers = fin.readline().split('\t')
samples = headers[2:]
with open(args.output, "wt") as fout:
fout.write("chr\tstart\tend\tmedian.bp\t" + "\t".join(samples))
for line in fin:
parts = line.split('\t')
list_set = set(part.split(',')[0] for part in parts[5:])
if len(list_set) == 1:
continue
chrom = parts[0]
start = int(parts[1])
end = int(parts[2])
median = (start + end) / 2
fout.write("%s\t%d\t%d\t%d" % (chr, start, end, median))
for part in parts[5:]:
part = part.rstrip()
uf = random.uniform(-0.01, 0.01)
if part == "":
fout.write("\t%.4f" % uf)
continue
cnvparts = part.split(',')
if cnvparts[2] == '0':
fout.write("\t%.4f" % (uf - 1))
continue
if cnvparts[2] == '1':
fout.write("\t%.4f" % (uf - 0.5))
continue
fout.write("\t%.1f" % (uf + float(cnvparts[2]) / 2.0))
fout.write('\n')
logger.info("done.")
| {
"content_hash": "062291ef9638cdd0723f972246b00c0e",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 158,
"avg_line_length": 32.65151515151515,
"alnum_prop": 0.5930394431554524,
"repo_name": "shengqh/ngsperl",
"id": "f48f363742e2f97718b334544190aca69be5ca8c",
"size": "2155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/GATK4/cnvHeatmap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "639"
},
{
"name": "Jupyter Notebook",
"bytes": "34901"
},
{
"name": "Perl",
"bytes": "2299329"
},
{
"name": "Python",
"bytes": "629212"
},
{
"name": "R",
"bytes": "993768"
},
{
"name": "Shell",
"bytes": "6043"
},
{
"name": "wdl",
"bytes": "5959"
}
],
"symlink_target": ""
} |
"""
Setup
"""
from setuptools import setup
from setuptools.command.install import install
URL = "http://dev.dataiku.com/~cstenac/dev-recruiting/us-census.db.gz"
LOCAL = "resources/us-census.db.gz"
LOCAL_EXTRACT = "resources/us-census.db"
class MyInstall(install):
def run(self):
install.run(self)
import urllib
urllib.urlretrieve(URL, LOCAL)
from subprocess import call
call(['gunzip', LOCAL])
config = {
'description': 'A web application querying a database',
'author': u'Sébastien Diemer',
'url': 'https://github.com/sebdiem/webapp',
'download_url': 'https://github.com/sebdiem/webapp',
'author_email': '[email protected]',
'version': '0.1',
'install_requires': ['flask'],
'packages': ['webapp'],
'scripts': [],
'name': 'webapp',
'cmdclass': {'install': MyInstall}
}
setup(**config)
| {
"content_hash": "23686b79e2c67d16b5d64d0e290b5c81",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 70,
"avg_line_length": 26.029411764705884,
"alnum_prop": 0.6418079096045197,
"repo_name": "sebdiem/webapp",
"id": "4215cb17fe240de86ecca67257065a4188288214",
"size": "1029",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "799"
},
{
"name": "JavaScript",
"bytes": "1650"
},
{
"name": "Python",
"bytes": "3713"
}
],
"symlink_target": ""
} |
"""
Provides serialization for API responses.
See `DRF serializer documentation <http://www.django-rest-framework.org/api-guide/serializers/>`_
Used by the View classes in api/views.py to serialize API responses as JSON or HTML.
See DEFAULT_RENDERER_CLASSES setting in core.settings.contrib for the enabled renderers.
"""
import json
import logging
# -*- coding: utf-8 -*-
import pickle
from collections import OrderedDict
from audit_logging.models import AuditEvent
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.contrib.contenttypes.models import ContentType
from django.contrib.gis.geos import GEOSGeometry
from django.core.cache import cache
from django.utils.translation import ugettext as _
from notifications.models import Notification
from rest_framework import serializers
from rest_framework.serializers import ValidationError
from rest_framework_gis import serializers as geo_serializers
from rest_framework_gis.fields import GeometrySerializerMethodField
from rest_framework_gis.serializers import GeoFeatureModelSerializer
from eventkit_cloud.api import validators
from eventkit_cloud.api.utils import get_run_zip_file
from eventkit_cloud.core.models import GroupPermission, GroupPermissionLevel, attribute_class_filter
from eventkit_cloud.jobs.helpers import get_valid_regional_justification
from eventkit_cloud.jobs.models import (
ExportFormat,
Projection,
DatamodelPreset,
Job,
Region,
RegionMask,
RegionalPolicy,
RegionalJustification,
DataProvider,
DataProviderTask,
License,
UserLicense,
UserJobActivity,
JobPermission,
)
from eventkit_cloud.tasks.enumerations import TaskState
from eventkit_cloud.tasks.helpers import get_celery_queue_group
from eventkit_cloud.tasks.models import (
DataProviderTaskRecord,
ExportRun,
ExportTaskException,
ExportTaskRecord,
FileProducingTaskResult,
RunZipFile,
)
from eventkit_cloud.tasks.views import generate_zipfile
from eventkit_cloud.user_requests.models import DataProviderRequest, SizeIncreaseRequest
from eventkit_cloud.utils.s3 import get_presigned_url
# Get an instance of a logger
logger = logging.getLogger(__name__)
class ProviderTaskSerializer(serializers.ModelSerializer):
formats = serializers.SlugRelatedField(
many=True,
queryset=ExportFormat.objects.all(),
slug_field="slug",
error_messages={"non_field_errors": _("Select an export format.")},
)
provider = serializers.CharField()
class Meta:
model = DataProviderTask
fields = ("provider", "formats", "min_zoom", "max_zoom")
def create(self, validated_data):
"""Creates an export DataProviderTask."""
formats = validated_data.pop("formats")
provider_slug = validated_data.get("provider")
try:
provider_model = DataProvider.objects.get(slug=provider_slug)
except DataProvider.DoesNotExist:
raise Exception(f"The DataProvider for {provider_slug} does not exist.")
provider_task = DataProviderTask.objects.create(provider=provider_model)
provider_task.formats.add(*formats)
provider_task.min_zoom = validated_data.pop("min_zoom", None)
provider_task.max_zoom = validated_data.pop("max_zoom", None)
provider_task.save()
return provider_task
@staticmethod
def update(instance, validated_data, **kwargs):
"""Not implemented.
:param **kwargs:
"""
raise NotImplementedError
def validate(self, data, **kwargs):
"""
Validates the data submitted during DataProviderTask creation.
See api/validators.py for validation code.
:param **kwargs:
"""
# selection = validators.validate_licenses(self.context['request'].data, user=self.context['request'].user)
return data
class FileProducingTaskResultSerializer(serializers.ModelSerializer):
"""Serialize FileProducingTaskResult models."""
url = serializers.SerializerMethodField()
size = serializers.SerializerMethodField()
uid = serializers.UUIDField(read_only=True)
def __init__(self, *args, **kwargs):
super(FileProducingTaskResultSerializer, self).__init__(*args, **kwargs)
if self.context.get("no_license"):
self.fields.pop("url")
class Meta:
model = FileProducingTaskResult
fields = ("uid", "filename", "size", "url", "deleted")
def get_url(self, obj):
request = self.context["request"]
return request.build_absolute_uri("/download?uid={}".format(obj.uid))
@staticmethod
def get_size(obj):
size = ""
if obj.size:
size = "{0:.3f} MB".format(obj.size)
return size
class ExportTaskExceptionSerializer(serializers.ModelSerializer):
"""Serialize ExportTaskExceptions."""
exception = serializers.SerializerMethodField()
class Meta:
model = ExportTaskException
fields = ("exception",)
@staticmethod
def get_exception(obj):
# set a default (incase not found)
exc_info = ["", "Exception info not found or unreadable."]
try:
exc_info = pickle.loads(obj.exception.encode()).exc_info
except Exception as te:
logger.error(str(te))
return str(exc_info[1])
class ExportTaskRecordSerializer(serializers.ModelSerializer):
"""Serialize ExportTasks models."""
result = serializers.SerializerMethodField()
errors = serializers.SerializerMethodField()
url = serializers.HyperlinkedIdentityField(view_name="api:tasks-detail", lookup_field="uid")
class Meta:
model = ExportTaskRecord
fields = (
"uid",
"url",
"name",
"status",
"progress",
"estimated_finish",
"started_at",
"finished_at",
"duration",
"result",
"errors",
"display",
"hide_download",
)
def get_result(self, obj):
"""Serialize the FileProducingTaskResult for this ExportTaskRecord."""
try:
result = obj.result
serializer = FileProducingTaskResultSerializer(result, many=False, context=self.context)
return serializer.data
except FileProducingTaskResult.DoesNotExist:
return None # no result yet
def get_errors(self, obj):
"""Serialize the ExportTaskExceptions for this ExportTaskRecord."""
try:
errors = obj.exceptions
serializer = ExportTaskExceptionSerializer(errors, many=True, context=self.context)
return serializer.data
except ExportTaskException.DoesNotExist:
return None
class ExportTaskListSerializer(serializers.BaseSerializer):
def to_representation(self, obj):
return obj.uid
class DataProviderTaskRecordSerializer(serializers.ModelSerializer):
provider = serializers.SerializerMethodField()
tasks = serializers.SerializerMethodField()
url = serializers.HyperlinkedIdentityField(view_name="api:provider_tasks-detail", lookup_field="uid")
preview_url = serializers.SerializerMethodField()
hidden = serializers.ReadOnlyField(default=False)
def get_provider(self, obj):
return DataProviderSerializer(obj.provider, context=self.context).data
def get_tasks(self, obj):
request = self.context["request"]
if request.query_params.get("slim"):
return ExportTaskListSerializer(obj.tasks, many=True, required=False, context=self.context).data
else:
return ExportTaskRecordSerializer(obj.tasks, many=True, required=False, context=self.context).data
def get_preview_url(self, obj):
from urllib.parse import urlsplit, ParseResult
preview = obj.preview
if preview is not None:
request = urlsplit(self.context["request"].build_absolute_uri())
if getattr(settings, "USE_S3", False):
return get_presigned_url(preview.download_url)
# Otherwise, grab the hostname from the request and tack on the relative url.
return ParseResult(
scheme=request.scheme,
netloc=request.netloc,
path=f"{preview.download_url}",
params="",
query="",
fragment="",
).geturl()
else:
return ""
class Meta:
model = DataProviderTaskRecord
fields = (
"uid",
"url",
"name",
"provider",
"started_at",
"finished_at",
"duration",
"tasks",
"status",
"display",
"slug",
"estimated_size",
"estimated_duration",
"preview_url",
"hidden",
)
class FilteredDataProviderTaskRecordSerializer(serializers.ModelSerializer):
hidden = serializers.ReadOnlyField(default=True)
display = serializers.SerializerMethodField(read_only=True)
class Meta:
model = DataProviderTaskRecord
fields = ("id", "uid", "hidden", "display")
read_only_fields = ("id", "uid")
def get_display(self, obj):
return False
class DataProviderListSerializer(serializers.BaseSerializer):
def to_representation(self, obj):
return obj.uid
class ProjectionSerializer(serializers.ModelSerializer):
"""Return a representation of the ExportFormat model."""
class Meta:
model = Projection
fields = ("uid", "srid", "name", "description")
class AuditEventSerializer(serializers.ModelSerializer):
"""Return a representation of the AuditEvent model."""
class Meta:
model = AuditEvent
fields = "__all__"
class SimpleJobSerializer(serializers.Serializer):
"""Return a sub-set of Job model attributes."""
def update(self, instance, validated_data):
super(SimpleJobSerializer, self).update(instance, validated_data)
uid = serializers.SerializerMethodField()
name = serializers.CharField()
event = serializers.CharField()
description = serializers.CharField()
url = serializers.HyperlinkedIdentityField(view_name="api:jobs-detail", lookup_field="uid")
extent = serializers.SerializerMethodField()
original_selection = serializers.SerializerMethodField(read_only=True)
published = serializers.BooleanField()
visibility = serializers.CharField()
featured = serializers.BooleanField()
formats = serializers.SerializerMethodField()
permissions = serializers.SerializerMethodField(read_only=True)
relationship = serializers.SerializerMethodField(read_only=True)
projections = ProjectionSerializer(many=True)
@staticmethod
def get_uid(obj):
return obj.uid
@staticmethod
def get_extent(obj):
return get_extent_geojson(obj)
@staticmethod
def get_original_selection(obj):
return get_selection_dict(obj)
@staticmethod
def get_permissions(obj):
permissions = JobPermission.jobpermissions(obj)
permissions["value"] = obj.visibility
return permissions
def get_relationship(self, obj):
request = self.context["request"]
user = request.user
return JobPermission.get_user_permissions(user, obj.uid)
def get_formats(self, obj):
formats = []
data_provider_tasks, filtered_data_provider_tasks = attribute_class_filter(
obj.data_provider_tasks.all(), self.context["request"].user
)
for data_provider_task in data_provider_tasks:
if hasattr(data_provider_task, "formats"):
for format in data_provider_task.formats.all():
if format.slug not in formats:
formats.append(format.slug)
return formats
class LicenseSerializer(serializers.ModelSerializer):
"""Serialize Licenses."""
class Meta:
model = License
fields = ("slug", "name", "text")
class ExportRunSerializer(serializers.ModelSerializer):
"""Serialize ExportRun."""
url = serializers.HyperlinkedIdentityField(view_name="api:runs-detail", lookup_field="uid")
job = serializers.SerializerMethodField() # nest the job details
provider_task_list_status = serializers.SerializerMethodField()
provider_tasks = serializers.SerializerMethodField()
user = serializers.SerializerMethodField()
zipfile = serializers.SerializerMethodField()
class Meta:
model = ExportRun
fields = (
"uid",
"url",
"created_at",
"updated_at",
"started_at",
"finished_at",
"duration",
"user",
"status",
"job",
"provider_task_list_status",
"provider_tasks",
"zipfile",
"expiration",
"deleted",
)
read_only_fields = ("created_at", "updated_at", "provider_task_list_status")
@staticmethod
def get_user(obj):
return obj.user.username
def get_provider_task_list_status(self, obj):
request = self.context["request"]
return get_provider_task_list_status(request.user, obj.data_provider_task_records.all())
def get_provider_tasks(self, obj):
if not obj.deleted:
request = self.context["request"]
data = []
data_provider_tasks, filtered_data_provider_tasks = attribute_class_filter(
obj.data_provider_task_records.all(), request.user
)
            if data_provider_tasks.count() > 1:  # There will always be a run task.
if request.query_params.get("slim"):
data = DataProviderListSerializer(data_provider_tasks, many=True, context=self.context).data
else:
data = DataProviderTaskRecordSerializer(data_provider_tasks, many=True, context=self.context).data
if filtered_data_provider_tasks:
if request.query_params.get("slim"):
data += DataProviderListSerializer(
filtered_data_provider_tasks, many=True, context=self.context
).data
else:
data += FilteredDataProviderTaskRecordSerializer(
filtered_data_provider_tasks, many=True, context=self.context
).data
return data
def get_zipfile(self, obj):
request = self.context["request"]
data_provider_task_records, filtered_data_provider_task_records = attribute_class_filter(
obj.data_provider_task_records.exclude(slug="run"), request.user
)
if filtered_data_provider_task_records:
data = None
else:
data = {"status": TaskState.PENDING.value}
run_zip_file = get_run_zip_file(values=data_provider_task_records).first()
if run_zip_file:
data = RunZipFileSerializer(run_zip_file, context=self.context).data
return data
def get_job(self, obj):
data = SimpleJobSerializer(obj.job, context=self.context).data
return data
class ExportRunGeoFeatureSerializer(ExportRunSerializer, GeoFeatureModelSerializer):
run_geom = GeometrySerializerMethodField()
bbox = GeometrySerializerMethodField()
class Meta(ExportRunSerializer.Meta):
geo_field = "run_geom"
bbox_geo_field = "bbox"
def get_run_geom(self, obj):
return obj.job.the_geom
def get_bbox(self, obj):
return obj.job.the_geom.extent
class RunZipFileSerializer(serializers.ModelSerializer):
data_provider_task_records = serializers.SerializerMethodField()
message = serializers.SerializerMethodField()
run = serializers.SerializerMethodField()
status = serializers.SerializerMethodField()
url = serializers.SerializerMethodField()
size = serializers.SerializerMethodField()
class Meta:
model = RunZipFile
fields = "__all__"
def get_data_provider_task_records(self, obj):
return DataProviderListSerializer(obj.data_provider_task_records, many=True, context=self.context).data
def get_message(self, obj):
if obj.finished_at:
return "Completed"
return obj.message
def get_run(self, obj):
if obj.run:
return obj.run.uid
return ""
def get_status(self, obj):
if obj.downloadable_file:
try:
return ExportTaskRecord.objects.get(result=obj.downloadable_file).status
except ExportTaskRecord.DoesNotExist:
logger.error(f"ExportTaskRecord does not exist for file: {obj.downloadable_file}")
return obj.status
def get_url(self, obj):
request = self.context["request"]
if obj.downloadable_file:
return request.build_absolute_uri("/download?uid={}".format(obj.downloadable_file.uid))
return ""
def get_size(self, obj):
if obj.downloadable_file:
return obj.downloadable_file.size
def create(self, validated_data, **kwargs):
request = self.context["request"]
data_provider_task_record_uids = request.data.get("data_provider_task_record_uids")
queryset = get_run_zip_file(field="uid", values=data_provider_task_record_uids)
# If there are no results, that means there's no zip file and we need to create one.
if not queryset.exists():
obj = RunZipFile.objects.create()
obj.status = TaskState.PENDING.value
data_provider_task_records = DataProviderTaskRecord.objects.filter(
uid__in=data_provider_task_record_uids
).exclude(slug="run")
obj.data_provider_task_records.set(data_provider_task_records)
run_zip_task_chain = generate_zipfile(data_provider_task_record_uids, obj)
celery_queue_group = get_celery_queue_group(run_uid=obj.run.uid)
run_zip_task_chain.apply_async(queue=celery_queue_group, routing_key=celery_queue_group)
return obj
else:
raise serializers.ValidationError("Duplicate Zip File already exists.")
class GroupPermissionSerializer(serializers.ModelSerializer):
class Meta:
model = GroupPermission
fields = ("group", "user", "permission")
class JobPermissionSerializer(serializers.ModelSerializer):
class Meta:
model = JobPermission
fields = ("job", "content_type", "object_id", "permission")
class GroupSerializer(serializers.ModelSerializer):
members = serializers.SerializerMethodField()
administrators = serializers.SerializerMethodField()
restricted = serializers.SerializerMethodField()
class Meta:
model = Group
fields = ("id", "name", "members", "administrators", "restricted")
@staticmethod
def get_restricted(instance):
if hasattr(instance, "restricted"):
return instance.restricted
return False
@staticmethod
def get_group_permissions(instance):
return GroupPermission.objects.filter(group=instance).prefetch_related("user", "group")
def get_members(self, instance):
qs = self.get_group_permissions(instance).filter(permission=GroupPermissionLevel.MEMBER.value)
return [permission.user.username for permission in qs]
def get_administrators(self, instance):
qs = self.get_group_permissions(instance).filter(permission=GroupPermissionLevel.ADMIN.value)
return [permission.user.username for permission in qs]
@staticmethod
def get_identification(instance):
if hasattr(instance, "oauth"):
return instance.oauth.identification
else:
return None
class GroupUserSerializer(serializers.ModelSerializer):
members = serializers.SerializerMethodField()
class Meta:
model = Group
fields = ("name", "members")
def get_members(self, instance):
request = self.context["request"]
limit = 1000
if request.query_params.get("limit"):
limit = int(request.query_params.get("limit"))
gp_admins = GroupPermission.objects.filter(group=instance).filter(permission=GroupPermissionLevel.ADMIN.value)[
:limit
]
admins = [gp.user for gp in gp_admins]
members = []
gp_members = (
GroupPermission.objects.filter(group=instance)
.filter(permission=GroupPermissionLevel.MEMBER.value)
.exclude(user__in=admins)[: limit - gp_admins.count()]
)
for gp in gp_members:
if gp.user not in admins:
members.append(gp.user)
return [self.user_representation(user, GroupPermissionLevel.ADMIN.value) for user in admins] + [
self.user_representation(user, GroupPermissionLevel.MEMBER.value) for user in members
]
@staticmethod
def user_representation(user, permission_lvl):
return dict(
username=user.username,
last_name=user.last_name,
first_name=user.first_name,
email=user.email,
permission=permission_lvl,
)
class UserSerializer(serializers.ModelSerializer):
username = serializers.CharField()
first_name = serializers.CharField()
last_name = serializers.CharField()
email = serializers.CharField()
class Meta:
model = User
fields = (
"username",
"first_name",
"last_name",
"email",
)
read_only_fields = (
"username",
"first_name",
"last_name",
"email",
)
class UserSerializerFull(serializers.ModelSerializer):
username = serializers.CharField()
first_name = serializers.CharField()
last_name = serializers.CharField()
email = serializers.CharField()
last_login = serializers.DateTimeField(read_only=True)
date_joined = serializers.DateTimeField(read_only=True)
identification = serializers.SerializerMethodField()
commonname = serializers.SerializerMethodField()
class Meta:
model = User
fields = (
"username",
"first_name",
"last_name",
"email",
"last_login",
"date_joined",
"identification",
"commonname",
)
read_only_fields = (
"username",
"first_name",
"last_name",
"email",
"last_login",
"date_joined",
)
@staticmethod
def get_identification(instance):
if hasattr(instance, "oauth"):
return instance.oauth.identification
else:
return None
@staticmethod
def get_commonname(instance):
if hasattr(instance, "oauth"):
return instance.oauth.commonname
else:
return None
class UserDataSerializer(serializers.Serializer):
"""
Return a GeoJSON representation of the user data.
"""
user = serializers.SerializerMethodField()
accepted_licenses = serializers.SerializerMethodField()
accepted_policies = serializers.SerializerMethodField()
groups = serializers.SerializerMethodField()
restricted = serializers.SerializerMethodField()
class Meta:
fields = ("user", "accepted_licenses")
read_only_fields = ("user",)
def get_accepted_licenses(self, instance):
licenses = dict()
request = self.context["request"]
if request.user != instance:
return licenses
user_licenses = UserLicense.objects.filter(user=instance)
for license in License.objects.all():
licenses[license.slug] = user_licenses.filter(license=license).exists()
return licenses
def get_accepted_policies(self, instance):
policies = dict()
request = self.context["request"]
if request.user != instance:
return policies
for policy in RegionalPolicy.objects.all().prefetch_related("justifications"):
policies[str(policy.uid)] = get_valid_regional_justification(policy, instance) is not None
return policies
@staticmethod
def get_restricted(instance):
if hasattr(instance, "restricted"):
return instance.restricted
return False
def get_user(self, instance):
request = self.context["request"]
if request.user.is_superuser or request.user == instance:
return UserSerializerFull(instance).data
return UserSerializer(instance).data
@staticmethod
def get_user_accepted_licenses(instance):
licenses = dict()
user_licenses = UserLicense.objects.filter(user=instance)
for license in License.objects.all():
if user_licenses.filter(license=license):
licenses[license.slug] = True
else:
licenses[license.slug] = False
return licenses
@staticmethod
def get_groups(instance):
group_ids = [
perm.group.id for perm in GroupPermission.objects.filter(user=instance).filter(permission="MEMBER")
]
return group_ids
def update(self, instance, validated_data):
if self.context.get("request").data.get("accepted_licenses"):
for slug, selected in self.context.get("request").data.get("accepted_licenses").items():
user_license = UserLicense.objects.filter(user=instance, license=License.objects.get(slug=slug))
if user_license and not selected:
user_license.delete()
if not user_license and selected:
UserLicense.objects.create(user=instance, license=License.objects.get(slug=slug))
return instance
def create(self, validated_data, **kwargs):
raise NotImplementedError("UserData can only be updated using this interface.")
class RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):
"""Return a GeoJSON representation of the region mask."""
class Meta:
model = RegionMask
geo_field = "the_geom"
fields = ("the_geom",)
class RegionSerializer(geo_serializers.GeoFeatureModelSerializer):
"""Serializer returning GeoJSON representation of Regions."""
url = serializers.HyperlinkedIdentityField(view_name="api:regions-detail", lookup_field="uid")
id = serializers.SerializerMethodField()
class Meta:
model = Region
geo_field = "the_geom"
fields = ("id", "uid", "name", "description", "url", "the_geom")
@staticmethod
def get_id(obj):
return obj.uid
class SimpleRegionSerializer(serializers.ModelSerializer):
"""Serializer for returning Region model data without geometry."""
url = serializers.HyperlinkedIdentityField(view_name="api:regions-detail", lookup_field="uid")
class Meta:
model = Region
fields = ("uid", "name", "description", "url")
class RegionalPolicySerializer(serializers.Serializer):
"""Serializer for returning RegionalPolicy model data."""
uid = serializers.SerializerMethodField()
name = serializers.CharField()
region = RegionSerializer(read_only=True)
providers = serializers.SerializerMethodField()
policies = serializers.JSONField()
policy_title_text = serializers.CharField()
policy_header_text = serializers.CharField()
policy_footer_text = serializers.CharField()
policy_cancel_text = serializers.CharField()
policy_cancel_button_text = serializers.CharField()
justification_options = serializers.JSONField()
url = serializers.HyperlinkedIdentityField(view_name="api:regional_policies-detail", lookup_field="uid")
class Meta:
model = RegionalPolicy
fields = "__all__"
@staticmethod
def get_uid(obj):
return obj.uid
@staticmethod
def get_providers(obj):
providers = []
for provider in obj.providers.all():
providers.append({"uid": provider.uid, "name": provider.name, "slug": provider.slug})
return providers
class RegionalJustificationSerializer(serializers.ModelSerializer):
"""Serializer for creating and returning RegionalPolicyJustification model data."""
uid = serializers.SerializerMethodField()
justification_id = serializers.IntegerField()
justification_name = serializers.CharField(required=False)
justification_suboption_value = serializers.CharField(required=False)
regional_policy = serializers.SerializerMethodField()
user = serializers.SerializerMethodField()
class Meta:
model = RegionalJustification
fields = "__all__"
@staticmethod
def create(validated_data):
justification_id = validated_data.get("justification_id")
justification_suboption_value = validated_data.get("justification_suboption_value")
regional_policy_uid = validated_data.get("regional_policy_uid")
user = validated_data.get("user")
try:
regional_policy = RegionalPolicy.objects.get(uid=regional_policy_uid)
except RegionalPolicy.DoesNotExist:
raise Exception(f"The Regional Policy for UID {regional_policy_uid} does not exist.")
regional_policy_options = regional_policy.justification_options
# Now get the justification option based on the ID passed.
selected_option = [
regional_policy_option
for regional_policy_option in regional_policy_options
if regional_policy_option["id"] == justification_id
][0]
selected_suboption = selected_option.get("suboption")
if selected_suboption:
if selected_suboption.get("type") == "dropdown":
if justification_suboption_value not in selected_suboption["options"]:
raise ValidationError(code="invalid_suboption", detail="Invalid suboption selected.")
else:
if justification_suboption_value:
raise ValidationError(
code="invalid_description",
detail="No suboption was available, so justification_suboption_value cannot be used.",
)
regional_justification = RegionalJustification.objects.create(
justification_id=justification_id,
justification_name=selected_option["name"],
justification_suboption_value=justification_suboption_value,
regional_policy=regional_policy,
user=user,
)
for provider in regional_policy.providers.all():
cache.delete(f"mapproxy-config-{user}-{provider.slug}")
return regional_justification
def validate(self, data):
request = self.context["request"]
data["regional_policy_uid"] = request.data["regional_policy_uid"]
data["user"] = request.user
return data
@staticmethod
def get_uid(obj):
return obj.uid
@staticmethod
def get_regional_policy(obj):
return obj.regional_policy.uid
@staticmethod
def get_user(obj):
return obj.user.username
class ExportFormatSerializer(serializers.ModelSerializer):
"""Return a representation of the ExportFormat model."""
url = serializers.HyperlinkedIdentityField(view_name="api:formats-detail", lookup_field="slug")
supported_projections = serializers.SerializerMethodField(read_only=True)
class Meta:
model = ExportFormat
fields = ("uid", "url", "slug", "name", "description", "supported_projections")
@staticmethod
def get_supported_projections(obj):
return obj.supported_projections.all().values("uid", "name", "srid", "description")
class FilteredDataProviderSerializer(serializers.ModelSerializer):
hidden = serializers.ReadOnlyField(default=True)
display = serializers.SerializerMethodField(read_only=True)
class Meta:
model = DataProvider
fields = ("id", "uid", "hidden", "display")
read_only_fields = ("id", "uid", "hidden", "display")
def get_display(self, obj):
return False
class DataProviderSerializer(serializers.ModelSerializer):
model_url = serializers.HyperlinkedIdentityField(view_name="api:providers-detail", lookup_field="slug")
type = serializers.SerializerMethodField(read_only=True)
supported_formats = serializers.SerializerMethodField(read_only=True)
thumbnail_url = serializers.SerializerMethodField(read_only=True)
license = LicenseSerializer(required=False)
metadata = serializers.SerializerMethodField(read_only=True)
footprint_url = serializers.SerializerMethodField(read_only=True)
max_data_size = serializers.SerializerMethodField(read_only=True)
max_selection = serializers.SerializerMethodField(read_only=True)
use_bbox = serializers.SerializerMethodField(read_only=True)
hidden = serializers.ReadOnlyField(default=False)
data_type = serializers.ReadOnlyField(default=False)
class Meta:
model = DataProvider
extra_kwargs = {
"url": {"write_only": True},
"user": {"write_only": True},
"config": {"write_only": True},
}
read_only_fields = ("uid",)
exclude = ("thumbnail",)
@staticmethod
def create(validated_data, **kwargs):
# try to get existing export Provider
url = validated_data.get("url")
user = validated_data.get("user")
license_data = validated_data.pop("license", None)
if license_data:
License.objects.create(**license_data)
ep = DataProvider.objects.filter(url=url, user=user).first()
if not ep:
ep = DataProvider.objects.create(**validated_data)
return ep
@staticmethod
def get_type(obj):
return obj.export_provider_type.type_name
def get_supported_formats(self, obj):
fields = ["uid", "name", "slug", "description"]
export_formats = obj.export_provider_type.supported_formats.all().values(*fields) | ExportFormat.objects.filter(
options__providers__contains=obj.slug
).values(*fields)
return export_formats.distinct()
def get_thumbnail_url(self, obj):
from urllib.parse import urlsplit, ParseResult
thumbnail = obj.thumbnail
if thumbnail is not None:
request = urlsplit(self.context["request"].build_absolute_uri())
if getattr(settings, "USE_S3", False):
return get_presigned_url(thumbnail.download_url, expires=3000)
# Otherwise, grab the hostname from the request and tack on the relative url.
return ParseResult(
scheme=request.scheme,
netloc=request.netloc,
path=f"{thumbnail.download_url}",
params="",
query="",
fragment="",
).geturl()
else:
return ""
@staticmethod
def get_metadata(obj):
return obj.metadata
@staticmethod
def get_footprint_url(obj):
return obj.footprint_url
def get_max_data_size(self, obj):
user = None
request = self.context.get("request")
if request and hasattr(request, "user"):
user = request.user
return obj.get_max_data_size(user)
def get_max_selection(self, obj):
user = None
request = self.context.get("request")
if request and hasattr(request, "user"):
user = request.user
return obj.get_max_selection_size(user)
def get_use_bbox(self, obj):
return obj.get_use_bbox()
class DataProviderGeoFeatureSerializer(DataProviderSerializer, GeoFeatureModelSerializer):
data_provider_geom = GeometrySerializerMethodField()
bbox = GeometrySerializerMethodField()
class Meta(DataProviderSerializer.Meta):
geo_field = "data_provider_geom"
bbox_geo_field = "bbox"
def get_data_provider_geom(self, obj):
return obj.the_geom
def get_bbox(self, obj):
return obj.the_geom.extent
class FilteredDataProviderGeoFeatureSerializer(FilteredDataProviderSerializer, GeoFeatureModelSerializer):
"""
Used to mixin geojson views.
"""
data_provider_geom = GeometrySerializerMethodField()
bbox = GeometrySerializerMethodField()
class Meta(DataProviderSerializer.Meta):
geo_field = "data_provider_geom"
bbox_geo_field = "bbox"
def get_data_provider_geom(self, obj):
return None
def get_bbox(self, obj):
return []
class ListJobSerializer(serializers.Serializer):
"""
Return a sub-set of Job model attributes.
Provides a stripped down set of export attributes.
Removes the selected Tags from the Job representation.
Used to display the list of exports in the export browser
where tag info is not required.
"""
def update(self, instance, validated_data):
super(ListJobSerializer, self).update(instance, validated_data)
uid = serializers.SerializerMethodField()
url = serializers.HyperlinkedIdentityField(view_name="api:jobs-detail", lookup_field="uid")
name = serializers.CharField()
description = serializers.CharField()
event = serializers.CharField()
created_at = serializers.DateTimeField(read_only=True)
owner = serializers.SerializerMethodField(read_only=True)
extent = serializers.SerializerMethodField()
original_selection = serializers.SerializerMethodField(read_only=True)
region = SimpleRegionSerializer(read_only=True)
published = serializers.BooleanField()
visibility = serializers.CharField()
featured = serializers.BooleanField()
permissions = serializers.SerializerMethodField(read_only=True)
relationship = serializers.SerializerMethodField(read_only=True)
@staticmethod
def get_uid(obj):
return obj.uid
@staticmethod
def get_extent(obj):
return get_extent_geojson(obj)
@staticmethod
def get_original_selection(obj):
return get_selection_dict(obj)
@staticmethod
def get_owner(obj):
return obj.user.username
def get_relationship(self, obj):
request = self.context["request"]
user = request.user
return JobPermission.get_user_permissions(user, obj.uid)
@staticmethod
def get_permissions(obj):
permissions = JobPermission.jobpermissions(obj)
permissions["value"] = obj.visibility
return permissions
class JobSerializer(serializers.Serializer):
"""
Return a full representation of an export Job.
This is the core representation of the API.
"""
provider_tasks = serializers.SerializerMethodField()
provider_task_list_status = serializers.SerializerMethodField()
uid = serializers.UUIDField(read_only=True)
url = serializers.HyperlinkedIdentityField(view_name="api:jobs-detail", lookup_field="uid")
name = serializers.CharField(
max_length=100,
)
description = serializers.CharField(
max_length=255,
)
event = serializers.CharField(max_length=100, allow_blank=True, required=False)
created_at = serializers.DateTimeField(read_only=True)
updated_at = serializers.DateTimeField(read_only=True)
owner = serializers.SerializerMethodField(read_only=True)
permissions = serializers.SerializerMethodField(read_only=True)
relationship = serializers.SerializerMethodField(read_only=True)
exports = serializers.SerializerMethodField()
preset = serializers.PrimaryKeyRelatedField(queryset=DatamodelPreset.objects.all(), required=False)
published = serializers.BooleanField(required=False)
visibility = serializers.CharField(required=False)
featured = serializers.BooleanField(required=False)
region = SimpleRegionSerializer(read_only=True)
extent = serializers.SerializerMethodField(read_only=True)
original_selection = serializers.SerializerMethodField(read_only=True)
user = serializers.HiddenField(default=serializers.CurrentUserDefault())
tags = serializers.SerializerMethodField()
include_zipfile = serializers.BooleanField(required=False, default=False)
@staticmethod
def create(validated_data, **kwargs):
"""Creates an export Job.
:param **kwargs:
"""
return Job.objects.create(**validated_data)
@staticmethod
def update(instance, validated_data, **kwargs):
"""Not implemented as Jobs are cloned rather than updated.
:param **kwargs:
"""
raise NotImplementedError
def validate(self, data, **kwargs):
"""
Validates the data submitted during Job creation.
See api/validators.py for validation code.
"""
user = data["user"]
selection = validators.validate_selection(self.context["request"].data, user=user)
data["the_geom"] = selection
original_selection = validators.validate_original_selection(self.context["request"].data)
data["original_selection"] = original_selection
data.pop("provider_tasks", None)
return data
@staticmethod
def get_extent(obj):
return get_extent_geojson(obj)
@staticmethod
def get_original_selection(obj):
return get_selection_dict(obj)
def get_exports(self, obj):
"""Return the export formats selected for this export."""
exports = []
data_provider_tasks, filtered_tasks = attribute_class_filter(
obj.data_provider_tasks.all(), self.context["request"].user
)
for data_provider_task in data_provider_tasks:
serializer = ExportFormatSerializer(
data_provider_task.formats,
many=True,
context={"request": self.context["request"]},
)
exports.append({"provider": data_provider_task.provider.name, "formats": serializer.data})
for data_provider_task in filtered_tasks:
exports.append({"provider": data_provider_task.uid})
return exports
def get_provider_task_list_status(self, obj):
request = self.context["request"]
return get_provider_task_list_status(request.user, obj.data_provider_tasks.all())
def get_provider_tasks(self, obj):
"""Return the export formats selected for this export."""
exports = []
data_provider_tasks, filtered_data_provider_tasks = attribute_class_filter(
obj.data_provider_tasks.all(), self.context["request"].user
)
for data_provider_task in data_provider_tasks:
if hasattr(data_provider_task, "formats"):
serializer = ProviderTaskSerializer(data_provider_task, context={"request": self.context["request"]})
if hasattr(data_provider_task, "provider"):
exports.append(serializer.data)
return exports
def get_providers(self, obj):
"""Return the export formats selected for this export."""
providers, filtered_providers = attribute_class_filter(obj.providers.all(), self.context["request"].user)
providers = [provider_format for provider_format in providers]
provider_serializer = DataProviderSerializer(providers, many=True, context={"request": self.context["request"]})
filtered_providers = [provider_format for provider_format in filtered_providers]
filtered_providers_serializer = FilteredDataProviderSerializer(
filtered_providers, many=True, context={"request": self.context["request"]}
)
return provider_serializer.data + filtered_providers_serializer.data
@staticmethod
def get_tags(obj):
"""Return the Tags selected for this export."""
return obj.json_tags
@staticmethod
def get_owner(obj):
"""Return the username for the owner of this export."""
return obj.user.username
def get_relationship(self, obj):
request = self.context["request"]
user = request.user
return JobPermission.get_user_permissions(user, obj.uid)
@staticmethod
def get_permissions(obj):
permissions = JobPermission.jobpermissions(obj)
permissions["value"] = obj.visibility
return permissions
class UserJobActivitySerializer(serializers.ModelSerializer):
last_export_run = serializers.SerializerMethodField()
class Meta:
model = UserJobActivity
fields = ("last_export_run", "type", "created_at")
def get_last_export_run(self, obj):
if obj.job.last_export_run:
serializer = ExportRunSerializer(obj.job.last_export_run, context={"request": self.context["request"]})
return serializer.data
else:
return None
class GenericNotificationRelatedSerializer(serializers.BaseSerializer):
def to_representation(self, referenced_object):
if isinstance(referenced_object, User):
serializer = UserSerializer(referenced_object)
elif isinstance(referenced_object, Job):
serializer = NotificationJobSerializer(referenced_object, context={"request": self.context["request"]})
elif isinstance(referenced_object, ExportRun):
serializer = NotificationRunSerializer(referenced_object, context={"request": self.context["request"]})
elif isinstance(referenced_object, Group):
serializer = GroupSerializer(referenced_object)
return serializer.data
class NotificationSerializer(serializers.ModelSerializer):
actor = serializers.SerializerMethodField()
target = serializers.SerializerMethodField()
action_object = serializers.SerializerMethodField()
class Meta:
model = Notification
fields = (
"unread",
"deleted",
"level",
"verb",
"description",
"id",
"timestamp",
"recipient_id",
"actor",
"target",
"action_object",
)
def get_related_object(self, obj, related_type=None):
if not related_type:
return None
type_id = getattr(obj, "{}_content_type_id".format(related_type))
if type_id:
return {
"type": str(ContentType.objects.get(id=type_id).model),
"id": getattr(obj, "{0}_object_id".format(related_type)),
"details": GenericNotificationRelatedSerializer(
getattr(obj, related_type),
context={"request": self.context["request"]},
).data,
}
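        # Illustrative shape of the dictionary built above (values are
        # placeholders only):
        #   {"type": "<content type model name>", "id": <related object pk>,
        #    "details": <serialized actor/target/action_object>}
        # When the related content type id is absent, None is returned implicitly.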
def get_actor(self, obj):
return self.get_related_object(obj, "actor")
def get_target(self, obj):
return self.get_related_object(obj, "target")
def get_action_object(self, obj):
return self.get_related_object(obj, "action_object")
class NotificationJobSerializer(serializers.Serializer):
"""Return a slimmed down representation of a Job model."""
def update(self, instance, validated_data):
super(NotificationJobSerializer, self).update(instance, validated_data)
uid = serializers.SerializerMethodField()
name = serializers.CharField()
event = serializers.CharField()
description = serializers.CharField()
published = serializers.BooleanField()
visibility = serializers.CharField()
featured = serializers.BooleanField()
@staticmethod
def get_uid(obj):
return obj.uid
class NotificationRunSerializer(serializers.ModelSerializer):
"""Return a slimmed down representation of a ExportRun model."""
job = serializers.SerializerMethodField() # nest the job details
user = serializers.SerializerMethodField()
expiration = serializers.SerializerMethodField()
created_at = serializers.SerializerMethodField()
started_at = serializers.SerializerMethodField()
finished_at = serializers.SerializerMethodField()
duration = serializers.SerializerMethodField()
status = serializers.SerializerMethodField()
expiration = serializers.SerializerMethodField()
class Meta:
model = ExportRun
fields = (
"uid",
"created_at",
"updated_at",
"started_at",
"finished_at",
"duration",
"user",
"status",
"job",
"expiration",
"deleted",
)
read_only_fields = ("created_at", "updated_at")
@staticmethod
def get_user(obj):
if not obj.deleted:
return obj.user.username
def get_created_at(self, obj):
if not obj.deleted:
return obj.created_at
def get_started_at(self, obj):
if not obj.deleted:
return obj.started_at
def get_finished_at(self, obj):
if not obj.deleted:
return obj.finished_at
def get_duration(self, obj):
if not obj.deleted:
return obj.duration
def get_status(self, obj):
if not obj.deleted:
return obj.status
def get_job(self, obj):
data = NotificationJobSerializer(obj.job, context=self.context).data
if not obj.deleted:
return data
else:
return {"uid": data["uid"], "name": data["name"]}
def get_expiration(self, obj):
if not obj.deleted:
return obj.expiration
class DataProviderRequestSerializer(serializers.ModelSerializer):
user = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = DataProviderRequest
fields = "__all__"
class SizeIncreaseRequestSerializer(serializers.ModelSerializer):
extent = serializers.SerializerMethodField(read_only=True)
user = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = SizeIncreaseRequest
fields = "__all__"
@staticmethod
def create(validated_data, **kwargs):
"""Creates an export Job.
:param **kwargs:
"""
return SizeIncreaseRequest.objects.create(**validated_data)
def validate(self, data, **kwargs):
"""
        Validates the data submitted when requesting a data size increase.
See api/validators.py for validation code.
"""
user = data["user"]
selection = validators.validate_selection(self.context["request"].data, user=user)
data["the_geom"] = selection
return data
@staticmethod
def get_extent(obj):
return get_extent_geojson(obj)
def get_extent_geojson(obj):
"""Return the export extent as a GeoJSON Feature."""
uid = str(obj.uid)
if hasattr(obj, "name"):
name = obj.name
else:
name = ""
geom = obj.the_geom
geometry = json.loads(GEOSGeometry(geom).geojson)
feature = OrderedDict()
feature["type"] = "Feature"
feature["properties"] = {"uid": uid, "name": name}
feature["geometry"] = geometry
return feature
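# Illustrative shape of the feature returned above (values are placeholders and
# the geometry type depends on what is stored in the_geom):
# {"type": "Feature",
#  "properties": {"uid": "<uid>", "name": "<name>"},
#  "geometry": {"type": "Polygon", "coordinates": [...]}}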
def get_selection_dict(obj):
"""Return the selection as a feature collection dictionary."""
geom_collection = obj.original_selection
if not geom_collection:
return None
feature_collection = OrderedDict()
feature_collection["type"] = "FeatureCollection"
feature_collection["features"] = []
for geom in geom_collection:
geojson_geom = json.loads(geom.geojson)
feature = OrderedDict()
feature["type"] = "Feature"
feature["geometry"] = geojson_geom
feature_collection["features"].append(feature)
return feature_collection
def get_provider_task_list_status(user, data_provider_task_records):
if data_provider_task_records and isinstance(data_provider_task_records.first(), DataProviderTaskRecord):
data_provider_task_records = data_provider_task_records.exclude(slug="run")
data_provider_task_records, filtered_provider_tasks = attribute_class_filter(data_provider_task_records, user)
if not data_provider_task_records:
return "EMPTY"
if not filtered_provider_tasks:
return "COMPLETE"
return "PARTIAL"
| {
"content_hash": "87361e8de04ee89131f0284664938ecf",
"timestamp": "",
"source": "github",
"line_count": 1513,
"max_line_length": 120,
"avg_line_length": 34.66622604097819,
"alnum_prop": 0.6509437559580553,
"repo_name": "venicegeo/eventkit-cloud",
"id": "aa5af2b5a05d5e314a328b81bf1e9b462dc5994a",
"size": "52450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eventkit_cloud/api/serializers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "90420"
},
{
"name": "Dockerfile",
"bytes": "2466"
},
{
"name": "HTML",
"bytes": "85741"
},
{
"name": "Java",
"bytes": "123740"
},
{
"name": "JavaScript",
"bytes": "597810"
},
{
"name": "Python",
"bytes": "1145801"
},
{
"name": "Shell",
"bytes": "6127"
},
{
"name": "TypeScript",
"bytes": "1456680"
}
],
"symlink_target": ""
} |
from oslo_log import helpers
from oslo_log import log as logging
import oslo_messaging
LOG = logging.getLogger(__name__)
class NwaL3ProxyCallback(object):
target = oslo_messaging.Target(version='1.0')
def __init__(self, context, agent):
self.context = context
self.agent = agent
@helpers.log_method_call
def create_tenant_fw(self, context, **kwargs):
return self.agent.create_tenant_fw(context, **kwargs)
@helpers.log_method_call
def delete_tenant_fw(self, context, **kwargs):
return self.agent.delete_tenant_fw(context, **kwargs)
@helpers.log_method_call
def setting_nat(self, context, **kwargs):
return self.agent.setting_nat(context, **kwargs)
@helpers.log_method_call
def delete_nat(self, context, **kwargs):
return self.agent.delete_nat(context, **kwargs)
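# Illustrative sketch only (not part of the original module): one way these
# endpoints could be served with oslo.messaging. The topic and server names
# below are assumptions made for the example.
def _example_start_rpc_server(transport, context, agent):
    # Expose NwaL3ProxyCallback as an RPC endpoint on an assumed topic.
    target = oslo_messaging.Target(topic='nwa_l3_proxy', server='nwa-agent')
    endpoints = [NwaL3ProxyCallback(context, agent)]
    server = oslo_messaging.get_rpc_server(transport, target, endpoints)
    server.start()
    return server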
| {
"content_hash": "2e237904ed61a5d037d17683c7516522",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 61,
"avg_line_length": 28.6,
"alnum_prop": 0.6748251748251748,
"repo_name": "openstack/networking-nec",
"id": "c1b19fdd6c288ceabdfaa6f875028c809ef3879f",
"size": "1494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "networking_nec/nwa/l3/rpc/nwa_l3_proxy_callback.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "431"
},
{
"name": "Python",
"bytes": "387801"
},
{
"name": "Shell",
"bytes": "8526"
}
],
"symlink_target": ""
} |
import os.path
import os
import numpy as np
import copy
import PDielec.Calculator as Calculator
from PyQt5.QtWidgets import QPushButton, QWidget
from PyQt5.QtWidgets import QComboBox, QLabel, QLineEdit, QListWidget
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QFormLayout
from PyQt5.QtWidgets import QSpinBox,QDoubleSpinBox
from PyQt5.QtWidgets import QSizePolicy
from PyQt5.QtCore import QCoreApplication, Qt
from PDielec.DielectricFunction import DielectricFunction
from PDielec.Constants import wavenumber, PI, avogadro_si, angstrom, speed_light_si
from PDielec.Utilities import Debug
from PDielec.GUI.ScenarioTab import ScenarioTab
from functools import partial
class SingleCrystalScenarioTab(ScenarioTab):
def __init__(self, parent, debug=False ):
ScenarioTab.__init__(self,parent)
global debugger
debugger = Debug(debug,'SingleCrystalScenarioTab:')
debugger.print('Start:: initialiser')
self.refreshRequired = True
self.calculationRequired = False
self.scenarioType = 'Single crystal'
self.settings['Scenario type'] = 'Single crystal'
self.settings['Unique direction - h'] = 0
self.settings['Unique direction - k'] = 0
self.settings['Unique direction - l'] = 1
self.settings['Azimuthal angle'] = 0.0
self.settings['Angle of incidence'] = 0.0
self.settings['Superstrate dielectric'] = 1.0
self.settings['Substrate dielectric'] = 1.0
self.settings['Superstrate depth'] = 999.0
self.settings['Substrate depth'] = 999.0
self.settings['Film thickness'] = 100.0
self.settings['Mode'] = 'Thick slab'
self.settings['Frequency units'] = 'wavenumber'
self.p_reflectance = []
self.s_reflectance = []
self.p_transmittance = []
self.s_transmittance = []
self.p_absorbtance = []
self.s_absorbtance = []
self.epsilon = []
# store the notebook
self.notebook = parent
# get the reader from the main tab
self.reader = self.notebook.mainTab.reader
# Get the last unit cell in the reader
if self.reader is not None:
self.cell = self.reader.unit_cells[-1]
# Create last tab - SingleCrystalTab
vbox = QVBoxLayout()
form = QFormLayout()
#
# Chose mode of operation
#
self.mode_cb = QComboBox(self)
self.mode_cb.setToolTip('Set the mode of operation for this tab;\n Thick slab means that only reflections are significant (the film thickness has no effect and there are only two media; the incident and the crystal),\n Coherent thin film assumes there are three media; the incident, the crystal and the substrate')
self.mode_cb.addItems( ['Thick slab','Coherent thin film'] )
self.settings['Mode'] = 'Thick slab'
self.mode_cb.activated.connect(self.on_mode_cb_activated)
label = QLabel('Single crystal mode', self)
label.setToolTip('Set the mode of operation for this tab;\n Thick slab means that only reflections are significant (the film thickness has no effect and there are only two media; the incident and the crystal),\n Coherent thin film assumes there are three media; the incident, the crystal and the substrate')
form.addRow(label, self.mode_cb)
#
# Define the slab surface in crystal coordinates
#
self.h_sb = QSpinBox(self)
self.h_sb.setToolTip('Define the h dimension of the unique direction')
self.h_sb.setRange(-20,20)
self.h_sb.setSingleStep(1)
self.h_sb.setValue(self.settings['Unique direction - h'])
self.h_sb.valueChanged.connect(self.on_h_sb_changed)
self.k_sb = QSpinBox(self)
self.k_sb.setToolTip('Define the k dimension of the unique direction')
self.k_sb.setRange(-20,20)
self.k_sb.setSingleStep(1)
self.k_sb.setValue(self.settings['Unique direction - k'])
self.k_sb.valueChanged.connect(self.on_k_sb_changed)
self.l_sb = QSpinBox(self)
self.l_sb.setToolTip('Define the l dimension of the unique direction')
self.l_sb.setRange(-20,20)
self.l_sb.setSingleStep(1)
self.l_sb.setValue(self.settings['Unique direction - l'])
self.l_sb.valueChanged.connect(self.on_l_sb_changed)
hbox = QHBoxLayout()
hbox.addWidget(self.h_sb)
hbox.addWidget(self.k_sb)
hbox.addWidget(self.l_sb)
hkl_label = QLabel('Crystal surface (hkl)',self)
hkl_label.setToolTip('Define the crystal surface (hkl). Defines the unique direction in crystallographic units.')
form.addRow(hkl_label, hbox)
#
# Define the rotation angle of the slab and the angle of incidence
#
self.azimuthal_angle_sb = QDoubleSpinBox(self)
self.azimuthal_angle_sb.setToolTip('Define the slab azimuthal angle')
self.azimuthal_angle_sb.setRange(-180,360)
self.azimuthal_angle_sb.setSingleStep(10)
self.azimuthal_angle_sb.setValue(self.settings['Azimuthal angle'])
self.azimuthal_angle_sb.valueChanged.connect(self.on_azimuthal_angle_sb_changed)
label = QLabel('Azimuthal angle',self)
label.setToolTip('Define the azimuthal angle (rotation of the crystal about the lab Z-axis)')
form.addRow(label, self.azimuthal_angle_sb)
self.angle_of_incidence_sb = QDoubleSpinBox(self)
self.angle_of_incidence_sb.setToolTip('Define the angle of incidence, (normal incidence is 0 degrees)')
self.angle_of_incidence_sb.setRange(0,90)
self.angle_of_incidence_sb.setSingleStep(5)
self.angle_of_incidence_sb.setValue(self.settings['Angle of incidence'])
self.angle_of_incidence_sb.valueChanged.connect(self.on_angle_of_incidence_sb_changed)
label = QLabel('Angle of incidence',self)
label.setToolTip('Define the angle of incidence, (normal incidence is 0 degrees).')
form.addRow(label, self.angle_of_incidence_sb)
#
# Provide information on the lab frame
#
labframe_l = QLabel('Lab frame information', self)
        labframe_l.setToolTip('The normal to the surface defines the Z-axis in the lab frame\nThe incident and reflected light lie in the XZ plane\nThe p-polarisation direction lies in the XZ plane, the s-polarisation is parallel to Y')
self.labframe_w = QListWidget(self)
fm = self.labframe_w.fontMetrics()
h = fm.ascent() + fm.descent()
self.labframe_w.setMaximumHeight(6*h)
        self.labframe_w.setToolTip('The normal to the surface defines the Z-axis in the lab frame\nThe incident and reflected light lie in the XZ plane\nThe p-polarisation direction lies in the XZ plane, the s-polarisation is parallel to Y')
form.addRow(labframe_l,self.labframe_w)
#
# Define the superstrate and substrate dielectrics
#
self.superstrate_dielectric_sb = QDoubleSpinBox(self)
self.superstrate_dielectric_sb.setToolTip('Define the incident medium permittivity')
self.superstrate_dielectric_sb.setRange(1,1000)
self.superstrate_dielectric_sb.setSingleStep(0.1)
self.superstrate_dielectric_sb.setValue(self.settings['Superstrate dielectric'])
self.superstrate_dielectric_sb.valueChanged.connect(self.on_superstrate_dielectric_sb_changed)
self.substrate_dielectric_sb = QDoubleSpinBox(self)
self.substrate_dielectric_sb.setToolTip('Define the substrate permittivity')
self.substrate_dielectric_sb.setRange(1,1000)
self.substrate_dielectric_sb.setSingleStep(0.1)
self.substrate_dielectric_sb.setValue(self.settings['Substrate dielectric'])
self.substrate_dielectric_sb.valueChanged.connect(self.on_substrate_dielectric_sb_changed)
hbox = QHBoxLayout()
hbox.addWidget(self.superstrate_dielectric_sb)
hbox.addWidget(self.substrate_dielectric_sb)
superstrate_label = QLabel('Incident medium and substrate permittivities',self)
        superstrate_label.setToolTip('Define the permittivities of the incident medium and substrate')
form.addRow(superstrate_label, hbox)
#
#
# Define the Film thickness
#
self.film_thickness_sb = QDoubleSpinBox(self)
self.film_thickness_sb.setToolTip('Define the thin film thickness in nanometres')
self.film_thickness_sb.setRange(0,100000)
self.film_thickness_sb.setSingleStep(1)
self.film_thickness_sb.setValue(self.settings['Film thickness'])
self.film_thickness_sb.valueChanged.connect(self.on_film_thickness_sb_changed)
hbox = QHBoxLayout()
hbox.addWidget(self.film_thickness_sb)
film_thickness_label = QLabel('Film thickness (nm)',self)
film_thickness_label.setToolTip('Define the depth of the thin film in nanometres.')
form.addRow(film_thickness_label, hbox)
#
# Add a legend option
#
self.legend_le = QLineEdit(self)
self.legend_le.setToolTip('The legend will be used to describe the results in the plot')
self.legend_le.setText(self.settings['Legend'])
self.legend_le.textChanged.connect(self.on_legend_le_changed)
label = QLabel('Scenario legend',self)
label.setToolTip('The legend will be used to describe the results in the plot')
form.addRow(label, self.legend_le)
#
#
# Final buttons for changing/deleting and switching the scenarios
#
hbox = self.add_scenario_buttons()
form.addRow(hbox)
vbox.addLayout(form)
# finalise the layout
self.setLayout(vbox)
QCoreApplication.processEvents()
debugger.print('Finished:: initialiser')
def on_film_thickness_sb_changed(self,value):
debugger.print(self.settings['Legend'],'on_film_thickness_sb', value)
self.refreshRequired = True
self.settings['Film thickness'] = value
return
def on_superstrate_dielectric_sb_changed(self,value):
debugger.print(self.settings['Legend'],'on_superstrate_dielectric_sb', value)
self.refreshRequired = True
self.settings['Superstrate dielectric'] = value
return
def on_substrate_dielectric_sb_changed(self,value):
debugger.print(self.settings['Legend'],'on_substrate_dielectric_sb', value)
self.refreshRequired = True
self.settings['Substrate dielectric'] = value
return
def on_azimuthal_angle_sb_changed(self,value):
debugger.print(self.settings['Legend'],'on_azimuthal_angl_sb_changed', value)
self.refreshRequired = True
self.settings['Azimuthal angle'] = value
self.calculate_euler_angles()
return
def on_angle_of_incidence_sb_changed(self,value):
debugger.print(self.settings['Legend'],'on_angle_of_incidence_sb_changed', value)
self.refreshRequired = True
self.settings['Angle of incidence'] = value
return
def on_h_sb_changed(self,value):
debugger.print(self.settings['Legend'],'on_h_sb_changed', value)
self.refreshRequired = True
self.settings['Unique direction - h'] = value
self.calculate_euler_angles()
return
def on_k_sb_changed(self,value):
debugger.print(self.settings['Legend'],'on_k_sb_changed', value)
self.refreshRequired = True
self.settings['Unique direction - k'] = value
self.calculate_euler_angles()
return
def on_l_sb_changed(self,value):
debugger.print(self.settings['Legend'],'on_l_sb_changed', value)
self.refreshRequired = True
self.settings['Unique direction - l'] = value
self.calculate_euler_angles()
return
def refresh(self,force=False):
debugger.print(self.settings['Legend'],'Start:: refresh, force =', force)
if not self.refreshRequired and not force :
debugger.print(self.settings['Legend'],'Finished:: refreshing widget aborted', self.refreshRequired,force)
return
# Force recalculation
self.calculationRequired = True
# Change any greyed out items
self.greyed_out()
#
# Block signals during refresh
#
for w in self.findChildren(QWidget):
w.blockSignals(True)
# Now refresh values
index = self.mode_cb.findText(self.settings['Mode'], Qt.MatchFixedString)
self.mode_cb.setCurrentIndex(index)
self.legend_le.setText(self.settings['Legend'])
self.h_sb.setValue(self.settings['Unique direction - h'])
self.k_sb.setValue(self.settings['Unique direction - k'])
self.l_sb.setValue(self.settings['Unique direction - l'])
self.azimuthal_angle_sb.setValue(self.settings['Azimuthal angle'])
self.angle_of_incidence_sb.setValue(self.settings['Angle of incidence'])
self.superstrate_dielectric_sb.setValue(self.settings['Superstrate dielectric'])
self.substrate_dielectric_sb.setValue(self.settings['Substrate dielectric'])
self.film_thickness_sb.setValue(self.settings['Film thickness'])
self.reader = self.notebook.mainTab.reader
if self.reader is not None:
self.cell = self.reader.unit_cells[-1]
# Refresh the widgets that depend on the reader
self.reader = self.notebook.reader
self.calculate_euler_angles()
#
# Unblock signals after refresh
#
for w in self.findChildren(QWidget):
w.blockSignals(False)
QCoreApplication.processEvents()
self.refreshRequired = False
debugger.print(self.settings['Legend'],'Finished:: refresh, force =', force)
return
def on_mode_cb_activated(self, index):
debugger.print(self.settings['Legend'],'Start:: on_mode_cb_activated')
if index == 0:
self.settings['Mode'] = 'Thick slab'
elif index == 1:
self.settings['Mode'] = 'Coherent thin film'
else:
self.settings['Mode'] = 'Incoherent thin film'
self.refreshRequired = True
self.refresh()
self.refreshRequired = True
debugger.print(self.settings['Legend'],'Mode changed to ', self.settings['Mode'])
debugger.print(self.settings['Legend'],'Finished:: on_mode_cb_activated')
return
def calculate(self,vs_cm1):
debugger.print(self.settings['Legend'],'Start:: calculate - number of frequencies',len(vs_cm1))
if not self.calculationRequired:
            debugger.print(self.settings['Legend'],'Finished:: calculate aborted because calculationRequired is false')
return
QCoreApplication.processEvents()
# Assemble the mainTab settings
settings = self.notebook.mainTab.settings
program = settings['Program']
filename = self.notebook.mainTab.getFullFileName()
if self.reader is None:
debugger.print(self.settings['Legend'],'Finished:: Calculate aborting - no reader')
return
if program == '':
debugger.print(self.settings['Legend'],'Finished:: Calculate aborting - no program')
return
if filename == '':
debugger.print(self.settings['Legend'],'Finished:: Calculate aborting - no file')
return
# Assemble the settingsTab settings
settings = self.notebook.settingsTab.settings
sigmas_cm1 = self.notebook.settingsTab.sigmas_cm1
sigmas = np.array(sigmas_cm1) * wavenumber
modes_selected = self.notebook.settingsTab.modes_selected
frequencies_cm1 = self.notebook.settingsTab.frequencies_cm1
frequencies = np.array(frequencies_cm1) * wavenumber
# The dielectric variables are functions of frequency
superstrateDielectric = self.settings['Superstrate dielectric']
substrateDielectric = self.settings['Substrate dielectric']
superstrateDielectricFunction = DielectricFunction(epsType='constant',units='hz',parameters=superstrateDielectric).function()
substrateDielectricFunction = DielectricFunction(epsType='constant',units='hz',parameters=substrateDielectric).function()
# The crystal dielectric has already been defined in the SettingsTab
        # Force a call to get the permittivity; if it needs recalculating it will be recalculated here
temp = self.notebook.settingsTab.get_crystal_permittivity(vs_cm1)
# Make sure the system knows that frequency will be supplied using Hz
self.notebook.settingsTab.CrystalPermittivity.setUnits('hz')
# Actually use the permittivity function in what follows
crystalPermittivityFunction = self.notebook.settingsTab.CrystalPermittivity.function()
# Create 3 layers, thickness is converted from microns to metres
superstrateDepth = self.settings['Superstrate depth']
substrateDepth = self.settings['Substrate depth']
crystalDepth = self.settings['Film thickness']
# Determine the euler angles
theta,phi,psi = self.calculate_euler_angles()
# Set the angle of incidence in radians
angle = self.settings['Angle of incidence']
angleOfIncidence = np.pi / 180.0 * angle
mode = self.settings['Mode']
#
# Initialise the partial function to pass through to the pool
#
partial_function = partial(Calculator.solve_single_crystal_equations,
superstrateDielectricFunction,
substrateDielectricFunction,
crystalPermittivityFunction,
superstrateDepth,
substrateDepth,
crystalDepth,
mode,
theta,
phi,
psi,
angleOfIncidence)
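        # Everything except the frequency is now bound into partial_function,
        # so the pool below only has to map it over the list of frequencies.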
results = []
# About to call
debugger.print(self.settings['Legend'],'About to calculate single crystal scenario using pool')
if self.notebook.pool is None:
self.notebook.startPool()
for result in self.notebook.pool.imap(partial_function, vs_cm1, chunksize=20):
self.notebook.progressbars_update()
results.append(result)
QCoreApplication.processEvents()
# Initialise plotting variables
self.vs_cm1= []
self.p_reflectance = []
self.s_reflectance = []
self.p_transmittance = []
self.s_transmittance = []
self.p_absorbtance = []
self.s_absorbtance = []
self.epsilon = []
debugger.print(self.settings['Legend'],'About to extract results for single crystal scenario')
for v,r,R,t,T,epsilon in results:
self.vs_cm1.append(v)
self.p_reflectance.append(R[0]+R[2])
self.s_reflectance.append(R[1]+R[3])
self.p_transmittance.append(T[0])
self.s_transmittance.append(T[1])
self.p_absorbtance.append(R[0]+R[2]+T[0])
self.s_absorbtance.append(R[1]+R[3]+T[1])
self.epsilon.append(epsilon)
self.calculationRequired = False
QCoreApplication.processEvents()
debugger.print(self.settings['Legend'],'Finished:: calculate - number of frequencies',len(vs_cm1))
return
def writeSpreadsheet(self):
debugger.print(self.settings['Legend'],'Start:: writeSpreadsheet')
if self.notebook.spreadsheet is None:
debugger.print(self.settings['Legend'],'Finished:: writeSpreadsheet')
return
sp = self.notebook.spreadsheet
sp.selectWorkSheet('Single Crystal')
sp.delete()
sp.writeNextRow(['Settings for the single crystal calculation of absorption and reflection'],col=1)
sp.writeNextRow([''],col=1)
sp.writeNextRow([ 'Single crystal mode', self.settings['Mode'] ],col=1)
sp.writeNextRow([ 'Minimum frequency', self.settings['Minimum frequency'] ],col=1)
sp.writeNextRow([ 'Maximum frequency', self.settings['Maximum frequency'] ],col=1)
sp.writeNextRow([ 'Frequency increment', self.settings['Frequency increment'] ],col=1)
sp.writeNextRow([ 'Surface definition (h)', self.settings['Unique direction - h'] ],col=1)
sp.writeNextRow([ 'Surface definition (k)', self.settings['Unique direction - k'] ],col=1)
sp.writeNextRow([ 'Surface definition (l)', self.settings['Unique direction - l'] ],col=1)
sp.writeNextRow([ 'Azimuthal angle', self.settings['Azimuthal angle'] ],col=1)
sp.writeNextRow([ 'Angle of incidence', self.settings['Angle of incidence'] ],col=1)
sp.writeNextRow([ 'Superstrate dielectric', self.settings['Superstrate dielectric'] ],col=1)
sp.writeNextRow([ 'Substrate dielectric', self.settings['Substrate dielectric'] ],col=1)
sp.writeNextRow([ 'Film thickness(nm)', self.settings['Film thickness'] ],col=1)
headings = ['R_p', 'R_s', 'T_p', 'T_s']
self.write_results(sp, 'Crystal R&T', self.vs_cm1, [self.p_reflectance, self.s_reflectance, self.p_transmittance, self.s_transmittance], headings)
debugger.print(self.settings['Legend'],'Finished:: writeSpreadsheet')
return
def write_results(self, sp, name, vs, yss, headings):
"""
        sp is the spreadsheet object
        name is the worksheet name used for writing
        vs is an np.array of the frequencies
        yss is a list of np.arrays of the reflections and transmittances
        headings are the heading names for the yss
"""
debugger.print(self.settings['Legend'],'Start:: write_results')
sp.selectWorkSheet(name)
sp.delete()
headers = ['frequencies (cm-1)']
headers.extend(headings)
sp.writeNextRow(headers,row=0, col=1)
for iv,v in enumerate(vs):
output = [v]
for ys in yss:
output.append(ys[iv])
sp.writeNextRow(output, col=1,check=1)
debugger.print(self.settings['Legend'],'Finished:: write_results')
return
def calculate_euler_angles(self):
'''Calculate the Euler angles for the crystal to lab transformation'''
debugger.print(self.settings['Legend'],'Start:: calculate_euler_angles')
# Get plane specification
hkl = [ self.settings['Unique direction - h'] , self.settings['Unique direction - k'], self.settings['Unique direction - l'] ]
sum2 = hkl[0]*hkl[0] + hkl[1]*hkl[1] + hkl[2]*hkl[2]
if sum2 < 1:
debugger.print(self.settings['Legend'],'Finished:: calculate_euler_angles')
return 0,0,0
x = 0
y = 1
z = 2
# convert normal to plane to a direction in xyz coordinates
planez = self.cell.convert_hkl_to_xyz(hkl)
planez /= np.linalg.norm(planez)
plane = np.zeros( (3,3) )
lab = np.identity(3)
plane[z] = planez
if plane[z][2] < 0.99999999 and plane[z][2] > -0.99999999:
plane[x] = np.cross(plane[z], lab[z])
plane[y] = np.cross(plane[z], plane[x])
plane[x] /= np.linalg.norm(plane[y])
plane[y] /= np.linalg.norm(plane[y])
else:
plane[x] = lab[x]
plane[y] = lab[y]
plane[z] = lab[z]
# Calculate the rotation matrix which transforms us to a unit matrix
rotation = np.linalg.pinv(plane)
# Because the formula used for the Euler transform is based on an active transform
# We calculate the angles using the transpose of the rotation matrix
rotation = rotation.T
# Negative sign for angles because of the passive / active problem
alpha = -np.arctan2(rotation[z][0],-rotation[z][1])
beta = -np.arccos(rotation[z][2])
gamma = -np.arctan2(rotation[x][2],rotation[y][2])
gamma = -self.settings['Azimuthal angle'] * np.pi / 180.0
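        # Note: the gamma computed from the rotation matrix above is overridden
        # by the user-supplied azimuthal angle.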
#
# Some confusion here as to the role of the euler angles
#
psi = alpha
theta = beta
phi = gamma
normal_to_plane_lab = Calculator.euler_rotation(plane[z], theta, phi, psi)
if normal_to_plane_lab[2] < 0.9999 and normal_to_plane_lab[2] > -0.9999:
print('Error in Euler rotations - surface normal is not along Z-axis', normal_to_plane_lab)
exit()
a = Calculator.euler_rotation(self.cell.lattice[0], theta, phi, psi)
b = Calculator.euler_rotation(self.cell.lattice[1], theta, phi, psi)
c = Calculator.euler_rotation(self.cell.lattice[2], theta, phi, psi)
self.labframe_w.clear()
self.labframe_w.addItem('a-axis in lab frame: {: 3.5f}, {: 3.5f}, {: 3.5f}'.format(a[0],a[1],a[2]) )
self.labframe_w.addItem('b-axis in lab frame: {: 3.5f}, {: 3.5f}, {: 3.5f}'.format(b[0],b[1],b[2]) )
self.labframe_w.addItem('c-axis in lab frame: {: 3.5f}, {: 3.5f}, {: 3.5f}'.format(c[0],c[1],c[2]) )
self.labframe_a = a
self.labframe_b = b
self.labframe_c = c
#a = a / np.linalg.norm(a)
#b = b / np.linalg.norm(b)
#c = c / np.linalg.norm(c)
# print('Projection of a,b,c onto the lab Y-axis (s-pol)', a[1],b[1],c[1])
# print('Projection of a,b,c onto the lab X-axis (p-pol)', a[0],b[0],c[0])
debugger.print(self.settings['Legend'],'Finished:: calculate_euler_angles')
return (theta, phi, psi)
def get_result(self, vs_cm1, plot_type):
"""Return a particular result"""
debugger.print(self.settings['Legend'],'Start:: get_result',len(vs_cm1),plot_type)
self.get_results(vs_cm1)
debugger.print(self.settings['Legend'],'Finished:: get_result',len(vs_cm1),plot_type)
if plot_type == 'Crystal Reflectance (P polarisation)':
return self.p_reflectance
elif plot_type == 'Crystal Reflectance (S polarisation)':
return self.s_reflectance
elif plot_type == 'Crystal Transmittance (P polarisation)':
return self.p_transmittance
elif plot_type == 'Crystal Transmittance (S polarisation)':
return self.s_transmittance
elif plot_type == 'Crystal Absorbtance (P polarisation)':
return self.p_absorbtance
elif plot_type == 'Crystal Absorbtance (S polarisation)':
return self.s_absorbtance
else:
# print('Error in returning result from CrystalScenarioTab: ',plot_type)
return None
def get_results(self, vs_cm1):
"""Return the results of the effective medium theory calculation"""
debugger.print(self.settings['Legend'],'Start:: get_results',len(vs_cm1),self.refreshRequired)
if len(vs_cm1)>0 and (self.refreshRequired or len(self.vs_cm1) != len(vs_cm1) or self.vs_cm1[0] != vs_cm1[0] or self.vs_cm1[1] != vs_cm1[1]) :
debugger.print(self.settings['Legend'],'get_results recalculating')
self.refresh()
self.calculate(vs_cm1)
else:
debugger.print(self.settings['Legend'],'get_results no need for recalculation')
self.notebook.progressbars_update(increment=len(vs_cm1))
debugger.print(self.settings['Legend'],'Finished:: get_results',len(vs_cm1),self.refreshRequired)
return
def greyed_out(self):
"""Have a look through the settings and see if we need to grey anything out"""
# If the single crystal mode is Thick Slab, there is no need for film thickness or substrate permittivity
debugger.print(self.settings['Legend'],'Start:: greyed_out')
if self.settings['Mode'] == 'Thick slab':
self.film_thickness_sb.setEnabled(False)
self.substrate_dielectric_sb.setEnabled(False)
else:
self.film_thickness_sb.setEnabled(True)
self.substrate_dielectric_sb.setEnabled(True)
debugger.print(self.settings['Legend'],'Finished:: greyed_out')
| {
"content_hash": "2379ba0c8c7ca8838dda6e6c8b73202d",
"timestamp": "",
"source": "github",
"line_count": 552,
"max_line_length": 322,
"avg_line_length": 51.69202898550725,
"alnum_prop": 0.6336651012826803,
"repo_name": "JohnKendrick/PDielec",
"id": "4e2fe0b1aad7752c9a3015ab4e50216f4776dc4d",
"size": "28557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PDielec/GUI/SingleCrystalScenarioTab.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "811"
},
{
"name": "Makefile",
"bytes": "802"
},
{
"name": "Python",
"bytes": "879573"
},
{
"name": "TeX",
"bytes": "70400"
}
],
"symlink_target": ""
} |
from django import forms
class GitTokenForm(forms.Form):
user = forms.CharField(max_length=15)
password = forms.CharField(max_length=100, widget=forms.PasswordInput(render_value=False))
| {
"content_hash": "a52e179181a19467182d45e53bf45c53",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 94,
"avg_line_length": 28.142857142857142,
"alnum_prop": 0.7614213197969543,
"repo_name": "emergence-lab/emergence-lab",
"id": "c1d49a9b9a4a55aec03e924ea912ad3bfdff64fe",
"size": "197",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "users/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4017"
},
{
"name": "HTML",
"bytes": "255519"
},
{
"name": "JavaScript",
"bytes": "16743"
},
{
"name": "Python",
"bytes": "539642"
}
],
"symlink_target": ""
} |
nos = 0
while nos < 10:
nos += 1
if nos%2 == 0:
continue
print(nos)
# AVOID INFINITE LOOP
# if the loop has no condition that can ever become false, it will run forever
# here, if we forgot to add "nos += 1", the loop would become an infinite loop
| {
"content_hash": "33e666c7e1ff6116a5f0167ddee0be8b",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 73,
"avg_line_length": 25.7,
"alnum_prop": 0.6498054474708171,
"repo_name": "KT26/PythonCourse",
"id": "41272f933ac706bb835692a9eaf7ef4b4ce367ee",
"size": "465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "6. User Input and While Loops/8.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52736"
}
],
"symlink_target": ""
} |
def test_bitrise_den_agent(host):
bitrise_den_agent = host.file("/home/linuxbrew/bitrise-den-agent")
assert bitrise_den_agent.user == "linuxbrew"
assert bitrise_den_agent.group == "staff"
assert bitrise_den_agent.mode == 0o755
| {
"content_hash": "39179b2d35694d815c7ce3eb7b5d11c9",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 70,
"avg_line_length": 48.6,
"alnum_prop": 0.7078189300411523,
"repo_name": "bitrise-io/osx-box-bootstrap",
"id": "542a452bc3416ee8e0c5e878cf64a3cafb5477dd",
"size": "243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roles/bitrise-agent/tests/test_bitrise_cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "505"
},
{
"name": "Jinja",
"bytes": "4408"
},
{
"name": "Python",
"bytes": "16668"
},
{
"name": "Ruby",
"bytes": "509"
},
{
"name": "Shell",
"bytes": "24197"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta, time
import pytz
from time import mktime
import iso8601
import re
from unidecode import unidecode
from vumi.utils import get_first_word
from vusion.const import PLUS_REGEX, ZEROS_REGEX
def get_default(kwargs, field, default_value):
return kwargs[field] if field in kwargs else default_value
def get_local_code(from_addr):
return (from_addr or '').split('-')[1]
def clean_keyword(keyword):
keyword = keyword.replace('\n', ' ').strip()
if isinstance(keyword, str):
keyword = keyword.decode('utf-8')
return unidecode(keyword).lower()
def clean_msg(content):
if content is None:
return ''
content = content.replace('\n', ' ')
return content
def time_to_vusion_format(timestamp):
return timestamp.strftime('%Y-%m-%dT%H:%M:%S')
def time_to_vusion_format_date(timestamp):
return timestamp.strftime('%Y-%m-%d')
def get_now_timestamp():
return time_to_vusion_format(datetime.now())
def time_from_vusion_format(date_time_str):
return iso8601.parse_date(date_time_str).replace(tzinfo=None)
def date_from_vusion_format(date_time_str):
return time_from_vusion_format(date_time_str).replace(hour=0, minute=0, second=0)
def get_local_time(timezone):
if timezone is None or timezone in pytz.all_timezones:
return datetime.utcnow()
return datetime.utcnow().replace(tzinfo=pytz.utc).astimezone(
pytz.timezone(timezone)).replace(tzinfo=None)
def get_local_time_as_timestamp(local_time):
return long("%s%s" % (long(mktime(local_time.timetuple())),
local_time.microsecond))
##TODO rename is_prefixed_code_a_shortcode
def is_shortcode_address(address):
if address is None:
return False
regex_NATIONAL_SHORTCODE = re.compile('^[0-9]+-[0-9]+$')
if re.match(regex_NATIONAL_SHORTCODE, address):
return True
return False
##TODO rename is_prefixed_code_a_longcode
def is_longcode_address(address):
    regex_LONGCODE = re.compile('^\+[0-9]+$')
if re.match(regex_LONGCODE, address):
return True
return False
##TODO rename from_prefixed_code_to_code
def get_shortcode_value(shortcode):
if shortcode is None :
return None
if is_shortcode_address(shortcode):
return shortcode.split('-')[1]
return shortcode
##TODO rename from_prefixed_code_to_prefix
def get_shortcode_international_prefix(shortcode):
if shortcode is None :
return None
if is_shortcode_address(shortcode):
return shortcode.split('-')[0]
return shortcode
##TODO move function in Shortcode model
def get_shortcode_address(shortcode):
if shortcode['supported-internationally'] == 0:
return ("%s-%s" % (shortcode['international-prefix'], shortcode['shortcode']))
return shortcode['shortcode']
def get_offset_date_time(reference_time, days, at_time):
sending_day = reference_time + timedelta(days=int(days))
time_of_sending = at_time.split(':', 1)
return datetime.combine(sending_day, time(int(time_of_sending[0]), int(time_of_sending[1])))
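# Illustrative example: with reference_time 2015-01-01 10:00, days=2 and
# at_time '09:30', get_offset_date_time returns datetime(2015, 1, 3, 9, 30).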
def split_keywords(keywords):
return [k.lower() for k in (keywords or '').split(', ')]
def add_char_to_pattern(string, pattern):
regex = re.compile('[a-zA-Z]')
l = list(string)
for index, char in enumerate(l):
if regex.match(char):
l[index] = "%%%s" % char
return ''.join(l)
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
def get_keyword(msg):
return clean_keyword(get_first_msg_word(msg))
def get_first_msg_word(content):
return get_first_word(clean_msg(content))
def get_word(content, position=0, delimiter=' '):
splits = (content or '').split(delimiter)
if position < len(splits):
return splits[position]
return None
def get_words(content, start, end, delimiter=' '):
start = start - 1
splits = (content or '').split(delimiter)
if start < len(splits):
if end == 'end':
return ' '.join(splits[start:])
else:
end = int(end)
return ' '.join(splits[start:end])
return None
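# Illustrative examples: get_word('stop malaria now', 1) returns 'malaria',
# get_words('stop malaria now', 2, 'end') returns 'malaria now'.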
def clean_phone(phone):
if phone in [None,'']:
return None
if (re.match(ZEROS_REGEX, phone)):
return re.sub(ZEROS_REGEX, "+", phone)
if (not re.match(PLUS_REGEX, phone)):
return '+%s' % phone
return phone
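# Illustrative behaviour, assuming ZEROS_REGEX matches a leading '00' and
# PLUS_REGEX a leading '+': clean_phone('00256712345678') and
# clean_phone('256712345678') both return '+256712345678'.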
def dynamic_content_notation_to_string(domain, keys):
tmp = domain
for key, value in sorted(keys.iteritems()):
tmp = '%s.%s' % (tmp, value)
return '[%s]' % tmp
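# Illustrative example: dynamic_content_notation_to_string(
#     'contentVariable', {'key1': 'mother', 'key2': 'weight'})
# returns '[contentVariable.mother.weight]'.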
def escape_nested(instance, to_escape):
if isinstance(instance, dict):
result = {}
for key, value in instance.iteritems():
result[escape_nested(key, to_escape)] = escape_nested(value, to_escape)
return result
elif isinstance(instance, list):
return [escape_nested(value, to_escape) for value in instance]
elif isinstance(instance, basestring) or isinstance(instance, unicode):
return re.sub(to_escape, '\\%s' % to_escape, instance)
else:
return instance
def unescape_nested(instance, to_unescape):
if isinstance(instance, dict):
result = {}
for key, value in instance.iteritems():
result[unescape_nested(key, to_unescape)] = unescape_nested(value, to_unescape)
return result
elif isinstance(instance, list):
return [unescape_nested(value, to_unescape) for value in instance]
elif isinstance(instance, basestring) or isinstance(instance, unicode):
return re.sub('\\\\\%s' % to_unescape, to_unescape, instance)
else:
return instance
#TODO remove DataLayerUtils in tests package
class DataLayerUtils:
def __init__(self):
self.collections = {}
def setup_collections(self, names):
for name in names:
self.setup_collection(name)
def setup_collection(self, name):
if name in self.db.collection_names():
self.collections[name] = self.db[name]
else:
self.collections[name] = self.db.create_collection(name)
def drop_collections(self):
for name, collection in self.collections.items():
collection.drop()
| {
"content_hash": "8e55f4be6b698a6d1ca3b6eb7ffbad46",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 96,
"avg_line_length": 28.598173515981735,
"alnum_prop": 0.6495289797221778,
"repo_name": "texttochange/vusion-backend",
"id": "5b0eb9a6ff5bf0be0ef37d7a90b2cbf9447b2d8e",
"size": "6263",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "vusion/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1510"
},
{
"name": "Python",
"bytes": "1204678"
},
{
"name": "Shell",
"bytes": "798"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
# Copyright (c) 2010-2016 openpyxl
from openpyxl.descriptors import (
Integer,
Alias
)
from openpyxl.descriptors.excel import Relation
from openpyxl.descriptors.serialisable import Serialisable
class SheetBackgroundPicture(Serialisable):
tagname = "picture"
id = Relation()
def __init__(self, id):
self.id = id
class DrawingHF(Serialisable):
id = Relation()
lho = Integer(allow_none=True)
leftHeaderOddPages = Alias('lho')
lhe = Integer(allow_none=True)
leftHeaderEvenPages = Alias('lhe')
lhf = Integer(allow_none=True)
leftHeaderFirstPage = Alias('lhf')
cho = Integer(allow_none=True)
centerHeaderOddPages = Alias('cho')
che = Integer(allow_none=True)
centerHeaderEvenPages = Alias('che')
chf = Integer(allow_none=True)
centerHeaderFirstPage = Alias('chf')
rho = Integer(allow_none=True)
rightHeaderOddPages = Alias('rho')
rhe = Integer(allow_none=True)
rightHeaderEvenPages = Alias('rhe')
rhf = Integer(allow_none=True)
rightHeaderFirstPage = Alias('rhf')
lfo = Integer(allow_none=True)
leftFooterOddPages = Alias('lfo')
lfe = Integer(allow_none=True)
leftFooterEvenPages = Alias('lfe')
lff = Integer(allow_none=True)
leftFooterFirstPage = Alias('lff')
cfo = Integer(allow_none=True)
centerFooterOddPages = Alias('cfo')
cfe = Integer(allow_none=True)
centerFooterEvenPages = Alias('cfe')
cff = Integer(allow_none=True)
centerFooterFirstPage = Alias('cff')
rfo = Integer(allow_none=True)
rightFooterOddPages = Alias('rfo')
rfe = Integer(allow_none=True)
rightFooterEvenPages = Alias('rfe')
rff = Integer(allow_none=True)
    rightFooterFirstPage = Alias('rff')
def __init__(self,
id=None,
lho=None,
lhe=None,
lhf=None,
cho=None,
che=None,
chf=None,
rho=None,
rhe=None,
rhf=None,
lfo=None,
lfe=None,
lff=None,
cfo=None,
cfe=None,
cff=None,
rfo=None,
rfe=None,
rff=None,
):
self.id = id
self.lho = lho
self.lhe = lhe
self.lhf = lhf
self.cho = cho
self.che = che
self.chf = chf
self.rho = rho
self.rhe = rhe
self.rhf = rhf
self.lfo = lfo
self.lfe = lfe
self.lff = lff
self.cfo = cfo
self.cfe = cfe
self.cff = cff
self.rfo = rfo
self.rfe = rfe
self.rff = rff
| {
"content_hash": "8d8b7d5dd58e12a5b113d06df542f83d",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 58,
"avg_line_length": 28.26530612244898,
"alnum_prop": 0.5613718411552346,
"repo_name": "aragos/tichu-tournament",
"id": "49970a32b91d69249767a29cfed814cd10883f86",
"size": "2770",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/openpyxl/chartsheet/relation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "8008"
},
{
"name": "CSS",
"bytes": "1695"
},
{
"name": "HTML",
"bytes": "63890"
},
{
"name": "JavaScript",
"bytes": "320642"
},
{
"name": "Python",
"bytes": "3432940"
}
],
"symlink_target": ""
} |
import webbrowser
import os
import re
import urllib
import json
# Styles and scripting for the page
main_page_head = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Fresh Tomatoes!</title>
<!-- Bootstrap 3 -->
<link rel="stylesheet" href="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/css/bootstrap.min.css">
<link rel="stylesheet" href="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/css/bootstrap-theme.min.css">
<script src="http://code.jquery.com/jquery-1.10.1.min.js"></script>
<script src="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/js/bootstrap.min.js"></script>
<style type="text/css" media="screen">
body {
padding-top: 80px;
}
#trailer .modal-dialog {
margin-top: 200px;
width: 640px;
height: 480px;
}
.hanging-close {
position: absolute;
top: -12px;
right: -12px;
z-index: 9001;
}
#trailer-video {
width: 100%;
height: 100%;
}
.movie-tile {
margin-bottom: 20px;
padding-top: 20px;
}
.movie-tile:hover {
background-color: #EEE;
cursor: pointer;
}
.scale-media {
padding-bottom: 56.25%;
position: relative;
}
.scale-media iframe {
border: none;
height: 100%;
position: absolute;
width: 100%;
left: 0;
top: 0;
background-color: white;
}
</style>
<script type="text/javascript" charset="utf-8">
// Pause the video when the modal is closed
$(document).on('click', '.hanging-close, .modal-backdrop, .modal', function (event) {
// Remove the src so the player itself gets removed, as this is the only
// reliable way to ensure the video stops playing in IE
$("#trailer-video-container").empty();
});
// Start playing the video whenever the trailer modal is opened
$(document).on('click', '.movie-tile', function (event) {
var trailerYouTubeId = $(this).attr('data-trailer-youtube-id')
var sourceUrl = 'http://www.youtube.com/embed/' + trailerYouTubeId + '?autoplay=1&html5=1';
$("#trailer-video-container").empty().append($("<iframe></iframe>", {
'id': 'trailer-video',
'type': 'text-html',
'src': sourceUrl,
'frameborder': 0
}));
});
// Animate in the movies when the page loads
$(document).ready(function () {
$('.movie-tile').hide().first().show("fast", function showNext() {
$(this).next("div").show("fast", showNext);
});
});
</script>
</head>
'''
# The main page layout and title bar
main_page_content = '''
<body>
<!-- Trailer Video Modal -->
<div class="modal" id="trailer">
<div class="modal-dialog">
<div class="modal-content">
<a href="#" class="hanging-close" data-dismiss="modal" aria-hidden="true">
<img src="https://lh5.ggpht.com/v4-628SilF0HtHuHdu5EzxD7WRqOrrTIDi_MhEG6_qkNtUK5Wg7KPkofp_VJoF7RS2LhxwEFCO1ICHZlc-o_=s0#w=24&h=24"/>
</a>
<div class="scale-media" id="trailer-video-container">
</div>
</div>
</div>
</div>
<!-- Main Page Content -->
<div class="container">
<div class="navbar navbar-inverse navbar-fixed-top" role="navigation">
<div class="container">
<div class="navbar-header">
<a class="navbar-brand" href="#">Fresh Tomatoes Movie Trailers</a>
</div>
</div>
</div>
</div>
<div class="container">
{movie_tiles}
</div>
</body>
</html>
'''
# A single movie entry html template
movie_tile_content = '''
<div class="col-md-6 col-lg-4 movie-tile text-center" data-trailer-youtube-id="{trailer_youtube_id}" data-toggle="modal" data-target="#trailer">
<img src="{poster_image_url}" width="220" height="342">
<h2>{movie_title}</h2>
<div>IMDb <img src="http://pngimg.com/upload/small/star_PNG1575.png" width="20px" height="20px" style="margin-top:-5px;"/> {imdb_rating}</div>
</div>
'''
# Call the OMDb web service to get the movie's IMDb rating
def get_imdb_rating(movie):
url = "http://www.omdbapi.com/?t=" + urllib.quote(movie.title)
connection = urllib.urlopen(url)
output = connection.read() # get json
connection.close()
# Parse json so it can be used as a normal dictionary
parsed_json = json.loads(output)
if 'imdbRating' in parsed_json.keys():
return parsed_json['imdbRating']
else:
return "N/A"
def create_movie_tiles_content(movies):
# The HTML content for this section of the page
content = ''
for movie in movies:
# Extract the youtube ID from the url
youtube_id_match = re.search(
r'(?<=v=)[^&#]+', movie.trailer_youtube_url)
youtube_id_match = youtube_id_match or re.search(
r'(?<=be/)[^&#]+', movie.trailer_youtube_url)
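        # The two patterns above cover both the 'watch?v=<id>' and the
        # short 'youtu.be/<id>' URL forms.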
trailer_youtube_id = (youtube_id_match.group(0) if youtube_id_match
else None)
# Append the tile for the movie with its content filled in
content += movie_tile_content.format(
movie_title=movie.title,
poster_image_url=movie.poster_image_url,
trailer_youtube_id=trailer_youtube_id,
imdb_rating=get_imdb_rating(movie)
)
return content
def open_movies_page(movies):
# Create or overwrite the output file
output_file = open('fresh_tomatoes.html', 'w')
    # Replace the movie tiles placeholder with the generated content
rendered_content = main_page_content.format(
movie_tiles=create_movie_tiles_content(movies))
# Output the file
output_file.write(main_page_head + rendered_content)
output_file.close()
# Open the output file in the browser (in a new tab, if possible)
url = os.path.abspath(output_file.name)
webbrowser.open('file://' + url, new=2)
| {
"content_hash": "c9a3c3a762ca5aef4943472202bcbb74",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 146,
"avg_line_length": 33.64673913043478,
"alnum_prop": 0.5740591180746245,
"repo_name": "MariiaSurmenok/Movie-Trailer-Website",
"id": "b70a96e3fb79f8604112e46fb7575485acd11327",
"size": "6191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fresh_tomatoes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8300"
}
],
"symlink_target": ""
} |
from datetime import datetime
from django import forms
from django.utils.translation import ugettext_lazy as _
from pows.apps.forum.models import Topic, Reply
class PostForm(forms.ModelForm):
subject = forms.CharField(label=_('Subject'), widget=forms.TextInput(
attrs={'size':'80'}))
message = forms.CharField(label=_('Message'), widget=forms.Textarea(
attrs={'cols':'95', 'rows':'14'}))
#attachments = forms.Field(label=_('Attachments'), required=False,
# widget=forms.SelectMultiple())
class Meta:
model = Reply
fields = ('message',)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
self.topic = kwargs.pop('topic', None)
self.forum = kwargs.pop('forum', None)
self.ip = kwargs.pop('ip', None)
super(PostForm, self).__init__(*args, **kwargs)
#self.fields.keyOrder = ['subject', 'message', 'attachments']
self.fields.keyOrder = ['subject', 'message']
class EditPostForm(PostForm):
"""
Edit Post
"""
def __init__(self, *args, **kwargs):
super(EditPostForm, self).__init__(*args, **kwargs)
self.initial['subject'] = self.instance.topic.subject
if not self.instance.topic_post:
self.fields['subject'].required = False
def save(self):
post = self.instance
post.message = self.cleaned_data['message']
post.updated_on = datetime.now()
post.edited_by = self.user.username
#attachments = self.cleaned_data['attachments']
#post.update_attachments(attachments)
post.save()
if post.topic_post:
post.topic.subject = self.cleaned_data['subject']
post.topic.save()
return post
class NewPostForm(PostForm):
"""
New Topics and Posts Form.
"""
def __init__(self, *args, **kwargs):
super(NewPostForm, self).__init__(*args, **kwargs)
if self.topic:
self.fields['subject'].required = False
def save(self):
topic_post = False
if not self.topic:
topic = Topic(forum=self.forum, posted_by=self.user,
subject=self.cleaned_data['subject'])
topic_post = True
topic.save()
else:
topic = self.topic
        post = Reply(topic=topic, posted_by=self.user,
                     poster_ip=self.ip, message=self.cleaned_data['message'],
                     topic_post=topic_post)
post.save()
#attachments = self.cleaned_data['attachments']
#post.update_attachments(attachments)
return post
| {
"content_hash": "a7cb460c864f0b77403ad0722ba75305",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 73,
"avg_line_length": 34.896103896103895,
"alnum_prop": 0.5764793449944176,
"repo_name": "indexofire/gork",
"id": "6e19b0e378fe2132969ab18be05685fa287d144f",
"size": "2712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/gork/application/forum/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "199039"
},
{
"name": "JavaScript",
"bytes": "89817"
},
{
"name": "Python",
"bytes": "1120919"
},
{
"name": "Shell",
"bytes": "6713"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
import argparse
import requests
# See https://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.EFetch
# for URL format details.
URL = ('https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?'
'db=%(database)s&id=%(id)s&rettype=fasta&retmode=text')
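# For example (illustrative accession only), URL % {'database': 'nucleotide',
# 'id': 'NC_045512.2'} expands to
# 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=nucleotide&id=NC_045512.2&rettype=fasta&retmode=text'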
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=('Fetch a sequence by id from NCBI and write it to stdout '
'as FASTA.'))
parser.add_argument(
'id', help='The id of the sequence to fetch.')
parser.add_argument(
'--database', default='nucleotide', choices=('nucleotide', 'protein'),
help='The name of the NCBI database to query.')
args = parser.parse_args()
print(requests.get(
URL % {'database': args.database, 'id': args.id}
).text.rstrip('\n'))
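# Example invocation (sketch; the accession shown is only illustrative):
#   python ncbi-fetch-id.py --database nucleotide NC_001422.1
# which prints the corresponding FASTA record to stdout.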
| {
"content_hash": "a2c881c0a6d3c375ef325d09f114f3aa",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 75,
"avg_line_length": 30.5,
"alnum_prop": 0.6908665105386417,
"repo_name": "bamueh/dark-matter",
"id": "5f45d66db529ccfaa520815b1862e581b77b10ca",
"size": "877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/ncbi-fetch-id.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1249"
},
{
"name": "Python",
"bytes": "1451852"
},
{
"name": "Shell",
"bytes": "1125"
}
],
"symlink_target": ""
} |
import os
DEBUG = True
LOGIN_REDIRECT_URL = '/'
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
# SECURE_SSL_REDIRECT = True
# SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_HTTPONLY = True
CSRF_COOKIE_HTTPONLY = True
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
URL_PREFIX = ''
# Uncomment this line if you enable SSL
# SESSION_COOKIE_SECURE = True
# CSRF_COOKIE_SECURE = True
ADMINS = (
    ('Your Name', 'your.name@yourdomain'),
)
MANAGERS = ADMINS
DOJO_ROOT = 'DOJODIR'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2',
# 'mysql','sqlite3' or 'oracle'.
'NAME': 'MYSQLDB', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'MYSQLUSER',
'PASSWORD': 'MYSQLPWD',
'HOST': 'MYSQLHOST', # Empty for localhost through domain sockets
# or '127.0.0.1' for localhost through TCP.
'PORT': 'MYSQLPORT', # Set to empty string for default.
}
}
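# Illustrative local values only (the MYSQL* placeholders above are normally
# substituted during install), e.g. NAME='dojodb', USER='dojo',
# HOST='127.0.0.1', PORT='3306'.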
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = 'DOJO_MEDIA_ROOT'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = "DOJO_STATIC_ROOT"
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'djangobower.finders.BowerFinder',
)
FILE_UPLOAD_HANDLERS = ("django.core.files.uploadhandler.TemporaryFileUploadHandler",)
# where should bower install components
# collect static will move them to the static root
BOWER_COMPONENTS_ROOT = 'BOWERDIR'
# what components should be installed
BOWER_INSTALLED_APPS = (
'fontawesome',
'https://github.com/BlackrockDigital/startbootstrap-sb-admin-2.git',
'fullcalendar',
'jquery-cookie',
'jquery-ui',
'jquery-highlight',
# directly from github since no bower comp available
'https://github.com/jumjum123/JUMFlot.git',
'https://github.com/markrcote/flot-axislabels.git',
'chosen',
'chosen-bootstrap',
'bootswatch-dist#readable',
'bootstrap-wysiwyg-steveathon',
'justgage'
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'DOJOSECRET'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
# 'django.middleware.security.SecurityMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'dojo.middleware.LoginRequiredMiddleware',
)
ROOT_URLCONF = 'dojo.urls'
LOGIN_URL = '/login'
LOGIN_EXEMPT_URLS = (
r'^%sstatic/' % URL_PREFIX,
r'^%swebhook/' % URL_PREFIX,
r'^%smetrics/all$' % URL_PREFIX,
r'^%smetrics$' % URL_PREFIX,
r'^%smetrics/product/type/(?P<mtype>\d+)$' % URL_PREFIX,
r'^%smetrics/simple$' % URL_PREFIX,
r'^%sapi/v1/' % URL_PREFIX,
r'^%sajax/v1/' % URL_PREFIX,
r'^%sreports/cover$' % URL_PREFIX,
r'^%sfinding/image/(?P<token>[^/]+)$' % URL_PREFIX
)
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'dojo.wsgi.application'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'debug': DEBUG,
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'polymorphic', # provides admin templates
'overextends',
'django.contrib.admin',
'django.contrib.humanize',
'gunicorn',
'tastypie',
'djangobower',
'auditlog',
'dojo',
'tastypie_swagger',
'watson',
'tagging',
'custom_field',
'imagekit',
)
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtpout.your_domain.com'
EMAIL_PORT = '25'
EMAIL_USE_TLS = True
PORT_SCAN_CONTACT_EMAIL = 'email@your_host'
PORT_SCAN_RESULT_EMAIL_FROM = 'email@your_host'
PORT_SCAN_EXTERNAL_UNIT_EMAIL_LIST = ['email@your_host']
PORT_SCAN_SOURCE_IP = '127.0.0.1'
# Used in a few places to prefix page headings and in email
# salutations
TEAM_NAME = 'Security Engineering'
# Celery settings
BROKER_URL = 'sqla+sqlite:///dojo.celerydb.sqlite'
CELERY_SEND_TASK_ERROR_EMAILS = True
CELERY_IGNORE_RESULT = True
CELERY_TIMEZONE = TIME_ZONE
CELERY_TASK_RESULT_EXPIRES = 86400
CELERYBEAT_SCHEDULE_FILENAME = DOJO_ROOT + '/dojo.celery.beat.db'
CELERY_ACCEPT_CONTENT = ['pickle', 'json', 'msgpack', 'yaml']
# wkhtmltopdf settings
WKHTMLTOPDF_PATH = '/usr/local/bin/wkhtmltopdf'
# django-tagging settings
FORCE_LOWERCASE_TAGS = True
MAX_TAG_LENGTH = 25
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '[%(asctime)s] %(levelname)s [%(name)s:%(lineno)d] %(message)s',
'datefmt': '%d/%b/%Y %H:%M:%S',
},
'simple': {
'format': '%(levelname)s %(funcName)s %(lineno)d %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| {
"content_hash": "a2233a4b9ba71d86ec8f64ee86b1b1bc",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 86,
"avg_line_length": 31.404580152671755,
"alnum_prop": 0.6707583859990277,
"repo_name": "grendel513/django-DefectDojo",
"id": "d1b600285769e77a29348d74eacc2e00d7f5ddc2",
"size": "8264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dojo/settings.dist.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18368"
},
{
"name": "HTML",
"bytes": "839185"
},
{
"name": "JavaScript",
"bytes": "6717"
},
{
"name": "Nginx",
"bytes": "1810"
},
{
"name": "Python",
"bytes": "834622"
},
{
"name": "Shell",
"bytes": "22931"
},
{
"name": "XSLT",
"bytes": "6624"
}
],
"symlink_target": ""
} |
import bz2
import csv
import datetime
import gzip
import json
import os
import random
import shutil
import tempfile
import unittest
import mock
from contextlib import closing
from freezegun import freeze_time
from exporters.exceptions import ConfigurationError
from exporters.export_formatter.csv_export_formatter import CSVExportFormatter
from exporters.export_formatter.xml_export_formatter import XMLExportFormatter
from exporters.records.base_record import BaseRecord
from exporters.write_buffers.base import WriteBuffer
from exporters.write_buffers.grouping import GroupingBufferFilesTracker
from exporters.writers import FSWriter
from exporters.writers.base_writer import BaseWriter, InconsistentWriteState
from exporters.writers.console_writer import ConsoleWriter
from exporters.writers.filebase_base_writer import Filebase
from exporters.export_formatter.json_export_formatter import JsonExportFormatter
from exporters.groupers import PythonExpGrouper
from exporters.writers.filebase_base_writer import FilebaseBaseWriter
from .utils import meta
RESERVOIR_SAMPLING_BUFFER_CLASS = \
'exporters.write_buffers.reservoir_sampling_buffer.ReservoirSamplingWriteBuffer'
class BaseWriterTest(unittest.TestCase):
def setUp(self):
self.options = {
'log_level': 'DEBUG',
'logger_name': 'export-pipeline'
}
self.writer = BaseWriter(self.options, meta())
def tearDown(self):
self.writer.close()
def test_write_not_implemented(self):
with self.assertRaises(NotImplementedError):
self.writer.write('', '')
class FakeWriter(BaseWriter):
"""CustomWriter writing records to self.custom_output
to test BaseWriter extensibility
"""
def __init__(self, options, *args, **kwargs):
super(FakeWriter, self).__init__(options, meta(), *args, **kwargs)
self.custom_output = {}
self.fake_files_already_written = []
self.set_metadata('written_files', self.fake_files_already_written)
def write(self, path, key):
with gzip.open(path) as f:
self.custom_output[key] = f.read()
self.fake_files_already_written.append(path)
class FakeFilebaseWriter(FilebaseBaseWriter):
"""CustomWriter writing records to self.custom_output
to test BaseWriter extensibility
"""
def __init__(self, options, *args, **kwargs):
super(FakeFilebaseWriter, self).__init__(options, meta(), *args, **kwargs)
self.custom_output = {}
self.fake_files_already_written = []
self.set_metadata('written_files', self.fake_files_already_written)
def write(self, path, key, file_name=None):
if file_name:
with open(path) as f:
self.custom_output[key] = f.read()
self.fake_files_already_written.append(file_name)
else:
with gzip.open(path) as f:
self.custom_output[key] = f.read()
self.fake_files_already_written.append(path)
class CustomWriterTest(unittest.TestCase):
def setUp(self):
self.batch = [
BaseRecord({u'key1': u'value11', u'key2': u'value21'}),
BaseRecord({u'key1': u'value12', u'key2': u'value22'}),
BaseRecord({u'key1': u'value13', u'key2': u'value23'}),
]
def test_custom_writer(self):
# given:
writer = FakeWriter({}, {})
# when:
try:
writer.write_batch(self.batch)
writer.flush()
finally:
writer.close()
# then:
output = writer.custom_output[()]
self.assertEquals([json.dumps(item) for item in self.batch],
output.splitlines())
self.assertEquals('jl', writer.write_buffer.items_group_files.file_extension)
def test_write_buffer_removes_files(self):
# given:
writer = FakeWriter({}, {})
writer.write_buffer.items_per_buffer_write = 1
# when:
try:
writer.write_batch(self.batch)
# then
self.assertEqual(len(writer.fake_files_already_written), 3,
'Wrong number of files written')
for f in writer.fake_files_already_written:
self.assertFalse(os.path.exists(f))
self.assertFalse(os.path.exists(f[:-3]))
finally:
writer.close()
def test_custom_writer_with_csv_formatter(self):
# given:
options = {
'name': 'exporters.export_formatter.csv_export_formatter.CSVExportFormatter',
'options': {'show_titles': False, 'fields': ['key1', 'key2']}
}
formatter = CSVExportFormatter(options)
writer = FakeWriter({}, {}, export_formatter=formatter)
# when:
try:
writer.write_batch(self.batch)
writer.flush()
finally:
writer.close()
# then:
output = writer.custom_output[()].splitlines()
self.assertEquals(
[
['value11', 'value21'],
['value12', 'value22'],
['value13', 'value23'],
],
[l for l in csv.reader(output)]
)
self.assertEquals('csv', writer.write_buffer.items_group_files.file_extension)
def test_custom_writer_with_xml_formatter(self):
from xml.dom.minidom import parseString
# given:
options = {
'name': 'exporters.export_formatter.xml_export_formatter.XMLExportFormatter',
'options': {
}
}
formatter = XMLExportFormatter(options)
writer = FakeWriter({}, {}, export_formatter=formatter)
# when:
try:
writer.write_batch(self.batch)
writer.flush()
finally:
writer.close()
# then:
output = writer.custom_output[()].splitlines()
expected_list = [
parseString(
'<item><key2 type="str">value21</key2><key1 type="str">value11</key1></item>'),
parseString(
'<item><key2 type="str">value22</key2><key1 type="str">value12</key1></item>'),
parseString(
'<item><key2 type="str">value23</key2><key1 type="str">value13</key1></item>')
]
expected = ['<?xml version="1.0" encoding="UTF-8"?>', '<root>'] + \
[{'key1': item.getElementsByTagName('key1')[0].firstChild.nodeValue,
'key2': item.getElementsByTagName('key2')[0].firstChild.nodeValue}
for item in expected_list] + \
['</root>']
out = [output[0], output[1]] + \
[{'key1': parseString(l).getElementsByTagName('key1')[0].firstChild.nodeValue,
'key2': parseString(l).getElementsByTagName('key2')[0].firstChild.nodeValue}
for l in output[2:-1]] + \
[output[-1]]
self.assertEquals(expected, out)
self.assertEquals('xml', writer.write_buffer.items_group_files.file_extension)
def test_custom_writer_with_xml_formatter_with_options(self):
from xml.dom.minidom import parseString
# given:
options = {'name': 'exporters.export_formatter.xml_export_formatter.XMLExportFormatter',
'options': {
'attr_type': False,
'fields_order': ['key1', 'key2'],
'item_name': 'XmlItem',
'root_name': 'RootItem'}
}
formatter = XMLExportFormatter(options)
writer = FakeWriter({}, {}, export_formatter=formatter)
# when:
try:
writer.write_batch(self.batch)
writer.flush()
finally:
writer.close()
# then:
output = writer.custom_output[()].splitlines()
expected_list = [
parseString(
'<XmlItem><key1>value11</key1><key2>value21</key2></XmlItem>'),
parseString(
'<XmlItem><key1>value12</key1><key2>value22</key2></XmlItem>'),
parseString(
'<XmlItem><key1>value13</key1><key2>value23</key2></XmlItem>')
]
expected = ['<?xml version="1.0" encoding="UTF-8"?>', '<RootItem>']
expected += [[node.localName for node in item.getElementsByTagName('XmlItem')[0].childNodes]
for item in expected_list]
expected += ['</RootItem>']
out = [output[0], output[1]]
out += [
[
node.localName
for node in parseString(l).getElementsByTagName('XmlItem')[0].childNodes
]
for l in output[2:-1]
]
out += [output[-1]]
self.assertEquals(expected, out)
self.assertEquals('xml', writer.write_buffer.items_group_files.file_extension)
def test_md5sum_file(self):
# given:
with tempfile.NamedTemporaryFile() as tmp:
writer = FakeFilebaseWriter(
{'options': {'filebase': tmp.name, 'generate_md5': True}}, {})
# when:
try:
writer.write_batch(self.batch)
writer.flush()
writer.finish_writing()
finally:
writer.close()
self.assertIn('md5checksum.md5', writer.fake_files_already_written)
@mock.patch('exporters.writers.base_writer.BaseWriter._check_write_consistency')
def test_consistency_check(self, consistency_mock):
# given:
writer = FakeWriter({'options': {'check_consistency': True}})
# when:
try:
writer.write_batch(self.batch)
writer.flush()
writer.finish_writing()
finally:
writer.close()
# then:
consistency_mock.assert_called_once_with()
def test_custom_writer_with_json_file_formatter(self):
# given:
options = {
'name': 'exporters.export_formatter.json_export_formatter.JSONExportFormatter',
'options': {
'jsonlines': False
}
}
formatter = JsonExportFormatter(options, meta())
writer = FakeWriter({}, {}, export_formatter=formatter)
# when:
try:
writer.write_batch(self.batch)
writer.flush()
finally:
writer.close()
# then:
output = writer.custom_output[()]
out = json.loads(output)
self.assertEquals(self.batch, out)
self.assertEquals('json', writer.write_buffer.items_group_files.file_extension)
class WriteBufferTest(unittest.TestCase):
def setUp(self):
item_writer = GroupingBufferFilesTracker(JsonExportFormatter({}, meta()), 'gz')
self.write_buffer = WriteBuffer({}, meta(),
items_per_buffer_write=1000,
size_per_buffer_write=1000,
items_group_files_handler=item_writer)
def tearDown(self):
self.write_buffer.close()
def test_get_metadata(self):
# given:
self.write_buffer.set_metadata_for_file('somekey', **{'items': 10})
# then
self.assertEqual(self.write_buffer.get_metadata('somekey').get('items'), 10,
'Wrong metadata')
self.assertIsNone(self.write_buffer.get_metadata('somekey').get('nokey'))
class ReservoirSamplingWriterTest(unittest.TestCase):
def setUp(self):
self.sample_size = 10
self.batch = [BaseRecord({u'key1': u'value1{}'.format(i),
u'key2': u'value2{}'.format(i)}) for i in range(100)]
def run_fake_writer(self):
# given:
writer = FakeWriter({'options': {
'write_buffer': RESERVOIR_SAMPLING_BUFFER_CLASS,
'write_buffer_options': {'sample_size': self.sample_size}}},
{})
# when:
try:
writer.write_batch(self.batch)
writer.flush()
finally:
writer.close()
# then:
return writer.custom_output[()]
def test_sample_writer(self):
output = self.run_fake_writer()
self.assertEquals(self.sample_size, len(output.strip().splitlines()))
# test duplicates
self.assertEquals(self.sample_size, len(set(output.strip().splitlines())))
def test_different_samples(self):
outputs = [self.run_fake_writer() for i in range(2)]
self.assertNotEquals(outputs[0].splitlines(), outputs[1].splitlines())
class ConsoleWriterTest(unittest.TestCase):
def setUp(self):
self.options = {
'log_level': 'DEBUG',
'logger_name': 'export-pipeline'
}
self.writer = ConsoleWriter(self.options, meta())
def tearDown(self):
self.writer.close()
def test_write_console(self):
items_to_write = []
for i in range(0, 10):
item = BaseRecord()
item['key'] = i
item['value'] = random.randint(0, 10000)
items_to_write.append(item)
self.writer.write_batch(items_to_write)
self.assertEqual(self.writer.get_metadata('items_count'), 10)
class FilebaseBaseWriterTest(unittest.TestCase):
def test_get_file_number_not_implemented(self):
writer_config = {
'options': {
'filebase': '/tmp/',
}
}
writer = FilebaseBaseWriter(writer_config, meta())
self.assertIsInstance(writer.get_file_suffix('', ''), basestring)
path, file_name = writer.create_filebase_name([])
self.assertEqual(path, '/tmp')
writer.close()
def test_get_full_filebase(self):
writer_config = {
'options': {
'filebase': '/tmp/some_file_',
}
}
writer = FilebaseBaseWriter(writer_config, meta())
writer.close()
self.assertEqual(writer.filebase.template, '/tmp/some_file_')
def test_create_filebase_name(self):
writer_config = {
'options': {
'filebase': '/tmp/%m/%Y-some_folder_{groups[0]}/{groups[1]}_{file_number}_',
}
}
writer = FilebaseBaseWriter(writer_config, meta())
writer.close()
date = datetime.datetime.now()
expected = (date.strftime('/tmp/%m/%Y-some_folder_g1'), 'filename')
self.assertEqual(writer.create_filebase_name(('g1', 'g2'), file_name='filename'), expected)
def test_wrong_file_number_in_filebase(self):
writer_config = {
'options': {
'filebase': '/tmp/%m/%Y-some_folder_{file_number}/{groups[1]}_',
}
}
writer = FilebaseBaseWriter(writer_config, meta())
writer.close()
with self.assertRaisesRegexp(KeyError, 'filebase option should not contain'):
writer.create_filebase_name(('g1', 'g2'), file_name='filename')
class FilebaseTest(unittest.TestCase):
def setUp(self):
self.filebase = Filebase('/tmp/output/{groups[0]}/{groups[1]}_test_{file_number}_file_')
@freeze_time('2010-01-01')
def test_filebase_init(self):
filebase = Filebase('/tmp/output/%Y/{groups[0]}/{groups[1]}_test_{file_number}_file_')
expected_dir = '/tmp/output/2010/{groups[0]}'
expected_prefix = '{groups[1]}_test_{file_number}_file_'
expected = '/'.join([expected_dir, expected_prefix])
self.assertEqual(filebase.template, expected)
self.assertEqual(filebase.prefix_template, expected_prefix)
self.assertEqual(filebase.dirname_template, expected_dir)
def test_get_dirname_with_group_info(self):
# then
self.assertEqual(
self.filebase.formatted_dirname(
groups=('us', 'es')), '/tmp/output/us')
def test_formatted_prefix(self):
# then
self.assertEqual(self.filebase.formatted_prefix(
groups=('us', 'es'), file_number=0), 'es_test_0_file_')
def test_has_group_info(self):
# then
self.assertTrue(self.filebase._has_key_info('groups'))
class FSWriterTest(unittest.TestCase):
def get_batch(self):
data = [
{'name': 'Roberto', 'birthday': '12/05/1987'},
{'name': 'Claudia', 'birthday': '21/12/1985'},
]
return [BaseRecord(d) for d in data]
def get_writer_config(self):
return {
'name': 'exporters.writers.fs_writer.FSWriter',
'options': {
'filebase': '{}/exporter_test'.format(self.tmp_dir),
}
}
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
try:
shutil.rmtree(self.tmp_dir)
except OSError:
pass
def test_get_file_number(self):
writer_config = self.get_writer_config()
writer = FSWriter(writer_config, meta())
try:
writer.write_batch(self.get_batch())
writer.flush()
finally:
writer.close()
expected_file = '{}/exporter_test0000.jl.gz'.format(self.tmp_dir)
self.assertTrue(expected_file in writer.written_files)
def test_compression_gzip_format(self):
writer_config = self.get_writer_config()
writer_config['options'].update({'compression': 'gz'})
writer = FSWriter(writer_config, meta())
try:
writer.write_batch(self.get_batch())
writer.flush()
finally:
writer.close()
expected_file = '{}/exporter_test0000.jl.gz'.format(self.tmp_dir)
self.assertTrue(expected_file in writer.written_files)
written = []
with gzip.open(expected_file, 'r') as fin:
for line in fin:
written.append(json.loads(line))
self.assertEqual(written, self.get_batch())
def test_compression_zip_format(self):
writer_config = self.get_writer_config()
writer_config['options'].update({'compression': 'zip'})
writer = FSWriter(writer_config, meta())
try:
writer.write_batch(self.get_batch())
writer.flush()
finally:
writer.close()
expected_file = '{}/exporter_test0000.jl.zip'.format(self.tmp_dir)
self.assertTrue(expected_file in writer.written_files)
import zipfile
written = []
with zipfile.ZipFile(expected_file) as z:
with z.open('exporter_test0000.jl') as f:
for line in f:
written.append(json.loads(line))
self.assertEqual(written, self.get_batch())
def test_compression_bz2_format(self):
writer_config = self.get_writer_config()
writer_config['options'].update({'compression': 'bz2'})
writer = FSWriter(writer_config, meta())
try:
writer.write_batch(self.get_batch())
writer.flush()
finally:
writer.close()
expected_file = '{}/exporter_test0000.jl.bz2'.format(self.tmp_dir)
self.assertTrue(expected_file in writer.written_files)
written = []
with bz2.BZ2File(expected_file, 'r') as fin:
for line in fin:
written.append(json.loads(line))
self.assertEqual(written, self.get_batch())
def test_no_compression(self):
writer_config = self.get_writer_config()
writer_config['options'].update({'compression': 'none'})
writer = FSWriter(writer_config, meta())
try:
writer.write_batch(self.get_batch())
writer.flush()
finally:
writer.close()
expected_file = '{}/exporter_test0000.jl'.format(self.tmp_dir)
self.assertTrue(expected_file in writer.written_files)
written = []
with open(expected_file, 'r') as fin:
for line in fin:
written.append(json.loads(line))
self.assertEqual(written, self.get_batch())
def test_invalid_compression_format(self):
options = self.get_writer_config()
options['options']['compression'] = 'unknown'
self.assertRaisesRegexp(ConfigurationError,
'The compression format can only be '
'one of the following:',
FilebaseBaseWriter,
options,
meta())
def test_get_file_number_with_date(self):
file_path = '/tmp/%Y%m%d/'
file_name = '{file_number}_exporter_test_%m%d%y'
start_file_count = 1
writer_config = self.get_writer_config()
writer_config.update({'options': {
'filebase': file_path + file_name,
'start_file_count': start_file_count
}})
writer = FSWriter(writer_config, meta())
try:
writer.write_batch(self.get_batch())
writer.flush()
finally:
writer.close()
file_path = datetime.datetime.now().strftime(file_path).format(file_number=start_file_count)
file_name = datetime.datetime.now().strftime(file_name).format(file_number=start_file_count)
self.assertIn(file_path + file_name + '.jl.gz', writer.written_files)
def test_check_writer_consistency(self):
# given
options = self.get_writer_config()
options['options']['check_consistency'] = True
# when:
writer = FSWriter(options, meta())
try:
writer.write_batch(self.get_batch())
writer.flush()
finally:
writer.close()
# Consistency check passes
writer.finish_writing()
with open(os.path.join(self.tmp_dir, 'exporter_test0000.jl.gz'), 'w'):
with self.assertRaisesRegexp(InconsistentWriteState, 'Wrong size for file'):
writer.finish_writing()
os.remove(os.path.join(self.tmp_dir, 'exporter_test0000.jl.gz'))
with self.assertRaisesRegexp(InconsistentWriteState, 'file is not present at destination'):
writer.finish_writing()
def test_writer_md5_generation(self):
# given
options = self.get_writer_config()
options['options']['generate_md5'] = True
# when:
writer = FSWriter(options, meta())
with closing(writer) as w:
w.write_batch(self.get_batch())
w.flush()
w.finish_writing()
self.assertTrue(os.path.isfile(os.path.join(self.tmp_dir, 'md5checksum.md5')),
"Didn't found an expected md5checksum.md5 file")
def _build_grouped_batch(self, batch, python_expressions):
grouper_options = {
'name': 'exporters.groupers.python_exp_grouper.PythonExpGrouper',
'options': {'python_expressions': python_expressions}
}
grouper = PythonExpGrouper(options=grouper_options)
return grouper.group_batch(batch)
def test_writer_with_grouped_data(self):
# given:
batch = [
BaseRecord(city=u'Madrid', country=u'ES', monument='Royal Palace'),
BaseRecord(city=u'Valencia', country=u'ES', monument='Torres de Serranos'),
BaseRecord(city=u'Paris', country=u'FR', monument='Eiffel Tour'),
BaseRecord(city=u'Paris', country=u'FR', monument='Champ de Mars'),
BaseRecord(city=u'Paris', country=u'FR', monument='Arc de Triomphe'),
]
grouped_batch = self._build_grouped_batch(
batch, python_expressions=["item['country']", "item['city']"])
options = self.get_writer_config()
options['options']['filebase'] = os.path.join(self.tmp_dir, '{groups[0]}/{groups[1]}/file')
options['options']['items_per_buffer_write'] = 2
writer = FSWriter(options=options, metadata=meta())
# when:
with closing(writer) as w:
w.write_batch(grouped_batch)
w.flush()
w.finish_writing()
# then:
expected_files = [
'ES/Madrid/file0000.jl.gz',
'ES/Valencia/file0000.jl.gz',
'FR/Paris/file0000.jl.gz',
'FR/Paris/file0001.jl.gz',
]
expected = [os.path.join(self.tmp_dir, f) for f in expected_files]
def listdir_recursive(path):
return [os.path.join(d, f)
for d, _, fnames in os.walk(path)
for f in fnames]
self.assertEqual(sorted(expected), sorted(listdir_recursive(self.tmp_dir)))
| {
"content_hash": "14e53ec172ab2475c1104513a950bd7c",
"timestamp": "",
"source": "github",
"line_count": 695,
"max_line_length": 100,
"avg_line_length": 35.48057553956834,
"alnum_prop": 0.5721643213431201,
"repo_name": "scrapinghub/exporters",
"id": "0064b5250559acf92573895876318f6ae789768f",
"size": "24659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_writers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "501414"
}
],
"symlink_target": ""
} |
"""
Mock unit tests for the NetApp block storage driver interfaces
"""
from cinder import test
from cinder.volume.drivers.netapp.dataontap import block_7mode
from cinder.volume.drivers.netapp.dataontap import block_cmode
from cinder.volume.drivers.netapp.dataontap import fc_7mode
from cinder.volume.drivers.netapp.dataontap import fc_cmode
from cinder.volume.drivers.netapp.dataontap import iscsi_7mode
from cinder.volume.drivers.netapp.dataontap import iscsi_cmode
class NetAppBlockStorageDriverInterfaceTestCase(test.TestCase):
def setUp(self):
super(NetAppBlockStorageDriverInterfaceTestCase, self).setUp()
self.mock_object(block_cmode.NetAppBlockStorageCmodeLibrary,
'__init__',
return_value=None)
self.mock_object(block_7mode.NetAppBlockStorage7modeLibrary,
'__init__',
return_value=None)
self.iscsi_7mode_driver = iscsi_7mode.NetApp7modeISCSIDriver()
self.iscsi_cmode_driver = iscsi_cmode.NetAppCmodeISCSIDriver()
self.fc_7mode_driver = fc_7mode.NetApp7modeFibreChannelDriver()
self.fc_cmode_driver = fc_cmode.NetAppCmodeFibreChannelDriver()
def test_driver_interfaces_match(self):
"""Ensure the NetApp block storage driver interfaces match.
The four block storage Cinder drivers from NetApp (iSCSI/FC,
7-mode/C-mode) are merely passthrough shim layers atop a common
block storage library. Bugs have been introduced when a Cinder
method was exposed via a subset of those driver shims. This test
ensures they remain in sync and the library features are uniformly
available in the four drivers.
"""
# Get local functions of each driver interface
iscsi_7mode = self._get_local_functions(self.iscsi_7mode_driver)
iscsi_cmode = self._get_local_functions(self.iscsi_cmode_driver)
fc_7mode = self._get_local_functions(self.fc_7mode_driver)
fc_cmode = self._get_local_functions(self.fc_cmode_driver)
# Ensure NetApp block storage driver shims are identical
self.assertSetEqual(iscsi_7mode, iscsi_cmode)
self.assertSetEqual(iscsi_7mode, fc_7mode)
self.assertSetEqual(iscsi_7mode, fc_cmode)
def _get_local_functions(self, obj):
"""Get function names of an object without superclass functions."""
return set([key for key, value in type(obj).__dict__.items()
if callable(value)])
| {
"content_hash": "3336562a38a77f8f4b34c99a9e679e39",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 75,
"avg_line_length": 44.35087719298246,
"alnum_prop": 0.6950158227848101,
"repo_name": "ge0rgi/cinder",
"id": "6a42fde1ced31ef6610fda5b1fd2a0b0ce0ec77b",
"size": "3161",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/ocata",
"path": "cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_driver_interfaces.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19963591"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import User
from django.db import models
from settings import UserSettings
class Setting(models.Model):
user = models.ForeignKey(User, unique=False)
name = models.CharField(max_length=200, unique=True)
value = models.CharField(max_length=200)
# attach a UserSettings object in the User object
User.setting = property(lambda u: UserSettings(user=u))
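# Usage sketch (assumes the UserSettings helper imported above exposes
# dict-style access over this model; the key name is hypothetical):
#   request.user.setting['items_per_page']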
| {
"content_hash": "c2777b1634c984dbebb02f2965c49843",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 56,
"avg_line_length": 30.846153846153847,
"alnum_prop": 0.7605985037406484,
"repo_name": "ychaim/mybitbank",
"id": "63ef4a35d78c2d62e621a15b49ab2bf0e176a99f",
"size": "401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mybitbank/apps/login/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "105105"
},
{
"name": "HTML",
"bytes": "74263"
},
{
"name": "JavaScript",
"bytes": "182459"
},
{
"name": "Python",
"bytes": "227251"
}
],
"symlink_target": ""
} |
"""
Get and parse one url
"""
import os
import sys
import requests
from bs4 import BeautifulSoup
import re
__author__ = 'Rob Edwards'
# Toggle: fetch the page live (True) or re-parse a previously saved local copy.
if True:
url = 'https://www.altmetric.com/details/1386636'
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'}
r = requests.get(url, headers=headers)
if r.status_code != 200:
        sys.stderr.write(f"FATAL: There was an error retrieving {url} and I can't continue\n")
        sys.exit(1)
soup = BeautifulSoup(r.text, 'lxml')
else:
print("Reading file")
text = ""
with open("1386636.html", 'r') as f:
for l in f:
text += l
soup = BeautifulSoup(text, 'lxml')
print("Done")
dh = soup.find('div', class_="document-header").find("a")
for c in dh:
if c.text:
title = c.text
print(f"Title: {title}")
summ = soup.find_all('div', class_='summary')
summaries = []
for c in summ:
summaries.append(c.text)
print(summaries)
ex = re.compile(r'(\d+)\s+(\S.*)$')
# extract mentions
mentions = {}
mc = soup.find_all('dl', class_='mention-counts')
for c in mc:
for lnk in c.find_all('a'):
m = ex.match(lnk.text)
if m:
mentions[m.groups()[1]] = m.groups()[0]
print(mentions)
# extract citations
citations = {}
mc = soup.find_all('dl', class_='scholarly-citation-counts')
for c in mc:
for lnk in c.find_all('a'):
m = ex.match(lnk.text)
if m:
citations[m.groups()[1]] = m.groups()[0]
print(citations)
| {
"content_hash": "17d5c26a346b7aaf995c064fc963dd5f",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 147,
"avg_line_length": 25.55,
"alnum_prop": 0.6086105675146771,
"repo_name": "linsalrob/EdwardsLab",
"id": "05a4255318ef061f929e68b1a209cb846e72fddb",
"size": "1533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "refs_and_citations/altmetric_one.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "227276"
},
{
"name": "C++",
"bytes": "21508"
},
{
"name": "Jupyter Notebook",
"bytes": "490830"
},
{
"name": "Makefile",
"bytes": "936"
},
{
"name": "Perl",
"bytes": "280086"
},
{
"name": "Python",
"bytes": "1102051"
},
{
"name": "Shell",
"bytes": "13759"
}
],
"symlink_target": ""
} |
import tensorflow as tf
import numpy as np
import glob
from os import path, makedirs
from matplotlib import pyplot as plt
from batchnorm import ConvolutionalBatchNormalizer
from argparse import ArgumentParser
# Default values for parameters
NUM_EPOCHS = 1e+9
IMAGE_SAVE_RATE = 1000
MODEL_SAVE_RATE = 100000
FINAL_MODEL_PATH = 'final.tfmodel'
# Command-line arguments
parser = ArgumentParser(description="Trains a recolorization CNN with the "
"given parameters under the images in rgb_imgs/. The model is "
"incrementally saved to model.chkpt.")
parser.add_argument("image_dir", type=str, help="The directory "
"containing the JPEG images to run testing on.")
parser.add_argument("summary_dir", type=str, help="The output directory to "
"place the intermediate results of training into. The results are the "
"grayscale, training result, and original images concatenated together "
"at the every image_save_rate steps.")
parser.add_argument("-f", "--final-model", dest='final_model_path',
default=FINAL_MODEL_PATH, type=str, help="The path to the file to "
"store the final model in after training is completed or stopped. This "
"will also save a TensorFlow meta file under <final_model>.meta.")
parser.add_argument("-e", "--epochs", dest='num_epochs', default=NUM_EPOCHS,
type=int, help="The number of epochs to run training for. An epoch is "
"a complete iteration over all the input images.")
parser.add_argument("-i", "--image-save-rate", dest="image_save_rate", type=int,
default=IMAGE_SAVE_RATE, help="How often to save an image while "
"training. Every N images will be saved to 'summary/'.")
parser.add_argument("-m", "--model-save-rate", dest="model_save_rate", type=int,
default=MODEL_SAVE_RATE, help="How often to update the increment model "
"that has been trained so far. After every N images are processed, the "
"model will be saved to 'model.chkpt'")
args = parser.parse_args()
filenames = sorted(glob.glob(path.join(args.image_dir, "*.jpg")))
batch_size = 1
num_epochs = args.num_epochs
image_save_rate = args.image_save_rate
model_save_rate = args.model_save_rate
global_step = tf.Variable(0, name='global_step', trainable=False)
phase_train = tf.placeholder(tf.bool, name='phase_train')
uv = tf.placeholder(tf.uint8, name='uv')
def read_my_file_format(filename_queue, randomize=False):
reader = tf.WholeFileReader()
key, file = reader.read(filename_queue)
uint8image = tf.image.decode_jpeg(file, channels=3)
uint8image = tf.random_crop(uint8image, (224, 224, 3))
if randomize:
uint8image = tf.image.random_flip_left_right(uint8image)
uint8image = tf.image.random_flip_up_down(uint8image, seed=None)
float_image = tf.div(tf.cast(uint8image, tf.float32), 255)
return float_image
def input_pipeline(filenames, batch_size, num_epochs=None):
filename_queue = tf.train.string_input_producer(
filenames, num_epochs=num_epochs, shuffle=False)
example = read_my_file_format(filename_queue, randomize=False)
min_after_dequeue = 100
capacity = min_after_dequeue + 3 * batch_size
example_batch = tf.train.shuffle_batch(
[example], batch_size=batch_size, capacity=capacity,
min_after_dequeue=min_after_dequeue)
return example_batch
def batch_norm(x, depth, phase_train):
with tf.variable_scope('batchnorm'):
ewma = tf.train.ExponentialMovingAverage(decay=0.9999)
bn = ConvolutionalBatchNormalizer(depth, 0.001, ewma, True)
update_assignments = bn.get_assigner()
x = bn.normalize(x, train=phase_train)
return x
def conv2d(_X, w, sigmoid=False, bn=False):
with tf.variable_scope('conv2d'):
_X = tf.nn.conv2d(_X, w, [1, 1, 1, 1], 'SAME')
if bn:
_X = batch_norm(_X, w.get_shape()[3], phase_train)
if sigmoid:
return tf.sigmoid(_X)
else:
_X = tf.nn.relu(_X)
return tf.maximum(0.01 * _X, _X)
def colornet(_tensors):
"""
Network architecture http://tinyclouds.org/colorize/residual_encoder.png
"""
with tf.variable_scope('colornet'):
# Bx28x28x512 -> batch norm -> 1x1 conv = Bx28x28x256
conv1 = tf.nn.relu(tf.nn.conv2d(batch_norm(_tensors[
"conv4_3"], 512, phase_train),
_tensors["weights"]["wc1"], [1, 1, 1, 1], 'SAME'))
# upscale to 56x56x256
conv1 = tf.image.resize_bilinear(conv1, (56, 56))
conv1 = tf.add(conv1, batch_norm(
_tensors["conv3_3"], 256, phase_train))
# Bx56x56x256-> 3x3 conv = Bx56x56x128
conv2 = conv2d(conv1, _tensors["weights"][
'wc2'], sigmoid=False, bn=True)
# upscale to 112x112x128
conv2 = tf.image.resize_bilinear(conv2, (112, 112))
conv2 = tf.add(conv2, batch_norm(
_tensors["conv2_2"], 128, phase_train))
# Bx112x112x128 -> 3x3 conv = Bx112x112x64
conv3 = conv2d(conv2, _tensors["weights"][
'wc3'], sigmoid=False, bn=True)
# upscale to Bx224x224x64
conv3 = tf.image.resize_bilinear(conv3, (224, 224))
conv3 = tf.add(conv3, batch_norm(_tensors["conv1_2"], 64, phase_train))
# Bx224x224x64 -> 3x3 conv = Bx224x224x3
conv4 = conv2d(conv3, _tensors["weights"][
'wc4'], sigmoid=False, bn=True)
conv4 = tf.add(conv4, batch_norm(
_tensors["grayscale"], 3, phase_train))
# Bx224x224x3 -> 3x3 conv = Bx224x224x3
conv5 = conv2d(conv4, _tensors["weights"][
'wc5'], sigmoid=False, bn=True)
# Bx224x224x3 -> 3x3 conv = Bx224x224x2
conv6 = conv2d(conv5, _tensors["weights"][
'wc6'], sigmoid=True, bn=True)
return conv6
def concat_images(imga, imgb):
"""
Combines two color image ndarrays side-by-side.
"""
ha, wa = imga.shape[:2]
hb, wb = imgb.shape[:2]
max_height = np.max([ha, hb])
total_width = wa + wb
new_img = np.zeros(shape=(max_height, total_width, 3), dtype=np.float32)
new_img[:ha, :wa] = imga
new_img[:hb, wa:wa + wb] = imgb
return new_img
def rgb2yuv(rgb):
"""
Convert RGB image into YUV https://en.wikipedia.org/wiki/YUV
"""
rgb2yuv_filter = tf.constant(
[[[[0.299, -0.169, 0.499],
[0.587, -0.331, -0.418],
[0.114, 0.499, -0.0813]]]])
rgb2yuv_bias = tf.constant([0., 0.5, 0.5])
temp = tf.nn.conv2d(rgb, rgb2yuv_filter, [1, 1, 1, 1], 'SAME')
temp = tf.nn.bias_add(temp, rgb2yuv_bias)
return temp
def yuv2rgb(yuv):
"""
Convert YUV image into RGB https://en.wikipedia.org/wiki/YUV
"""
yuv = tf.mul(yuv, 255)
yuv2rgb_filter = tf.constant(
[[[[1., 1., 1.],
[0., -0.34413999, 1.77199996],
[1.40199995, -0.71414, 0.]]]])
yuv2rgb_bias = tf.constant([-179.45599365, 135.45983887, -226.81599426])
temp = tf.nn.conv2d(yuv, yuv2rgb_filter, [1, 1, 1, 1], 'SAME')
temp = tf.nn.bias_add(temp, yuv2rgb_bias)
temp = tf.maximum(temp, tf.zeros(temp.get_shape(), dtype=tf.float32))
temp = tf.minimum(temp, tf.mul(
tf.ones(temp.get_shape(), dtype=tf.float32), 255))
temp = tf.div(temp, 255)
return temp
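# Sanity-check sketch (not part of the training graph): a round trip through
# rgb2yuv followed by yuv2rgb should roughly reproduce the input, up to the
# clamping and 8-bit scaling above. For example, in a throwaway session:
#   _rgb = tf.constant([[[[0.2, 0.4, 0.6]]]], dtype=tf.float32)
#   _err = tf.reduce_max(tf.abs(yuv2rgb(rgb2yuv(_rgb)) - _rgb))
#   # sess.run(_err) should be close to 0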
with open("vgg/tensorflow-vgg16/vgg16-20160129.tfmodel", mode='rb') as f:
fileContent = f.read()
graph_def = tf.GraphDef()
graph_def.ParseFromString(fileContent)
with tf.variable_scope('colornet'):
# Store layers weight
weights = {
# 1x1 conv, 512 inputs, 256 outputs
'wc1': tf.Variable(tf.truncated_normal([1, 1, 512, 256], stddev=0.01)),
# 3x3 conv, 512 inputs, 128 outputs
'wc2': tf.Variable(tf.truncated_normal([3, 3, 256, 128], stddev=0.01)),
# 3x3 conv, 256 inputs, 64 outputs
'wc3': tf.Variable(tf.truncated_normal([3, 3, 128, 64], stddev=0.01)),
# 3x3 conv, 128 inputs, 3 outputs
'wc4': tf.Variable(tf.truncated_normal([3, 3, 64, 3], stddev=0.01)),
# 3x3 conv, 6 inputs, 3 outputs
'wc5': tf.Variable(tf.truncated_normal([3, 3, 3, 3], stddev=0.01)),
# 3x3 conv, 3 inputs, 2 outputs
'wc6': tf.Variable(tf.truncated_normal([3, 3, 3, 2], stddev=0.01)),
}
colorimage = input_pipeline(filenames, batch_size, num_epochs=num_epochs)
colorimage_yuv = rgb2yuv(colorimage)
grayscale = tf.image.rgb_to_grayscale(colorimage)
grayscale_rgb = tf.image.grayscale_to_rgb(grayscale)
grayscale_yuv = rgb2yuv(grayscale_rgb)
grayscale = tf.concat(3, [grayscale, grayscale, grayscale])
tf.import_graph_def(graph_def, input_map={"images": grayscale})
graph = tf.get_default_graph()
with tf.variable_scope('vgg'):
conv1_2 = graph.get_tensor_by_name("import/conv1_2/Relu:0")
conv2_2 = graph.get_tensor_by_name("import/conv2_2/Relu:0")
conv3_3 = graph.get_tensor_by_name("import/conv3_3/Relu:0")
conv4_3 = graph.get_tensor_by_name("import/conv4_3/Relu:0")
tensors = {
"conv1_2": conv1_2,
"conv2_2": conv2_2,
"conv3_3": conv3_3,
"conv4_3": conv4_3,
"grayscale": grayscale,
"weights": weights
}
# Construct model
pred = colornet(tensors)
pred_yuv = tf.concat(3, [tf.split(3, 3, grayscale_yuv)[0], pred])
pred_rgb = yuv2rgb(pred_yuv)
loss = tf.square(tf.sub(pred, tf.concat(
3, [tf.split(3, 3, colorimage_yuv)[1], tf.split(3, 3, colorimage_yuv)[2]])))
loss_u, loss_v = tf.split(3, 2, loss)
# Pick which chrominance channel(s) drive the loss based on the value fed to
# the `uv` placeholder at run time (1 = U only, 2 = V only, anything else = both).
loss = tf.cond(tf.equal(uv, 1), lambda: loss_u,
               lambda: tf.cond(tf.equal(uv, 2), lambda: loss_v,
                               lambda: (loss_u + loss_v) / 2))
if phase_train is not None:
optimizer = tf.train.GradientDescentOptimizer(5*0.0001)
opt = optimizer.minimize(
loss, global_step=global_step, gate_gradients=optimizer.GATE_NONE)
# Summaries
tf.histogram_summary("weights1", weights["wc1"])
tf.histogram_summary("weights2", weights["wc2"])
tf.histogram_summary("weights3", weights["wc3"])
tf.histogram_summary("weights4", weights["wc4"])
tf.histogram_summary("weights5", weights["wc5"])
tf.histogram_summary("weights6", weights["wc6"])
tf.histogram_summary("instant_loss", tf.reduce_mean(loss))
tf.image_summary("colorimage", colorimage, max_images=1)
tf.image_summary("pred_rgb", pred_rgb, max_images=1)
tf.image_summary("grayscale", grayscale_rgb, max_images=1)
# Saver.
saver = tf.train.Saver()
# Create the graph, etc.
init_op = tf.initialize_all_variables()
init_op2 = tf.initialize_local_variables()
# Create a session for running operations in the Graph.
sess = tf.Session()
# Initialize the variables.
sess.run(init_op)
sess.run(init_op2)
# Start input enqueue threads.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# Create the summary directory if it doesn't exist
if not path.exists(args.summary_dir):
makedirs(args.summary_dir)
num_images = len(filenames)
print('Beginning training...')
print("Found {} images under the '{}' directory".format(num_images,
args.image_dir))
try:
while not coord.should_stop():
# Run training steps
training_opt = sess.run(opt, feed_dict={phase_train: True, uv: 1})
training_opt = sess.run(opt, feed_dict={phase_train: True, uv: 2})
step = sess.run(global_step)
if step % 1 == 0:
pred_, pred_rgb_, colorimage_, grayscale_rgb_, cost = sess.run(
[pred, pred_rgb, colorimage, grayscale_rgb, loss], feed_dict={phase_train: False, uv: 3})
            print({
                "step": step,
                "cost": np.mean(cost)
            })
if step % image_save_rate == 0:
summary_image = concat_images(grayscale_rgb_[0], pred_rgb_[0])
summary_image = concat_images(summary_image, colorimage_[0])
summary_path = path.join(args.summary_dir, "{}_{}".format(
step / num_images, step % num_images))
plt.imsave(summary_path, summary_image)
print("Image summary saved to file '{}'".format(summary_path +
".jpg"))
if (step % model_save_rate == 0) and (step != 0):
save_path = saver.save(sess, "model.ckpt")
print("Model saved to file '{}'".format(save_path))
except tf.errors.OutOfRangeError:
print('Done training -- epoch limit reached')
except KeyboardInterrupt:
print('Training stopped at the request of the user')
finally:
# When done, ask the threads to stop.
coord.request_stop()
# Save the final model
model_path = saver.save(sess, args.final_model_path)
print("Saving final model to '{}'".format(model_path))
# Wait for threads to finish.
coord.join(threads)
sess.close()
| {
"content_hash": "0a24e3c29169336222abbeb889e4b5d5",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 105,
"avg_line_length": 37.40588235294118,
"alnum_prop": 0.6333543009907218,
"repo_name": "bperez77/ensemble_colorization",
"id": "585bb6e42aa38b587cad0d6c732c9f2bff305b12",
"size": "12718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "train.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1120"
},
{
"name": "Makefile",
"bytes": "491"
},
{
"name": "Python",
"bytes": "39276"
},
{
"name": "Shell",
"bytes": "3434"
},
{
"name": "TeX",
"bytes": "91805"
}
],
"symlink_target": ""
} |
import logging
from pitz.entity import Entity
from pitz.cmdline import PitzScript
log = logging.getLogger('pitz.cmdline.pitzedit')
class PitzEdit(PitzScript):
"""
Edit an attribute's value
"""
script_name = 'pitz-edit'
def handle_p(self, p):
p.set_usage('%prog frag attribute')
def handle_options_and_args(self, p, options, args):
if not args or len(args) != 2:
p.print_usage()
raise SystemExit
def handle_proj(self, p, options, args, proj):
e = proj[args[0]]
if not isinstance(e, Entity):
print("I couldn't find an entity %s" % args[0])
raise SystemExit
else:
print(e.one_line_view)
e.edit(args[1])
print("Edited %s on %s." % (args[1], args[0]))
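# Command-line sketch (the fragment and attribute shown are hypothetical):
#   pitz-edit 3f9c title
# opens the 'title' attribute of the entity matching fragment '3f9c' for editing.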
| {
"content_hash": "3b4d3fe8e420b064988e09d3fc6c5c86",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 59,
"avg_line_length": 23.142857142857142,
"alnum_prop": 0.5703703703703704,
"repo_name": "mw44118/pitz",
"id": "3e24ffd581883f956adc4b2a9c7497bcc3067d7f",
"size": "859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pitz/cmdline/pitzedit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "363275"
},
{
"name": "Python",
"bytes": "225391"
},
{
"name": "Shell",
"bytes": "878"
},
{
"name": "VimL",
"bytes": "5430"
}
],
"symlink_target": ""
} |
import torch
from numpy.testing import assert_almost_equal
import numpy
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.matrix_attention import CosineMatrixAttention
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
class TestCosineMatrixAttention(AllenNlpTestCase):
def test_can_init_cosine(self):
legacy_attention = MatrixAttention.from_params(Params({"type": "cosine"}))
isinstance(legacy_attention, CosineMatrixAttention)
def test_cosine_similarity(self):
# example use case: a batch of size 2.
# With a time element component (e.g. sentences of length 2) each word is a vector of length 3.
# It is comparing this with another input of the same type
output = CosineMatrixAttention()(
torch.FloatTensor([[[0, 0, 0], [4, 5, 6]], [[-7, -8, -9], [10, 11, 12]]]),
torch.FloatTensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]),
)
# For the first batch there is
# no correlation between the first words of the input matrix
# but perfect correlation for the second word
# For the second batch there is
# negative correlation for the first words
# correlation for the second word
assert_almost_equal(
output.numpy(), numpy.array([[[0, 0], [0.97, 1]], [[-1, -0.99], [0.99, 1]]]), decimal=2
)
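# Worked check for one entry above: cosine([4, 5, 6], [1, 2, 3])
#   = 32 / (sqrt(77) * sqrt(14)) ≈ 0.9746,
# matching the 0.97 asserted for the first batch, second row, first column.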
| {
"content_hash": "6e640855114d43d757206a8d8eb804ff",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 103,
"avg_line_length": 45,
"alnum_prop": 0.6491582491582492,
"repo_name": "allenai/allennlp",
"id": "579e8b38e0fc3e88676dfde71b352a1aab9d9400",
"size": "1485",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/modules/matrix_attention/cosine_matrix_attention_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39870"
},
{
"name": "Dockerfile",
"bytes": "1190"
},
{
"name": "Jsonnet",
"bytes": "4469"
},
{
"name": "Makefile",
"bytes": "5306"
},
{
"name": "Perl",
"bytes": "101"
},
{
"name": "Python",
"bytes": "3575059"
},
{
"name": "Scilab",
"bytes": "4085"
},
{
"name": "Shell",
"bytes": "2092"
}
],
"symlink_target": ""
} |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/hmac.h>
"""
TYPES = """
typedef ... HMAC_CTX;
"""
FUNCTIONS = """
int HMAC_Init_ex(HMAC_CTX *, const void *, int, const EVP_MD *, ENGINE *);
int HMAC_Update(HMAC_CTX *, const unsigned char *, size_t);
int HMAC_Final(HMAC_CTX *, unsigned char *, unsigned int *);
int HMAC_CTX_copy(HMAC_CTX *, HMAC_CTX *);
HMAC_CTX *Cryptography_HMAC_CTX_new(void);
void Cryptography_HMAC_CTX_free(HMAC_CTX *ctx);
"""
MACROS = """
"""
CUSTOMIZATIONS = """
HMAC_CTX *Cryptography_HMAC_CTX_new(void) {
#if CRYPTOGRAPHY_OPENSSL_110_OR_GREATER && !defined(LIBRESSL_VERSION_NUMBER)
return HMAC_CTX_new();
#else
/* This uses OPENSSL_zalloc in 1.1.0, which is malloc + memset */
HMAC_CTX *ctx = (HMAC_CTX *)OPENSSL_malloc(sizeof(HMAC_CTX));
memset(ctx, 0, sizeof(HMAC_CTX));
return ctx;
#endif
}
void Cryptography_HMAC_CTX_free(HMAC_CTX *ctx) {
#if CRYPTOGRAPHY_OPENSSL_110_OR_GREATER && !defined(LIBRESSL_VERSION_NUMBER)
return HMAC_CTX_free(ctx);
#else
if (ctx != NULL) {
HMAC_CTX_cleanup(ctx);
OPENSSL_free(ctx);
}
#endif
}
"""
| {
"content_hash": "31bdaf37224bb083debab845336f6cbe",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 79,
"avg_line_length": 27.294117647058822,
"alnum_prop": 0.646551724137931,
"repo_name": "hipnusleo/laserjet",
"id": "3a48f6680fe2db88512fc5b4a0e45a172e219d3c",
"size": "1392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resource/pypi/cryptography-1.7.1/src/_cffi_src/openssl/hmac.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3096"
},
{
"name": "Batchfile",
"bytes": "13184"
},
{
"name": "C",
"bytes": "672858"
},
{
"name": "C++",
"bytes": "9678"
},
{
"name": "Go",
"bytes": "6671"
},
{
"name": "HTML",
"bytes": "850945"
},
{
"name": "Java",
"bytes": "14456"
},
{
"name": "Makefile",
"bytes": "14373"
},
{
"name": "Python",
"bytes": "5156663"
}
],
"symlink_target": ""
} |
import unittest
import pybullet
from pybullet_utils import bullet_client
PLANE_PATH = "plane.urdf"
ROBOT_PATH = "r2d2.urdf"
class TestUserDataMethods(unittest.TestCase):
@classmethod
def setUpClass(cls):
TestUserDataMethods.server = bullet_client.BulletClient(
connection_mode=pybullet.SHARED_MEMORY_SERVER)
@classmethod
def tearDownClass(cls):
del TestUserDataMethods.server
def setUp(self):
self.client = bullet_client.BulletClient(pybullet.SHARED_MEMORY)
def tearDown(self):
self.client.resetSimulation()
del self.client
def testAddUserData(self):
plane_id = self.client.loadURDF(PLANE_PATH)
uid1 = self.client.addUserData(plane_id, "MyKey1", "MyValue1")
uid2 = self.client.addUserData(plane_id, "MyKey2", "MyValue2")
uid3 = self.client.addUserData(plane_id, "MyKey3", "MyValue3")
uid4 = self.client.addUserData(plane_id, "MyKey4", "MyValue4")
# Retrieve user data and make sure it's correct.
self.assertEqual(b"MyValue1", self.client.getUserData(uid1))
self.assertEqual(b"MyValue2", self.client.getUserData(uid2))
self.assertEqual(b"MyValue3", self.client.getUserData(uid3))
self.assertEqual(b"MyValue4", self.client.getUserData(uid4))
# Disconnect/reconnect and make sure that the user data is synced back.
del self.client
self.client = bullet_client.BulletClient(pybullet.SHARED_MEMORY)
self.assertEqual(b"MyValue1", self.client.getUserData(uid1))
self.assertEqual(b"MyValue2", self.client.getUserData(uid2))
self.assertEqual(b"MyValue3", self.client.getUserData(uid3))
self.assertEqual(b"MyValue4", self.client.getUserData(uid4))
self.client.resetSimulation()
self.assertEqual(None, self.client.getUserData(uid1))
self.assertEqual(None, self.client.getUserData(uid2))
self.assertEqual(None, self.client.getUserData(uid3))
self.assertEqual(None, self.client.getUserData(uid4))
def testGetNumUserData(self):
plane_id = self.client.loadURDF(PLANE_PATH)
uid1 = self.client.addUserData(plane_id, "MyKey1", "MyValue1")
uid2 = self.client.addUserData(plane_id, "MyKey2", "MyValue2")
uid3 = self.client.addUserData(plane_id, "MyKey3", "MyValue3")
uid4 = self.client.addUserData(plane_id, "MyKey4", "MyValue4")
self.assertEqual(4, self.client.getNumUserData(plane_id))
del self.client
self.client = bullet_client.BulletClient(pybullet.SHARED_MEMORY)
self.assertEqual(4, self.client.getNumUserData(plane_id))
def testReplaceUserData(self):
plane_id = self.client.loadURDF(PLANE_PATH)
uid = self.client.addUserData(plane_id, "MyKey", "MyValue")
self.assertEqual(b"MyValue", self.client.getUserData(uid))
new_uid = self.client.addUserData(plane_id, "MyKey", "MyNewValue")
self.assertEqual(uid, new_uid)
self.assertEqual(b"MyNewValue", self.client.getUserData(uid))
del self.client
self.client = bullet_client.BulletClient(pybullet.SHARED_MEMORY)
self.assertEqual(b"MyNewValue", self.client.getUserData(uid))
def testGetUserDataId(self):
plane_id = self.client.loadURDF(PLANE_PATH)
uid1 = self.client.addUserData(plane_id, "MyKey1", "MyValue1")
uid2 = self.client.addUserData(plane_id, "MyKey2", "MyValue2")
uid3 = self.client.addUserData(plane_id, "MyKey3", "MyValue3")
uid4 = self.client.addUserData(plane_id, "MyKey4", "MyValue4")
self.assertEqual(uid1, self.client.getUserDataId(plane_id, "MyKey1"))
self.assertEqual(uid2, self.client.getUserDataId(plane_id, "MyKey2"))
self.assertEqual(uid3, self.client.getUserDataId(plane_id, "MyKey3"))
self.assertEqual(uid4, self.client.getUserDataId(plane_id, "MyKey4"))
del self.client
self.client = bullet_client.BulletClient(pybullet.SHARED_MEMORY)
self.assertEqual(uid1, self.client.getUserDataId(plane_id, "MyKey1"))
self.assertEqual(uid2, self.client.getUserDataId(plane_id, "MyKey2"))
self.assertEqual(uid3, self.client.getUserDataId(plane_id, "MyKey3"))
self.assertEqual(uid4, self.client.getUserDataId(plane_id, "MyKey4"))
def testRemoveUserData(self):
plane_id = self.client.loadURDF(PLANE_PATH)
uid1 = self.client.addUserData(plane_id, "MyKey1", "MyValue1")
uid2 = self.client.addUserData(plane_id, "MyKey2", "MyValue2")
uid3 = self.client.addUserData(plane_id, "MyKey3", "MyValue3")
uid4 = self.client.addUserData(plane_id, "MyKey4", "MyValue4")
self.client.removeUserData(uid2)
self.assertEqual(3, self.client.getNumUserData(plane_id))
self.assertEqual(-1, self.client.getUserDataId(plane_id, "MyKey2"))
self.assertEqual(None, self.client.getUserData(uid2))
self.assertEqual(b"MyValue1", self.client.getUserData(uid1))
self.assertEqual(b"MyValue3", self.client.getUserData(uid3))
self.assertEqual(b"MyValue4", self.client.getUserData(uid4))
del self.client
self.client = bullet_client.BulletClient(pybullet.SHARED_MEMORY)
self.assertEqual(3, self.client.getNumUserData(plane_id))
self.assertEqual(-1, self.client.getUserDataId(plane_id, "MyKey2"))
self.assertEqual(None, self.client.getUserData(uid2))
self.assertEqual(b"MyValue1", self.client.getUserData(uid1))
self.assertEqual(b"MyValue3", self.client.getUserData(uid3))
self.assertEqual(b"MyValue4", self.client.getUserData(uid4))
def testIterateAllUserData(self):
plane_id = self.client.loadURDF(PLANE_PATH)
uid1 = self.client.addUserData(plane_id, "MyKey1", "MyValue1")
uid2 = self.client.addUserData(plane_id, "MyKey2", "MyValue2")
uid3 = self.client.addUserData(plane_id, "MyKey3", "MyValue3")
uid4 = self.client.addUserData(plane_id, "MyKey4", "MyValue4")
entries = set()
for i in range(self.client.getNumUserData(plane_id)):
userDataId, key, bodyId, linkIndex, visualShapeIndex = self.client.getUserDataInfo(
plane_id, i)
value = self.client.getUserData(userDataId)
entries.add((userDataId, key, value, bodyId, linkIndex, visualShapeIndex))
self.assertTrue((uid1, b"MyKey1", b"MyValue1", plane_id, -1, -1) in entries)
self.assertTrue((uid2, b"MyKey2", b"MyValue2", plane_id, -1, -1) in entries)
self.assertTrue((uid3, b"MyKey3", b"MyValue3", plane_id, -1, -1) in entries)
self.assertTrue((uid4, b"MyKey4", b"MyValue4", plane_id, -1, -1) in entries)
self.assertEqual(4, len(entries))
def testRemoveBody(self):
plane_id = self.client.loadURDF(PLANE_PATH)
uid1 = self.client.addUserData(plane_id, "MyKey1", "MyValue1")
uid2 = self.client.addUserData(plane_id, "MyKey2", "MyValue2")
uid3 = self.client.addUserData(plane_id, "MyKey3", "MyValue3")
uid4 = self.client.addUserData(plane_id, "MyKey4", "MyValue4")
self.client.removeBody(plane_id)
self.assertEqual(None, self.client.getUserData(uid1))
self.assertEqual(None, self.client.getUserData(uid2))
self.assertEqual(None, self.client.getUserData(uid3))
self.assertEqual(None, self.client.getUserData(uid4))
del self.client
self.client = bullet_client.BulletClient(pybullet.SHARED_MEMORY)
self.assertEqual(None, self.client.getUserData(uid1))
self.assertEqual(None, self.client.getUserData(uid2))
self.assertEqual(None, self.client.getUserData(uid3))
self.assertEqual(None, self.client.getUserData(uid4))
def testMultipleBodies(self):
plane1 = self.client.loadURDF(PLANE_PATH)
plane2 = self.client.loadURDF(PLANE_PATH)
uid1 = self.client.addUserData(plane1, "MyKey1", "This is plane 1 - 1")
uid2 = self.client.addUserData(plane1, "MyKey2", "This is plane 1 - 2")
uid3 = self.client.addUserData(plane2, "MyKey1", "This is plane 2 - 1")
uid4 = self.client.addUserData(plane2, "MyKey2", "This is plane 2 - 2")
uid5 = self.client.addUserData(plane2, "MyKey3", "This is plane 2 - 3")
self.assertEqual(b"This is plane 1 - 1",
self.client.getUserData(self.client.getUserDataId(plane1, "MyKey1")))
self.assertEqual(b"This is plane 1 - 2",
self.client.getUserData(self.client.getUserDataId(plane1, "MyKey2")))
self.assertEqual(b"This is plane 2 - 1",
self.client.getUserData(self.client.getUserDataId(plane2, "MyKey1")))
self.assertEqual(b"This is plane 2 - 2",
self.client.getUserData(self.client.getUserDataId(plane2, "MyKey2")))
self.assertEqual(b"This is plane 2 - 3",
self.client.getUserData(self.client.getUserDataId(plane2, "MyKey3")))
def testMultipleLinks(self):
body_id = self.client.loadURDF(ROBOT_PATH)
num_links = self.client.getNumJoints(body_id)
self.assertTrue(num_links > 1)
for link_index in range(num_links):
uid1 = self.client.addUserData(body_id, "MyKey1", "Value1 for link %s" % link_index,
link_index)
uid2 = self.client.addUserData(body_id, "MyKey2", "Value2 for link %s" % link_index,
link_index)
for link_index in range(num_links):
uid1 = self.client.getUserDataId(body_id, "MyKey1", link_index)
uid2 = self.client.getUserDataId(body_id, "MyKey2", link_index)
self.assertEqual(("Value1 for link %s" % link_index).encode(), self.client.getUserData(uid1))
self.assertEqual(("Value2 for link %s" % link_index).encode(), self.client.getUserData(uid2))
def testMultipleClients(self):
client1 = self.client
client2 = bullet_client.BulletClient(pybullet.SHARED_MEMORY)
plane_id = client1.loadURDF(PLANE_PATH)
client2.syncBodyInfo()
    # Add user data on client 1, check on client 2
uid = client1.addUserData(plane_id, "MyKey", "MyValue")
self.assertEqual(None, client2.getUserData(uid))
client2.syncUserData()
self.assertEqual(b"MyValue", client2.getUserData(uid))
# Overwrite the value on client 2, check on client 1
client2.addUserData(plane_id, "MyKey", "MyNewValue")
self.assertEqual(b"MyValue", client1.getUserData(uid))
client1.syncUserData()
self.assertEqual(b"MyNewValue", client1.getUserData(uid))
# Remove user data on client 1, check on client 2
client1.removeUserData(uid)
self.assertEqual(b"MyNewValue", client2.getUserData(uid))
client2.syncUserData()
self.assertEqual(None, client2.getUserData(uid))
del client2
def testUserDataOnVisualShapes(self):
body_id = self.client.loadURDF(ROBOT_PATH)
num_links = self.client.getNumJoints(body_id)
visual_shapes = self.client.getVisualShapeData(body_id)
self.assertTrue(num_links > 0)
self.assertTrue(len(visual_shapes) > 0)
user_data_entries = set()
for link_index in range(-1, num_links):
num_shapes = sum([1 for shape in visual_shapes if shape[1] == link_index])
for shape_index in range(num_shapes):
key = "MyKey"
value = "MyValue %s, %s" % (link_index, shape_index)
uid = self.client.addUserData(body_id, key, value, link_index, shape_index)
user_data_entries.add((uid, key, value.encode(), body_id, link_index, shape_index))
self.assertEqual(len(visual_shapes), self.client.getNumUserData(body_id))
for uid, key, value, body_id, link_index, shape_index in user_data_entries:
self.assertEqual(value, self.client.getUserData(uid))
self.assertEqual(uid, self.client.getUserDataId(body_id, key, link_index, shape_index))
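# --- Illustrative sketch (added, not part of the original test suite) ---
# A minimal, hedged usage example of the user-data API exercised above. It only
# repeats calls demonstrated by the tests (addUserData, getUserData, getUserDataId,
# removeUserData). Loading "plane.urdf" via pybullet_data and using a DIRECT
# connection are assumptions for a self-contained run; the tests themselves use a
# SHARED_MEMORY client, so behaviour under DIRECT is assumed, not asserted here.
def _user_data_usage_sketch():
  import pybullet_data
  client = bullet_client.BulletClient(pybullet.DIRECT)
  client.setAdditionalSearchPath(pybullet_data.getDataPath())
  plane = client.loadURDF("plane.urdf")
  uid = client.addUserData(plane, "MyKey", "MyValue")  # returns a user data id
  assert client.getUserData(uid) == b"MyValue"  # values are returned as bytes
  assert client.getUserDataId(plane, "MyKey") == uid  # ids can be looked up by key
  client.removeUserData(uid)
  assert client.getUserData(uid) is None  # removed entries read back as None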
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "3db07c82593b811a95aa557e9271ed44",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 99,
"avg_line_length": 43.19318181818182,
"alnum_prop": 0.7023590283258792,
"repo_name": "MTASZTAKI/ApertusVR",
"id": "1f0d06e8ed1639e82136b8ec819419b498ae7648",
"size": "11403",
"binary": false,
"copies": "2",
"ref": "refs/heads/0.9",
"path": "plugins/physics/bulletPhysics/3rdParty/bullet3/examples/pybullet/unittests/userDataTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7599"
},
{
"name": "C++",
"bytes": "1207412"
},
{
"name": "CMake",
"bytes": "165066"
},
{
"name": "CSS",
"bytes": "1816"
},
{
"name": "GLSL",
"bytes": "223507"
},
{
"name": "HLSL",
"bytes": "141879"
},
{
"name": "HTML",
"bytes": "34827"
},
{
"name": "JavaScript",
"bytes": "140550"
},
{
"name": "Python",
"bytes": "1370"
}
],
"symlink_target": ""
} |
import itertools
import struct
from binascii import unhexlify
from codecs import getincrementaldecoder
from typing import Dict, Optional, Tuple, Union
import pytest
from wsproto import extensions as wpext, frame_protocol as fp
class TestBuffer:
def test_consume_at_most_zero_bytes(self) -> None:
buf = fp.Buffer(b"xxyyy")
assert buf.consume_at_most(0) == bytearray()
def test_consume_at_most_with_no_data(self) -> None:
buf = fp.Buffer()
assert buf.consume_at_most(1) == bytearray()
def test_consume_at_most_with_sufficient_data(self) -> None:
buf = fp.Buffer(b"xx")
assert buf.consume_at_most(2) == b"xx"
def test_consume_at_most_with_more_than_sufficient_data(self) -> None:
buf = fp.Buffer(b"xxyyy")
assert buf.consume_at_most(2) == b"xx"
def test_consume_at_most_with_insufficient_data(self) -> None:
buf = fp.Buffer(b"xx")
assert buf.consume_at_most(3) == b"xx"
def test_consume_exactly_with_sufficient_data(self) -> None:
buf = fp.Buffer(b"xx")
assert buf.consume_exactly(2) == b"xx"
def test_consume_exactly_with_more_than_sufficient_data(self) -> None:
buf = fp.Buffer(b"xxyyy")
assert buf.consume_exactly(2) == b"xx"
def test_consume_exactly_with_insufficient_data(self) -> None:
buf = fp.Buffer(b"xx")
assert buf.consume_exactly(3) is None
def test_feed(self) -> None:
buf = fp.Buffer()
assert buf.consume_at_most(1) == b""
assert buf.consume_exactly(1) is None
buf.feed(b"xy")
assert buf.consume_at_most(1) == b"x"
assert buf.consume_exactly(1) == b"y"
def test_rollback(self) -> None:
buf = fp.Buffer()
buf.feed(b"xyz")
assert buf.consume_exactly(2) == b"xy"
assert buf.consume_exactly(1) == b"z"
assert buf.consume_at_most(1) == b""
buf.rollback()
assert buf.consume_at_most(3) == b"xyz"
def test_commit(self) -> None:
buf = fp.Buffer()
buf.feed(b"xyz")
assert buf.consume_exactly(2) == b"xy"
assert buf.consume_exactly(1) == b"z"
assert buf.consume_at_most(1) == b""
buf.commit()
assert buf.consume_at_most(3) == b""
def test_length(self) -> None:
buf = fp.Buffer()
data = b"xyzabc"
buf.feed(data)
assert len(buf) == len(data)
class TestMessageDecoder:
def test_single_binary_frame(self) -> None:
payload = b"x" * 23
decoder = fp.MessageDecoder()
frame = fp.Frame(
opcode=fp.Opcode.BINARY,
payload=payload,
frame_finished=True,
message_finished=True,
)
frame = decoder.process_frame(frame)
assert frame.opcode is fp.Opcode.BINARY
assert frame.message_finished is True
assert frame.payload == payload
def test_follow_on_binary_frame(self) -> None:
payload = b"x" * 23
decoder = fp.MessageDecoder()
decoder.opcode = fp.Opcode.BINARY
frame = fp.Frame(
opcode=fp.Opcode.CONTINUATION,
payload=payload,
frame_finished=True,
message_finished=False,
)
frame = decoder.process_frame(frame)
assert frame.opcode is fp.Opcode.BINARY
assert frame.message_finished is False
assert frame.payload == payload
def test_single_text_frame(self) -> None:
text_payload = "fñör∂"
binary_payload = text_payload.encode("utf8")
decoder = fp.MessageDecoder()
frame = fp.Frame(
opcode=fp.Opcode.TEXT,
payload=binary_payload,
frame_finished=True,
message_finished=True,
)
frame = decoder.process_frame(frame)
assert frame.opcode is fp.Opcode.TEXT
assert frame.message_finished is True
assert frame.payload == text_payload
def test_follow_on_text_frame(self) -> None:
text_payload = "fñör∂"
binary_payload = text_payload.encode("utf8")
decoder = fp.MessageDecoder()
decoder.opcode = fp.Opcode.TEXT
decoder.decoder = getincrementaldecoder("utf-8")()
assert decoder.decoder.decode(binary_payload[:4]) == text_payload[:2]
binary_payload = binary_payload[4:-2]
text_payload = text_payload[2:-1]
frame = fp.Frame(
opcode=fp.Opcode.CONTINUATION,
payload=binary_payload,
frame_finished=True,
message_finished=False,
)
frame = decoder.process_frame(frame)
assert frame.opcode is fp.Opcode.TEXT
assert frame.message_finished is False
assert frame.payload == text_payload
def test_final_text_frame(self) -> None:
text_payload = "fñör∂"
binary_payload = text_payload.encode("utf8")
decoder = fp.MessageDecoder()
decoder.opcode = fp.Opcode.TEXT
decoder.decoder = getincrementaldecoder("utf-8")()
assert decoder.decoder.decode(binary_payload[:-2]) == text_payload[:-1]
binary_payload = binary_payload[-2:]
text_payload = text_payload[-1:]
frame = fp.Frame(
opcode=fp.Opcode.CONTINUATION,
payload=binary_payload,
frame_finished=True,
message_finished=True,
)
frame = decoder.process_frame(frame)
assert frame.opcode is fp.Opcode.TEXT
assert frame.message_finished is True
assert frame.payload == text_payload
def test_start_with_continuation(self) -> None:
payload = b"x" * 23
decoder = fp.MessageDecoder()
frame = fp.Frame(
opcode=fp.Opcode.CONTINUATION,
payload=payload,
frame_finished=True,
message_finished=True,
)
with pytest.raises(fp.ParseFailed):
decoder.process_frame(frame)
def test_missing_continuation_1(self) -> None:
payload = b"x" * 23
decoder = fp.MessageDecoder()
decoder.opcode = fp.Opcode.BINARY
frame = fp.Frame(
opcode=fp.Opcode.BINARY,
payload=payload,
frame_finished=True,
message_finished=True,
)
with pytest.raises(fp.ParseFailed):
decoder.process_frame(frame)
def test_missing_continuation_2(self) -> None:
payload = b"x" * 23
decoder = fp.MessageDecoder()
decoder.opcode = fp.Opcode.TEXT
frame = fp.Frame(
opcode=fp.Opcode.BINARY,
payload=payload,
frame_finished=True,
message_finished=True,
)
with pytest.raises(fp.ParseFailed):
decoder.process_frame(frame)
def test_incomplete_unicode(self) -> None:
payload = "fñör∂".encode()
payload = payload[:4]
decoder = fp.MessageDecoder()
frame = fp.Frame(
opcode=fp.Opcode.TEXT,
payload=payload,
frame_finished=True,
message_finished=True,
)
with pytest.raises(fp.ParseFailed) as excinfo:
decoder.process_frame(frame)
assert excinfo.value.code is fp.CloseReason.INVALID_FRAME_PAYLOAD_DATA
def test_not_even_unicode(self) -> None:
payload = "fñörd".encode("iso-8859-1")
decoder = fp.MessageDecoder()
frame = fp.Frame(
opcode=fp.Opcode.TEXT,
payload=payload,
frame_finished=True,
message_finished=False,
)
with pytest.raises(fp.ParseFailed) as excinfo:
decoder.process_frame(frame)
assert excinfo.value.code is fp.CloseReason.INVALID_FRAME_PAYLOAD_DATA
def test_bad_unicode(self) -> None:
payload = unhexlify("cebae1bdb9cf83cebcceb5eda080656469746564")
decoder = fp.MessageDecoder()
frame = fp.Frame(
opcode=fp.Opcode.TEXT,
payload=payload,
frame_finished=True,
message_finished=True,
)
with pytest.raises(fp.ParseFailed) as excinfo:
decoder.process_frame(frame)
assert excinfo.value.code is fp.CloseReason.INVALID_FRAME_PAYLOAD_DATA
def test_split_message(self) -> None:
text_payload = "x" * 65535
payload = text_payload.encode("utf-8")
split = 32777
decoder = fp.MessageDecoder()
frame = fp.Frame(
opcode=fp.Opcode.TEXT,
payload=payload[:split],
frame_finished=False,
message_finished=True,
)
frame = decoder.process_frame(frame)
assert frame.opcode is fp.Opcode.TEXT
assert frame.message_finished is False
assert frame.payload == text_payload[:split]
frame = fp.Frame(
opcode=fp.Opcode.CONTINUATION,
payload=payload[split:],
frame_finished=True,
message_finished=True,
)
frame = decoder.process_frame(frame)
assert frame.opcode is fp.Opcode.TEXT
assert frame.message_finished is True
assert frame.payload == text_payload[split:]
def test_split_unicode_message(self) -> None:
text_payload = "∂" * 64
payload = text_payload.encode("utf-8")
split = 64
decoder = fp.MessageDecoder()
frame = fp.Frame(
opcode=fp.Opcode.TEXT,
payload=payload[:split],
frame_finished=False,
message_finished=True,
)
frame = decoder.process_frame(frame)
assert frame.opcode is fp.Opcode.TEXT
assert frame.message_finished is False
assert frame.payload == text_payload[: (split // 3)]
frame = fp.Frame(
opcode=fp.Opcode.CONTINUATION,
payload=payload[split:],
frame_finished=True,
message_finished=True,
)
frame = decoder.process_frame(frame)
assert frame.opcode is fp.Opcode.TEXT
assert frame.message_finished is True
assert frame.payload == text_payload[(split // 3) :]
def send_frame_to_validator(self, payload: bytes, finished: bool) -> None:
decoder = fp.MessageDecoder()
frame = fp.Frame(
opcode=fp.Opcode.TEXT,
payload=payload,
frame_finished=finished,
message_finished=True,
)
frame = decoder.process_frame(frame)
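# Note for readers (added commentary, not part of the original module): the literal
# frame bytes in the decoder tests below follow RFC 6455 framing.
#   byte 0: FIN (0x80) | RSV1/RSV2/RSV3 (0x40/0x20/0x10) | opcode in the low nibble
#           (0x0 continuation, 0x1 text, 0x2 binary, 0x8 close, 0x9 ping, 0xA pong)
#   byte 1: MASK (0x80) | payload length (0-125 literal; 126 means a 16-bit length
#           follows; 127 means a 64-bit length follows)
#   then:   a 4-byte masking key if MASK is set, followed by the (masked) payload.
# For example, b"\x81\x02xy" is a finished, unmasked TEXT frame with payload b"xy",
# and b"\x81\x82abcd..." is the same frame masked with key b"abcd".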
class TestFrameDecoder:
def _single_frame_test(
self,
client: bool,
frame_bytes: bytes,
opcode: fp.Opcode,
payload: bytes,
frame_finished: bool,
message_finished: bool,
) -> None:
decoder = fp.FrameDecoder(client=client)
decoder.receive_bytes(frame_bytes)
frame = decoder.process_buffer()
assert frame is not None
assert frame.opcode is opcode
assert frame.payload == payload
assert frame.frame_finished is frame_finished
assert frame.message_finished is message_finished
def _split_frame_test(
self,
client: bool,
frame_bytes: bytes,
opcode: fp.Opcode,
payload: bytes,
frame_finished: bool,
message_finished: bool,
split: int,
) -> None:
decoder = fp.FrameDecoder(client=client)
decoder.receive_bytes(frame_bytes[:split])
assert decoder.process_buffer() is None
decoder.receive_bytes(frame_bytes[split:])
frame = decoder.process_buffer()
assert frame is not None
assert frame.opcode is opcode
assert frame.payload == payload
assert frame.frame_finished is frame_finished
assert frame.message_finished is message_finished
def _split_message_test(
self,
client: bool,
frame_bytes: bytes,
opcode: fp.Opcode,
payload: bytes,
split: int,
) -> None:
decoder = fp.FrameDecoder(client=client)
decoder.receive_bytes(frame_bytes[:split])
frame = decoder.process_buffer()
assert frame is not None
assert frame.opcode is opcode
assert frame.payload == payload[: len(frame.payload)]
assert frame.frame_finished is False
assert frame.message_finished is True
decoder.receive_bytes(frame_bytes[split:])
frame = decoder.process_buffer()
assert frame is not None
assert frame.opcode is fp.Opcode.CONTINUATION
assert frame.payload == payload[-len(frame.payload) :]
assert frame.frame_finished is True
assert frame.message_finished is True
def _parse_failure_test(
self, client: bool, frame_bytes: bytes, close_reason: fp.CloseReason
) -> None:
decoder = fp.FrameDecoder(client=client)
with pytest.raises(fp.ParseFailed) as excinfo:
decoder.receive_bytes(frame_bytes)
decoder.process_buffer()
assert excinfo.value.code is close_reason
def test_zero_length_message(self) -> None:
self._single_frame_test(
client=True,
frame_bytes=b"\x81\x00",
opcode=fp.Opcode.TEXT,
payload=b"",
frame_finished=True,
message_finished=True,
)
def test_short_server_message_frame(self) -> None:
self._single_frame_test(
client=True,
frame_bytes=b"\x81\x02xy",
opcode=fp.Opcode.TEXT,
payload=b"xy",
frame_finished=True,
message_finished=True,
)
def test_short_client_message_frame(self) -> None:
self._single_frame_test(
client=False,
frame_bytes=b"\x81\x82abcd\x19\x1b",
opcode=fp.Opcode.TEXT,
payload=b"xy",
frame_finished=True,
message_finished=True,
)
def test_reject_masked_server_frame(self) -> None:
self._parse_failure_test(
client=True,
frame_bytes=b"\x81\x82abcd\x19\x1b",
close_reason=fp.CloseReason.PROTOCOL_ERROR,
)
def test_reject_unmasked_client_frame(self) -> None:
self._parse_failure_test(
client=False,
frame_bytes=b"\x81\x02xy",
close_reason=fp.CloseReason.PROTOCOL_ERROR,
)
def test_reject_bad_opcode(self) -> None:
self._parse_failure_test(
client=True,
frame_bytes=b"\x8e\x02xy",
close_reason=fp.CloseReason.PROTOCOL_ERROR,
)
def test_reject_unfinished_control_frame(self) -> None:
self._parse_failure_test(
client=True,
frame_bytes=b"\x09\x02xy",
close_reason=fp.CloseReason.PROTOCOL_ERROR,
)
def test_reject_reserved_bits(self) -> None:
self._parse_failure_test(
client=True,
frame_bytes=b"\x91\x02xy",
close_reason=fp.CloseReason.PROTOCOL_ERROR,
)
self._parse_failure_test(
client=True,
frame_bytes=b"\xa1\x02xy",
close_reason=fp.CloseReason.PROTOCOL_ERROR,
)
self._parse_failure_test(
client=True,
frame_bytes=b"\xc1\x02xy",
close_reason=fp.CloseReason.PROTOCOL_ERROR,
)
def test_long_message_frame(self) -> None:
payload = b"x" * 512
payload_len = struct.pack("!H", len(payload))
frame_bytes = b"\x81\x7e" + payload_len + payload
self._single_frame_test(
client=True,
frame_bytes=frame_bytes,
opcode=fp.Opcode.TEXT,
payload=payload,
frame_finished=True,
message_finished=True,
)
def test_very_long_message_frame(self) -> None:
payload = b"x" * (128 * 1024)
payload_len = struct.pack("!Q", len(payload))
frame_bytes = b"\x81\x7f" + payload_len + payload
self._single_frame_test(
client=True,
frame_bytes=frame_bytes,
opcode=fp.Opcode.TEXT,
payload=payload,
frame_finished=True,
message_finished=True,
)
def test_insufficiently_long_message_frame(self) -> None:
payload = b"x" * 64
payload_len = struct.pack("!H", len(payload))
frame_bytes = b"\x81\x7e" + payload_len + payload
self._parse_failure_test(
client=True,
frame_bytes=frame_bytes,
close_reason=fp.CloseReason.PROTOCOL_ERROR,
)
def test_insufficiently_very_long_message_frame(self) -> None:
payload = b"x" * 512
payload_len = struct.pack("!Q", len(payload))
frame_bytes = b"\x81\x7f" + payload_len + payload
self._parse_failure_test(
client=True,
frame_bytes=frame_bytes,
close_reason=fp.CloseReason.PROTOCOL_ERROR,
)
def test_very_insufficiently_very_long_message_frame(self) -> None:
payload = b"x" * 64
payload_len = struct.pack("!Q", len(payload))
frame_bytes = b"\x81\x7f" + payload_len + payload
self._parse_failure_test(
client=True,
frame_bytes=frame_bytes,
close_reason=fp.CloseReason.PROTOCOL_ERROR,
)
def test_not_enough_for_header(self) -> None:
payload = b"xy"
frame_bytes = b"\x81\x02" + payload
self._split_frame_test(
client=True,
frame_bytes=frame_bytes,
opcode=fp.Opcode.TEXT,
payload=payload,
frame_finished=True,
message_finished=True,
split=1,
)
def test_not_enough_for_long_length(self) -> None:
payload = b"x" * 512
payload_len = struct.pack("!H", len(payload))
frame_bytes = b"\x81\x7e" + payload_len + payload
self._split_frame_test(
client=True,
frame_bytes=frame_bytes,
opcode=fp.Opcode.TEXT,
payload=payload,
frame_finished=True,
message_finished=True,
split=3,
)
def test_not_enough_for_very_long_length(self) -> None:
payload = b"x" * (128 * 1024)
payload_len = struct.pack("!Q", len(payload))
frame_bytes = b"\x81\x7f" + payload_len + payload
self._split_frame_test(
client=True,
frame_bytes=frame_bytes,
opcode=fp.Opcode.TEXT,
payload=payload,
frame_finished=True,
message_finished=True,
split=7,
)
def test_eight_byte_length_with_msb_set(self) -> None:
frame_bytes = b"\x81\x7f\x80\x80\x80\x80\x80\x80\x80\x80"
self._parse_failure_test(
client=True,
frame_bytes=frame_bytes,
close_reason=fp.CloseReason.PROTOCOL_ERROR,
)
def test_not_enough_for_mask(self) -> None:
payload = bytearray(b"xy")
mask = bytearray(b"abcd")
masked_payload = bytearray([payload[0] ^ mask[0], payload[1] ^ mask[1]])
frame_bytes = b"\x81\x82" + mask + masked_payload
self._split_frame_test(
client=False,
frame_bytes=frame_bytes,
opcode=fp.Opcode.TEXT,
payload=payload,
frame_finished=True,
message_finished=True,
split=4,
)
def test_partial_message_frames(self) -> None:
chunk_size = 1024
payload = b"x" * (128 * chunk_size)
payload_len = struct.pack("!Q", len(payload))
frame_bytes = b"\x81\x7f" + payload_len + payload
header_len = len(frame_bytes) - len(payload)
decoder = fp.FrameDecoder(client=True)
decoder.receive_bytes(frame_bytes[:header_len])
assert decoder.process_buffer() is None
frame_bytes = frame_bytes[header_len:]
payload_sent = 0
expected_opcode = fp.Opcode.TEXT
for offset in range(0, len(frame_bytes), chunk_size):
chunk = frame_bytes[offset : offset + chunk_size]
decoder.receive_bytes(chunk)
frame = decoder.process_buffer()
payload_sent += chunk_size
all_payload_sent = payload_sent == len(payload)
assert frame is not None
assert frame.opcode is expected_opcode
assert frame.frame_finished is all_payload_sent
assert frame.message_finished is True
assert frame.payload == payload[offset : offset + chunk_size]
expected_opcode = fp.Opcode.CONTINUATION
def test_partial_control_frame(self) -> None:
chunk_size = 11
payload = b"x" * 64
frame_bytes = b"\x89" + bytearray([len(payload)]) + payload
decoder = fp.FrameDecoder(client=True)
for offset in range(0, len(frame_bytes) - chunk_size, chunk_size):
chunk = frame_bytes[offset : offset + chunk_size]
decoder.receive_bytes(chunk)
assert decoder.process_buffer() is None
decoder.receive_bytes(frame_bytes[-chunk_size:])
frame = decoder.process_buffer()
assert frame is not None
assert frame.opcode is fp.Opcode.PING
assert frame.frame_finished is True
assert frame.message_finished is True
assert frame.payload == payload
def test_long_message_sliced(self) -> None:
payload = b"x" * 65535
payload_len = struct.pack("!H", len(payload))
frame_bytes = b"\x81\x7e" + payload_len + payload
self._split_message_test(
client=True,
frame_bytes=frame_bytes,
opcode=fp.Opcode.TEXT,
payload=payload,
split=65535,
)
def test_overly_long_control_frame(self) -> None:
payload = b"x" * 128
payload_len = struct.pack("!H", len(payload))
frame_bytes = b"\x89\x7e" + payload_len + payload
self._parse_failure_test(
client=True,
frame_bytes=frame_bytes,
close_reason=fp.CloseReason.PROTOCOL_ERROR,
)
class TestFrameDecoderExtensions:
class FakeExtension(wpext.Extension):
name = "fake"
def __init__(self) -> None:
self._inbound_header_called = False
self._inbound_rsv_bit_set = False
self._inbound_payload_data_called = False
self._inbound_complete_called = False
self._fail_inbound_complete = False
self._outbound_rsv_bit_set = False
def enabled(self) -> bool:
return True
def frame_inbound_header(
self,
proto: Union[fp.FrameDecoder, fp.FrameProtocol],
opcode: fp.Opcode,
rsv: fp.RsvBits,
payload_length: int,
) -> Union[fp.CloseReason, fp.RsvBits]:
self._inbound_header_called = True
if opcode is fp.Opcode.PONG:
return fp.CloseReason.MANDATORY_EXT
self._inbound_rsv_bit_set = rsv.rsv3
return fp.RsvBits(False, False, True)
def frame_inbound_payload_data(
self, proto: Union[fp.FrameDecoder, fp.FrameProtocol], data: bytes
) -> Union[bytes, fp.CloseReason]:
self._inbound_payload_data_called = True
if data == b"party time":
return fp.CloseReason.POLICY_VIOLATION
elif data == b"ragequit":
self._fail_inbound_complete = True
if self._inbound_rsv_bit_set:
data = data.decode("utf-8").upper().encode("utf-8")
return data
def frame_inbound_complete(
self, proto: Union[fp.FrameDecoder, fp.FrameProtocol], fin: bool
) -> Union[bytes, fp.CloseReason, None]:
self._inbound_complete_called = True
if self._fail_inbound_complete:
return fp.CloseReason.ABNORMAL_CLOSURE
if fin and self._inbound_rsv_bit_set:
return "™".encode()
return None
def frame_outbound(
self,
proto: Union[fp.FrameDecoder, fp.FrameProtocol],
opcode: fp.Opcode,
rsv: fp.RsvBits,
data: bytes,
fin: bool,
) -> Tuple[fp.RsvBits, bytes]:
if opcode is fp.Opcode.TEXT:
rsv = fp.RsvBits(rsv.rsv1, rsv.rsv2, True)
self._outbound_rsv_bit_set = True
if fin and self._outbound_rsv_bit_set:
data += "®".encode()
self._outbound_rsv_bit_set = False
return rsv, data
def test_rsv_bit(self) -> None:
ext = self.FakeExtension()
decoder = fp.FrameDecoder(client=True, extensions=[ext])
frame_bytes = b"\x91\x00"
decoder.receive_bytes(frame_bytes)
frame = decoder.process_buffer()
assert frame is not None
assert ext._inbound_header_called
assert ext._inbound_rsv_bit_set
def test_wrong_rsv_bit(self) -> None:
ext = self.FakeExtension()
decoder = fp.FrameDecoder(client=True, extensions=[ext])
frame_bytes = b"\xa1\x00"
decoder.receive_bytes(frame_bytes)
with pytest.raises(fp.ParseFailed) as excinfo:
decoder.receive_bytes(frame_bytes)
decoder.process_buffer()
assert excinfo.value.code is fp.CloseReason.PROTOCOL_ERROR
def test_header_error_handling(self) -> None:
ext = self.FakeExtension()
decoder = fp.FrameDecoder(client=True, extensions=[ext])
frame_bytes = b"\x9a\x00"
decoder.receive_bytes(frame_bytes)
with pytest.raises(fp.ParseFailed) as excinfo:
decoder.receive_bytes(frame_bytes)
decoder.process_buffer()
assert excinfo.value.code is fp.CloseReason.MANDATORY_EXT
def test_payload_processing(self) -> None:
ext = self.FakeExtension()
decoder = fp.FrameDecoder(client=True, extensions=[ext])
payload = "fñör∂"
expected_payload = payload.upper().encode("utf-8")
bytes_payload = payload.encode("utf-8")
frame_bytes = b"\x11" + bytearray([len(bytes_payload)]) + bytes_payload
decoder.receive_bytes(frame_bytes)
frame = decoder.process_buffer()
assert frame is not None
assert ext._inbound_header_called
assert ext._inbound_rsv_bit_set
assert ext._inbound_payload_data_called
assert frame.payload == expected_payload
def test_no_payload_processing_when_not_wanted(self) -> None:
ext = self.FakeExtension()
decoder = fp.FrameDecoder(client=True, extensions=[ext])
payload = "fñör∂"
expected_payload = payload.encode("utf-8")
bytes_payload = payload.encode("utf-8")
frame_bytes = b"\x01" + bytearray([len(bytes_payload)]) + bytes_payload
decoder.receive_bytes(frame_bytes)
frame = decoder.process_buffer()
assert frame is not None
assert ext._inbound_header_called
assert not ext._inbound_rsv_bit_set
assert ext._inbound_payload_data_called
assert frame.payload == expected_payload
def test_payload_error_handling(self) -> None:
ext = self.FakeExtension()
decoder = fp.FrameDecoder(client=True, extensions=[ext])
payload = b"party time"
frame_bytes = b"\x91" + bytearray([len(payload)]) + payload
decoder.receive_bytes(frame_bytes)
with pytest.raises(fp.ParseFailed) as excinfo:
decoder.receive_bytes(frame_bytes)
decoder.process_buffer()
assert excinfo.value.code is fp.CloseReason.POLICY_VIOLATION
def test_frame_completion(self) -> None:
ext = self.FakeExtension()
decoder = fp.FrameDecoder(client=True, extensions=[ext])
payload = "fñör∂"
expected_payload = (payload + "™").upper().encode("utf-8")
bytes_payload = payload.encode("utf-8")
frame_bytes = b"\x91" + bytearray([len(bytes_payload)]) + bytes_payload
decoder.receive_bytes(frame_bytes)
frame = decoder.process_buffer()
assert frame is not None
assert ext._inbound_header_called
assert ext._inbound_rsv_bit_set
assert ext._inbound_payload_data_called
assert ext._inbound_complete_called
assert frame.payload == expected_payload
def test_no_frame_completion_when_not_wanted(self) -> None:
ext = self.FakeExtension()
decoder = fp.FrameDecoder(client=True, extensions=[ext])
payload = "fñör∂"
expected_payload = payload.encode("utf-8")
bytes_payload = payload.encode("utf-8")
frame_bytes = b"\x81" + bytearray([len(bytes_payload)]) + bytes_payload
decoder.receive_bytes(frame_bytes)
frame = decoder.process_buffer()
assert frame is not None
assert ext._inbound_header_called
assert not ext._inbound_rsv_bit_set
assert ext._inbound_payload_data_called
assert ext._inbound_complete_called
assert frame.payload == expected_payload
def test_completion_error_handling(self) -> None:
ext = self.FakeExtension()
decoder = fp.FrameDecoder(client=True, extensions=[ext])
payload = b"ragequit"
frame_bytes = b"\x91" + bytearray([len(payload)]) + payload
decoder.receive_bytes(frame_bytes)
with pytest.raises(fp.ParseFailed) as excinfo:
decoder.receive_bytes(frame_bytes)
decoder.process_buffer()
assert excinfo.value.code is fp.CloseReason.ABNORMAL_CLOSURE
def test_outbound_handling_single_frame(self) -> None:
ext = self.FakeExtension()
proto = fp.FrameProtocol(client=False, extensions=[ext])
payload = "😃😄🙃😉"
data = proto.send_data(payload, fin=True)
payload_bytes = (payload + "®").encode("utf8")
assert data == b"\x91" + bytearray([len(payload_bytes)]) + payload_bytes
def test_outbound_handling_multiple_frames(self) -> None:
ext = self.FakeExtension()
proto = fp.FrameProtocol(client=False, extensions=[ext])
payload = "😃😄🙃😉"
data = proto.send_data(payload, fin=False)
payload_bytes = payload.encode("utf8")
assert data == b"\x11" + bytearray([len(payload_bytes)]) + payload_bytes
payload = r"¯\_(ツ)_/¯"
data = proto.send_data(payload, fin=True)
payload_bytes = (payload + "®").encode("utf8")
assert data == b"\x80" + bytearray([len(payload_bytes)]) + payload_bytes
class TestFrameProtocolReceive:
def test_long_text_message(self) -> None:
payload = "x" * 65535
encoded_payload = payload.encode("utf-8")
payload_len = struct.pack("!H", len(encoded_payload))
frame_bytes = b"\x81\x7e" + payload_len + encoded_payload
protocol = fp.FrameProtocol(client=True, extensions=[])
protocol.receive_bytes(frame_bytes)
frames = list(protocol.received_frames())
assert len(frames) == 1
frame = frames[0]
assert frame.opcode == fp.Opcode.TEXT
assert len(frame.payload) == len(payload)
assert frame.payload == payload
def _close_test(
self,
code: Optional[int],
reason: Optional[str] = None,
reason_bytes: Optional[bytes] = None,
) -> None:
payload = b""
if code:
payload += struct.pack("!H", code)
if reason:
payload += reason.encode("utf8")
elif reason_bytes:
payload += reason_bytes
frame_bytes = b"\x88" + bytearray([len(payload)]) + payload
protocol = fp.FrameProtocol(client=True, extensions=[])
protocol.receive_bytes(frame_bytes)
frames = list(protocol.received_frames())
assert len(frames) == 1
frame = frames[0]
assert frame.opcode == fp.Opcode.CLOSE
        assert frame.payload[0] == (code or fp.CloseReason.NO_STATUS_RCVD)
if reason:
assert frame.payload[1] == reason
else:
assert not frame.payload[1]
def test_close_no_code(self) -> None:
self._close_test(None)
def test_close_one_byte_code(self) -> None:
frame_bytes = b"\x88\x01\x0e"
protocol = fp.FrameProtocol(client=True, extensions=[])
with pytest.raises(fp.ParseFailed) as exc:
protocol.receive_bytes(frame_bytes)
list(protocol.received_frames())
assert exc.value.code == fp.CloseReason.PROTOCOL_ERROR
def test_close_bad_code(self) -> None:
with pytest.raises(fp.ParseFailed) as exc:
self._close_test(123)
assert exc.value.code == fp.CloseReason.PROTOCOL_ERROR
def test_close_unknown_code(self) -> None:
with pytest.raises(fp.ParseFailed) as exc:
self._close_test(2998)
assert exc.value.code == fp.CloseReason.PROTOCOL_ERROR
def test_close_local_only_code(self) -> None:
with pytest.raises(fp.ParseFailed) as exc:
self._close_test(fp.CloseReason.NO_STATUS_RCVD)
assert exc.value.code == fp.CloseReason.PROTOCOL_ERROR
def test_close_no_payload(self) -> None:
self._close_test(fp.CloseReason.NORMAL_CLOSURE)
def test_close_easy_payload(self) -> None:
self._close_test(fp.CloseReason.NORMAL_CLOSURE, "tarah old chap")
def test_close_utf8_payload(self) -> None:
self._close_test(fp.CloseReason.NORMAL_CLOSURE, "fñør∂")
def test_close_bad_utf8_payload(self) -> None:
payload = unhexlify("cebae1bdb9cf83cebcceb5eda080656469746564")
with pytest.raises(fp.ParseFailed) as exc:
self._close_test(fp.CloseReason.NORMAL_CLOSURE, reason_bytes=payload)
assert exc.value.code == fp.CloseReason.INVALID_FRAME_PAYLOAD_DATA
def test_close_incomplete_utf8_payload(self) -> None:
payload = "fñør∂".encode()[:-1]
with pytest.raises(fp.ParseFailed) as exc:
self._close_test(fp.CloseReason.NORMAL_CLOSURE, reason_bytes=payload)
assert exc.value.code == fp.CloseReason.INVALID_FRAME_PAYLOAD_DATA
def test_random_control_frame(self) -> None:
payload = b"give me one ping vasily"
frame_bytes = b"\x89" + bytearray([len(payload)]) + payload
protocol = fp.FrameProtocol(client=True, extensions=[])
protocol.receive_bytes(frame_bytes)
frames = list(protocol.received_frames())
assert len(frames) == 1
frame = frames[0]
assert frame.opcode == fp.Opcode.PING
assert len(frame.payload) == len(payload)
assert frame.payload == payload
class TestFrameProtocolSend:
def test_simplest_possible_close(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
data = proto.close()
assert data == b"\x88\x00"
def test_unreasoning_close(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
data = proto.close(code=fp.CloseReason.NORMAL_CLOSURE)
assert data == b"\x88\x02\x03\xe8"
def test_reasoned_close(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
reason = r"¯\_(ツ)_/¯"
expected_payload = struct.pack(
"!H", fp.CloseReason.NORMAL_CLOSURE
) + reason.encode("utf8")
data = proto.close(code=fp.CloseReason.NORMAL_CLOSURE, reason=reason)
assert data == b"\x88" + bytearray([len(expected_payload)]) + expected_payload
def test_overly_reasoned_close(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
reason = r"¯\_(ツ)_/¯" * 10
data = proto.close(code=fp.CloseReason.NORMAL_CLOSURE, reason=reason)
assert bytes(data[0:1]) == b"\x88"
assert len(data) <= 127
assert data[4:].decode("utf8")
def test_reasoned_but_uncoded_close(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
with pytest.raises(TypeError):
proto.close(reason="termites")
def test_no_status_rcvd_close_reason(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
data = proto.close(code=fp.CloseReason.NO_STATUS_RCVD)
assert data == b"\x88\x00"
def test_local_only_close_reason(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
data = proto.close(code=fp.CloseReason.ABNORMAL_CLOSURE)
assert data == b"\x88\x02\x03\xe8"
def test_ping_without_payload(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
data = proto.ping()
assert data == b"\x89\x00"
def test_ping_with_payload(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
payload = r"¯\_(ツ)_/¯".encode()
data = proto.ping(payload)
assert data == b"\x89" + bytearray([len(payload)]) + payload
def test_pong_without_payload(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
data = proto.pong()
assert data == b"\x8a\x00"
def test_pong_with_payload(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
payload = r"¯\_(ツ)_/¯".encode()
data = proto.pong(payload)
assert data == b"\x8a" + bytearray([len(payload)]) + payload
def test_single_short_binary_data(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
payload = b"it's all just ascii, right?"
data = proto.send_data(payload, fin=True)
assert data == b"\x82" + bytearray([len(payload)]) + payload
def test_single_short_text_data(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
payload = "😃😄🙃😉"
data = proto.send_data(payload, fin=True)
payload_bytes = payload.encode("utf8")
assert data == b"\x81" + bytearray([len(payload_bytes)]) + payload_bytes
def test_multiple_short_binary_data(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
payload = b"it's all just ascii, right?"
data = proto.send_data(payload, fin=False)
assert data == b"\x02" + bytearray([len(payload)]) + payload
payload = b"sure no worries"
data = proto.send_data(payload, fin=True)
assert data == b"\x80" + bytearray([len(payload)]) + payload
def test_multiple_short_text_data(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
payload = "😃😄🙃😉"
data = proto.send_data(payload, fin=False)
payload_bytes = payload.encode("utf8")
assert data == b"\x01" + bytearray([len(payload_bytes)]) + payload_bytes
payload = "🙈🙉🙊"
data = proto.send_data(payload, fin=True)
payload_bytes = payload.encode("utf8")
assert data == b"\x80" + bytearray([len(payload_bytes)]) + payload_bytes
def test_mismatched_data_messages1(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
payload = "😃😄🙃😉"
data = proto.send_data(payload, fin=False)
payload_bytes = payload.encode("utf8")
assert data == b"\x01" + bytearray([len(payload_bytes)]) + payload_bytes
payload_bytes = b"seriously, all ascii"
with pytest.raises(TypeError):
proto.send_data(payload_bytes)
def test_mismatched_data_messages2(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
payload = b"it's all just ascii, right?"
data = proto.send_data(payload, fin=False)
assert data == b"\x02" + bytearray([len(payload)]) + payload
payload_str = "✔️☑️✅✔︎☑"
with pytest.raises(TypeError):
proto.send_data(payload_str)
def test_message_length_max_short(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
payload = b"x" * 125
data = proto.send_data(payload, fin=True)
assert data == b"\x82" + bytearray([len(payload)]) + payload
def test_message_length_min_two_byte(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
payload = b"x" * 126
data = proto.send_data(payload, fin=True)
assert data == b"\x82\x7e" + struct.pack("!H", len(payload)) + payload
def test_message_length_max_two_byte(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
payload = b"x" * (2**16 - 1)
data = proto.send_data(payload, fin=True)
assert data == b"\x82\x7e" + struct.pack("!H", len(payload)) + payload
def test_message_length_min_eight_byte(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
payload = b"x" * (2**16)
data = proto.send_data(payload, fin=True)
assert data == b"\x82\x7f" + struct.pack("!Q", len(payload)) + payload
def test_client_side_masking_short_frame(self) -> None:
proto = fp.FrameProtocol(client=True, extensions=[])
payload = b"x" * 125
data = proto.send_data(payload, fin=True)
assert data[0] == 0x82
assert struct.unpack("!B", data[1:2])[0] == len(payload) | 0x80
masking_key = data[2:6]
maskbytes = itertools.cycle(masking_key)
assert data[6:] == bytearray(b ^ next(maskbytes) for b in bytearray(payload))
def test_client_side_masking_two_byte_frame(self) -> None:
proto = fp.FrameProtocol(client=True, extensions=[])
payload = b"x" * 126
data = proto.send_data(payload, fin=True)
assert data[0] == 0x82
assert data[1] == 0xFE
assert struct.unpack("!H", data[2:4])[0] == len(payload)
masking_key = data[4:8]
maskbytes = itertools.cycle(masking_key)
assert data[8:] == bytearray(b ^ next(maskbytes) for b in bytearray(payload))
def test_client_side_masking_eight_byte_frame(self) -> None:
proto = fp.FrameProtocol(client=True, extensions=[])
payload = b"x" * 65536
data = proto.send_data(payload, fin=True)
assert data[0] == 0x82
assert data[1] == 0xFF
assert struct.unpack("!Q", data[2:10])[0] == len(payload)
masking_key = data[10:14]
maskbytes = itertools.cycle(masking_key)
assert data[14:] == bytearray(b ^ next(maskbytes) for b in bytearray(payload))
def test_control_frame_with_overly_long_payload(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
payload = b"x" * 126
with pytest.raises(ValueError):
proto.pong(payload)
def test_data_we_have_no_idea_what_to_do_with(self) -> None:
proto = fp.FrameProtocol(client=False, extensions=[])
payload: Dict[str, str] = dict()
with pytest.raises(ValueError):
# Intentionally passing illegal type.
proto.send_data(payload) # type: ignore
def test_xor_mask_simple() -> None:
masker = fp.XorMaskerSimple(b"1234")
assert masker.process(b"") == b""
assert masker.process(b"some very long data for masking by websocket") == (
b"B]^Q\x11DVFH\x12_[_U\x13PPFR\x14W]A\x14\\S@_X\\T\x14SK\x13CTP@[RYV@"
)
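# --- Illustrative sketch (added, not part of the original test module) ---
# The client-side masking assertions above rely on the WebSocket mask being a
# plain XOR with a repeating 4-byte key, which is its own inverse. The helpers
# below are local to this sketch (they are not wsproto APIs) and only demonstrate
# that round-trip property.
def _xor_mask(key: bytes, data: bytes) -> bytes:
    return bytes(b ^ k for b, k in zip(data, itertools.cycle(key)))
def _demo_mask_roundtrip() -> None:
    key, payload = b"abcd", b"some payload to mask"
    masked = _xor_mask(key, payload)
    assert _xor_mask(key, masked) == payload  # applying the same mask twice restores the data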
| {
"content_hash": "d16e919e0cbc749b804b9aa52f0b4ec4",
"timestamp": "",
"source": "github",
"line_count": 1204,
"max_line_length": 86,
"avg_line_length": 35.848837209302324,
"alnum_prop": 0.5926972800148279,
"repo_name": "python-hyper/wsproto",
"id": "76c40e45cf180623ef70c3a5df6554f6f4df220a",
"size": "43318",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/test_frame_protocol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "191747"
}
],
"symlink_target": ""
} |
import ast
import yaml
import copy
import argparse
from argparse import ArgumentParser, RawDescriptionHelpFormatter
class ArgsParser(ArgumentParser):
def __init__(self):
super(ArgsParser, self).__init__(
formatter_class=RawDescriptionHelpFormatter)
self.add_argument(
"-o", "--opt", nargs='*', help="set configuration options")
def parse_args(self, argv=None):
args = super(ArgsParser, self).parse_args(argv)
assert args.config is not None, \
"Please specify --config=configure_file_path."
args.opt = self._parse_opt(args.opt)
return args
def _parse_opt(self, opts):
config = {}
if not opts:
return config
for s in opts:
s = s.strip()
k, v = s.split('=', 1)
if '.' not in k:
config[k] = yaml.load(v, Loader=yaml.Loader)
else:
keys = k.split('.')
if keys[0] not in config:
config[keys[0]] = {}
cur = config[keys[0]]
for idx, key in enumerate(keys[1:]):
if idx == len(keys) - 2:
cur[key] = yaml.load(v, Loader=yaml.Loader)
else:
cur[key] = {}
cur = cur[key]
return config
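# --- Illustrative sketch (added, not part of the original module) ---
# _parse_opt turns "-o key=value" overrides into dict entries; dotted keys become
# nested dicts and values are parsed with yaml.load, so "True" and "3" get their
# natural types. The helper below exists only to illustrate that behaviour.
def _parse_opt_example():
    parser = ArgsParser()
    opts = parser._parse_opt(["use_gpu=True", "MOT.batch_size=3"])
    # opts == {"use_gpu": True, "MOT": {"batch_size": 3}}
    return opts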
def argsparser():
parser = ArgsParser()
parser.add_argument(
"--config",
type=str,
default='pipeline/config/infer_cfg_pphuman.yml',
help=("Path of configure"))
parser.add_argument(
"--image_file", type=str, default=None, help="Path of image file.")
parser.add_argument(
"--image_dir",
type=str,
default=None,
help="Dir of image file, `image_file` has a higher priority.")
parser.add_argument(
"--video_file",
type=str,
default=None,
help="Path of video file, `video_file` or `camera_id` has a highest priority."
)
parser.add_argument(
"--video_dir",
type=str,
default=None,
help="Dir of video file, `video_file` has a higher priority.")
parser.add_argument(
"--rtsp",
type=str,
nargs='+',
default=None,
help="list of rtsp inputs, for one or multiple rtsp input.")
parser.add_argument(
"--camera_id",
type=int,
default=-1,
help="device id of camera to predict.")
parser.add_argument(
"--output_dir",
type=str,
default="output",
help="Directory of output visualization files.")
parser.add_argument(
"--pushurl",
type=str,
default="",
help="url of output visualization stream.")
parser.add_argument(
"--run_mode",
type=str,
default='paddle',
help="mode of running(paddle/trt_fp32/trt_fp16/trt_int8)")
parser.add_argument(
"--device",
type=str,
default='cpu',
help="Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU."
)
parser.add_argument(
"--enable_mkldnn",
type=ast.literal_eval,
default=False,
help="Whether use mkldnn with CPU.")
parser.add_argument(
"--cpu_threads", type=int, default=1, help="Num of threads with CPU.")
parser.add_argument(
"--trt_min_shape", type=int, default=1, help="min_shape for TensorRT.")
parser.add_argument(
"--trt_max_shape",
type=int,
default=1280,
help="max_shape for TensorRT.")
parser.add_argument(
"--trt_opt_shape",
type=int,
default=640,
help="opt_shape for TensorRT.")
parser.add_argument(
"--trt_calib_mode",
type=bool,
default=False,
help="If the model is produced by TRT offline quantitative "
"calibration, trt_calib_mode need to set True.")
parser.add_argument(
"--do_entrance_counting",
type=bool,
default=False,
help="Whether counting the numbers of identifiers entering "
"or getting out from the entrance. Note that only support single-class MOT."
)
parser.add_argument(
"--do_break_in_counting",
type=bool,
default=False,
help="Whether counting the numbers of identifiers break in "
"the area. Note that only support single-class MOT and "
"the video should be taken by a static camera.")
parser.add_argument(
"--illegal_parking_time",
type=int,
default=-1,
help="illegal parking time which units are seconds, default is -1 which means not recognition illegal parking"
)
parser.add_argument(
"--region_type",
type=str,
default='horizontal',
help="Area type for entrance counting or break in counting, 'horizontal' and "
"'vertical' used when do entrance counting. 'custom' used when do break in counting. "
"Note that only support single-class MOT, and the video should be taken by a static camera."
)
parser.add_argument(
'--region_polygon',
nargs='+',
type=int,
default=[],
help="Clockwise point coords (x0,y0,x1,y1...) of polygon of area when "
"do_break_in_counting. Note that only support single-class MOT and "
"the video should be taken by a static camera.")
parser.add_argument(
"--secs_interval",
type=int,
default=2,
help="The seconds interval to count after tracking")
parser.add_argument(
"--draw_center_traj",
type=bool,
default=False,
help="Whether drawing the trajectory of center")
parser.add_argument('--avtivity_list', nargs='+', type=str)
return parser
def merge_cfg(args):
# load config
with open(args.config) as f:
pred_config = yaml.safe_load(f)
def merge(cfg, arg):
# update cfg from arg directly
merge_cfg = copy.deepcopy(cfg)
for k, v in cfg.items():
if k in arg:
merge_cfg[k] = arg[k]
else:
if isinstance(v, dict):
merge_cfg[k] = merge(v, arg)
return merge_cfg
def merge_opt(cfg, arg):
merge_cfg = copy.deepcopy(cfg)
# merge opt
if 'opt' in arg.keys() and arg['opt']:
for name, value in arg['opt'].items(
): # example: {'MOT': {'batch_size': 3}}
if name not in merge_cfg.keys():
print("No", name, "in config file!")
continue
for sub_k, sub_v in value.items():
if sub_k not in merge_cfg[name].keys():
print("No", sub_k, "in config file of", name, "!")
continue
merge_cfg[name][sub_k] = sub_v
return merge_cfg
args_dict = vars(args)
pred_config = merge(pred_config, args_dict)
pred_config = merge_opt(pred_config, args_dict)
return pred_config
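# --- Illustrative sketch (added, not part of the original module) ---
# A hedged example of how these helpers are typically wired together, inferred
# from the signatures in this file rather than taken from the pipeline code; the
# argv values are placeholders and --config must point at a real YAML file.
def _load_merged_config_example(argv=None):
    # e.g. argv = ["--config", "pipeline/config/infer_cfg_pphuman.yml", "-o", "MOT.batch_size=3"]
    args = argsparser().parse_args(argv)
    cfg = merge_cfg(args)  # CLI flags and "-o" overrides take precedence over the YAML contents
    print_arguments(cfg)
    return cfg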
def print_arguments(cfg):
print('----------- Running Arguments -----------')
buffer = yaml.dump(cfg)
print(buffer)
print('------------------------------------------')
| {
"content_hash": "4d251915d345c29d034d1d80053c0ad0",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 118,
"avg_line_length": 32.504464285714285,
"alnum_prop": 0.5449800851531383,
"repo_name": "PaddlePaddle/models",
"id": "4af0737186061c611aeb353d5a4ed0c73e16ba14",
"size": "7281",
"binary": false,
"copies": "1",
"ref": "refs/heads/release/2.3",
"path": "modelcenter/PP-HumanV2/APP/pipeline/cfg_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "46835"
},
{
"name": "CMake",
"bytes": "8248"
},
{
"name": "Jupyter Notebook",
"bytes": "1720166"
},
{
"name": "Makefile",
"bytes": "2920"
},
{
"name": "Python",
"bytes": "3099697"
},
{
"name": "Shell",
"bytes": "70177"
}
],
"symlink_target": ""
} |
from sqlalchemy import Integer, String, ForeignKey, func, desc, and_, or_
from sqlalchemy.orm import interfaces, relationship, mapper, \
clear_mappers, create_session, joinedload, joinedload_all, \
subqueryload, subqueryload_all, polymorphic_union, aliased,\
class_mapper, with_polymorphic
from sqlalchemy import exc as sa_exc
from sqlalchemy.engine import default
from sqlalchemy.testing import AssertsCompiledSQL, fixtures
from sqlalchemy import testing
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.testing import assert_raises, eq_
from _poly_fixtures import Company, Person, Engineer, Manager, Boss, \
Machine, Paperwork, _PolymorphicFixtureBase, _Polymorphic,\
_PolymorphicPolymorphic, _PolymorphicUnions, _PolymorphicJoins,\
_PolymorphicAliasedJoins
class _WithPolymorphicBase(_PolymorphicFixtureBase):
def test_join_base_to_sub(self):
sess = create_session()
pa = with_polymorphic(Person, [Engineer])
def go():
eq_(sess.query(pa)
.filter(pa.Engineer.primary_language == 'java').all(),
self._emps_wo_relationships_fixture()[0:1])
self.assert_sql_count(testing.db, go, 1)
def test_col_expression_base_plus_two_subs(self):
sess = create_session()
pa = with_polymorphic(Person, [Engineer, Manager])
eq_(
sess.query(pa.name, pa.Engineer.primary_language, pa.Manager.manager_name).\
filter(or_(pa.Engineer.primary_language=='java',
pa.Manager.manager_name=='dogbert')).\
order_by(pa.Engineer.type).all(),
[
(u'dilbert', u'java', None),
(u'dogbert', None, u'dogbert'),
]
)
def test_join_to_join_entities(self):
sess = create_session()
pa = with_polymorphic(Person, [Engineer])
pa_alias = with_polymorphic(Person, [Engineer], aliased=True)
eq_(
[(p1.name, type(p1), p2.name, type(p2)) for (p1, p2) in sess.query(
pa, pa_alias
).join(pa_alias,
or_(
pa.Engineer.primary_language==\
pa_alias.Engineer.primary_language,
and_(
pa.Engineer.primary_language == None,
pa_alias.Engineer.primary_language == None,
pa.person_id > pa_alias.person_id
)
)
).order_by(pa.name, pa_alias.name)],
[
(u'dilbert', Engineer, u'dilbert', Engineer),
(u'dogbert', Manager, u'pointy haired boss', Boss),
(u'vlad', Engineer, u'vlad', Engineer),
(u'wally', Engineer, u'wally', Engineer)
]
)
def test_join_to_join_columns(self):
sess = create_session()
pa = with_polymorphic(Person, [Engineer])
pa_alias = with_polymorphic(Person, [Engineer], aliased=True)
eq_(
[row for row in sess.query(
pa.name, pa.Engineer.primary_language,
pa_alias.name, pa_alias.Engineer.primary_language
).join(pa_alias,
or_(
pa.Engineer.primary_language==\
pa_alias.Engineer.primary_language,
and_(
pa.Engineer.primary_language == None,
pa_alias.Engineer.primary_language == None,
pa.person_id > pa_alias.person_id
)
)
).order_by(pa.name, pa_alias.name)],
[
(u'dilbert', u'java', u'dilbert', u'java'),
(u'dogbert', None, u'pointy haired boss', None),
(u'vlad', u'cobol', u'vlad', u'cobol'),
(u'wally', u'c++', u'wally', u'c++')
]
)
class PolymorphicTest(_WithPolymorphicBase, _Polymorphic):
pass
class PolymorphicPolymorphicTest(_WithPolymorphicBase, _PolymorphicPolymorphic):
pass
class PolymorphicUnionsTest(_WithPolymorphicBase, _PolymorphicUnions):
pass
class PolymorphicAliasedJoinsTest(_WithPolymorphicBase, _PolymorphicAliasedJoins):
pass
class PolymorphicJoinsTest(_WithPolymorphicBase, _PolymorphicJoins):
pass
| {
"content_hash": "e2244ab5977f2bd9049983d50b4a3651",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 88,
"avg_line_length": 38.89473684210526,
"alnum_prop": 0.5579612088407758,
"repo_name": "rclmenezes/sqlalchemy",
"id": "93cccee7a3df68fd70962bce4dd4a94d603c782f",
"size": "4434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/orm/inheritance/test_with_poly.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "38103"
},
{
"name": "CSS",
"bytes": "7760"
},
{
"name": "JavaScript",
"bytes": "244"
},
{
"name": "Makefile",
"bytes": "7072"
},
{
"name": "Python",
"bytes": "7243712"
},
{
"name": "TeX",
"bytes": "13927"
}
],
"symlink_target": ""
} |
from CIM15.Element import Element
class OperatingShare(Element):
"""Specifies the contract relationship between a PowerSystemResource and a contract participant.Specifies the contract relationship between a PowerSystemResource and a contract participant.
"""
def __init__(self, percentage=0.0, PowerSystemResource=None, OperatingParticipant=None, *args, **kw_args):
"""Initialises a new 'OperatingShare' instance.
@param percentage: Percentage ownership for this device. The percentage indicates the percentage ownership of the PSROwner for the PowerSystemResource. The total percentage ownership for a PowerSystemResource should add to 100%.
        @param PowerSystemResource: The PowerSystemResource to which the attributes apply. The percentage ownership of all owners of a PowerSystemResource should add to 100%.
        @param OperatingParticipant: The linkage to an owner and its linkage attributes, such as percentage ownership. The ownership percentage should add to 100% for all owners of a PowerSystemResource, but a PSROwner may own any percentage of any number of PowerSystemResource objects.
"""
#: Percentage ownership for this device. The percentage indicates the percentage ownership of the PSROwner for the PowerSystemResource. The total percentage ownership for a PowerSystemResource should add to 100%.
self.percentage = percentage
self._PowerSystemResource = None
self.PowerSystemResource = PowerSystemResource
self._OperatingParticipant = None
self.OperatingParticipant = OperatingParticipant
super(OperatingShare, self).__init__(*args, **kw_args)
_attrs = ["percentage"]
_attr_types = {"percentage": float}
_defaults = {"percentage": 0.0}
_enums = {}
_refs = ["PowerSystemResource", "OperatingParticipant"]
_many_refs = []
def getPowerSystemResource(self):
"""The PowerSystemResource to which the attribues apply. The percentage ownership of all owners of a PowerSystemResource should add to 100%.
"""
return self._PowerSystemResource
def setPowerSystemResource(self, value):
if self._PowerSystemResource is not None:
filtered = [x for x in self.PowerSystemResource.OperatingShare if x != self]
self._PowerSystemResource._OperatingShare = filtered
self._PowerSystemResource = value
if self._PowerSystemResource is not None:
if self not in self._PowerSystemResource._OperatingShare:
self._PowerSystemResource._OperatingShare.append(self)
PowerSystemResource = property(getPowerSystemResource, setPowerSystemResource)
def getOperatingParticipant(self):
"""The linkage to a owners and its linkage attributes like percentage ownership. The ownership percentage should add to 100% for all owners of a PowerSystemResource, but a PSROwner may own any percentage of any number of PowerSystemResource objects.
"""
return self._OperatingParticipant
def setOperatingParticipant(self, value):
if self._OperatingParticipant is not None:
filtered = [x for x in self.OperatingParticipant.OperatingShare if x != self]
self._OperatingParticipant._OperatingShare = filtered
self._OperatingParticipant = value
if self._OperatingParticipant is not None:
if self not in self._OperatingParticipant._OperatingShare:
self._OperatingParticipant._OperatingShare.append(self)
OperatingParticipant = property(getOperatingParticipant, setOperatingParticipant)
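# --- Illustrative note (added, not part of the generated module) ---
# Assigning either end of the association keeps both sides in sync: setting
# share.PowerSystemResource appends the share to that resource's OperatingShare
# list, and re-assigning (or setting None) removes it from the previous owner
# first. The import path of PowerSystemResource below is an assumption made for
# illustration only.
#
#     from CIM15.IEC61970.Core.PowerSystemResource import PowerSystemResource
#     psr = PowerSystemResource()
#     share = OperatingShare(percentage=40.0, PowerSystemResource=psr)
#     assert share in psr.OperatingShare
#     share.PowerSystemResource = None   # detaches and clears the back-reference
#     assert share not in psr.OperatingShare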
| {
"content_hash": "0eb1660e6ae14e0fc807409267f5a9c4",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 286,
"avg_line_length": 55.723076923076924,
"alnum_prop": 0.729431253451132,
"repo_name": "rwl/PyCIM",
"id": "2bdb4dd7b036dfcaad24f0b2c86e699895ec079c",
"size": "4722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM15/IEC61970/Core/OperatingShare.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
} |
from ...external.qt import QtGui, QtCore
__all__ = ["CompletionTextEdit"]
class CompletionTextEdit(QtGui.QTextEdit):
def __init__(self, parent=None):
super(CompletionTextEdit, self).__init__(parent)
self.setMinimumWidth(400)
self.completer = None
self.word_list = None
self.moveCursor(QtGui.QTextCursor.End)
def set_word_list(self, word_list):
self.word_list = word_list
self.set_completer(QtGui.QCompleter(word_list))
def set_completer(self, completer):
if self.completer:
self.disconnect(self.completer, 0, self, 0)
if not completer:
return
self.completer = completer
self.completer.setWidget(self)
self.completer.setCompletionMode(QtGui.QCompleter.PopupCompletion)
self.completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.completer.activated.connect(self.insert_completion)
def insert_completion(self, completion):
tc = self.textCursor()
tc.select(QtGui.QTextCursor.WordUnderCursor)
tc.deleteChar()
completion = completion + " "
self.setTextCursor(tc)
self.insertPlainText(completion)
def text_under_cursor(self):
tc = self.textCursor()
tc.select(QtGui.QTextCursor.WordUnderCursor)
return tc.selectedText()
# The following methods override methods in QTextEdit and should not be
# renamed.
def focusInEvent(self, event):
if self.completer:
self.completer.setWidget(self)
QtGui.QTextEdit.focusInEvent(self, event)
def keyPressEvent(self, event):
if self.completer and self.completer.popup().isVisible():
if event.key() in (
QtCore.Qt.Key_Enter,
QtCore.Qt.Key_Return,
QtCore.Qt.Key_Escape,
QtCore.Qt.Key_Tab,
QtCore.Qt.Key_Backtab):
event.ignore()
return
# Check if TAB has been pressed
is_shortcut = event.key() == QtCore.Qt.Key_Tab
if not self.completer or not is_shortcut:
QtGui.QTextEdit.keyPressEvent(self, event)
return
eow = "~!@#$%^&*()_+{}|:\"<>?,./;'[]\\-="
completion_prefix = self.text_under_cursor()
if not is_shortcut and (len(event.text()) == 0 or event.text()[-1:] in eow):
self.completer.popup().hide()
return
if (completion_prefix != self.completer.completionPrefix()):
self.completer.setCompletionPrefix(completion_prefix)
popup = self.completer.popup()
popup.setCurrentIndex(self.completer.completionModel().index(0, 0))
cr = self.cursorRect()
cr.setWidth(self.completer.popup().sizeHintForColumn(0) +
self.completer.popup().verticalScrollBar().sizeHint().width())
self.completer.complete(cr)
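# Minimal usage sketch (illustrative only, assuming a Qt event loop is available through
# the QtGui wrapper imported above):
#   app = QtGui.QApplication([])
#   editor = CompletionTextEdit()
#   editor.set_word_list(["primary", "primary_flux", "secondary"])
#   editor.show()   # pressing Tab now pops up the matching completions
#   app.exec_()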
| {
"content_hash": "dac27e825eb65f0c946025e086de264d",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 84,
"avg_line_length": 30.52577319587629,
"alnum_prop": 0.6045254981425194,
"repo_name": "JudoWill/glue",
"id": "ebb297a33f4eb36a3413d99393e15b43776b129c",
"size": "3150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glue/utils/qt/autocomplete_widget.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "PowerShell",
"bytes": "2352"
},
{
"name": "Python",
"bytes": "1387891"
},
{
"name": "Shell",
"bytes": "1968"
}
],
"symlink_target": ""
} |
def extractSilenttranslationsCom(item):
'''
Parser for 'silenttranslations.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Legend', 'Legend', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | {
"content_hash": "6dc163ac43981dc0477ca352497f0723",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 29.476190476190474,
"alnum_prop": 0.6058158319870759,
"repo_name": "fake-name/ReadableWebProxy",
"id": "189ec5ebf3581a1691ee708a1ef1e8aa800c4bba",
"size": "619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractSilenttranslationsCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
import yaml
import sys
import argparse
outfile = None
def print_outfile(string):
print(string, file=outfile)
def print_log_to_stderr(string):
print(string, file=sys.stderr)
def get_dep_key(group_id, artifact_id, version):
return (group_id, artifact_id, version)
def get_version_string(version):
if type(version) == str:
return version
else:
return str(version)
def module_to_upper(module):
extensions_offset = module.lower().find("extensions")
if extensions_offset < 0:
return module.upper()
elif extensions_offset == 0:
return module[0:len("extensions")].upper() + module[len("extensions"):len(module)]
else:
raise Exception("Expected extensions at 0, but {}".format(extensions_offset))
def is_non_empty(dic, key):
if key in dic and dic[key] is not None:
if type(dic[key]) == str:
return len(dic[key]) > 0
else:
return True
else:
return False
def print_license_phrase(license_phrase):
remaining = license_phrase
while len(remaining) > 0:
if len(remaining) > 120:
chars_of_200 = remaining[0:120]
phrase_len = chars_of_200.rfind(" ")
if phrase_len < 0:
raise Exception("Can't find whitespace in {}".format(chars_of_200))
print_outfile(" {}".format(remaining[0:phrase_len]))
remaining = remaining[phrase_len:]
else:
print_outfile(" {}".format(remaining))
remaining = ""
def print_license(license):
license_phrase = "This product"
if license['license_category'] == "source":
license_phrase += " contains"
elif license['license_category'] == "binary":
license_phrase += " bundles"
license_phrase += " {}".format(license['name'])
if is_non_empty(license, 'version'):
license_phrase += " version {}".format(license['version'])
if is_non_empty(license, 'copyright'):
license_phrase += ", copyright {}".format(license['copyright'])
if is_non_empty(license, 'additional_copyright_statement'):
license_phrase += ", {}".format(license['additional_copyright_statement'])
if license['license_name'] != 'Apache License version 2.0':
license_phrase += " which is available under {}".format(license['license_name'])
if is_non_empty(license, 'additional_license_statement'):
license_phrase += ", {}".format(license['additional_license_statement'])
if is_non_empty(license, 'license_file_path'):
license_file_list = []
if type(license['license_file_path']) == list:
license_file_list.extend(license['license_file_path'])
else:
license_file_list.append(license['license_file_path'])
if len(license_file_list) == 1:
license_phrase += ". For details, see {}".format(license_file_list[0])
else:
license_phrase += ". For details, "
for each_file in license_file_list:
if each_file == license_file_list[-1]:
license_phrase += ", and {}".format(each_file)
elif each_file == license_file_list[0]:
license_phrase += "see {}".format(each_file)
else:
license_phrase += ", {}".format(each_file)
license_phrase += "."
print_license_phrase(license_phrase)
if 'source_paths' in license:
for source_path in license['source_paths']:
if type(source_path) is dict:
for class_name, path in source_path.items():
print_outfile(" {}:".format(class_name))
print_outfile(" * {}".format(path))
else:
print_outfile(" * {}".format(source_path))
if 'libraries' in license:
for library in license['libraries']:
if type(library) is not dict:
raise Exception("Expected dict but got {}[{}]".format(type(library), library))
if len(library) > 1:
raise Exception("Expected 1 groupId and artifactId, but got [{}]".format(library))
for group_id, artifact_id in library.items():
print_outfile(" * {}:{}".format(group_id, artifact_id))
def print_license_name_underbar(license_name):
underbar = ""
for _ in range(len(license_name)):
underbar += "="
print_outfile("{}\n".format(underbar))
def generate_license(apache_license_v2, license_yaml):
print_log_to_stderr("=== Generating the contents of LICENSE.BINARY file ===\n")
# Print Apache license first.
print_outfile(apache_license_v2)
with open(license_yaml, encoding='utf-8') as registry_file:
licenses_list = list(yaml.load_all(registry_file, Loader=yaml.Loader))
# Group licenses by license_name, license_category, and then module.
licenses_map = {}
for license in licenses_list:
if license['license_name'] not in licenses_map:
licenses_map[license['license_name']] = {}
licenses_of_name = licenses_map[license['license_name']]
if license['license_category'] not in licenses_of_name:
licenses_of_name[license['license_category']] = {}
licenses_of_category = licenses_of_name[license['license_category']]
if license['module'] not in licenses_of_category:
licenses_of_category[license['module']] = []
licenses_of_module = licenses_of_category[license['module']]
licenses_of_module.append(license)
for license_name, licenses_of_name in sorted(licenses_map.items()):
print_outfile(license_name)
print_license_name_underbar(license_name)
for license_category, licenses_of_category in licenses_of_name.items():
for module, licenses in licenses_of_category.items():
print_outfile("{}/{}".format(license_category.upper(), module_to_upper(module)))
for license in licenses:
print_license(license)
print_outfile("")
print_outfile("")
if __name__ == "__main__":
try:
parser = argparse.ArgumentParser(description='Check and generate license file.')
parser.add_argument('apache_license', metavar='<path to apache license file>', type=str)
parser.add_argument('license_yaml', metavar='<path to license.yaml>', type=str)
parser.add_argument('out_path', metavar='<path to output file>', type=str)
args = parser.parse_args()
with open(args.apache_license, encoding="ascii") as apache_license_file:
apache_license_v2 = apache_license_file.read()
license_yaml = args.license_yaml
with open(args.out_path, "w", encoding="utf-8") as outfile:
generate_license(apache_license_v2, license_yaml)
except KeyboardInterrupt:
print('Interrupted, closing.')
| {
"content_hash": "b0fcc542f3aa3fc835049e5dc83e62fa",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 98,
"avg_line_length": 41.461077844311376,
"alnum_prop": 0.6031195840554593,
"repo_name": "monetate/druid",
"id": "ec564e13b4696ad3a283826893fae2657061c6d5",
"size": "7730",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "distribution/bin/generate-binary-license.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "5547"
},
{
"name": "CSS",
"bytes": "3690"
},
{
"name": "Dockerfile",
"bytes": "13080"
},
{
"name": "FreeMarker",
"bytes": "10369"
},
{
"name": "HTML",
"bytes": "2540"
},
{
"name": "Java",
"bytes": "44823997"
},
{
"name": "JavaScript",
"bytes": "65990"
},
{
"name": "Makefile",
"bytes": "659"
},
{
"name": "PostScript",
"bytes": "5"
},
{
"name": "Python",
"bytes": "76196"
},
{
"name": "R",
"bytes": "17002"
},
{
"name": "Roff",
"bytes": "3617"
},
{
"name": "SCSS",
"bytes": "183582"
},
{
"name": "Shell",
"bytes": "137617"
},
{
"name": "Smarty",
"bytes": "3517"
},
{
"name": "TeX",
"bytes": "399468"
},
{
"name": "Thrift",
"bytes": "1003"
},
{
"name": "TypeScript",
"bytes": "1985704"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "stagecraft.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "72bf965caf2bb5736ba17da540e09d97",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 74,
"avg_line_length": 25.666666666666668,
"alnum_prop": 0.7142857142857143,
"repo_name": "alphagov/stagecraft",
"id": "e9daeeb53d51490fc5f0b58db47135d7efd30897",
"size": "253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "134"
},
{
"name": "HTML",
"bytes": "855"
},
{
"name": "JavaScript",
"bytes": "3223"
},
{
"name": "Python",
"bytes": "622720"
},
{
"name": "Shell",
"bytes": "14467"
}
],
"symlink_target": ""
} |
import configparser
import glob
import os
import sys
import syslog
import time
import traceback
from libdaemon import Daemon
# constants
DEBUG = False
IS_JOURNALD = os.path.isfile('/bin/journalctl')
MYID = "".join(list(filter(str.isdigit, os.path.realpath(__file__).split('/')[-1])))
MYAPP = os.path.realpath(__file__).split('/')[-2]
NODE = os.uname()[1]
# SENSOR CALIBRATION PROCEDURE
# Given the existing gain and offset.
# 1 Determine a linear least-squares fit between the output of this program and
# data obtained from a reference sensor
# 2 The least-squares fit will yield the gain(calc) and offset(calc)
# 3 Determine gain(new) and offset(new) as shown here:
# gain(new) = gain(old) * gain(calc)
# offset(new) = offset(old) * gain(calc) + offset(calc)
# 4 Replace the existing values for gain(old) and offset(old) with the values
# found for gain(new) and offset(new)
# gain(old)
DS18B20_gain = 1.0
# offset(old)
DS18B20_offset = 0.0
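# Illustrative helper for the calibration procedure above (not part of the original daemon
# and not called anywhere): given paired readings from this program (xs) and a reference
# sensor (ys), an ordinary least-squares fit yields gain(calc) and offset(calc); the new
# constants then follow as gain(old)*gain(calc) and offset(old)*gain(calc) + offset(calc).
def calibration_from_reference(xs, ys, gain_old=DS18B20_gain, offset_old=DS18B20_offset):
    n = len(xs)
    mean_x = sum(xs) / n
    mean_y = sum(ys) / n
    sxy = sum((x - mean_x) * (y - mean_y) for x, y in zip(xs, ys))
    sxx = sum((x - mean_x) ** 2 for x in xs)
    gain_calc = sxy / sxx
    offset_calc = mean_y - gain_calc * mean_x
    return gain_old * gain_calc, offset_old * gain_calc + offset_calc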
OWdir = '/sys/bus/w1/devices/'
OWdev = glob.glob(OWdir + '28*')[0]
OWfile = OWdev + '/w1_slave'
class MyDaemon(Daemon):
"""Definition of daemon."""
@staticmethod
def run():
iniconf = configparser.ConfigParser()
inisection = MYID
home = os.path.expanduser('~')
s = iniconf.read(home + '/' + MYAPP + '/config.ini')
syslog_trace("Config file : {0}".format(s), False, DEBUG)
syslog_trace("Options : {0}".format(iniconf.items(inisection)), False, DEBUG)
reporttime = iniconf.getint(inisection, "reporttime")
cycles = iniconf.getint(inisection, "cycles")
samplespercycle = iniconf.getint(inisection, "samplespercycle")
flock = iniconf.get(inisection, "lockfile")
fdata = iniconf.get(inisection, "resultfile")
samples = samplespercycle * cycles # total number of samples averaged
sampletime = reporttime/samplespercycle # time [s] between samples
# cycleTime = samples * sampletime # time [s] per cycle
data = [] # array for holding sampledata
while True:
try:
starttime = time.time()
result = do_work()
syslog_trace("Result : {0}".format(result), False, DEBUG)
if (result is not None):
data.append(float(result))
if (len(data) > samples):
data.pop(0)
syslog_trace("Data : {0}".format(data), False, DEBUG)
# report sample average
if (starttime % reporttime < sampletime):
averages = format(sum(data[:]) / len(data), '.2f')
syslog_trace("Averages : {0}".format(averages), False, DEBUG)
do_report(averages, flock, fdata)
# endif result not None
waittime = sampletime - (time.time() - starttime) - (starttime % sampletime)
if (waittime > 0):
syslog_trace("Waiting : {0}s".format(waittime), False, DEBUG)
syslog_trace("................................", False, DEBUG)
time.sleep(waittime)
except Exception:
syslog_trace("Unexpected error in run()", syslog.LOG_CRIT, DEBUG)
syslog_trace(traceback.format_exc(), syslog.LOG_CRIT, DEBUG)
raise
def read_temp_raw():
lines = "NOPE"
if not(os.path.isfile(OWfile)):
syslog_trace("1-wire sensor not available", syslog.LOG_ERR, DEBUG)
else:
with open(OWfile, 'r') as f:
lines = f.readlines()
return lines
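# For reference, a typical w1_slave readout has two lines (byte contents illustrative):
#   3a 01 4b 46 7f ff 06 10 5c : crc=5c YES
#   3a 01 4b 46 7f ff 06 10 5c t=19625
# line 0 ends in YES when the CRC check passed, and line 1 carries the temperature in
# millidegrees Celsius after 't=', which do_work() below divides by 1000.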
def do_work():
T = T0 = None
# read the temperature sensor
lines = read_temp_raw()
if lines[0].strip()[-3:] == 'YES':
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
T0 = float(temp_string) / 1000.0
# correct the temperature reading
if T0 is not None:
T = T0 * DS18B20_gain + DS18B20_offset
syslog_trace(" T0 = {0:0.1f}*C T = {1:0.1f}degC".format(T0, T), False, DEBUG)
# validate the temperature
if (T is not None) and (T > 45.0):
# can't believe my sensors. Probably a glitch. Log this and return with no result
syslog_trace("Tambient (HIGH): {0}".format(T), syslog.LOG_WARNING, DEBUG)
T = None
return T
def do_report(result, flock, fdata):
# Get the time and date in human-readable form and UN*X-epoch...
outDate = time.strftime('%Y-%m-%dT%H:%M:%S')
outEpoch = int(time.strftime('%s'))
# round to current minute to ease database JOINs
outEpoch = outEpoch - (outEpoch % 60)
lock(flock)
with open(fdata, 'a') as f:
f.write('{0}, {1}, {2}\n'.format(outDate, outEpoch, result))
unlock(flock)
def lock(fname):
open(fname, 'a').close()
def unlock(fname):
if os.path.isfile(fname):
os.remove(fname)
def syslog_trace(trace, logerr, out2console):
# Log a python stack trace to syslog
log_lines = trace.split('\n')
for line in log_lines:
if line and logerr:
syslog.syslog(logerr, line)
if line and out2console:
print(line)
if __name__ == "__main__":
daemon = MyDaemon('/tmp/' + MYAPP + '/' + MYID + '.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
elif 'foreground' == sys.argv[1]:
# assist with debugging.
print("Debug-mode started. Use <Ctrl>+C to stop.")
DEBUG = True
syslog_trace("Daemon logging is ON", syslog.LOG_DEBUG, DEBUG)
daemon.run()
else:
print("Unknown command")
sys.exit(2)
sys.exit(0)
else:
print("usage: {0!s} start|stop|restart|foreground".format(sys.argv[0]))
sys.exit(2)
| {
"content_hash": "73cfb7251afe0c30d10aa290d75a5f5f",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 91,
"avg_line_length": 33.20348837209303,
"alnum_prop": 0.6023463491507617,
"repo_name": "Mausy5043/domod",
"id": "76c651dfe3b3a36c44ddabbd06efc85bc2c22d85",
"size": "5982",
"binary": false,
"copies": "1",
"ref": "refs/heads/v3",
"path": "again21d.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "12440"
},
{
"name": "Python",
"bytes": "51591"
},
{
"name": "Shell",
"bytes": "7293"
}
],
"symlink_target": ""
} |
import os
class Gperf_P2:
conf_lst = {}
e = False
root_dir = ""
def init(self, c_lst, ex, root_dir):
self.conf_lst = c_lst
self.e = ex
self.root_dir = root_dir
self.config = {
"name": "gperf", # Name of the package
"version": "3.0.4", # Version of the package
"size": 5.4, # Size of the installed package (MB)
"archive": "gperf-3.0.4.tar.gz", # Archive name
"SBU": 0.1, # SBU (Compilation time)
"tmp_install": False, # Is this package part of the temporary install
"next": "expat", # Next package to install
"before": False,
"after": False,
"urls": [ # Url to download the package. The first one must be morphux servers
"https://install.morphux.org/packages/gperf-3.0.4.tar.gz"
]
}
return self.config
def configure(self):
return self.e(["./configure",
"--prefix=/usr",
"--docdir=/usr/share/doc/gperf-3.0.4"
])
def make(self):
return self.e(["make", "-j", self.conf_lst["cpus"]])
def install(self):
return self.e(["make", "install"])
| {
"content_hash": "655de162f902060546ec0ad6319278bd",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 90,
"avg_line_length": 32.15384615384615,
"alnum_prop": 0.49681020733652315,
"repo_name": "Morphux/installer",
"id": "365304aa62914d63382c6cc25465cd2eb1f2912b",
"size": "2556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pkgs/gperf_p2/gperf_p2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "6786"
},
{
"name": "Python",
"bytes": "706930"
},
{
"name": "Shell",
"bytes": "66"
},
{
"name": "VimL",
"bytes": "149"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="name",
parent_name="scatter.marker.colorbar.tickformatstop",
**kwargs
):
super(NameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "style"),
**kwargs
)
| {
"content_hash": "680ce67e339e4e3228197ab58e8a5a07",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 66,
"avg_line_length": 29.823529411764707,
"alnum_prop": 0.5779092702169625,
"repo_name": "plotly/python-api",
"id": "46af8d2d5f70b83b6f9dc23404e615af3cb5939c",
"size": "507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatter/marker/colorbar/tickformatstop/_name.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
"""
IBM Cloud provider class
"""
from ipaddress import ip_network
from kvirt import common
from kvirt.common import pprint, error
from kvirt.defaults import METADATA_FIELDS
from ibm_vpc import VpcV1, vpc_v1
import ibm_boto3
from ibm_botocore.client import Config
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_cloud_sdk_core.api_exception import ApiException
from ibm_platform_services import GlobalTaggingV1, ResourceControllerV2, IamPolicyManagementV1, IamIdentityV1
from ibm_platform_services.iam_policy_management_v1 import PolicySubject, SubjectAttribute, PolicyResource, PolicyRole
from ibm_platform_services.iam_policy_management_v1 import ResourceAttribute
from ibm_cloud_networking_services import DnsRecordsV1, ZonesV1
import os
from shutil import which
from time import sleep
from requests import get, post
import webbrowser
def get_zone_href(region, zone):
return "{}/regions/{}/zones/{}".format(
"https://%s.iaas.cloud.ibm.com/v1" % region,
region,
zone
)
def get_s3_endpoint(region):
return 'https://s3.{}.cloud-object-storage.appdomain.cloud'.format(region)
def get_service_instance_id(iam_api_key, name):
if 'crn' in name:
return name
service_id = None
headers = {'content-type': 'application/x-www-form-urlencoded', 'accept': 'application/json'}
data = 'grant_type=urn%%3Aibm%%3Aparams%%3Aoauth%%3Agrant-type%%3Aapikey&apikey=%s' % iam_api_key
req = post("https://iam.cloud.ibm.com/identity/token", data=data, headers=headers)
token = req.json()['access_token']
req = get("https://resource-controller.cloud.ibm.com/v2/resource_instances", headers=headers)
headers = {'Authorization': 'Bearer %s' % token}
req = get("https://resource-controller.cloud.ibm.com/v2/resource_instances", headers=headers)
for entry in req.json()['resources']:
if entry['name'] == name:
service_id = entry['id']
break
return service_id
class Kibm(object):
"""
"""
def __init__(self, iam_api_key, region, zone, vpc, debug=False, cos_api_key=None, cos_resource_instance_id=None,
cis_resource_instance_id=None):
self.debug = debug
self.authenticator = IAMAuthenticator(iam_api_key)
self.iam_api_key = iam_api_key
self.conn = VpcV1(authenticator=self.authenticator)
self.conn.set_service_url("https://%s.iaas.cloud.ibm.com/v1" % region)
if cos_api_key is not None and cos_resource_instance_id is not None:
cos_resource_instance_id = get_service_instance_id(iam_api_key, cos_resource_instance_id)
self.s3 = ibm_boto3.client(
's3',
ibm_api_key_id=cos_api_key,
ibm_service_instance_id=cos_resource_instance_id,
ibm_auth_endpoint="https://iam.bluemix.net/oidc/token",
config=Config(signature_version="oauth"),
endpoint_url=get_s3_endpoint(region)
)
self.cos_resource_instance_id = cos_resource_instance_id
self.global_tagging_service = GlobalTaggingV1(authenticator=self.authenticator)
self.global_tagging_service.set_service_url('https://tags.global-search-tagging.cloud.ibm.com')
if cis_resource_instance_id is not None:
cis_resource_instance_id = get_service_instance_id(iam_api_key, cis_resource_instance_id)
self.dns = ZonesV1(authenticator=self.authenticator, crn=cis_resource_instance_id)
self.dns.set_service_url('https://api.cis.cloud.ibm.com')
self.cis_resource_instance_id = cis_resource_instance_id
self.resources = ResourceControllerV2(authenticator=self.authenticator)
self.resources.set_service_url('https://resource-controller.cloud.ibm.com')
self.iam_api_key = iam_api_key
self.region = region
self.zone = zone if region in zone else "%s-2" % region
self.vpc = vpc
def close(self):
return
def exists(self, name):
try:
return self._get_vm(name) is not None
except ApiException as exc:
error("Unable to retrieve VM. %s" % exc)
return False
def net_exists(self, name):
try:
return self._get_subnet(name) is not None
except ApiException as exc:
error("Unable to retrieve available subnets. %s" % (exc))
return False
def disk_exists(self, pool, name):
print("not implemented")
def create(self, name, virttype=None, profile='', flavor=None, plan='kvirt',
cpumodel='Westmere', cpuflags=[], cpupinning=[], numcpus=2, memory=512,
guestid='guestrhel764', pool='default', image=None,
disks=[{'size': 10}], disksize=10, diskthin=True,
diskinterface='virtio', nets=[], iso=None, vnc=True,
cloudinit=True, reserveip=False, reservedns=False,
reservehost=False, start=True, keys=[], cmds=[], ips=None,
netmasks=None, gateway=None, nested=True, dns=None, domain=None,
tunnel=False, files=[], enableroot=True, alias=[], overrides={},
tags=[], storemetadata=False, sharedfolders=[], kernel=None, initrd=None, cmdline=None,
cpuhotplug=False, memoryhotplug=False, numamode=None, numa=[], pcidevices=[], tpm=False,
placement=[], autostart=False, rng=False, metadata={}, securitygroups=[], vmuser=None):
try:
vpcs = self.conn.list_vpcs().result['vpcs']
for vpc in vpcs:
if self.vpc == vpc['name']:
vpc_id = vpc['id']
resource_group_id = vpc['resource_group']['id']
break
else:
return {'result': 'failure', 'reason': 'VPC %s does not exist' % self.vpc}
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to retrieve vpc information. %s' % exc}
if self.exists(name):
return {'result': 'failure', 'reason': "VM %s already exists" % name}
if not keys:
return {'result': 'failure', 'reason': 'SSH Keys not found in configuration'}
key_list = []
try:
ssh_keys = {x['name']: x for x in self.conn.list_keys().result['keys']}
for key in keys:
if key not in ssh_keys:
return {'result': 'failure', 'reason': 'Key %s not found' % key}
key_list.append(ssh_keys[key]['id'])
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to check keys. %s' % exc}
if cloudinit:
if image is not None and common.needs_ignition(image):
version = common.ignition_version(image)
userdata = common.ignition(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns,
domain=domain, files=files, enableroot=enableroot,
overrides=overrides, version=version, plan=plan, image=image,
vmuser=vmuser)
else:
userdata = common.cloudinit(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns,
domain=domain, files=files, enableroot=enableroot,
overrides=overrides, fqdn=True, storemetadata=storemetadata,
vmuser=vmuser)[0]
else:
userdata = ''
if len(nets) == 0:
return {'result': 'failure', 'reason': 'Network not found in configuration'}
net_list = []
try:
default_subnet = None
subnets = {}
for x in self._get_subnets():
subnet_name = x['name']
subnets[subnet_name] = x
if x['vpc']['name'] == self.vpc and x['zone']['name'] == self.zone:
default_subnet = subnet_name
for index, net in enumerate(nets):
if isinstance(net, str):
netname = net
elif isinstance(net, dict) and 'name' in net:
netname = net['name']
if netname == 'default' or netname == self.vpc:
netname = default_subnet
elif netname not in subnets:
return {'result': 'failure', 'reason': 'Network %s not found' % netname}
subnet = subnets[netname]
if subnet['zone']['name'] != self.zone:
return {'result': 'failure', 'reason': 'Network %s is not in zone %s' % (netname, self.zone)}
net_list.append(
vpc_v1.NetworkInterfacePrototype(
subnet=vpc_v1.SubnetIdentityById(id=subnet['id']),
allow_ip_spoofing=False,
name="eth{}".format(index)
# TODO: security groups, ip address
)
)
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to check networks. %s' % exc}
if flavor is None:
flavors = [f for f in self.flavors() if f[1] >= numcpus and f[2] * 1024 >= memory]
if flavors:
flavor = min(flavors, key=lambda f: f[2])[0]
pprint("Using flavor %s" % flavor)
else:
return {'result': 'failure', 'reason': "Couldn't find a flavor matching cpu/memory requirements"}
try:
provisioned_profiles = self._get_profiles()
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to check flavors. %s' % exc}
if flavor not in provisioned_profiles:
return {'result': 'failure', 'reason': 'Flavor %s not found' % flavor}
try:
            image_obj = self._get_image(image)
            if image_obj is None:
                return {'result': 'failure', 'reason': 'Image %s not found' % image}
            image_id = image_obj['id']
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to check provisioned images. %s' % exc}
volume_attachments = []
for index, disk in enumerate(disks[1:]):
disksize = int(disk.get('size')) if isinstance(disk, dict) and 'size' in disk else int(disk)
diskname = "%s-disk%s" % (name, index + 1)
volume_by_capacity = {'capacity': disksize, 'name': diskname, 'profile': {'name': 'general-purpose'}}
volume_attachment = {'delete_volume_on_instance_delete': True, 'volume': volume_by_capacity}
volume_attachments.append(vpc_v1.VolumeAttachmentPrototypeInstanceContext.from_dict(volume_attachment))
try:
result_create = self.conn.create_instance(
vpc_v1.InstancePrototypeInstanceByImage(
image=vpc_v1.ImageIdentityById(id=image_id),
primary_network_interface=net_list[0],
zone=vpc_v1.ZoneIdentityByHref(get_zone_href(self.region, self.zone)),
keys=[vpc_v1.KeyIdentityById(id=x) for x in key_list],
name=name,
network_interfaces=net_list[1:],
profile=vpc_v1.InstanceProfileIdentityByName(
name=flavor),
resource_group=vpc_v1.ResourceGroupIdentityById(id=resource_group_id),
volume_attachments=volume_attachments,
vpc=vpc_v1.VPCIdentityById(id=vpc_id),
user_data=userdata
)
).result
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to create VM %s. %s' % (name, exc)}
tag_names = []
for entry in [field for field in metadata if field in METADATA_FIELDS]:
tag_names.append('%s:%s' % (entry, metadata[entry]))
resource_model = {'resource_id': result_create['crn']}
try:
self.global_tagging_service.attach_tag(resources=[resource_model],
tag_names=tag_names, tag_type='user').get_result()
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to attach tags. %s' % exc}
try:
result_ip = self.conn.create_floating_ip(vpc_v1.FloatingIPPrototypeFloatingIPByTarget(
target=vpc_v1.FloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityById(
id=result_create['network_interfaces'][0]['id']
),
name=name,
resource_group=vpc_v1.ResourceGroupIdentityById(
id=resource_group_id),
)).result
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to create floating ip. %s' % exc}
try:
self.conn.add_instance_network_interface_floating_ip(
instance_id=result_create['id'],
network_interface_id=result_create['network_interfaces'][0]['id'],
id=result_ip['id']
)
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to add floating ip. %s' % exc}
if reservedns and domain is not None:
self.reserve_dns(name, nets=nets, domain=domain, alias=alias, instanceid=name)
return {'result': 'success'}
def start(self, name):
try:
vm = self._get_vm(name)
if vm is None:
return {'result': 'failure', 'reason': 'VM %s not found' % name}
vm_id = vm['id']
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to retrieve VM %s. %s' % (name, exc)}
try:
self.conn.create_instance_action(instance_id=vm_id, type='start')
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to start VM %s. %s' % (name, exc)}
return {'result': 'success'}
def stop(self, name, soft=False):
try:
vm = self._get_vm(name)
if vm is None:
return {'result': 'failure', 'reason': 'VM %s not found' % name}
vm_id = vm['id']
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to retrieve VM %s. %s' % (name, exc)}
try:
self.conn.create_instance_action(instance_id=vm_id, type='stop')
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to stop VM %s. %s' % (name, exc)}
return {'result': 'success'}
def create_snapshot(self, name, base):
print("not implemented")
return {'result': 'success'}
def delete_snapshot(self, name, base):
print("not implemented")
return {'result': 'success'}
def list_snapshots(self, base):
print("not implemented")
return []
def revert_snapshot(self, name, base):
print("not implemented")
return {'result': 'success'}
def restart(self, name):
try:
vm = self._get_vm(name)
if vm is None:
return {'result': 'failure', 'reason': 'VM %s not found' % name}
vm_id = vm['id']
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to retrieve VM %s. %s' % (name, exc)}
try:
self.conn.create_instance_action(instance_id=vm_id, type='reboot')
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to restart VM %s. %s' % (name, exc)}
return {'result': 'success'}
def report(self):
print("Region:", self.region)
print("Zone:", self.zone)
print("VPC:", self.vpc)
return
def status(self, name):
try:
vm = self._get_vm(name)
if vm is None:
return {'result': 'failure', 'reason': 'VM %s not found' % name}
return vm['status']
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to retrieve VM %s. %s' % (name, exc)}
def list(self):
vms = []
try:
provisioned_vms = self._get_vms()
except ApiException as exc:
error('Unable to retrieve VMs. %s' % exc)
return vms
try:
floating_ips = {x['target']['id']: x for x in self.conn.list_floating_ips(
).result['floating_ips'] if x['status'] == 'available' and 'target' in x}
except ApiException as exc:
error('Unable to retrieve floating ips. %s' % exc)
return vms
for vm in provisioned_vms:
vms.append(self.info(vm['name'], vm=vm, ignore_volumes=True, floating_ips=floating_ips))
return sorted(vms, key=lambda x: x['name'])
def console(self, name, tunnel=False, web=False):
try:
vm = self._get_vm(name)
if vm is None:
error("VM %s not found" % name)
return None
except ApiException as exc:
error("Unable to retrieve VM %s. %s" % (name, exc))
return None
try:
# url = self.conn.create_instance_console_access_token(
# instance_id=vm['id'], console_type='serial').result['href']
url = "https://cloud.ibm.com/vpc-ext/compute/vs/%s~%s/vnc" % (self.region, vm['id'])
except ApiException as exc:
error("Unable to retrieve console access. %s" % exc)
return None
if web:
return url
if self.debug or os.path.exists("/i_am_a_container"):
msg = "Open the following url:\n%s" % url if os.path.exists("/i_am_a_container") else url
pprint(msg)
else:
pprint("Opening url: %s" % url)
webbrowser.open(url, new=2, autoraise=True)
return None
def serialconsole(self, name, web=False):
try:
vm = self._get_vm(name)
if vm is None:
error("VM %s not found" % name)
return None
except ApiException as exc:
error("Unable to retrieve VM %s. %s" % (name, exc))
return None
try:
url = "https://cloud.ibm.com/vpc-ext/compute/vs/%s~%s/serial" % (self.region, vm['id'])
except ApiException as exc:
error("Unable to retrieve console access. %s" % exc)
return None
if web:
return url
if self.debug or os.path.exists("/i_am_a_container"):
msg = "Open the following url:\n%s" % url if os.path.exists("/i_am_a_container") else url
pprint(msg)
else:
pprint("Opening url: %s" % url)
webbrowser.open(url, new=2, autoraise=True)
return None
def info(self, name, output='plain', fields=[], values=False, vm=None, ignore_volumes=False, floating_ips=None,
debug=False):
yamlinfo = {}
if vm is None:
try:
vm = self._get_vm(name)
if vm is None:
error('VM %s not found' % name)
return yamlinfo
except ApiException as exc:
error('Unable to retrieve VM %s. %s' % (name, exc))
return yamlinfo
state = vm['status']
if floating_ips is None:
try:
floating_ips = {x['target']['id']: x for x in
self.conn.list_floating_ips().result['floating_ips'] if x['status'] == 'available'}
except ApiException as exc:
error('Unable to retrieve floating ips. %s' % exc)
return yamlinfo
ips = []
for network in vm['network_interfaces']:
if network['id'] not in floating_ips:
continue
ips.append(floating_ips[network['id']]['address'])
ip = ','.join(ips)
# zone = vm['zone']['name']
image = vm['image']['name']
yamlinfo['name'] = name
yamlinfo['status'] = state
# yamlinfo['region'] = self.region
# yamlinfo['zone'] = zone
yamlinfo['ip'] = ip
# yamlinfo['bandwidth'] = vm['bandwidth']
yamlinfo['flavor'] = vm['profile']['name']
yamlinfo['cpus'] = vm['vcpu']['count']
yamlinfo['memory'] = vm['memory']
yamlinfo['image'] = image
yamlinfo['user'] = common.get_user(image)
yamlinfo['creationdate'] = vm['created_at']
yamlinfo['id'] = vm['id']
# yamlinfo['resource_group'] = vm['resource_group']['name']
# yamlinfo['resource_type'] = vm['resource_type']
# yamlinfo['startable'] = vm['startable']
# yamlinfo['vpc'] = vm['vpc']['name']
yamlinfo['profile'] = ''
yamlinfo['plan'] = ''
tag_list = self.global_tagging_service.list_tags(attached_to=vm['crn']).get_result().items()
for entry in tag_list:
if entry[0] != 'items':
continue
tags = entry[1]
for tag in tags:
tagname = tag['name']
if tagname.count(':') == 1:
key, value = tagname.split(':')
if key in METADATA_FIELDS:
yamlinfo[key] = value
break
nets = []
for interface in vm['network_interfaces']:
network = interface['subnet']['name']
device = interface['name']
private_ip = interface['primary_ipv4_address']
nets.append({'device': device, 'net': network, 'type': private_ip, 'mac': 'N/A'})
yamlinfo['private_ip'] = private_ip
if nets:
yamlinfo['nets'] = nets
# yamlinfo['primary_network_interface'] = vm['primary_network_interface']['name']
disks = []
if ignore_volumes is False:
try:
volumes = self._get_volumes()
except ApiException as exc:
error("Unable to retrieve volume information. %s" % exc)
return yamlinfo
for attachment in vm['volume_attachments']:
devname = attachment['volume']['name']
if devname in volumes:
volume = volumes[devname]
disksize = volume['capacity']
drivertype = volume['profile']['name']
diskformat = 'N/A'
path = 'N/A'
disks.append({'device': devname, 'size': disksize, 'format': diskformat, 'type': drivertype,
'path': path})
if disks:
yamlinfo['disks'] = disks
if debug:
yamlinfo['debug'] = vm
return yamlinfo
def ip(self, name):
ips = []
try:
vm = self._get_vm(name)
if vm is None:
error('VM %s not found' % name)
return ""
for network in vm['network_interfaces']:
response = self.conn.list_instance_network_interface_floating_ips(vm['id'], network['id'])
ips.extend([x['address'] for x in response.result['floating_ips'] if x['status'] == 'available'])
except ApiException as exc:
error("Unable to retrieve IP for %s. %s" % (name, exc))
return ','.join(ips)
def internalip(self, name):
try:
vm = self._get_vm(name)
except ApiException:
return None
if 'primary_network_interface' not in vm:
return None
return vm['primary_network_interface']['primary_ipv4_address']
def volumes(self, iso=False):
image_list = []
try:
images = self.conn.list_images().result['images']
for image in images:
if image['status'] not in ['available', 'deprecated'] or \
image['operating_system']['name'].startswith('windows'):
continue
image_list.append(image['name'])
except ApiException as exc:
error("Unable to retrieve volume information. %s" % exc)
return image_list
return sorted(image_list, key=str.lower)
def delete(self, name, snapshots=False):
conn = self.conn
try:
vm = self._get_vm(name)
if vm is None:
return {'result': 'failure', 'reason': 'VM %s not found' % name}
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to retrieve VM %s. %s' % (name, exc)}
tags = []
try:
tags = self.global_tagging_service.list_tags(attached_to=vm['crn']).result['items']
except Exception as exc:
error('Unable to retrieve tags. %s' % exc)
dnsclient, domain = None, None
for tag in tags:
tagname = tag['name']
if tagname.count(':') == 1:
key, value = tagname.split(':')
if key == 'domain':
domain = value
if key == 'dnsclient':
dnsclient = value
try:
for network in vm['network_interfaces']:
response = conn.list_instance_network_interface_floating_ips(instance_id=vm['id'],
network_interface_id=network['id']).result
if len(response['floating_ips']) == 0:
continue
for floating_ip in response['floating_ips']:
conn.remove_instance_network_interface_floating_ip(id=floating_ip['id'],
instance_id=vm['id'],
network_interface_id=network['id'])
conn.delete_floating_ip(id=floating_ip['id'])
except ApiException as exc:
return {'result': 'failure',
'reason': 'Unable to remove floating IPs for VM %s. %s' % (name, exc)}
try:
conn.delete_instance(id=vm['id'])
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to delete VM. %s' % exc}
if domain is not None and dnsclient is None:
self.delete_dns(name, domain, name)
return {'result': 'success'}
def dnsinfo(self, name):
dnsclient, domain = None, None
try:
vm = self._get_vm(name)
if vm is None:
error('VM %s not found' % name)
return dnsclient, domain
except ApiException as exc:
error('Unable to retrieve VM. %s' % exc)
return dnsclient, domain
try:
tags = self.global_tagging_service.list_tags(attached_to=vm['crn']).result['items']
except ApiException as exc:
error('Unable to retrieve tags. %s' % exc)
return None, None
for tag in tags:
tagname = tag['name']
if tagname.count(':') == 1:
key, value = tagname.split(':')
if key == 'dnsclient':
dnsclient = value
if key == 'domain':
domain = value
return dnsclient, domain
def clone(self, old, new, full=False, start=False):
print("not implemented")
def update_metadata(self, name, metatype, metavalue, append=False):
try:
vm = self._get_vm(name)
if vm is None:
error('VM %s not found' % name)
return
except ApiException as exc:
error('Unable to retrieve VM %s. %s' % (name, exc))
return
resource_model = {'resource_id': vm['crn']}
tag_names = ["%s:%s" % (metatype, metavalue)]
try:
self.global_tagging_service.attach_tag(resources=[resource_model],
tag_names=tag_names, tag_type='user').get_result()
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to attach tags. %s' % exc}
def update_memory(self, name, memory):
print("not implemented")
def update_cpus(self, name, numcpus):
print("not implemented")
def update_start(self, name, start=True):
print("not implemented")
def update_information(self, name, information):
self.update_metadata(name, 'information', information)
def update_iso(self, name, iso):
print("not implemented")
def update_flavor(self, name, flavor):
try:
vm = self._get_vm(name)
if vm is None:
return {'result': 'failure', 'reason': 'VM %s not found' % name}
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to retrieve VM %s. %s' % (name, exc)}
if vm['status'] != 'stopped':
return {'result': 'failure', 'reason': 'VM %s must be stopped' % name}
try:
provisioned_profiles = self._get_profiles()
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to retrieve flavors. %s' % exc}
if flavor not in provisioned_profiles:
return {'result': 'failure', 'reason': 'Flavor %s not found' % flavor}
try:
self.conn.update_instance(id=vm['id'], instance_patch=vpc_v1.InstancePatch(
profile=vpc_v1.InstancePatchProfileInstanceProfileIdentityByName(name=flavor)))
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to update instance. %s' % exc}
return {'result': 'success'}
def create_disk(self, name, size, pool=None, thin=True, image=None):
print("not implemented")
def add_disk(self, name, size, pool=None, thin=True, image=None,
shareable=False, existing=None, interface='virtio'):
print("not implemented")
def delete_disk(self, name, diskname, pool=None, novm=False):
print("not implemented")
def list_disks(self):
print("not implemented")
return {}
def add_nic(self, name, network):
try:
vm = self._get_vm(name)
if vm is None:
error('VM %s not found' % name)
return
except ApiException as exc:
error('Unable to retrieve VM %s. %s' % (name, exc))
return
try:
subnet = self._get_subnet(network)
if subnet is None:
error('Network %s not found' % network)
return
except ApiException as exc:
error('Unable to retrieve network information. %s' % exc)
return
try:
# TODO: better name. Follow ethX scheme.
self.conn.create_instance_network_interface(
instance_id=vm['id'],
subnet=vpc_v1.SubnetIdentityById(id=subnet['id']),
allow_ip_spoofing=False
)
except ApiException as exc:
error('Unable to create NIC. %s' % exc)
def delete_nic(self, name, interface):
try:
vm = self._get_vm(name)
if vm is None:
error('VM %s not found' % name)
return
except ApiException as exc:
error('Unable to retrieve VM %s. %s' % (name, exc))
try:
for network in vm['network_interfaces']:
if network['name'] == interface:
response = self.conn.delete_instance_network_interface(instance_id=vm['id'],
id=network['id'])
if response.status_code != 204:
error('Unexpected status code received: %d' % response.status_code)
except ApiException as exc:
error('Unable to delete NIC. %s' % exc)
def create_pool(self, name, poolpath, pooltype='dir', user='qemu', thinpool=None):
print("not implemented")
def delete_image(self, image, pool=None):
try:
            image_obj = self._get_image(image)
            if image_obj is None:
                return {'result': 'failure', 'reason': 'Image %s not found' % image}
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to retrieve images. %s' % exc}
try:
            result = self.conn.delete_image(id=image_obj['id'])
if result.status_code != 202:
return {'result': 'failure', 'reason': 'Unexpected status code received: %d' % result.status_code}
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to delete image. %s' % exc}
return {'result': 'success'}
def add_image(self, url, pool, short=None, cmd=None, name=None, size=None):
cos_id = self.cos_resource_instance_id.split(':')[7]
identity_client = IamIdentityV1(authenticator=self.authenticator)
api_key_detail = identity_client.get_api_keys_details(iam_api_key=self.iam_api_key).get_result()
account_id = api_key_detail['account_id']
iam_policy_management_service = IamPolicyManagementV1(authenticator=self.authenticator)
image_policy_found = False
for policy in iam_policy_management_service.list_policies(account_id).result['policies']:
policy_type = policy['type']
if policy_type != 'authorization':
continue
cos_found = [x for x in policy['resources'][0]['attributes'] if x['name'] == 'serviceInstance' and
x['value'] == cos_id]
if cos_found:
image_policy_found = True
break
if not image_policy_found:
pprint("Adding authorization between image service and cloud storage instance")
subject = PolicySubject(attributes=[SubjectAttribute(name='serviceName', value='is'),
SubjectAttribute(name='accountId', value=account_id),
SubjectAttribute(name='resourceType', value='image')])
resources = PolicyResource(attributes=[ResourceAttribute(name='accountId', value=account_id),
ResourceAttribute(name='serviceName', value='cloud-object-storage'),
ResourceAttribute(name='serviceInstance', value=cos_id)])
roles = [PolicyRole(role_id='crn:v1:bluemix:public:iam::::serviceRole:Writer')]
iam_policy_management_service.create_policy(type='authorization', subjects=[subject],
roles=roles, resources=[resources]).get_result()
if pool not in self.list_buckets():
return {'result': 'failure', 'reason': "Bucket %s doesn't exist" % pool}
shortimage = os.path.basename(url).split('?')[0]
shortimage_unzipped = shortimage.replace('.gz', '')
if shortimage_unzipped in self.volumes():
return {'result': 'success'}
delete_cos_image = False
if shortimage_unzipped not in self.list_bucketfiles(pool):
if not os.path.exists('/tmp/%s' % shortimage):
downloadcmd = "curl -Lko /tmp/%s -f '%s'" % (shortimage, url)
code = os.system(downloadcmd)
if code != 0:
return {'result': 'failure', 'reason': "Unable to download indicated image"}
if shortimage.endswith('gz'):
if which('gunzip') is not None:
uncompresscmd = "gunzip /tmp/%s" % (shortimage)
os.system(uncompresscmd)
else:
error("gunzip not found. Can't uncompress image")
return {'result': 'failure', 'reason': "gunzip not found. Can't uncompress image"}
shortimage = shortimage_unzipped
pprint("Uploading image to bucket")
self.upload_to_bucket(pool, '/tmp/%s' % shortimage)
os.remove('/tmp/%s' % shortimage)
delete_cos_image = True
pprint("Importing image as template")
image_file_prototype_model = {}
image_file_prototype_model['href'] = "cos://%s/%s/%s" % (self.region, pool, shortimage_unzipped)
operating_system_identity_model = {}
operating_system_identity_model['name'] = 'centos-8-amd64'
image_prototype_model = {}
clean_image_name = shortimage_unzipped.replace('.', '-').replace('_', '-').lower()
image_prototype_model['name'] = clean_image_name
image_prototype_model['file'] = image_file_prototype_model
image_prototype_model['operating_system'] = operating_system_identity_model
image_prototype = image_prototype_model
result_create = self.conn.create_image(image_prototype).get_result()
while True:
image = self._get_image(clean_image_name)
if image['status'] == 'available':
break
else:
pprint("Waiting for image %s to be available" % clean_image_name)
sleep(10)
tag_names = ["image:%s" % shortimage_unzipped]
resource_model = {'resource_id': result_create['crn']}
self.global_tagging_service.attach_tag(resources=[resource_model],
tag_names=tag_names, tag_type='user').get_result()
if delete_cos_image:
self.delete_from_bucket(pool, shortimage_unzipped)
return {'result': 'success'}
def create_network(self, name, cidr=None, dhcp=True, nat=True, domain=None, plan='kvirt', overrides={}):
if cidr is not None:
try:
network = ip_network(cidr)
except:
return {'result': 'failure', 'reason': "Invalid Cidr %s" % cidr}
if str(network.version) == "6":
return {'result': 'failure', 'reason': 'IPv6 is not allowed'}
try:
vpcs = self.conn.list_vpcs().result['vpcs']
for vpc in vpcs:
if self.vpc == vpc['name']:
vpc_id = vpc['id']
resource_group_id = vpc['resource_group']['id']
break
else:
return {'result': 'failure', 'reason': 'vpc %s does not exist' % self.vpc}
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to retrieve vpc information. %s' % exc}
try:
self.conn.create_subnet(vpc_v1.SubnetPrototypeSubnetByCIDR(
name=name,
ipv4_cidr_block=cidr,
vpc=vpc_v1.VPCIdentityById(id=vpc_id),
resource_group=vpc_v1.ResourceGroupIdentityById(id=resource_group_id),
zone=vpc_v1.ZoneIdentityByHref(
href=get_zone_href(self.region, self.zone)
),
))
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to create network. %s' % exc}
return {'result': 'success'}
def delete_network(self, name=None, cidr=None):
try:
subnets = self._get_subnets()
for subnet in subnets:
if name == subnet['name']:
subnet_id = subnet['id']
break
else:
return {'result': 'failure', 'reason': 'Subnet %s not found' % name}
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to retrieve subnet %s information. %s' % (name, exc)}
try:
self.conn.delete_subnet(id=subnet_id)
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to delete subnet %s. %s' % (name, exc)}
return {'result': 'success'}
def list_pools(self):
print("not implemented")
def list_networks(self):
networks = {}
subnets = {}
for subnet in self.conn.list_subnets().result['subnets']:
newsubnet = {'name': subnet['name'], 'cidr': subnet['ipv4_cidr_block']}
vpcid = subnet['vpc']['id']
if vpcid in subnets:
subnets[vpcid].append(newsubnet)
else:
subnets[vpcid] = [newsubnet]
for net in self.conn.list_vpcs().result['vpcs']:
networkname = net['name']
vpcid = net['id']
dhcp = net['default_network_acl']['name']
mode = net['default_routing_table']['name']
cidr = ''
if vpcid in subnets:
for subnet in subnets[vpcid]:
cidr = subnet['cidr']
if subnet['name'] == networkname:
break
networks[networkname] = {'cidr': cidr, 'dhcp': dhcp, 'domain': vpcid, 'type': 'routed', 'mode': mode}
return networks
def info_network(self, name):
networkinfo = common.info_network(self, name)
return networkinfo
def list_subnets(self):
subnets = {}
try:
provisioned_subnets = self._get_subnets()
except ApiException as exc:
error('Unable to retrieve subnets. %s' % exc)
return subnets
for subnet in provisioned_subnets:
subnets[subnet['name']] = {
'az': subnet['zone']['name'],
'cidr': subnet['ipv4_cidr_block'],
'network': subnet['vpc']['name']
}
return subnets
def delete_pool(self, name, full=False):
print("not implemented")
def network_ports(self, name):
return []
def vm_ports(self, name):
return []
def get_pool_path(self, pool):
print("not implemented")
def flavors(self):
flavor_list = []
try:
for profile in self.conn.list_instance_profiles().result['profiles']:
flavor_list.append([profile['name'], profile['vcpu_count']['value'], profile['memory']['value']])
except ApiException as exc:
error("Unable to retrieve available flavors. %s" % exc)
return []
return flavor_list
def export(self, name, image=None):
print("not implemented")
def _wait_lb_active(self, id):
while True:
result = self.conn.get_load_balancer(id=id).result
if result['provisioning_status'] == 'active':
break
pprint("Waiting 10s for lb to go active...")
sleep(10)
def _wait_lb_dead(self, id):
while True:
try:
self.conn.get_load_balancer(id=id).result
pprint("Waiting 10s for lb to disappear...")
sleep(10)
except:
break
def create_loadbalancer(self, name, ports=[], checkpath='/index.html', vms=[], domain=None, checkport=80, alias=[],
internal=False, dnsclient=None, subnetid=None):
ports = [int(port) for port in ports]
internal = False if internal is None else internal
clean_name = name.replace('.', '-')
pprint("Creating Security Group %s" % clean_name)
security_group_ports = ports + [int(checkport)] if int(checkport) not in ports else ports
security_group_id = self.create_security_group(clean_name, security_group_ports)
subnets = set()
member_list = []
resource_group_id = None
if vms:
for vm in vms:
try:
virtual_machine = self._get_vm(vm)
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to retrieve VM %s. %s' % (vm, exc)}
member_list.append(virtual_machine['primary_network_interface']['primary_ipv4_address'])
if 'primary_network_interface' in virtual_machine:
subnets.add(virtual_machine['primary_network_interface']['subnet']['id'])
nic_id = virtual_machine['primary_network_interface']['id']
self.conn.add_security_group_network_interface(security_group_id, nic_id)
if resource_group_id is None:
resource_group_id = virtual_machine['resource_group']['id']
self.update_metadata(vm, 'loadbalancer', clean_name, append=True)
pprint("Creating load balancer pool...")
try:
lb = self.conn.create_load_balancer(
is_public=not internal,
name=clean_name,
pools=[vpc_v1.LoadBalancerPoolPrototype(
algorithm='round_robin',
health_monitor=vpc_v1.LoadBalancerPoolHealthMonitorPrototype(
delay=20,
max_retries=2,
timeout=3,
# type='http',
type='tcp',
url_path=checkpath,
port=checkport,
),
protocol='tcp',
members=[vpc_v1.LoadBalancerPoolMemberPrototype(
port=port,
target=vpc_v1.LoadBalancerPoolMemberTargetPrototypeIP(address=m)
) for m in member_list],
name="%s-%s" % (clean_name, port),
) for port in ports],
subnets=[vpc_v1.SubnetIdentityById(id=x) for x in subnets],
resource_group_id=vpc_v1.ResourceGroupIdentityById(id=resource_group_id),
security_groups=[vpc_v1.SecurityGroupIdentityById(id=security_group_id)],
).result
self._wait_lb_active(id=lb['id'])
except ApiException as exc:
error('Unable to create load balancer. %s' % exc)
return {'result': 'failure', 'reason': 'Unable to create load balancer. %s' % exc}
pprint("Creating listeners...")
for index, port in enumerate(ports):
try:
self.conn.create_load_balancer_listener(
load_balancer_id=lb['id'],
port=port,
protocol='tcp',
default_pool=vpc_v1.LoadBalancerPoolIdentityById(id=lb['pools'][index]['id'])
)
try:
self._wait_lb_active(id=lb['id'])
except ApiException as exc:
error('Unable to create load balancer. %s' % exc)
return {'result': 'failure', 'reason': 'Unable to create load balancer. %s' % exc}
except ApiException as exc:
error('Unable to create load balancer listener. %s' % exc)
return {'result': 'failure', 'reason': 'Unable to create load balancer listener. %s' % exc}
pprint("Load balancer DNS name %s" % lb['hostname'])
resource_model = {'resource_id': lb['crn']}
try:
tag_names = ['realname:%s' % name]
if domain is not None:
tag_names.append('domain:%s' % domain)
if dnsclient is not None:
tag_names.append('dnsclient:%s' % dnsclient)
self.global_tagging_service.attach_tag(resources=[resource_model],
tag_names=tag_names,
tag_type='user')
except ApiException as exc:
error('Unable to attach tags. %s' % exc)
return {'result': 'failure', 'reason': 'Unable to attach tags. %s' % exc}
if domain is not None:
while True:
try:
result = self.conn.get_load_balancer(id=lb['id']).result
except ApiException as exc:
pprint('Unable to check load balancer ip. %s' % exc)
return {'result': 'failure', 'reason': 'Unable to check load balancer ip. %s' % exc}
if len(result['private_ips']) == 0:
pprint("Waiting 10s to get private ips assigned")
sleep(10)
continue
break
ip = result['public_ips'][0]['address']
if dnsclient is not None:
return ip
self.reserve_dns(name, ip=ip, domain=domain, alias=alias)
return {'result': 'success'}
def delete_loadbalancer(self, name):
domain = None
dnsclient = None
clean_name = name.replace('.', '-')
try:
lbs = {x['name']: x for x in self.conn.list_load_balancers().result['load_balancers']}
if clean_name not in lbs:
error('Load balancer %s not found' % name)
return
lb = lbs[clean_name]
except ApiException as exc:
error('Unable to retrieve load balancers. %s' % exc)
return
try:
tags = self.global_tagging_service.list_tags(attached_to=lb['crn']).result['items']
except ApiException as exc:
error('Unable to retrieve tags. %s' % exc)
return
realname = name
for tag in tags:
tagname = tag['name']
if tagname.count(':') == 1:
key, value = tagname.split(':')
if key == 'domain':
domain = value
if key == 'dnsclient':
dnsclient = value
if key == 'realname':
realname = value
try:
self.conn.delete_load_balancer(id=lb['id'])
except ApiException as exc:
error('Unable to delete load balancer. %s' % exc)
return
if domain is not None and dnsclient is None:
pprint("Deleting DNS %s.%s" % (realname, domain))
self.delete_dns(realname, domain, name)
self._wait_lb_dead(id=lb['id'])
try:
pprint("Deleting Security Group %s" % clean_name)
self.delete_security_group(clean_name)
except Exception as exc:
error('Unable to delete security group. %s' % exc)
if dnsclient is not None:
return dnsclient
def list_loadbalancers(self):
results = []
try:
lbs = self.conn.list_load_balancers().result['load_balancers']
except ApiException as exc:
error('Unable to retrieve LoadBalancers. %s' % exc)
return results
if lbs:
vms_by_addresses = {}
for vm in self.conn.list_instances().get_result()['instances']:
vms_by_addresses[vm['network_interfaces'][0]['primary_ipv4_address']] = vm['name']
for lb in lbs:
protocols = set()
ports = []
lb_id = lb['id']
name = lb['name']
ip = lb['hostname']
try:
listeners = self.conn.list_load_balancer_listeners(load_balancer_id=lb_id).result['listeners']
except ApiException as exc:
error('Unable to retrieve listeners for load balancer %s. %s' % (name, exc))
continue
for listener in listeners:
protocols.add(listener['protocol'])
ports.append(str(listener['port']))
target = []
if 'pools' in lb:
pool_id = lb['pools'][0]['id']
pool = self.conn.get_load_balancer_pool(id=pool_id, load_balancer_id=lb_id).get_result()
for member in pool['members']:
member_data = self.conn.get_load_balancer_pool_member(lb_id, pool_id, member['id']).get_result()
if member_data['target']['address'] in vms_by_addresses:
member_name = vms_by_addresses[member_data['target']['address']]
target.append(member_name)
target = ','.join(target)
results.append([name, ip, ','.join(protocols), '+'.join(ports), target])
return results
def create_bucket(self, bucket, public=False):
if bucket in self.list_buckets():
error("Bucket %s already there" % bucket)
return
# location = {'LocationConstraint': self.region}
args = {'Bucket': bucket} # , "CreateBucketConfiguration": location} #TODO: fix this.
if public:
args['ACL'] = 'public-read'
self.s3.create_bucket(**args)
def delete_bucket(self, bucket):
if bucket not in self.list_buckets():
error("Inexistent bucket %s" % bucket)
return
for obj in self.s3.list_objects(Bucket=bucket).get('Contents', []):
key = obj['Key']
pprint("Deleting object %s from bucket %s" % (key, bucket))
self.s3.delete_object(Bucket=bucket, Key=key)
self.s3.delete_bucket(Bucket=bucket)
def delete_from_bucket(self, bucket, path):
if bucket not in self.list_buckets():
error("Inexistent bucket %s" % bucket)
return
self.s3.delete_object(Bucket=bucket, Key=path)
def download_from_bucket(self, bucket, path):
self.s3.download_file(bucket, path, path)
def upload_to_bucket(self, bucket, path, overrides={}, temp_url=False, public=False):
if not os.path.exists(path):
error("Invalid path %s" % path)
return None
if bucket not in self.list_buckets():
error("Bucket %s doesn't exist" % bucket)
return None
extra_args = {'Metadata': overrides} if overrides else {}
if public:
extra_args['ACL'] = 'public-read'
dest = os.path.basename(path)
with open(path, "rb") as f:
self.s3.upload_fileobj(f, bucket, dest, ExtraArgs=extra_args)
if temp_url:
expiration = 600
return self.s3.generate_presigned_url('get_object', Params={'Bucket': bucket, 'Key': dest},
ExpiresIn=expiration)
return None
def fast_upload_to_bucket(self, bucket, path):
from ibm_s3transfer.aspera.manager import AsperaConfig, AsperaTransferManager
transfer_manager = AsperaTransferManager(self.s3)
ms_transfer_config = AsperaConfig(multi_session=2, multi_session_threshold_mb=100)
transfer_manager = AsperaTransferManager(client=self.s3, transfer_config=ms_transfer_config)
with AsperaTransferManager(self.s3) as transfer_manager:
future = transfer_manager.upload(path, bucket, os.path.basename(path))
future.result()
def list_buckets(self):
response = self.s3.list_buckets()
return [bucket["Name"] for bucket in response['Buckets']]
def list_bucketfiles(self, bucket):
if bucket not in self.list_buckets():
error("Inexistent bucket %s" % bucket)
return []
return [obj['Key'] for obj in self.s3.list_objects(Bucket=bucket).get('Contents', [])]
def public_bucketfile_url(self, bucket, path):
return "https://s3.direct.%s.cloud-object-storage.appdomain.cloud/%s/%s" % (self.region, bucket, path)
def reserve_dns(self, name, nets=[], domain=None, ip=None, alias=[], force=False, primary=False, instanceid=None):
if domain is None:
domain = nets[0]
pprint("Using domain %s..." % domain)
cluster = None
fqdn = "%s.%s" % (name, domain)
if fqdn.split('-')[0] == fqdn.split('.')[1]:
cluster = fqdn.split('-')[0]
name = '.'.join(fqdn.split('.')[:1])
domain = fqdn.replace("%s." % name, '').replace("%s." % cluster, '')
dnszone = self._get_dns_zone(domain)
if dnszone is None:
return
if ip is None:
counter = 0
while counter != 100:
ip = self.internalip(name)
if ip is None:
sleep(5)
pprint(
"Waiting 5 seconds to grab internal ip and create DNS record for %s..." % name)
counter += 10
else:
break
if ip is None:
error('Unable to find an IP for %s' % name)
return
try:
dnszone.create_dns_record(name=name, type='A', ttl=60, content=ip)
except ApiException as exc:
error('Unable to create DNS entry. %s' % exc)
return
if alias:
for a in alias:
if a == '*':
record_type = 'A'
content = ip
if cluster is not None and ('master' in name or 'worker' in name):
dnsname = '*.apps.%s.%s' % (cluster, domain)
else:
dnsname = '*.%s.%s' % (name, domain)
else:
record_type = 'CNAME'
content = "%s.%s" % (name, domain)
dnsname = '%s.%s' % (a, domain) if '.' not in a else a
try:
dnszone.create_dns_record(name=dnsname, type=record_type, ttl=60, content=content)
except ApiException as exc:
error('Unable to create DNS entry. %s' % exc)
return
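    # Illustrative note (not part of the original module): for a wildcard alias '*'
    # reserve_dns creates an A record rather than a CNAME. With hypothetical inputs
    # name='mycluster-master-0' and domain='mycluster.example.com', the cluster
    # prefix is recognised (the domain starts with it) and the wildcard expands to
    # '*.apps.mycluster.example.com'; for a plain vm it expands to '*.<name>.<domain>'.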
def create_dns(self):
print("not implemented")
def delete_dns(self, name, domain, instanceid=None, allentries=False):
dnszone = self._get_dns_zone(domain)
if dnszone is None:
return
cluster = None
fqdn = "%s.%s" % (name, domain)
if fqdn.split('-')[0] == fqdn.split('.')[1]:
cluster = fqdn.split('-')[0]
name = '.'.join(fqdn.split('.')[:1])
domain = fqdn.replace("%s." % name, '').replace("%s." % cluster, '')
dnsentry = name if cluster is None else "%s.%s" % (name, cluster)
entry = "%s.%s" % (dnsentry, domain)
clusterdomain = "%s.%s" % (cluster, domain)
try:
records = dnszone.list_all_dns_records().get_result()['result']
except ApiException as exc:
error('Unable to check DNS %s records. %s' % (dnszone['name'], exc))
return
recordsfound = False
for record in records:
if entry in record['name'] or ('master-0' in name and record['name'].endswith(clusterdomain))\
or (record['type'] == 'CNAME' and record['content'] == entry):
record_identifier = record['id']
try:
dnszone.delete_dns_record(dnsrecord_identifier=record_identifier)
recordsfound = True
except ApiException as exc:
error('Unable to delete record %s. %s' % (record['name'], exc))
if not recordsfound:
error("No records found for %s" % entry)
return {'result': 'success'}
def list_dns(self, domain):
results = []
dnszone = self._get_dns_zone(domain)
if dnszone is None:
return []
try:
records = dnszone.list_all_dns_records().get_result()['result']
except ApiException as exc:
error('Unable to check DNS %s records. %s' % (dnszone['name'], exc))
return results
for record in records:
ip = record['content']
results.append([record['name'], record['type'], record['ttl'], ip])
return results
def _get_vm(self, name):
result = self.conn.list_instances(name=name).result
if result['total_count'] == 0:
return None
return result['instances'][0]
def _get_vms(self):
result = self.conn.list_instances().result
if result['total_count'] == 0:
return []
return result['instances']
def _get_subnet(self, name):
subnets = self._get_subnets()
for subnet in subnets:
if name == subnet['name'] and subnet['zone']['name'] == self.zone:
return subnet
return None
def _get_subnets(self):
return self.conn.list_subnets().result['subnets']
def _get_image(self, name):
result = self.conn.list_images(name=name).result
if len(result['images']) == 0:
return None
return result['images'][0]
def _get_profiles(self):
return {x['name']: x for x in self.conn.list_instance_profiles().result['profiles']}
def _get_volumes(self):
return {x['name']: x for x in self.conn.list_volumes().result['volumes']}
def _get_dns_zone(self, domain):
try:
dnslist = self.dns.list_zones().get_result()['result']
except ApiException as exc:
error('Unable to check DNS resources. %s' % exc)
return None
dnsfound = False
for dnsresource in dnslist:
dnsid = dnsresource['id']
dnsname = dnsresource['name']
if dnsname == domain:
dnsfound = True
break
if not dnsfound:
error('Domain %s not found' % domain)
return None
try:
dnszone = DnsRecordsV1(authenticator=self.authenticator, crn=self.cis_resource_instance_id,
zone_identifier=dnsid)
except ApiException as exc:
error('Unable to check DNS zones for DNS %s. %s' % (domain, exc))
return None
return dnszone
def create_security_group(self, name, ports):
vpc_id = [net['id'] for net in self.conn.list_vpcs().result['vpcs'] if net['name'] == self.vpc][0]
vpc_identity_model = {'id': vpc_id}
rules = []
security_group_rule_prototype_model = {}
security_group_rule_prototype_model['direction'] = 'outbound'
security_group_rule_prototype_model['ip_version'] = 'ipv4'
security_group_rule_prototype_model['protocol'] = 'all'
security_group_rule_prototype_model['remote'] = {'cidr_block': '0.0.0.0/0'}
rules.append(security_group_rule_prototype_model)
for port in ports:
security_group_rule_prototype_model = {}
security_group_rule_prototype_model['direction'] = 'inbound'
security_group_rule_prototype_model['ip_version'] = 'ipv4'
security_group_rule_prototype_model['protocol'] = 'tcp'
security_group_rule_prototype_model['port_min'] = port
security_group_rule_prototype_model['port_max'] = port
security_group_rule_prototype_model['remote'] = {'cidr_block': '0.0.0.0/0'}
rules.append(security_group_rule_prototype_model)
response = self.conn.create_security_group(vpc_identity_model, name=name, rules=rules).result
return response['id']
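    # Illustrative note (not part of the original module): for ports=[80, 443] the
    # rules list built above holds one catch-all ipv4 outbound rule plus one inbound
    # tcp rule per port, all with remote cidr_block 0.0.0.0/0, e.g.
    #
    #     {'direction': 'inbound', 'ip_version': 'ipv4', 'protocol': 'tcp',
    #      'port_min': 443, 'port_max': 443, 'remote': {'cidr_block': '0.0.0.0/0'}}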
def delete_security_group(self, name):
security_groups = self.conn.list_security_groups().result['security_groups']
matching_sgs = [x for x in security_groups if x['name'] == name]
if matching_sgs:
security_group = matching_sgs[0]
security_group_id = security_group['id']
for n in security_group['network_interfaces']:
self.conn.remove_security_group_network_interface(security_group_id, n['id'])
self.conn.delete_security_group(security_group_id)
def _add_sno_security_group(self, cluster):
security_group_id = self.create_security_group("%s-sno" % cluster, [80, 443, 6443])
vm = self._get_vm("%s-master-0" % cluster)
nic_id = vm['primary_network_interface']['id']
self.conn.add_security_group_network_interface(security_group_id, nic_id)
def update_nic(self, name, index, network):
print("not implemented")
def update_network(self, name, dhcp=None, nat=None, domain=None, plan=None, overrides={}):
print("not implemented")
return {'result': 'success'}
| {
"content_hash": "8c727545d8fda5308c6dc299e4400f59",
"timestamp": "",
"source": "github",
"line_count": 1427,
"max_line_length": 119,
"avg_line_length": 45.14926419060967,
"alnum_prop": 0.5395945862047556,
"repo_name": "karmab/kcli",
"id": "3dbe272cc629f8f474e0bff9fa561c2619f3d38f",
"size": "64474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kvirt/providers/ibm/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "27073"
},
{
"name": "Dockerfile",
"bytes": "173"
},
{
"name": "HTML",
"bytes": "59660"
},
{
"name": "JavaScript",
"bytes": "688801"
},
{
"name": "Jinja",
"bytes": "25491"
},
{
"name": "Makefile",
"bytes": "871"
},
{
"name": "Python",
"bytes": "1995130"
},
{
"name": "Shell",
"bytes": "61221"
}
],
"symlink_target": ""
} |
from Lotus.application import app
from Lotus.config import default_config
if __name__ == '__main__':
app.config.from_object(default_config.DevelopmentConfig)
HOST = app.config.get('HOST')
PORT = app.config.get('PORT')
DEBUG = app.config.get('DEBUG')
app.run(host=HOST, port=PORT, debug=DEBUG)
# print app.config['SQLALCHEMY_DATABASE_URI']
# engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI'], convert_unicode=True)
# print engine.execute('select * from test').first()
| {
"content_hash": "0aca0c2ee68a67b781207b9592c72629",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 89,
"avg_line_length": 39.30769230769231,
"alnum_prop": 0.6927592954990215,
"repo_name": "Jayin/Lotus",
"id": "376fb407dc0f60bbc49ee13711b5ffe6bfda6986",
"size": "536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7807"
}
],
"symlink_target": ""
} |
"""
"""
import cPickle
import gzip
import os
import sys
import time
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer
from rbm import RBM
from rbm_mean import RBM_Mean
from grbm import GRBM
class Mean_DBN(object):
"""Deep Belief Network
A deep belief network is obtained by stacking several RBMs on top of each
other. The hidden layer of the RBM at layer `i` becomes the input of the
RBM at layer `i+1`. The first layer RBM gets as input the input of the
network, and the hidden layer of the last RBM represents the output. When
used for classification, the DBN is treated as a MLP, by adding a logistic
regression layer on top.
"""
def __init__(self, numpy_rng, theano_rng=None, n_ins=784,
hidden_layers_sizes=[500, 500], n_outs=10, MU=None):
"""This class is made to support a variable number of layers.
:type numpy_rng: numpy.random.RandomState
:param numpy_rng: numpy random number generator used to draw initial
weights
:type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
:param theano_rng: Theano random generator; if None is given one is
generated based on a seed drawn from `rng`
:type n_ins: int
:param n_ins: dimension of the input to the DBN
        :type hidden_layers_sizes: list of ints
        :param hidden_layers_sizes: intermediate layer sizes, must contain
at least one value
:type n_outs: int
:param n_outs: dimension of the output of the network
        :param MU: mean of the visible layer (a theano shared variable)
"""
self.sigmoid_layers = []
self.rbm_layers = []
self.params = []
self.n_layers = len(hidden_layers_sizes)
self.MU = MU
assert self.n_layers > 0
if not theano_rng:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
# allocate symbolic variables for the data
self.x = T.matrix('x') # the data is presented as rasterized images
self.y = T.ivector('y') # the labels are presented as 1D vector
# of [int] labels
# The DBN is an MLP, for which all weights of intermediate
# layers are shared with a different RBM. We will first
# construct the DBN as a deep multilayer perceptron, and when
# constructing each sigmoidal layer we also construct an RBM
# that shares weights with that layer. During pretraining we
        # will train these RBMs (which will lead to changing the
        # weights of the MLP as well). During finetuning we will finish
# training the DBN by doing stochastic gradient descent on the
# MLP.
for i in xrange(self.n_layers):
# construct the sigmoidal layer
# the size of the input is either the number of hidden
# units of the layer below or the input size if we are on
# the first layer
if i == 0:
input_size = n_ins
layer_input = self.x
else:
input_size = hidden_layers_sizes[i - 1]
layer_input = self.sigmoid_layers[-1].output
sigmoid_layer = HiddenLayer(rng=numpy_rng,
input=layer_input,
n_in=input_size,
n_out=hidden_layers_sizes[i],
activation=T.nnet.sigmoid)
# add the layer to our list of layers
self.sigmoid_layers.append(sigmoid_layer)
            # it's arguably a philosophical question... but we are
# going to only declare that the parameters of the
# sigmoid_layers are parameters of the DBN. The visible
# biases in the RBM are parameters of those RBMs, but not
# of the DBN.
self.params.extend(sigmoid_layer.params)
# Construct an RBM that shared weights with this layer
if i == 0:
rbm_layer = GRBM(numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
n_visible=input_size,
n_hidden=hidden_layers_sizes[i],
W=sigmoid_layer.W,
hbias=sigmoid_layer.b, MU=MU)
self.rbm_layers.append(rbm_layer)
else:
rbm_layer = RBM(numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
n_visible=input_size,
n_hidden=hidden_layers_sizes[i],
W=sigmoid_layer.W,
hbias=sigmoid_layer.b)
self.rbm_layers.append(rbm_layer)
# We now need to add a logistic layer on top of the MLP
self.logLayer = LogisticRegression(
input=self.sigmoid_layers[-1].output,
n_in=hidden_layers_sizes[-1],
n_out=n_outs)
self.params.extend(self.logLayer.params)
# compute the cost for second phase of training, defined as the
# negative log likelihood of the logistic regression (output) layer
self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)
# compute the gradients with respect to the model parameters
# symbolic variable that points to the number of errors made on the
# minibatch given by self.x and self.y
self.errors = self.logLayer.errors(self.y)
def pretraining_functions(self, train_set_x, batch_size, k):
'''Generates a list of functions, for performing one step of
gradient descent at a given layer. The function will require
as input the minibatch index, and to train an RBM you just
need to iterate, calling the corresponding function on all
minibatch indexes.
:type train_set_x: theano.tensor.TensorType
:param train_set_x: Shared var. that contains all datapoints used
for training the RBM
:type batch_size: int
:param batch_size: size of a [mini]batch
:param k: number of Gibbs steps to do in CD-k / PCD-k
'''
# index to a [mini]batch
index = T.lscalar('index') # index to a minibatch
learning_rate = T.scalar('lr') # learning rate to use
# number of batches
n_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
        # beginning of a batch, given `index`
batch_begin = index * batch_size
# ending of a batch given `index`
batch_end = batch_begin + batch_size
pretrain_fns = []
for rbm in self.rbm_layers:
# get the cost and the updates list
            # using CD-k here (persistent=None) for training each RBM.
# TODO: change cost function to reconstruction error
cost, updates = rbm.get_cost_updates(learning_rate,
persistent=None, k=k)
# compile the theano function
fn = theano.function(inputs=[index,
theano.Param(learning_rate, default=0.1)],
outputs=cost,
updates=updates,
givens={self.x:
train_set_x[batch_begin:batch_end]})
# append `fn` to the list of functions
pretrain_fns.append(fn)
return pretrain_fns
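    # Illustrative usage sketch (not part of the original module); it mirrors the
    # pretraining loop in test_DBN below, with hypothetical hyper-parameters:
    #
    #     pretrain_fns = dbn.pretraining_functions(train_set_x, batch_size=10, k=1)
    #     for i in xrange(dbn.n_layers):
    #         for batch_index in xrange(n_train_batches):
    #             cost = pretrain_fns[i](index=batch_index, lr=0.01)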
def build_finetune_functions(self, datasets, batch_size, learning_rate):
'''Generates a function `train` that implements one step of
finetuning, a function `validate` that computes the error on a
batch from the validation set, and a function `test` that
computes the error on a batch from the testing set
:type datasets: list of pairs of theano.tensor.TensorType
        :param datasets: It is a list that contains all the datasets;
                         it has to contain three pairs, `train`,
`valid`, `test` in this order, where each pair
is formed of two Theano variables, one for the
datapoints, the other for the labels
:type batch_size: int
:param batch_size: size of a minibatch
:type learning_rate: float
:param learning_rate: learning rate used during finetune stage
'''
(train_set_x, train_set_y) = datasets[0]
(valid_set_x, valid_set_y) = datasets[1]
(test_set_x, test_set_y) = datasets[2]
# compute number of minibatches for training, validation and testing
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_valid_batches /= batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_test_batches /= batch_size
index = T.lscalar('index') # index to a [mini]batch
# compute the gradients with respect to the model parameters
gparams = T.grad(self.finetune_cost, self.params)
# compute list of fine-tuning updates
updates = []
for param, gparam in zip(self.params, gparams):
updates.append((param, param - gparam * learning_rate))
train_fn = theano.function(inputs=[index],
outputs=self.finetune_cost,
updates=updates,
givens={self.x: train_set_x[index * batch_size:
(index + 1) * batch_size],
self.y: train_set_y[index * batch_size:
(index + 1) * batch_size]})
test_score_i = theano.function([index], self.errors,
givens={self.x: test_set_x[index * batch_size:
(index + 1) * batch_size],
self.y: test_set_y[index * batch_size:
(index + 1) * batch_size]})
valid_score_i = theano.function([index], self.errors,
givens={self.x: valid_set_x[index * batch_size:
(index + 1) * batch_size],
self.y: valid_set_y[index * batch_size:
(index + 1) * batch_size]})
# Create a function that scans the entire validation set
def valid_score():
return [valid_score_i(i) for i in xrange(n_valid_batches)]
# Create a function that scans the entire test set
def test_score():
return [test_score_i(i) for i in xrange(n_test_batches)]
return train_fn, valid_score, test_score
def test_DBN(finetune_lr=0.1, pretraining_epochs=100,
pretrain_lr=0.01, k=1, training_epochs=1000,
dataset='mnist.pkl.gz', batch_size=10):
"""
Demonstrates how to train and test a Deep Belief Network.
This is demonstrated on MNIST.
    :type finetune_lr: float
    :param finetune_lr: learning rate used in the finetune stage
:type pretraining_epochs: int
:param pretraining_epochs: number of epoch to do pretraining
:type pretrain_lr: float
:param pretrain_lr: learning rate to be used during pre-training
:type k: int
:param k: number of Gibbs steps in CD/PCD
:type training_epochs: int
    :param training_epochs: maximal number of iterations to run the optimizer
:type dataset: string
    :param dataset: path to the pickled dataset
:type batch_size: int
:param batch_size: the size of a minibatch
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
# numpy random generator
numpy_rng = numpy.random.RandomState(123)
print '... building the model'
mu = train_set_x.get_value().mean(axis=0)
shared_mu = theano.shared(numpy.asarray(mu,dtype=theano.config.floatX),
borrow=True)
# construct the Deep Belief Network
dbn = Mean_DBN(numpy_rng=numpy_rng, n_ins=28 * 28,
hidden_layers_sizes=[1000, 1000, 1000],
n_outs=10, MU = shared_mu)
#########################
# PRETRAINING THE MODEL #
#########################
print '... getting the pretraining functions'
pretraining_fns = dbn.pretraining_functions(train_set_x=train_set_x,
batch_size=batch_size,
k=k)
print '... pre-training the model'
start_time = time.clock()
## Pre-train layer-wise
for i in xrange(dbn.n_layers):
# go through pretraining epochs
for epoch in xrange(pretraining_epochs):
# go through the training set
c = []
for batch_index in xrange(n_train_batches):
c.append(pretraining_fns[i](index=batch_index,
lr=pretrain_lr))
print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
print numpy.mean(c)
end_time = time.clock()
print >> sys.stderr, ('The pretraining code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
########################
# FINETUNING THE MODEL #
########################
# get the training, validation and testing function for the model
print '... getting the finetuning functions'
train_fn, validate_model, test_model = dbn.build_finetune_functions(
datasets=datasets, batch_size=batch_size,
learning_rate=finetune_lr)
    print '... finetuning the model'
# early-stopping parameters
patience = 4 * n_train_batches # look as this many examples regardless
patience_increase = 2. # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many
# minibatche before checking the network
# on the validation set; in this case we
# check every epoch
best_params = None
best_validation_loss = numpy.inf
test_score = 0.
start_time = time.clock()
done_looping = False
epoch = 0
while (epoch < training_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_fn(minibatch_index)
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
validation_losses = validate_model()
this_validation_loss = numpy.mean(validation_losses)
print('epoch %i, minibatch %i/%i, validation error %f %%' % \
(epoch, minibatch_index + 1, n_train_batches,
this_validation_loss * 100.))
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if (this_validation_loss < best_validation_loss *
improvement_threshold):
patience = max(patience, iter * patience_increase)
# save best validation score and iteration number
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = test_model()
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = time.clock()
print(('Optimization complete with best validation score of %f %%,'
'with test performance %f %%') %
(best_validation_loss * 100., test_score * 100.))
print >> sys.stderr, ('The fine tuning code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time)
/ 60.))
if __name__ == '__main__':
    test_DBN()
| {
"content_hash": "334b76b604b45f76cebacc4453c6393f",
"timestamp": "",
"source": "github",
"line_count": 424,
"max_line_length": 78,
"avg_line_length": 40.825471698113205,
"alnum_prop": 0.5534373194685153,
"repo_name": "xiawei0000/Kinectforactiondetect",
"id": "d8bc42608a3364ada281e6baf985c7462348b3a0",
"size": "17310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TheanoDL/Mean_DBN.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "671308"
}
],
"symlink_target": ""
} |
import pandas as pd
import pandas.util.testing as tm
import pytest
import ibis
pytest.importorskip('multipledispatch')
pytestmark = pytest.mark.pandas
join_type = pytest.mark.parametrize(
'how',
[
'inner',
'left',
'right',
'outer',
pytest.mark.xfail(
'semi',
raises=NotImplementedError,
reason='Semi join not implemented'
),
pytest.mark.xfail(
'anti',
raises=NotImplementedError,
reason='Anti join not implemented'
),
]
)
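# Note (not part of the original test module): the `left`, `right`, `df1`, `df2`,
# `df3`, `client` and time-series fixtures used below are provided by the package's
# conftest. A plausible minimal shape for the core ones -- an assumption for
# readability, not the actual fixture definitions -- would be:
#
#     df1 = pd.DataFrame({'key': [...], 'key2': [...], 'value': [...]})
#     df2 = pd.DataFrame({'key': [...], 'key3': [...], 'other_value': [...]})
#     client = ibis.pandas.connect({'df1': df1, 'df2': df2})
#     left, right = client.table('df1'), client.table('df2')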
@join_type
def test_join(how, left, right, df1, df2):
expr = left.join(
right, left.key == right.key, how=how
)[left, right.other_value, right.key3]
result = expr.execute()
expected = pd.merge(df1, df2, how=how, on='key')
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_project_left_table(how, left, right, df1, df2):
expr = left.join(right, left.key == right.key, how=how)[left, right.key3]
result = expr.execute()
expected = pd.merge(df1, df2, how=how, on='key')[
list(left.columns) + ['key3']
]
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_with_multiple_predicates(how, left, right, df1, df2):
expr = left.join(
right, [left.key == right.key, left.key2 == right.key3], how=how
)[left, right.key3, right.other_value]
result = expr.execute()
expected = pd.merge(
df1, df2,
how=how,
left_on=['key', 'key2'],
right_on=['key', 'key3'],
).reset_index(drop=True)
tm.assert_frame_equal(
result[expected.columns],
expected
)
@join_type
def test_join_with_multiple_predicates_written_as_one(
how, left, right, df1, df2
):
predicate = (left.key == right.key) & (left.key2 == right.key3)
expr = left.join(right, predicate, how=how)[
left, right.key3, right.other_value]
result = expr.execute()
expected = pd.merge(
df1, df2,
how=how,
left_on=['key', 'key2'],
right_on=['key', 'key3'],
).reset_index(drop=True)
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_with_invalid_predicates(how, left, right):
predicate = (left.key == right.key) & (left.key2 <= right.key3)
expr = left.join(right, predicate, how=how)
with pytest.raises(TypeError):
expr.execute()
predicate = left.key >= right.key
expr = left.join(right, predicate, how=how)
with pytest.raises(TypeError):
expr.execute()
@join_type
@pytest.mark.xfail(reason='Hard to detect this case')
def test_join_with_duplicate_non_key_columns(how, left, right, df1, df2):
left = left.mutate(x=left.value * 2)
right = right.mutate(x=right.other_value * 3)
expr = left.join(right, left.key == right.key, how=how)
# This is undefined behavior because `x` is duplicated. This is difficult
# to detect
with pytest.raises(ValueError):
expr.execute()
@join_type
def test_join_with_duplicate_non_key_columns_not_selected(
how, left, right, df1, df2
):
left = left.mutate(x=left.value * 2)
right = right.mutate(x=right.other_value * 3)
right = right[['key', 'other_value']]
expr = left.join(right, left.key == right.key, how=how)[
left, right.other_value]
result = expr.execute()
expected = pd.merge(
df1.assign(x=df1.value * 2),
df2[['key', 'other_value']],
how=how,
on='key',
)
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_with_post_expression_selection(how, left, right, df1, df2):
join = left.join(right, left.key == right.key, how=how)
expr = join[left.key, left.value, right.other_value]
result = expr.execute()
expected = pd.merge(df1, df2, on='key', how=how)[[
'key', 'value', 'other_value'
]]
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_with_post_expression_filter(how, left):
lhs = left[['key', 'key2']]
rhs = left[['key2', 'value']]
joined = lhs.join(rhs, 'key2', how=how)
projected = joined[lhs, rhs.value]
expr = projected[projected.value == 4]
result = expr.execute()
df1 = lhs.execute()
df2 = rhs.execute()
expected = pd.merge(df1, df2, on='key2', how=how)
expected = expected.loc[expected.value == 4].reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@join_type
def test_multi_join_with_post_expression_filter(how, left, df1):
lhs = left[['key', 'key2']]
rhs = left[['key2', 'value']]
rhs2 = left[['key2', 'value']].relabel(dict(value='value2'))
joined = lhs.join(rhs, 'key2', how=how)
projected = joined[lhs, rhs.value]
filtered = projected[projected.value == 4]
joined2 = filtered.join(rhs2, 'key2')
projected2 = joined2[filtered.key, rhs2.value2]
expr = projected2[projected2.value2 == 3]
result = expr.execute()
df1 = lhs.execute()
df2 = rhs.execute()
df3 = rhs2.execute()
expected = pd.merge(df1, df2, on='key2', how=how)
expected = expected.loc[expected.value == 4].reset_index(drop=True)
expected = pd.merge(expected, df3, on='key2')[['key', 'value2']]
expected = expected.loc[expected.value2 == 3].reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@join_type
def test_join_with_non_trivial_key(how, left, right, df1, df2):
# also test that the order of operands in the predicate doesn't matter
join = left.join(right, right.key.length() == left.key.length(), how=how)
expr = join[left.key, left.value, right.other_value]
result = expr.execute()
expected = pd.merge(
df1.assign(key_len=df1.key.str.len()),
df2.assign(key_len=df2.key.str.len()),
on='key_len',
how=how,
).drop(['key_len', 'key_y', 'key2', 'key3'], axis=1).rename(
columns={'key_x': 'key'}
)
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_with_non_trivial_key_project_table(how, left, right, df1, df2):
# also test that the order of operands in the predicate doesn't matter
join = left.join(right, right.key.length() == left.key.length(), how=how)
expr = join[left, right.other_value]
expr = expr[expr.key.length() == 1]
result = expr.execute()
expected = pd.merge(
df1.assign(key_len=df1.key.str.len()),
df2.assign(key_len=df2.key.str.len()),
on='key_len',
how=how,
).drop(['key_len', 'key_y', 'key2', 'key3'], axis=1).rename(
columns={'key_x': 'key'}
)
expected = expected.loc[expected.key.str.len() == 1]
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_with_project_right_duplicate_column(client, how, left, df1, df3):
# also test that the order of operands in the predicate doesn't matter
right = client.table('df3')
join = left.join(right, ['key'], how=how)
expr = join[left.key, right.key2, right.other_value]
result = expr.execute()
expected = pd.merge(
df1, df3, on='key', how=how,
).drop(
['key2_x', 'key3', 'value'],
axis=1
).rename(columns={'key2_y': 'key2'})
tm.assert_frame_equal(result[expected.columns], expected)
def test_join_with_window_function(
players_base, players_df, batting, batting_df
):
players = players_base
# this should be semi_join
tbl = batting.left_join(players, ['playerID'])
t = tbl[batting.G, batting.playerID, batting.teamID]
expr = t.groupby(t.teamID).mutate(
team_avg=lambda d: d.G.mean(),
demeaned_by_player=lambda d: d.G - d.G.mean()
)
result = expr.execute()
expected = pd.merge(
batting_df, players_df[['playerID']], on='playerID', how='left'
)[['G', 'playerID', 'teamID']]
team_avg = expected.groupby('teamID').G.transform('mean')
expected = expected.assign(
team_avg=team_avg,
demeaned_by_player=lambda df: df.G - team_avg
)
tm.assert_frame_equal(result[expected.columns], expected)
merge_asof_minversion = pytest.mark.skipif(
pd.__version__ < '0.19.2',
reason="at least pandas-0.19.2 required for merge_asof")
@merge_asof_minversion
def test_asof_join(time_left, time_right, time_df1, time_df2):
expr = time_left.asof_join(time_right, 'time')[
time_left, time_right.other_value]
result = expr.execute()
expected = pd.merge_asof(time_df1, time_df2, on='time')
tm.assert_frame_equal(result[expected.columns], expected)
@merge_asof_minversion
def test_asof_join_predicate(time_left, time_right, time_df1, time_df2):
expr = time_left.asof_join(
time_right, time_left.time == time_right.time)[
time_left, time_right.other_value]
result = expr.execute()
expected = pd.merge_asof(time_df1, time_df2, on='time')
tm.assert_frame_equal(result[expected.columns], expected)
@merge_asof_minversion
def test_keyed_asof_join(
time_keyed_left, time_keyed_right, time_keyed_df1, time_keyed_df2):
expr = time_keyed_left.asof_join(time_keyed_right, 'time', by='key')[
time_keyed_left, time_keyed_right.other_value]
result = expr.execute()
expected = pd.merge_asof(
time_keyed_df1, time_keyed_df2, on='time', by='key')
tm.assert_frame_equal(result[expected.columns], expected)
@merge_asof_minversion
def test_keyed_asof_join_with_tolerance(
time_keyed_left, time_keyed_right, time_keyed_df1, time_keyed_df2):
expr = time_keyed_left.asof_join(
time_keyed_right, 'time', by='key', tolerance=2 * ibis.day())[
time_keyed_left, time_keyed_right.other_value]
result = expr.execute()
expected = pd.merge_asof(
time_keyed_df1, time_keyed_df2,
on='time', by='key', tolerance=pd.Timedelta('2D'))
tm.assert_frame_equal(result[expected.columns], expected)
| {
"content_hash": "6582012d39b8d7521cadad0d28a7237f",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 79,
"avg_line_length": 31.479495268138802,
"alnum_prop": 0.6288205230985069,
"repo_name": "deepfield/ibis",
"id": "a7f487c66e546155b6a39ba05f51911db566ddda",
"size": "9979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ibis/pandas/execution/tests/test_join.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1730"
},
{
"name": "C",
"bytes": "3684"
},
{
"name": "C++",
"bytes": "44943"
},
{
"name": "CMake",
"bytes": "4383"
},
{
"name": "Makefile",
"bytes": "186"
},
{
"name": "Python",
"bytes": "1265497"
},
{
"name": "Shell",
"bytes": "5808"
}
],
"symlink_target": ""
} |
import logging
import pymel.core as pymel
from maya import cmds
from omtk import constants
from omtk.libs import libAttr
from omtk.libs import libCtrlShapes
from omtk.libs import libRigging
from omtk.modules import rigIK
from omtk.modules import rigLimb
log = logging.getLogger('omtk')
class CtrlIkLeg(rigIK.CtrlIk):
"""
    Inherits from the base CtrlIk to create a specific box-shaped controller
"""
def __createNode__(self, *args, **kwargs):
return libCtrlShapes.create_shape_box_feet(*args, **kwargs)
class LegIk(rigIK.IK):
"""
    IK/FK setup customized for leg rigging. Includes a FootRoll.
    Creates an IK chain with an embedded footroll.
    Two modes are supported:
    1) leg_upp, leg_low, leg_foot, leg_toes, leg_tip (classical setup)
    2) leg_upp, leg_low, leg_foot, leg_heel, leg_toes, leg_tip (advanced setup)
    Setup #2 is more useful if the character has shoes.
    This allows us to ensure the foot stays fixed when the 'Ankle Side' attribute is used.
"""
_CLASS_CTRL_IK = CtrlIkLeg
SHOW_IN_UI = False
BACK_ROTX_LONGNAME = 'rollBack'
BACK_ROTX_NICENAME = 'Back Roll'
BACK_ROTY_LONGNAME = 'backTwist'
BACK_ROTY_NICENAME = 'Back Twist'
HEEL_ROTY_LONGNAME = 'footTwist'
HEEL_ROTY_NICENAME = 'Heel Twist'
ANKLE_ROTX_LONGNAME = 'rollAnkle'
ANKLE_ROTX_NICENAME = 'Ankle Roll'
ANKLE_ROTZ_LONGNAME = 'heelSpin'
ANKLE_ROTZ_NICENAME = 'Ankle Side'
TOES_ROTY_LONGNAME = 'toesTwist'
TOES_ROTY_NICENAME = 'Toes Twist'
TOESFK_ROTX_LONGNAME = 'toeWiggle'
TOESFK_ROTX_NICENAME = 'Toe Wiggle'
FRONT_ROTX_LONGNAME = 'rollFront'
FRONT_ROTX_NICENAME = 'Front Roll'
FRONT_ROTY_LONGNAME = 'frontTwist'
FRONT_ROTY_NICENAME = 'Front Twist'
AUTOROLL_THRESHOLD_LONGNAME = 'rollAutoThreshold'
AUTOROLL_THRESHOLD_NICENAME = 'Roll Auto Threshold'
"""
    A standard footroll that remembers its pivots when building/unbuilding.
"""
def __init__(self, *args, **kwargs):
super(LegIk, self).__init__(*args, **kwargs)
# Properties that contain the pivot reference object for each points.
# This is defined when the IK is built.
self.pivot_foot_heel = None
self.pivot_toes_heel = None
self.pivot_toes_ankle = None
self.pivot_foot_front = None
self.pivot_foot_back = None
self.pivot_foot_inn = None
self.pivot_foot_out = None
self.pivot_foot_ankle = None
self.pivot_foot_toes_fk = None
# Properties that contain the pivot positions relative to the foot matrix.
# This is defined when the IK is un-built.
self.pivot_foot_heel_pos = None
self.pivot_toes_heel_pos = None
self.pivot_toes_ankle_pos = None
self.pivot_foot_front_pos = None
self.pivot_foot_back_pos = None
self.pivot_foot_inn_pos = None
self.pivot_foot_out_pos = None
# Preserve the auto-threshold between builds.
self.attrAutoRollThreshold = None
def _get_reference_plane(self):
"""
When holding/fetching the footroll pivots, we do not want to use their worldSpace transforms.
:return: The reference worldSpace matrix to use when holding/fetching pivot positions.
"""
jnts = self.input[self.iCtrlIndex:]
pos_s = jnts[0].getTranslation(space='world')
pos_e = jnts[-1].getTranslation(space='world')
# We take in account that the foot is always flat on the floor.
axis_y = pymel.datatypes.Vector(0, 1, 0)
axis_z = pos_e - pos_s
axis_z.y = 0
axis_z.normalize()
axis_x = axis_y.cross(axis_z)
axis_x.normalize()
pos = pymel.datatypes.Point(self.chain_jnt[self.iCtrlIndex].getTranslation(space='world'))
tm = pymel.datatypes.Matrix(
axis_x.x, axis_x.y, axis_x.z, 0,
axis_y.x, axis_y.y, axis_y.z, 0,
axis_z.x, axis_z.y, axis_z.z, 0,
pos.x, pos.y, pos.z, 1
)
return tm
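    # Illustrative note (not part of the original module): pivot positions are held
    # relative to this matrix when the module is un-built and fetched back from it
    # at build time, i.e. (with a hypothetical pivot locator):
    #
    #     pos_rel = (pivot.getMatrix(worldSpace=True) * tm_ref.inverse()).translate  # hold (unbuild)
    #     pos_world = pymel.datatypes.Point(pos_rel) * tm_ref                        # fetch (build)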
def _get_recommended_pivot_heelfloor(self, pos_foot):
"""
:param pos_foot: The position of the foot jnt
:return: The position of the heel pivot
"""
result = pymel.datatypes.Point(pos_foot)
result.y = 0
return result
def _get_recommended_pivot_front(self, geometries, tm_ref, tm_ref_dir, pos_toes, pos_tip):
"""
Determine recommended position using ray-cast from the toes.
        If the ray-cast fails, use the last joint position.
        :return: The recommended position as a world pymel.datatypes.Vector
"""
dir = pymel.datatypes.Point(0, 0, 1) * tm_ref_dir
pos = libRigging.ray_cast_farthest(pos_toes, dir, geometries)
if not pos:
cmds.warning("Can't automatically solve FootRoll front pivot, using last joint as reference.")
pos = pos_tip
else:
            # Compare our result with the last joint position and take the longest.
pos.z = max(pos.z, pos_tip.z)
# Ensure we are aligned with the reference matrix.
pos_relative = pos * tm_ref.inverse()
pos_relative.x = 0
pos_relative.y = 0
pos = pos_relative * tm_ref
pos.y = 0
# HACK : Ensure that the point is size 3 and not 4
return pymel.datatypes.Point(pos.x, pos.y, pos.z)
def _get_recommended_pivot_back(self, geometries, tm_ref, tm_ref_dir, pos_toes):
"""
Determine recommended position using ray-cast from the toes.
        If the ray-cast fails, use the toes position.
        :return: The recommended position as a world pymel.datatypes.Vector
"""
dir = pymel.datatypes.Point(0, 0, -1) * tm_ref_dir
pos = libRigging.ray_cast_farthest(pos_toes, dir, geometries)
if not pos:
cmds.warning("Can't automatically solve FootRoll back pivot.")
pos = pos_toes
# Ensure we are aligned with the reference matrix.
pos_relative = pos * tm_ref.inverse()
pos_relative.x = 0
pos_relative.y = 0
pos = pos_relative * tm_ref
pos.y = 0
# HACK : Ensure that the point is size 3 and not 4
return pymel.datatypes.Point(pos.x, pos.y, pos.z)
def _get_recommended_pivot_bank(self, geometries, tm_ref, tm_ref_dir, pos_toes, direction=1):
"""
Determine recommended position using ray-cast from the toes.
        TODO: If the ray-cast fails, use a specified default value.
        :return: The recommended position as a world pymel.datatypes.Vector
"""
# Sanity check, ensure that at least one point is in the bounds of geometries.
# This can prevent rays from being fired from outside a geometry.
# TODO: Make it more robust.
filtered_geometries = []
for geometry in geometries:
xmin, ymin, zmin, xmax, ymax, zmax = cmds.exactWorldBoundingBox(geometry.__melobject__())
bound = pymel.datatypes.BoundingBox((xmin, ymin, zmin), (xmax, ymax, zmax))
if bound.contains(pos_toes):
filtered_geometries.append(geometry)
dir = pymel.datatypes.Point(direction, 0, 0) * tm_ref_dir
pos = libRigging.ray_cast_nearest(pos_toes, dir, filtered_geometries)
if not pos:
cmds.warning("Can't automatically solve FootRoll bank inn pivot.")
pos = pos_toes
pos.y = 0
return pos
def _get_ik_ctrl_tms(self):
"""
Compute the desired rotation for the ik ctrl.
If the LEGACY_LEG_IK_CTRL_ORIENTATION is set, we'll simply align to the influence.
:return: A two-size tuple containing the transformation matrix for the ctrl offset and the ctrl itself.
"""
if self.rig.LEGACY_LEG_IK_CTRL_ORIENTATION:
return super(LegIk, self)._get_ik_ctrl_tms()
inf_tm = self.input[self.iCtrlIndex].getMatrix(worldSpace=True)
# Resolve offset_tm
offset_tm = pymel.datatypes.Matrix()
# Resolve ctrl_tm
axis_dir = constants.Axis.x
inn_tm_dir = libRigging.get_matrix_axis(inf_tm, axis_dir)
inn_tm_dir.y = 0 # Ensure the foot ctrl never have pitch values
# Ensure the ctrl look front
if inn_tm_dir.z < 0:
inn_tm_dir = pymel.datatypes.Vector(
inn_tm_dir.x * -1,
inn_tm_dir.y * -1,
inn_tm_dir.z * -1
)
inn_tm_upp = pymel.datatypes.Vector(0, 1, 0)
ctrl_tm = libRigging.get_matrix_from_direction(
inn_tm_dir,
inn_tm_upp,
look_axis=pymel.datatypes.Vector.zAxis,
upp_axis=pymel.datatypes.Vector.yAxis
)
ctrl_tm.translate = inf_tm.translate
return offset_tm, ctrl_tm
def build(self, attr_holder=None, constraint_handle=False, setup_softik=True, default_autoroll_threshold=25.0,
**kwargs):
"""
Build the LegIk system
:param attr_holder: The attribute holder object for all the footroll params
:param kwargs: More kwargs pass to the superclass
:return: Nothing
"""
# Compute ctrl_ik orientation
super(LegIk, self).build(
constraint_handle=constraint_handle,
setup_softik=setup_softik,
**kwargs
)
nomenclature_rig = self.get_nomenclature_rig()
jnts = self._chain_ik[self.iCtrlIndex:]
num_jnts = len(jnts)
if num_jnts == 4:
jnt_foot, jnt_heel, jnt_toes, jnt_tip = jnts
elif num_jnts == 3:
jnt_foot, jnt_toes, jnt_tip = jnts
jnt_heel = None
else:
raise Exception("Unexpected number of joints after the limb. Expected 3 or 4, got {0}".format(num_jnts))
# Create FootRoll (chain?)
pos_foot = pymel.datatypes.Point(jnt_foot.getTranslation(space='world'))
pos_heel = pymel.datatypes.Point(jnt_heel.getTranslation(space='world')) if jnt_heel else None
pos_toes = pymel.datatypes.Point(jnt_toes.getTranslation(space='world'))
pos_tip = pymel.datatypes.Point(jnt_tip.getTranslation(space='world'))
# Resolve pivot locations
tm_ref = self._get_reference_plane()
tm_ref_dir = pymel.datatypes.Matrix( # Used to compute raycast directions
tm_ref.a00, tm_ref.a01, tm_ref.a02, tm_ref.a03,
tm_ref.a10, tm_ref.a11, tm_ref.a12, tm_ref.a13,
tm_ref.a20, tm_ref.a21, tm_ref.a22, tm_ref.a23,
0, 0, 0, 1
)
#
# Resolve pivot positions
#
geometries = self.rig.get_meshes()
# Resolve pivot inn
if self.pivot_foot_inn_pos:
pos_pivot_inn = pymel.datatypes.Point(self.pivot_foot_inn_pos) * tm_ref
else:
pos_pivot_inn = self._get_recommended_pivot_bank(geometries, tm_ref, tm_ref_dir, pos_toes, direction=-1)
# Resolve pivot bank out
if self.pivot_foot_out_pos:
pos_pivot_out = pymel.datatypes.Point(self.pivot_foot_out_pos) * tm_ref
else:
pos_pivot_out = self._get_recommended_pivot_bank(geometries, tm_ref, tm_ref_dir, pos_toes, direction=1)
# Resolve pivot Back
if self.pivot_foot_back_pos:
pos_pivot_back = pymel.datatypes.Point(self.pivot_foot_back_pos) * tm_ref
else:
pos_pivot_back = self._get_recommended_pivot_back(geometries, tm_ref, tm_ref_dir, pos_toes)
# Set pivot Front
if self.pivot_foot_front_pos:
pos_pivot_front = pymel.datatypes.Point(self.pivot_foot_front_pos) * tm_ref
else:
pos_pivot_front = self._get_recommended_pivot_front(geometries, tm_ref, tm_ref_dir, pos_toes, pos_tip)
# Set pivot Ankle
if self.pivot_toes_ankle_pos:
pos_pivot_ankle = pymel.datatypes.Point(self.pivot_toes_ankle_pos) * tm_ref
else:
pos_pivot_ankle = pos_toes
# Set pivot Heel floor
if self.pivot_toes_heel_pos:
pos_pivot_heel = pymel.datatypes.Point(self.pivot_toes_heel_pos) * tm_ref
else:
if jnt_heel:
pos_pivot_heel = pos_heel
else:
pos_pivot_heel = pymel.datatypes.Point(pos_foot)
pos_pivot_heel.y = 0
#
# Build Setup
#
root_footRoll = pymel.createNode('transform', name=nomenclature_rig.resolve('footRoll'))
# Align all pivots to the reference plane
root_footRoll.setMatrix(tm_ref)
# Create pivots hierarchy
self.pivot_toes_heel = pymel.spaceLocator(name=nomenclature_rig.resolve('pivotToesHeel'))
self.pivot_toes_ankle = pymel.spaceLocator(name=nomenclature_rig.resolve('pivotToesAnkle'))
self.pivot_foot_ankle = pymel.spaceLocator(name=nomenclature_rig.resolve('pivotFootAnkle'))
self.pivot_foot_front = pymel.spaceLocator(name=nomenclature_rig.resolve('pivotFootFront'))
self.pivot_foot_back = pymel.spaceLocator(name=nomenclature_rig.resolve('pivotFootBack'))
self.pivot_foot_inn = pymel.spaceLocator(name=nomenclature_rig.resolve('pivotFootBankInn'))
self.pivot_foot_out = pymel.spaceLocator(name=nomenclature_rig.resolve('pivotFootBankOut'))
self.pivot_foot_heel = pymel.spaceLocator(name=nomenclature_rig.resolve('pivotFootHeel'))
self.pivot_foot_toes_fk = pymel.spaceLocator(name=nomenclature_rig.resolve('pivotToesFkRoll'))
chain_footroll = [
root_footRoll,
self.pivot_foot_ankle,
self.pivot_foot_inn,
self.pivot_foot_out,
self.pivot_foot_back,
self.pivot_foot_heel,
self.pivot_foot_front,
self.pivot_toes_ankle,
self.pivot_toes_heel
]
libRigging.create_hyerarchy(chain_footroll)
chain_footroll[0].setParent(self.grp_rig)
self.pivot_foot_toes_fk.setParent(self.pivot_foot_heel)
self.pivot_foot_ankle.setTranslation(pos_pivot_ankle, space='world')
self.pivot_foot_inn.setTranslation(pos_pivot_inn, space='world')
self.pivot_foot_out.setTranslation(pos_pivot_out, space='world')
self.pivot_foot_back.setTranslation(pos_pivot_back, space='world')
self.pivot_foot_heel.setTranslation(pos_pivot_heel, space='world')
self.pivot_foot_front.setTranslation(pos_pivot_front, space='world')
self.pivot_toes_ankle.setTranslation(pos_pivot_ankle, space='world')
self.pivot_foot_toes_fk.setTranslation(pos_pivot_ankle, space='world')
self.pivot_toes_heel.setTranslation(pos_pivot_heel, space='world')
#
# Create attributes
#
attr_holder = self.ctrl_ik
libAttr.addAttr_separator(attr_holder, 'footRoll', niceName='Foot Roll')
attr_inn_roll_auto = libAttr.addAttr(attr_holder, longName='rollAuto', k=True)
# Auto-Roll Threshold
auto_roll_threshold_default_value = self.attrAutoRollThreshold or default_autoroll_threshold
self.attrAutoRollThreshold = libAttr.addAttr(
attr_holder,
longName=self.AUTOROLL_THRESHOLD_LONGNAME,
niceName=self.AUTOROLL_THRESHOLD_NICENAME,
k=True,
defaultValue=auto_roll_threshold_default_value
)
attr_inn_bank = libAttr.addAttr(attr_holder, longName='bank', k=True)
attr_inn_ankle_rotz = libAttr.addAttr(
attr_holder,
longName=self.ANKLE_ROTZ_LONGNAME,
niceName=self.ANKLE_ROTZ_NICENAME,
k=True, hasMinValue=True, hasMaxValue=True, minValue=-90, maxValue=90
)
attr_inn_back_rotx = libAttr.addAttr(
attr_holder,
longName=self.BACK_ROTX_LONGNAME,
niceName=self.BACK_ROTX_NICENAME,
k=True, hasMinValue=True, hasMaxValue=True, minValue=-90, maxValue=0
)
attr_inn_ankle_rotx = libAttr.addAttr(
attr_holder,
longName=self.ANKLE_ROTX_LONGNAME,
niceName=self.ANKLE_ROTX_NICENAME,
k=True, hasMinValue=True, hasMaxValue=True, minValue=0, maxValue=90
)
attr_inn_front_rotx = libAttr.addAttr(
attr_holder,
longName=self.FRONT_ROTX_LONGNAME,
niceName=self.FRONT_ROTX_NICENAME,
k=True, hasMinValue=True, hasMaxValue=True, minValue=0, maxValue=90
)
attr_inn_back_roty = libAttr.addAttr(
attr_holder,
longName=self.BACK_ROTY_LONGNAME,
niceName=self.BACK_ROTY_NICENAME,
k=True, hasMinValue=True, hasMaxValue=True, minValue=-90, maxValue=90
)
attr_inn_heel_roty = libAttr.addAttr(
attr_holder,
longName=self.HEEL_ROTY_LONGNAME,
niceName=self.HEEL_ROTY_NICENAME,
k=True, hasMinValue=True, hasMaxValue=True, minValue=-90, maxValue=90
)
attr_inn_toes_roty = libAttr.addAttr(
attr_holder,
longName=self.TOES_ROTY_LONGNAME,
niceName=self.TOES_ROTY_NICENAME,
k=True, hasMinValue=True, hasMaxValue=True, minValue=-90, maxValue=90
)
attr_inn_front_roty = libAttr.addAttr(
attr_holder,
longName=self.FRONT_ROTY_LONGNAME,
niceName=self.FRONT_ROTY_NICENAME,
k=True, hasMinValue=True, hasMaxValue=True, minValue=-90, maxValue=90
)
attr_inn_toes_fk_rotx = libAttr.addAttr(
attr_holder,
longName=self.TOESFK_ROTX_LONGNAME,
niceName=self.TOESFK_ROTX_NICENAME,
k=True, hasMinValue=True, hasMaxValue=True, minValue=-90, maxValue=90
)
#
# Connect attributes
#
attr_roll_auto_pos = libRigging.create_utility_node('condition', operation=2, firstTerm=attr_inn_roll_auto,
secondTerm=0,
colorIfTrueR=attr_inn_roll_auto,
colorIfFalseR=0.0).outColorR # Greater
attr_roll_auto_f = libRigging.create_utility_node('condition', operation=2,
firstTerm=attr_inn_roll_auto,
secondTerm=self.attrAutoRollThreshold,
colorIfFalseR=0,
colorIfTrueR=(
libRigging.create_utility_node('plusMinusAverage',
operation=2,
input1D=[
attr_inn_roll_auto,
self.attrAutoRollThreshold]).output1D)
).outColorR # Substract
attr_roll_auto_b = libRigging.create_utility_node('condition', operation=2, firstTerm=attr_inn_roll_auto,
secondTerm=0.0,
colorIfTrueR=0, colorIfFalseR=attr_inn_roll_auto
).outColorR # Greater
attr_roll_m = libRigging.create_utility_node('addDoubleLinear', input1=attr_roll_auto_pos,
input2=attr_inn_ankle_rotx).output
attr_roll_f = libRigging.create_utility_node('addDoubleLinear', input1=attr_roll_auto_f,
input2=attr_inn_front_rotx).output
attr_roll_b = libRigging.create_utility_node('addDoubleLinear', input1=attr_roll_auto_b,
input2=attr_inn_back_rotx).output
attr_bank_inn = libRigging.create_utility_node('condition', operation=2,
firstTerm=attr_inn_bank, secondTerm=0,
colorIfTrueR=attr_inn_bank,
colorIfFalseR=0.0
).outColorR # Greater
attr_bank_out = libRigging.create_utility_node('condition', operation=4,
firstTerm=attr_inn_bank, secondTerm=0,
colorIfTrueR=attr_inn_bank,
colorIfFalseR=0.0).outColorR # Less
pymel.connectAttr(attr_roll_m, self.pivot_toes_ankle.rotateX)
pymel.connectAttr(attr_roll_f, self.pivot_foot_front.rotateX)
pymel.connectAttr(attr_roll_b, self.pivot_foot_back.rotateX)
pymel.connectAttr(attr_bank_inn, self.pivot_foot_inn.rotateZ)
pymel.connectAttr(attr_bank_out, self.pivot_foot_out.rotateZ)
pymel.connectAttr(attr_inn_heel_roty, self.pivot_foot_heel.rotateY)
pymel.connectAttr(attr_inn_front_roty, self.pivot_foot_front.rotateY)
pymel.connectAttr(attr_inn_back_roty, self.pivot_foot_back.rotateY)
pymel.connectAttr(attr_inn_ankle_rotz, self.pivot_toes_heel.rotateZ)
pymel.connectAttr(attr_inn_toes_roty, self.pivot_foot_ankle.rotateY)
pymel.connectAttr(attr_inn_toes_fk_rotx, self.pivot_foot_toes_fk.rotateX)
# Create ikHandles and parent them
        # Note that we are directly parenting them so the 'Preserve Child Transform' of the translate tool still works.
if jnt_heel:
ikHandle_foot, ikEffector_foot = pymel.ikHandle(startJoint=jnt_foot, endEffector=jnt_heel,
solver='ikSCsolver')
else:
ikHandle_foot, ikEffector_foot = pymel.ikHandle(startJoint=jnt_foot, endEffector=jnt_toes,
solver='ikSCsolver')
ikHandle_foot.rename(nomenclature_rig.resolve('ikHandle', 'foot'))
ikHandle_foot.setParent(self.grp_rig)
ikHandle_foot.setParent(self.pivot_toes_heel)
if jnt_heel:
ikHandle_heel, ikEffector_foot = pymel.ikHandle(startJoint=jnt_heel, endEffector=jnt_toes,
solver='ikSCsolver')
ikHandle_heel.rename(nomenclature_rig.resolve('ikHandle', 'heel'))
ikHandle_heel.setParent(self.grp_rig)
ikHandle_heel.setParent(self.pivot_foot_front)
ikHandle_toes, ikEffector_toes = pymel.ikHandle(startJoint=jnt_toes, endEffector=jnt_tip, solver='ikSCsolver')
ikHandle_toes.rename(nomenclature_rig.resolve('ikHandle', 'toes'))
ikHandle_toes.setParent(self.grp_rig)
ikHandle_toes.setParent(self.pivot_foot_toes_fk)
# Hack: Re-constraint foot ikhandle
# todo: cleaner!
pymel.parentConstraint(self.ctrl_ik, root_footRoll, maintainOffset=True)
# Connect the footroll to the main ikHandle
# Note that we also need to hijack the softik network.
fn_can_delete = lambda x: isinstance(x, pymel.nodetypes.Constraint) and \
not isinstance(x, pymel.nodetypes.PoleVectorConstraint)
pymel.delete(filter(fn_can_delete, self._ik_handle_target.getChildren()))
if jnt_heel:
pymel.parentConstraint(self.pivot_toes_heel, self._ik_handle_target, maintainOffset=True)
else:
pymel.parentConstraint(self.pivot_toes_ankle, self._ik_handle_target, maintainOffset=True)
'''
# Constraint swivel to ctrl_ik
pymel.parentConstraint(self.ctrl_ik, self.ctrl_swivel,
maintainOffset=True) # TODO: Implement SpaceSwitch
'''
# Handle globalScale
pymel.connectAttr(self.grp_rig.globalScale, root_footRoll.scaleX)
pymel.connectAttr(self.grp_rig.globalScale, root_footRoll.scaleY)
pymel.connectAttr(self.grp_rig.globalScale, root_footRoll.scaleZ)
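        # Summary note (not part of the original module): at this point the ik ctrl
        # exposes the footroll attributes added above (rollAuto, rollAutoThreshold,
        # bank, heelSpin, rollBack, rollAnkle, rollFront, backTwist, footTwist,
        # toesTwist, frontTwist, toeWiggle), which drive the pivot locators of the
        # footroll hierarchy through the utility-node network created above.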
def unbuild(self):
"""
        Unbuild the system.
        Remember the footroll pivot locations relative to a stable reference matrix.
        The reference matrix is based on the ankle; maybe we should zero out the y axis.
:return: Nothing
"""
# Hold auto-roll threshold
self.attrAutoRollThreshold = libAttr.hold_attrs(self.attrAutoRollThreshold,
hold_curve=False) # only preserve value
tm_ref_inv = self._get_reference_plane().inverse()
if self.pivot_foot_heel:
self.pivot_foot_heel_pos = (self.pivot_foot_heel.getMatrix(worldSpace=True) * tm_ref_inv).translate
if self.pivot_toes_heel:
self.pivot_toes_heel_pos = (self.pivot_toes_heel.getMatrix(worldSpace=True) * tm_ref_inv).translate
if self.pivot_toes_ankle:
self.pivot_toes_ankle_pos = (self.pivot_toes_ankle.getMatrix(worldSpace=True) * tm_ref_inv).translate
if self.pivot_foot_front:
self.pivot_foot_front_pos = (self.pivot_foot_front.getMatrix(worldSpace=True) * tm_ref_inv).translate
if self.pivot_foot_back:
self.pivot_foot_back_pos = (self.pivot_foot_back.getMatrix(worldSpace=True) * tm_ref_inv).translate
if self.pivot_foot_inn:
self.pivot_foot_inn_pos = (self.pivot_foot_inn.getMatrix(worldSpace=True) * tm_ref_inv).translate
if self.pivot_foot_out:
self.pivot_foot_out_pos = (self.pivot_foot_out.getMatrix(worldSpace=True) * tm_ref_inv).translate
super(LegIk, self).unbuild()
self.pivot_foot_heel = None
self.pivot_toes_heel = None
self.pivot_toes_ankle = None
self.pivot_foot_front = None
self.pivot_foot_back = None
self.pivot_foot_inn = None
self.pivot_foot_out = None
self.pivot_foot_ankle = None
self.pivot_foot_toes_fk = None
class Leg(rigLimb.Limb):
"""
Basic leg system which use the LegIk class implementation.
"""
_CLASS_SYS_IK = LegIk
def validate(self):
"""
        Allow the UI to know whether the module is valid to be built or not
        :return: True or False depending on whether it passes the build validation
"""
super(Leg, self).validate()
num_inputs = len(self.input)
if num_inputs < 5 or num_inputs > 6:
raise Exception("Expected between 5 to 6 joints, got {0}".format(num_inputs))
return True
def register_plugin():
return Leg
| {
"content_hash": "ef0cc1abb23864349702d27d75d4d1df",
"timestamp": "",
"source": "github",
"line_count": 611,
"max_line_length": 135,
"avg_line_length": 44.09001636661211,
"alnum_prop": 0.5923011247633543,
"repo_name": "SqueezeStudioAnimation/omtk",
"id": "4e1eb6e9a947c10bc5f8225216536a67cd6c8cb7",
"size": "26939",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/omtk/modules/rigLeg.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Mathematica",
"bytes": "1124321"
},
{
"name": "Python",
"bytes": "1054644"
},
{
"name": "Shell",
"bytes": "143"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import io
import json
import os
import re
import sys
import tempfile
import itertools
import pstats
import six
from .console import log, truncate_left
from .environment import get_environments
from . import util
# Can't use benchmark.__file__, because that points to the compiled
# file, so it can't be run by another version of Python.
BENCHMARK_RUN_SCRIPT = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "benchmark.py")
def run_benchmark(benchmark, root, env, show_stderr=False, quick=False,
profile=False):
"""
    Run a benchmark in a separate process in the given environment.
Parameters
----------
benchmark : Benchmark object
root : str
Path to benchmark directory in which to find the benchmark
env : Environment object
show_stderr : bool
When `True`, write the stderr out to the console.
quick : bool, optional
When `True`, run the benchmark function exactly once.
profile : bool, optional
When `True`, run the benchmark through the `cProfile` profiler
and save the results.
Returns
-------
result : dict
Returns a dictionary with the following keys:
- `result`: The numeric value of the benchmark (usually the
runtime in seconds for a timing benchmark), but may be an
arbitrary JSON data structure. Set to `None` if the
benchmark failed.
- `profile`: If `profile` is `True`, this key will exist, and
be a byte string containing the cProfile data.
"""
name = benchmark['name']
result = {}
bench_results = []
bench_profiles = []
log.step()
name_max_width = util.get_terminal_width() - 33
short_name = truncate_left(name, name_max_width)
initial_message = 'Running {0}'.format(short_name)
log.info(initial_message)
def log_result(msg):
padding = " "*(util.get_terminal_width() - len(initial_message) - 14 - 1 - len(msg))
log.add(" {0}{1}".format(padding, msg))
with log.indent():
if benchmark['params']:
param_iter = enumerate(itertools.product(*benchmark['params']))
else:
param_iter = [(None, None)]
bad_output = None
failure_count = 0
total_count = 0
for param_idx, params in param_iter:
success, data, profile_data, err, out, errcode = \
_run_benchmark_single(benchmark, root, env, param_idx,
quick=quick, profile=profile)
total_count += 1
if success:
bench_results.append(data)
if profile:
bench_profiles.append(profile_data)
else:
failure_count += 1
bench_results.append(None)
bench_profiles.append(None)
if data is not None:
bad_output = data
err = err.strip()
out = out.strip()
if err or out:
err += out
if benchmark['params']:
head_msg = "\n\nFor parameters: %s\n" % (", ".join(params),)
else:
head_msg = ''
result.setdefault('stderr', '')
result['stderr'] += head_msg
result['stderr'] += err
if errcode:
result['errcode'] = errcode
# Display status
if failure_count > 0:
if bad_output is None:
if failure_count == total_count:
log_result("failed")
else:
log_result("{0}/{1} failed".format(failure_count, total_count))
else:
log_result("invalid output")
with log.indent():
log.debug(data)
# Display results
if benchmark['params'] and show_stderr:
# Long format display
if failure_count == 0:
log_result("ok")
display = _format_benchmark_result(bench_results, benchmark)
log.info("\n" + "\n".join(display))
else:
if failure_count == 0:
# Failure already shown above
if not bench_results:
display = "[]"
else:
display = util.human_value(bench_results[0], benchmark['unit'])
if len(bench_results) > 1:
display += ";..."
log_result(display)
# Dump program output
if show_stderr and result.get('stderr'):
with log.indent():
log.error(result['stderr'])
# Non-parameterized benchmarks have just a single number
if benchmark['params']:
result['result'] = dict(result=bench_results,
params=benchmark['params'])
if profile:
# Produce only a single profile
profile_data = _combine_profile_data(bench_profiles)
if profile_data is not None:
result['profile'] = profile_data
else:
result['result'] = bench_results[0]
if profile and bench_profiles[0] is not None:
result['profile'] = bench_profiles[0]
return result
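# Illustrative shape of the dict returned above (comment added for clarity;
# the numbers are made up): for a parameterized benchmark with
# params == [['a', 'b']] the caller receives roughly
#     {'result': {'result': [0.012, 0.015], 'params': [['a', 'b']]}}
# plus optional 'stderr', 'errcode' and 'profile' keys when applicable.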
def _run_benchmark_single(benchmark, root, env, param_idx, profile, quick):
"""
    Run a benchmark for a single parameter combination index, in case it
    is parameterized.
Returns
-------
success : bool
Whether test was successful
data
If success, the parsed JSON data. If failure, unparsed json data.
profile_data
Collected profiler data
err
Stderr content
out
Stdout content
errcode
Process return value
"""
name = benchmark['name']
if param_idx is not None:
name += '-%d' % (param_idx,)
if profile:
profile_fd, profile_path = tempfile.mkstemp()
os.close(profile_fd)
else:
profile_path = 'None'
result_file = tempfile.NamedTemporaryFile(delete=False)
try:
success = True
result_file.close()
out, err, errcode = env.run(
[BENCHMARK_RUN_SCRIPT, 'run', root, name, str(quick),
profile_path, result_file.name],
dots=False, timeout=benchmark['timeout'],
display_error=False, return_stderr=True,
valid_return_codes=None)
if errcode:
success = False
parsed = None
else:
with open(result_file.name, 'r') as stream:
data = stream.read()
try:
parsed = json.loads(data)
except:
success = False
parsed = data
if profile:
with io.open(profile_path, 'rb') as profile_fd:
profile_data = profile_fd.read()
if not profile_data:
profile_data = None
else:
profile_data = None
return success, parsed, profile_data, err, out, errcode
finally:
os.remove(result_file.name)
if profile:
os.remove(profile_path)
class Benchmarks(dict):
"""
Manages and runs the set of benchmarks in the project.
"""
api_version = 1
def __init__(self, conf, benchmarks=None, regex=None):
"""
Discover benchmarks in the given `benchmark_dir`.
Parameters
----------
conf : Config object
The project's configuration
regex : str or list of str, optional
`regex` is a list of regular expressions matching the
benchmarks to run. If none are provided, all benchmarks
are run.
"""
self._conf = conf
self._benchmark_dir = conf.benchmark_dir
if benchmarks is None:
benchmarks = self.disc_benchmarks(conf)
else:
benchmarks = six.itervalues(benchmarks)
if not regex:
regex = []
if isinstance(regex, six.string_types):
regex = [regex]
self._all_benchmarks = {}
for benchmark in benchmarks:
self._all_benchmarks[benchmark['name']] = benchmark
if not regex or any(re.search(reg, benchmark['name']) for reg in regex):
self[benchmark['name']] = benchmark
@classmethod
def disc_benchmarks(cls, conf):
"""
Discover all benchmarks in a directory tree.
"""
root = conf.benchmark_dir
cls.check_tree(root)
environments = list(get_environments(conf))
if len(environments) == 0:
raise util.UserError("No available environments")
# Ideally, use an environment in the same Python version as
# master, but if one isn't found, just default to the first
# one.
this_version = "{0:d}.{1:d}".format(
sys.version_info[0], sys.version_info[1])
for env in environments:
if env.python == this_version:
break
else:
env = environments[0]
log.info("Discovering benchmarks")
with log.indent():
env.create()
env.install_project(conf)
result_file = tempfile.NamedTemporaryFile(delete=False)
try:
result_file.close()
output = env.run(
[BENCHMARK_RUN_SCRIPT, 'discover', root,
result_file.name],
dots=False)
with open(result_file.name, 'r') as fp:
benchmarks = json.load(fp)
finally:
os.remove(result_file.name)
for benchmark in benchmarks:
yield benchmark
@classmethod
def check_tree(cls, root):
"""
Check the benchmark tree for files with the same name as
directories.
Also, ensure that every directory has an __init__.py file.
Raises
------
ValueError :
A .py file and directory with the same name (excluding the
extension) were found.
"""
if os.path.basename(root) == '__pycache__':
return
if not os.path.isfile(os.path.join(root, '__init__.py')):
raise util.UserError(
"No __init__.py file in '{0}'".format(root))
# First, check for the case where a .py file and a directory
# have the same name (without the extension). This can't be
# handled, so just raise an exception
found = set()
for filename in os.listdir(root):
path = os.path.join(root, filename)
if os.path.isfile(path):
filename, ext = os.path.splitext(filename)
if ext == '.py':
found.add(filename)
for dirname in os.listdir(root):
path = os.path.join(root, dirname)
if os.path.isdir(path):
if dirname in found:
raise util.UserError(
"Found a directory and python file with same name in "
"benchmark tree: '{0}'".format(path))
cls.check_tree(path)
@classmethod
def get_benchmark_file_path(cls, results_dir):
"""
Get the path to the benchmarks.json file in the results dir.
"""
return os.path.join(results_dir, "benchmarks.json")
def save(self):
"""
Save the ``benchmarks.json`` file, which is a cached set of the
metadata about the discovered benchmarks, in the results dir.
"""
path = self.get_benchmark_file_path(self._conf.results_dir)
util.write_json(path, self._all_benchmarks, self.api_version)
del self._all_benchmarks['version']
@classmethod
def load(cls, conf):
"""
Load the benchmark descriptions from the `benchmarks.json` file.
If the file is not found, one of the given `environments` will
be used to discover benchmarks.
Parameters
----------
conf : Config object
The project's configuration
Returns
-------
benchmarks : Benchmarks object
"""
def regenerate():
self = cls(conf)
self.save()
return self
path = cls.get_benchmark_file_path(conf.results_dir)
if not os.path.isfile(path):
return regenerate()
d = util.load_json(path, cleanup=False)
version = d['version']
del d['version']
if version != cls.api_version:
# Just re-do the discovery if the file is the wrong
# version
return regenerate()
return cls(conf, benchmarks=d)
def run_benchmarks(self, env, show_stderr=False, quick=False, profile=False,
skip=None):
"""
Run all of the benchmarks in the given `Environment`.
Parameters
----------
env : Environment object
Environment in which to run the benchmarks.
show_stderr : bool, optional
When `True`, display any stderr emitted by the benchmark.
quick : bool, optional
When `True`, run each benchmark function exactly once.
This is useful to quickly find errors in the benchmark
functions, without taking the time necessary to get
accurate timings.
profile : bool, optional
When `True`, run the benchmark through the `cProfile`
profiler.
skip : set, optional
Benchmark names to skip.
Returns
-------
dict : result
Returns a dictionary where the keys are benchmark names
and the values are dictionaries containing information
about running that benchmark.
Each of the values in the dictionary has the following
keys:
- `result`: The numeric value of the benchmark (usually
the runtime in seconds for a timing benchmark), but may
be an arbitrary JSON data structure. For parameterized tests,
this is a dictionary with keys 'params' and 'result', where
the value of 'params' contains a list of lists of parameter values,
and 'result' is a list of results, corresponding to itertools.product
iteration over parameters.
Set to `None` if the benchmark failed.
- `profile`: If `profile` is `True`, this key will exist,
and be a byte string containing the cProfile data.
"""
log.info("Benchmarking {0}".format(env.name))
with log.indent():
times = {}
benchmarks = sorted(list(six.iteritems(self)))
for name, benchmark in benchmarks:
if skip and name in skip:
continue
times[name] = run_benchmark(
benchmark, self._benchmark_dir, env, show_stderr=show_stderr,
quick=quick, profile=profile)
return times
def skip_benchmarks(self, env):
"""
Mark benchmarks as skipped.
"""
log.warn("Skipping {0}".format(env.name))
with log.indent():
times = {}
for name in self:
log.step()
log.warn('Benchmark {0} skipped'.format(name))
times[name] = {'result': None}
return times
def _combine_profile_data(datasets):
"""
Combine a list of profile data to a single profile
"""
datasets = [data for data in datasets if data is not None]
if not datasets:
return None
elif len(datasets) == 1:
return datasets[0]
# Load and combine stats
stats = None
while datasets:
data = datasets.pop(0)
f = tempfile.NamedTemporaryFile(delete=False)
try:
f.write(data)
f.close()
if stats is None:
stats = pstats.Stats(f.name)
else:
stats.add(f.name)
finally:
os.remove(f.name)
# Write combined stats out
f = tempfile.NamedTemporaryFile(delete=False)
try:
f.close()
stats.dump_stats(f.name)
with open(f.name, 'rb') as fp:
return fp.read()
finally:
os.remove(f.name)
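# Note on using the combined data (comment added for clarity): the returned
# byte string is a pstats/cProfile dump, so callers can write it back to a
# temporary file and inspect the merged profile with pstats.Stats(<path>).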
def _format_benchmark_result(result, benchmark, max_width=None):
"""
Format the result from a parameterized benchmark as an ASCII table
"""
if not result:
return ['[]']
def do_formatting(num_column_params):
# Fold result to a table
if num_column_params > 0:
column_params = benchmark['params'][-num_column_params:]
else:
column_params = []
rows = []
if column_params:
row_params = benchmark['params'][:-len(column_params)]
header = benchmark['param_names'][:len(row_params)]
column_param_permutations = list(itertools.product(*column_params))
header += [" / ".join(_format_param_value(value) for value in values)
for values in column_param_permutations]
rows.append(header)
column_items = len(column_param_permutations)
name_header = " / ".join(benchmark['param_names'][len(row_params):])
else:
column_items = 1
row_params = benchmark['params']
name_header = ""
header = benchmark['param_names']
rows.append(header)
for j, values in enumerate(itertools.product(*row_params)):
row_results = [util.human_value(x, benchmark['unit'])
for x in result[j*column_items:(j+1)*column_items]]
row = [_format_param_value(value) for value in values] + row_results
rows.append(row)
if name_header:
display = util.format_text_table(rows, 1,
top_header_text=name_header,
top_header_span_start=len(row_params))
else:
display = util.format_text_table(rows, 1)
return display.splitlines()
# Determine how many parameters can be fit to columns
if max_width is None:
max_width = util.get_terminal_width() * 3//4
text = do_formatting(0)
for j in range(1, len(benchmark['params'])):
new_text = do_formatting(j)
width = max(len(line) for line in new_text)
if width < max_width:
text = new_text
else:
break
return text
def _format_param_value(value_repr):
"""
Format a parameter value for displaying it as test output. The
values are string obtained via Python repr.
"""
regexs = ["^'(.+)'$",
"^u'(.+)'$",
"^<class '(.+)'>$"]
for regex in regexs:
m = re.match(regex, value_repr)
if m and m.group(1).strip():
return m.group(1)
return value_repr
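# Illustrative behaviour of _format_param_value (comment added for clarity):
# "'abc'" and "u'abc'" become "abc", "<class 'int'>" becomes "int", and any
# other repr (e.g. "1.5") is returned unchanged.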
| {
"content_hash": "73693500baa80e2ece307288680fd428",
"timestamp": "",
"source": "github",
"line_count": 608,
"max_line_length": 92,
"avg_line_length": 31.794407894736842,
"alnum_prop": 0.5418240132429776,
"repo_name": "ericdill/asv",
"id": "0c051e85aeacc00e4fdcceb5329ecb7a086f7073",
"size": "19420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asv/benchmarks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2261"
},
{
"name": "HTML",
"bytes": "6741"
},
{
"name": "JavaScript",
"bytes": "61033"
},
{
"name": "Python",
"bytes": "247879"
},
{
"name": "Shell",
"bytes": "414"
}
],
"symlink_target": ""
} |
"""
flask_cors
~~~~
Flask-CORS is a simple extension to Flask allowing you to support cross
origin resource sharing (CORS) using a simple decorator.
:copyright: (c) 2016 by Cory Dolphin.
:license: MIT, see LICENSE for more details.
"""
from .decorator import cross_origin
from .extension import CORS
from .version import __version__
__all__ = ['CORS', 'cross_origin']
# Set default logging handler to avoid "No handler found" warnings.
import logging
from logging import NullHandler
# Set initial level to WARN. Users must manually enable logging for
# flask_cors to see our logging.
rootlogger = logging.getLogger(__name__)
rootlogger.addHandler(NullHandler())
if rootlogger.level == logging.NOTSET:
    rootlogger.setLevel(logging.WARN)
| {
"content_hash": "3318b3e25b8157d3fca2f05a9c480df2",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 75,
"avg_line_length": 29.5,
"alnum_prop": 0.7327249022164276,
"repo_name": "corydolphin/flask-cors",
"id": "c84a763f80ec2d17dfb3f059ba3f719b14ab62e8",
"size": "791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_cors/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88721"
}
],
"symlink_target": ""
} |
import csv
import codecs
import cStringIO
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = self.reader.next()
return [unicode(s, "utf-8") for s in row]
def __iter__(self):
return self
@property
def line_num(self):
return self.reader.line_num
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
| {
"content_hash": "1380d0cb38be1ea76d58df86366aa3f0",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 74,
"avg_line_length": 27.159420289855074,
"alnum_prop": 0.6008537886872999,
"repo_name": "Glasgow2015/team-10",
"id": "42bb4d75b2e79362469de488523055eaa8cfbfd7",
"size": "1874",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "project/newsletter/addressimport/csv_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "562501"
},
{
"name": "HTML",
"bytes": "458748"
},
{
"name": "JavaScript",
"bytes": "786940"
},
{
"name": "PHP",
"bytes": "5453"
},
{
"name": "Python",
"bytes": "12350526"
},
{
"name": "Ruby",
"bytes": "990"
},
{
"name": "Shell",
"bytes": "4232"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand, CommandError
from django.db.models.loading import get_model
from django_usda.models import Food, FoodGroup, FoodLanguaLFactor, LanguaLFactor, NutrientData, Nutrient, Source, Derivation, Weight, Footnote, DataLink, DataSource, DeletedFood, DeletedNutrient, DeletedFootnote
import zipfile
import csv
import json
import time
from django.db import IntegrityError
from django import db
appLabel = "django_usda"
modelMap = [
{"fileName": "DATA_SRC.txt", "model": DataSource},
{"fileName": "FD_GROUP.txt", "model": FoodGroup},
{"fileName": "FOOD_DES.txt", "model": Food},
{"fileName": "LANGDESC.txt", "model": LanguaLFactor},
{"fileName": "LANGUAL.txt", "model": FoodLanguaLFactor},
{"fileName": "NUTR_DEF.txt", "model": Nutrient},
{"fileName": "DERIV_CD.txt", "model": Derivation},
{"fileName": "SRC_CD.txt", "model": Source},
{"fileName": "NUT_DATA.txt", "model": NutrientData},
{"fileName": "WEIGHT.txt", "model": Weight},
{"fileName": "FOOTNOTE.txt", "model": Footnote},
{"fileName": "DATSRCLN.txt", "model": DataLink}
]
def filter(value):
newValue = value.replace("\r\n", "")
if newValue == "":
return None
return newValue
def importFile(file, model):
contents = file.readlines()
bulk = []
print "Creating objects."
for counter, line in enumerate(contents):
values = line.replace("~", "").decode(
'iso-8859-1').encode('utf8').split("^")
fields = list(model._meta.fields)
if fields[0].get_internal_type() == "AutoField":
del fields[0]
newModel = createObject(model, fields, values)
if newModel:
bulk.append(newModel)
importObjects(model, bulk)
def importObjects(model, bulk):
length = len(bulk)
chunkSize = 50000
if length > chunkSize:
for counter, chunk in enumerate(chunks(bulk, chunkSize)):
print "Importing %s/%s objects into the database." % (counter * chunkSize + len(chunk), length)
importChunk(model, chunk)
else:
print "Importing %s objects into the database." % len(bulk)
importChunk(model, bulk)
def importChunk(model, chunk):
try:
model.objects.bulk_create(chunk)
except IntegrityError as e:
if "Duplicate entry" not in str(e):
print "Database Error: %s" % e
print chunk
def createObject(model, fields, values):
linkedFields = {}
try:
for counter, value in enumerate(values):
value = filter(value)
field = fields[counter]
key = field.name
if not field.null and value == "":
raise Exception(
"%s: Field required but null given." % field.name)
fieldType = field.get_internal_type()
if fieldType == "ForeignKey":
key = key + "_id"
elif fieldType == "BooleanField":
                # interpret "Y" as True and anything else as False; the original
                # code reset value to False before comparing it, so BooleanFields
                # always ended up False
                value = (value == "Y")
linkedFields[key] = value
return model(**linkedFields)
except Exception as e:
print "Model creation error for pk '%s': %s" % (values[0], e)
return False
def chunks(l, n):
for i in xrange(0, len(l), n):
yield l[i:i + n]
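# Illustrative behaviour of chunks() (comment added for clarity):
#   list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]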
class Command(BaseCommand):
args = "<zipFile>"
help = 'Import the nutrition database (Only R27 Supported)'
def handle(self, *args, **options):
openedZipFile = zipfile.ZipFile(args[0])
order = 0
for info in modelMap:
print "Importing file '%s' as %s" % (info["fileName"], info["model"]._meta.verbose_name_plural.title())
importFile(openedZipFile.open(info["fileName"]), info["model"])
openedZipFile.close()
| {
"content_hash": "7860b9447f54a4011ee54c28173d01cb",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 211,
"avg_line_length": 34.026785714285715,
"alnum_prop": 0.6037785358173707,
"repo_name": "Zundrium/django-usda",
"id": "4ff1fa100f6d7beea43b3e70fc84a6e50a168c19",
"size": "3811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_usda/management/commands/import_r27.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70599"
}
],
"symlink_target": ""
} |
"""Generates the all_tests.js file for consumption by Protractor.
Usage:
$ cd packages/grpc-web
$ python3 ./scripts/gen_test_htmls.py # Prerequisite
$ python3 ./scripts/gen_all_tests_js.py
"""
from string import Template
import common
ALL_TESTS_TEMPLATE_FILE = './scripts/template_all_tests_js.txt'
# The path of the generated all_tests.js file
GENERATED_ALL_TESTS_JS_PATH = './generated/all_tests.js'
# File paths needs to be prepended by the relative path of the grpc-web package
# because web server is hosting the root of github repo for tests to access the
# javascript files.
GRPC_WEB_BASE_DIR = 'packages/grpc-web'
def main():
template_data = common.read_file(ALL_TESTS_TEMPLATE_FILE)
template = Template(template_data)
test_html_paths = []
for file_name in common.get_files_with_suffix(
common.GENERATED_TEST_BASE_PATH, '_test.html'):
test_html_paths.append(" '%s/%s'," % (GRPC_WEB_BASE_DIR, file_name))
# Example output paths:
# 'packages/grpc-web/generated/test_htmls/javascript__net__grpc__web__grpcwebclientbase_test.html',
# 'packages/grpc-web/generated/test_htmls/javascript__net__grpc__web__grpcwebstreamparser_test.html',
test_html_paths_str = "\n".join(test_html_paths)
# Writes the generated output to the all_tests.js file.
common.write_file(GENERATED_ALL_TESTS_JS_PATH,
template.substitute(test_html_paths=test_html_paths_str))
if __name__ == "__main__":
main()
| {
"content_hash": "26e16298457151c2742fccb08d5fc321",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 105,
"avg_line_length": 34.44186046511628,
"alnum_prop": 0.6988521269412559,
"repo_name": "grpc/grpc-web",
"id": "3886c5430547144d922254a20384aea511fb1796",
"size": "2078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/grpc-web/scripts/gen_all_tests_js.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "70132"
},
{
"name": "Dockerfile",
"bytes": "16715"
},
{
"name": "HTML",
"bytes": "6664"
},
{
"name": "Java",
"bytes": "71525"
},
{
"name": "JavaScript",
"bytes": "182361"
},
{
"name": "Makefile",
"bytes": "3479"
},
{
"name": "Python",
"bytes": "17399"
},
{
"name": "Shell",
"bytes": "15282"
},
{
"name": "Starlark",
"bytes": "2250"
},
{
"name": "TypeScript",
"bytes": "14539"
},
{
"name": "Zig",
"bytes": "18380"
}
],
"symlink_target": ""
} |
from flask import (g, request, redirect, render_template,
session, url_for, jsonify, Blueprint)
from .models import User, Group
from .application import db, meetup
from .auth import login_required
from .forms import GroupForm
studygroup = Blueprint("studygroup", __name__, static_folder='static')
@studygroup.before_request
def load_user():
user_id = session.get('user_id')
if user_id:
g.user = User.query.filter_by(id=user_id).first()
else:
g.user = None
@studygroup.route('/')
def index():
return render_template('index.html')
@studygroup.route('/groups')
@login_required
def show_groups():
g.groups = Group.all_with_memberships()
return render_template('groups.html')
@studygroup.route('/group/<id>')
def show_group(id):
g.group = Group.query.filter_by(id=id).first()
return render_template('show_group.html')
@studygroup.route('/group/new', methods=('GET', 'POST'))
def new_group():
form = GroupForm()
if form.validate_on_submit():
group = form.save()
return redirect(url_for('.show_group', id=group.id))
return render_template('new_group.html', form=form)
@studygroup.route('/join_group', methods=('POST',))
def join_group():
pass
@studygroup.route('/members')
@studygroup.route('/members/<int:offset>')
@login_required
def show_members(offset=None):
g.users = User.query.all()
return render_template('members.html')
# if 'meetup_token' in session:
# if offset is None:
# offset = 0
#
# me = meetup.get(
# '2/members',
# data={
# 'group_id': settings.MEETUP_GROUP_ID,
# 'page': 20,
# 'offset': offset
# })
#
# return render_template(
# 'members.html',
# members=me.data['results'],
# next_offset=offset + 1)
#
# return redirect(url_for('.login'))
@studygroup.route('/send_message/<int:member_id>', methods=['GET', 'POST'])
def send_message(member_id):
if 'meetup_token' not in session:
return redirect(url_for('.login'))
if request.method == 'GET':
member = meetup.get('2/member/%s' % member_id)
return render_template("send_message.html", member=member.data)
elif request.method == 'POST':
response = meetup.post(
'2/message',
data={
'subject': request.form['subject'],
'message': request.form['message'],
'member_id': request.form['member_id']
})
return jsonify(response.data)
else:
return "Invalid Request", 500
@studygroup.route('/boom')
def boom():
raise Exception('BOOM')
@studygroup.route('/login')
def login():
return meetup.authorize(callback=url_for('.authorized', _external=True))
@studygroup.route('/logout')
def logout():
session.clear()
return redirect(url_for('.index'))
@studygroup.route('/login/authorized')
@meetup.authorized_handler
def authorized(resp):
if resp is None:
return 'Access denied: reason=%s error=%s' % (
request.args['error_reason'],
request.args['error_description']
)
session['meetup_token'] = (resp['access_token'], '')
meetup_response = meetup.get('2/member/self')
member_details = meetup_response.data
member_id = str(member_details['id'])
user = User.query.filter_by(meetup_member_id=member_id).first()
if not user:
user = User(
full_name=member_details['name'],
meetup_member_id=member_id
)
db.session.add(user)
db.session.commit()
session['user_id'] = user.id
return redirect(url_for('.index'))
@meetup.tokengetter
def get_meetup_oauth_token():
return session.get('meetup_token')
| {
"content_hash": "c090a3e12d633eba87037fe6292c1338",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 76,
"avg_line_length": 27.40714285714286,
"alnum_prop": 0.6020328381548085,
"repo_name": "BostonPython/studygroup",
"id": "3060ac136512824d76f48b50cdf6bf8114aff55b",
"size": "3837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "studygroup/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1410"
},
{
"name": "Python",
"bytes": "16218"
}
],
"symlink_target": ""
} |
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"numpydoc",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx_gallery.gen_gallery",
]
# pngmath / imgmath compatibility layer for different sphinx versions
import sphinx
from distutils.version import LooseVersion
if LooseVersion(sphinx.__version__) < LooseVersion("1.4"):
extensions.append("sphinx.ext.pngmath")
else:
extensions.append("sphinx.ext.imgmath")
sphinx_gallery_conf = {
# path to your examples scripts
"examples_dirs": "../examples",
# path where to save gallery generated examples
"gallery_dirs": "auto_examples",
}
autosummary_generate = True
numpydoc_show_class_members = False
autodoc_default_flags = ["members", "inherited-members"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"fluentopt"
copyright = u"2016, Mehdi Cherti"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.0.1"
# The full version, including alpha/beta/rc tags.
release = "0.0.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "project-templatedoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
"index",
"project-template.tex",
u"project-template Documentation",
u"Vighnesh Birodkar",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"index",
"project-template",
u"project-template Documentation",
[u"Vighnesh Birodkar"],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"project-template",
u"project-template Documentation",
u"Vighnesh Birodkar",
"project-template",
"One line description of project.",
"Miscellaneous",
)
]
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(
app.srcdir, "modules", "generated", "%s.examples" % name
)
if not os.path.exists(examples_path):
# touch file
open(examples_path, "w").close()
def setup(app):
app.connect("autodoc-process-docstring", generate_example_rst)
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"http://docs.python.org/": None}
| {
"content_hash": "35e5a633f87949eb665bbcfe16a12ed9",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 79,
"avg_line_length": 30.51875,
"alnum_prop": 0.6876919926274832,
"repo_name": "mehdidc/fluentopt",
"id": "d6a620ae4c923e73c05db017c933663f286f0dee",
"size": "10195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "Makefile",
"bytes": "191"
},
{
"name": "Python",
"bytes": "24506"
},
{
"name": "Shell",
"bytes": "3040"
}
],
"symlink_target": ""
} |
def omaketx_command (infile, outfile, extraargs="",
options="", output_cmd="-otex",
showinfo=True, showinfo_extra="",
silent=False, concat=True) :
command = (oiio_app("oiiotool")
+ " " + make_relpath(infile,tmpdir)
+ " " + extraargs
+ " " + output_cmd + options + " " + make_relpath(outfile,tmpdir) )
if not silent :
command += " >> out.txt"
if concat:
command += " ;\n"
if showinfo:
command += info_command (outfile, extraargs=showinfo_extra, safematch=1)
return command
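# Note (comment added for clarity): `command` and helpers such as oiio_app,
# oiiotool, info_command and make_relpath are expected to be provided by the
# OIIO test harness (testsuite/runtest.py), which defines them before
# executing this script.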
# Just for simplicity, make a checkerboard with a solid alpha
command += oiiotool (" --pattern checker 128x128 4 --ch R,G,B,=1.0"
+ " -d uint8 -o " + make_relpath("checker.tif") )
# Basic test - recreate the grid texture
command += omaketx_command ("../common/grid.tif", "grid.tx")
# Test --resize (to power of 2) with the grid, which is 1000x1000
command += omaketx_command ("../common/grid.tif", "grid-resize.tx",
options=":resize=1")
# Test -d to set output data type
command += omaketx_command ("checker.tif", "checker-uint16.tx",
"-d uint16")
# Test --ch to restrict the number of channels
command += omaketx_command ("checker.tif", "checker-1chan.tx",
"--ch 0")
# Test --tiles to set a non-default tile size
command += omaketx_command ("checker.tif", "checker-16x32tile.tx",
"--tile 16 32")
# Test --separate and --compression
command += omaketx_command ("checker.tif", "checker-seplzw.tx",
"--planarconfig separate --compression lzw")
# Test --wrap
command += omaketx_command ("checker.tif", "checker-clamp.tx",
options=":wrap=clamp")
# Test --swrap and --twrap
command += omaketx_command ("checker.tif", "checker-permir.tx",
options=":swrap=periodic:twrap=mirror")
# Test --nomipmap
command += omaketx_command ("checker.tif", "checker-nomip.tx",
options=":nomipmap=1")
# Test setting matrices
command += omaketx_command ("checker.tif", "checker-camera.tx",
"--attrib:type=matrix worldtocamera 1,0,0,0,0,2,0,0,0,0,1,0,0,0,0,1 " +
"--attrib:type=matrix worldtoscreen 3,0,0,0,0,3,0,0,0,0,3,0,1,2,3,1")
# Test --opaque-detect (should drop the alpha channel)
command += omaketx_command ("checker.tif", "checker-opaque.tx",
options=":opaque_detect=1")
# Test --monochrome-detect (first create a monochrome image)
command += oiiotool (" --pattern constant:color=.25,.25,.25 256x256 3 "
+ " -d uint8 -o " + make_relpath("gray.tif"))
command += omaketx_command ("gray.tif", "gray-mono.tx",
options=":monochrome_detect=1")
# Test --monochrome-detect on something that is NOT monochrome
command += oiiotool (" --pattern constant:color=.25,.2,.15 256x256 3 "
+ " -d uint8 -o " + make_relpath("pink.tif"))
command += omaketx_command ("pink.tif", "pink-mono.tx",
options=":monochrome_detect=1")
# Test --prman : should save 'separate' planarconfig, and funny 64x32 tiles
# since we are specifying 16 bits, and it should save as 'int16' even though
# we asked for unsigned.
command += omaketx_command ("checker.tif", "checker-prman.tx",
"-d uint16", options=":prman=1")
# Test --fixnan : take advantage of the bad.exr images in
# testsuite/oiiotool-fixnan. (Use --nomipmap to cut down on stats output)
# FIXME: would also like to test --checknan, but the problem with that is
# that is actually FAILS if there's a nan.
command += omaketx_command (OIIO_TESTSUITE_ROOT+"/oiiotool-fixnan/src/bad.exr", "nan.exr",
"--fixnan box3", options=":nomipmap=1",
showinfo=True, showinfo_extra="--stats")
# Test that when outputting half textures, we clamp large float values
# rather than inadvertently turning into Inf in the process of output to
# half.
command += oiiotool (" --pattern constant:color=1.0e6,1.0e6,1.0e6 2x2 3 -d float -o million.tif")
command += omaketx_command ("million.tif", "bigval.exr",
"-d half", showinfo_extra="--stats")
# Test --format to force exr even though it can't be deduced from the name.
command += omaketx_command ("checker.tif", "checker-exr.pdq",
options=":fileformatname=exr")
# Test that the oiio:SHA-1 hash is stable, and that that changing filter and
# using -hicomp result in different images and different hashes.
command += omaketx_command ("../common/grid.tif", "grid-lanczos3.tx",
options = ":filter=lanczos3", showinfo=False)
command += omaketx_command ("../common/grid.tif", "grid-lanczos3-hicomp.tx",
options = ":filter=lanczos3:highlightcomp=1", showinfo=False)
command += info_command ("grid.tx",
extraargs="--metamatch oiio:SHA-1")
command += info_command ("grid-lanczos3.tx",
extraargs="--metamatch oiio:SHA-1")
command += info_command ("grid-lanczos3-hicomp.tx",
extraargs="--metamatch oiio:SHA-1")
# Test that we cleanly replace any existing SHA-1 hash and ConstantColor
# hint in the ImageDescription of the input file.
command += oiiotool (" --pattern constant:color=1,0,0 64x64 3 "
+ " --caption \"foo SHA-1=1234abcd ConstantColor=[0.0,0,-0.0] bar\""
+ " -d uint8 -o " + make_relpath("small.tif") )
command += info_command ("small.tif", safematch=1);
command += omaketx_command ("small.tif", "small.tx",
options=":oiio=1:constant_color_detect=1")
# Regression test -- at one point, we had a bug where we were botching
# the poles of OpenEXR env maps, adding energy. Check it by creating an
# all-white image, turning it into an env map, and calculating its
# statistics (should be 1.0 everywhere).
command += oiiotool (" --pattern constant:color=1,1,1 4x2 3 "
+ " -d half -o " + make_relpath("white.exr"))
command += omaketx_command ("white.exr", "whiteenv.exr",
output_cmd="-oenv", showinfo=False)
command += oiiotool ("--stats -a whiteenv.exr")
command += oiiotool (" --pattern noise 64x64 1"
+ " -d half -o " + make_relpath("bump.exr"))
command += omaketx_command ("bump.exr", "bumpslope.exr",
extraargs="-d half",
output_cmd="-obump", showinfo=False)
command += oiiotool ("--stats -a bumpslope.exr")
outputs = [ "out.txt" ]
# To do: --filter --checknan --fullpixels
# --prman-metadata --ignore-unassoc
# --mipimage
# --envlatl TIFF, --envlatl EXR
# --colorconvert --unpremult -u --fovcot
| {
"content_hash": "14ac3cfb8141f7e0c849707f7d149141",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 98,
"avg_line_length": 45.88815789473684,
"alnum_prop": 0.5948387096774194,
"repo_name": "lgritz/oiio",
"id": "da2a79d6655733502f67aa2bc654c09f048e39fa",
"size": "7099",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "testsuite/oiiotool-maketx/run.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "131728"
},
{
"name": "C++",
"bytes": "6649294"
},
{
"name": "CMake",
"bytes": "218101"
},
{
"name": "Makefile",
"bytes": "18697"
},
{
"name": "POV-Ray SDL",
"bytes": "5056106"
},
{
"name": "Python",
"bytes": "269004"
},
{
"name": "Shell",
"bytes": "56909"
}
],
"symlink_target": ""
} |
def patch():
if patch._initialized:
return
patch._initialized = True
import gevent.monkey
gevent.monkey.patch_all()
import sys
if sys.version_info.major < 3:
_py2_patches()
_export()
patch._initialized = False
def _export():
import lymph
lymph.__version__ = '0.10.0-dev'
from lymph.exceptions import RpcError, LookupFailure, Timeout
from lymph.core.decorators import rpc, raw_rpc, event, task
from lymph.core.interfaces import Interface
from lymph.core.declarations import proxy
for obj in (RpcError, LookupFailure, Timeout, rpc, raw_rpc, event, Interface, proxy, task):
setattr(lymph, obj.__name__, obj)
def _py2_patches():
import monotime # NOQA
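# Typical use (comment added for clarity): call lymph.monkey.patch() once, as
# early as possible, so the gevent monkey-patching and the attributes
# re-exported onto the top-level lymph package (rpc, event, Interface, ...)
# are in place before other modules are imported.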
| {
"content_hash": "d8eaf895d96be0f6d46c9ac39b71eb55",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 95,
"avg_line_length": 23.93548387096774,
"alnum_prop": 0.6630727762803235,
"repo_name": "dushyant88/lymph",
"id": "779e7b7391cef99a2239bf6ce49e832fb23ad5f7",
"size": "744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lymph/monkey.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "245"
},
{
"name": "Python",
"bytes": "240443"
}
],
"symlink_target": ""
} |
from mrjob.job import MRJob
class MRWordCounter(MRJob):
def mapper(self, key, line):
timestamp, user_id = line.split()
yield user_id, timestamp
def reducer(self, uid, timestamps):
yield uid, sorted(timestamps)
if __name__ == '__main__':
MRWordCounter.run()
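# Illustrative run (comment added for clarity; the file name and values are
# made up). Given an input file of "<timestamp> <user_id>" lines such as
#     1001 alice
#     1003 alice
#     1002 bob
# running `python event_stream_segmentation.py input.txt` emits one line per
# user with that user's sorted timestamps, roughly:
#     "alice"   ["1001", "1003"]
#     "bob"     ["1002"]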
| {
"content_hash": "a6274dcca1877a8b3f6907cc98bf2578",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 41,
"avg_line_length": 19.933333333333334,
"alnum_prop": 0.6254180602006689,
"repo_name": "jepatti/mrjob_recipes",
"id": "0b34358f8563c8932bebb6c807061edf17d667d7",
"size": "299",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "event_stream_segmentation/event_stream_segmentation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7164"
}
],
"symlink_target": ""
} |
import os
import optparse
import psutil
import sys
import time
from collections import Counter
PROC_ROOT_DIR = '/proc/'
TCP_CONN_STATUSES = [
'ESTABLISHED',
'SYN_SENT',
'SYN_RECV',
'FIN_WAIT1',
'FIN_WAIT2',
'TIME_WAIT',
'CLOSE',
'CLOSE_WAIT',
'LAST_ACK',
'LISTEN',
'CLOSING',
'NONE'
]
def find_pids_from_name(process_name):
'''Find process PID from name using /proc/<pids>/comm'''
pids_in_proc = [ pid for pid in os.listdir(PROC_ROOT_DIR) if pid.isdigit() ]
pids = []
for pid in pids_in_proc:
path = PROC_ROOT_DIR + pid
if 'comm' in os.listdir(path):
file_handler = open(path + '/comm', 'r')
if file_handler.read().rstrip() == process_name:
pids.append(int(pid))
return pids
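# Illustrative call (comment added for clarity; the PIDs are hypothetical):
#   find_pids_from_name('nginx') -> [1234, 5678]
# i.e. every PID whose /proc/<pid>/comm exactly matches the given name.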
def stats_per_pid(pid):
'''Gets process stats using psutil module
details at http://pythonhosted.org/psutil/#process-class'''
stats = {}
process_handler = psutil.Process(pid)
stats['cpu.user'] = process_handler.cpu_times().user
stats['cpu.system'] = process_handler.cpu_times().system
stats['cpu.percent'] = process_handler.cpu_percent()
stats['threads'] = process_handler.num_threads()
stats['memory.rss'] = process_handler.memory_info_ex().rss
stats['memory.vms'] = process_handler.memory_info_ex().vms
stats['memory.shared'] = process_handler.memory_info_ex().shared
stats['memory.text'] = process_handler.memory_info_ex().text
stats['memory.lib'] = process_handler.memory_info_ex().lib
stats['memory.data'] = process_handler.memory_info_ex().data
stats['memory.dirty'] = process_handler.memory_info_ex().dirty
stats['memory.percent'] = process_handler.memory_percent()
stats['fds'] = process_handler.num_fds()
stats['ctx_switches.voluntary'] = process_handler.num_ctx_switches().voluntary
stats['ctx_switches.involuntary'] = process_handler.num_ctx_switches().involuntary
stats['io_counters.read_count'] = process_handler.io_counters().read_count
stats['io_counters.write_count'] = process_handler.io_counters().write_count
stats['io_counters.read_bytes'] = process_handler.io_counters().read_bytes
stats['io_counters.write_bytes'] = process_handler.io_counters().write_bytes
# TCP/UDP/Unix Socket Connections
tcp_conns = process_handler.connections(kind='tcp')
if tcp_conns:
stats['conns.tcp.total'] = len(tcp_conns)
tcp_conns_count = {}
for tcp_status in TCP_CONN_STATUSES:
tcp_conns_count['conns.tcp.' + tcp_status.lower()] = 0
for conn in tcp_conns:
if conn.status == tcp_status:
tcp_conns_count['conns.tcp.' + tcp_status.lower()] = tcp_conns_count[
'conns.tcp.' + tcp_status.lower()] + 1
stats.update(tcp_conns_count)
udp_conns = process_handler.connections(kind='udp')
if udp_conns:
stats['conns.udp.total'] = len(udp_conns)
unix_conns = process_handler.connections(kind='unix')
if unix_conns:
stats['conns.unix_sockets.total'] = len(unix_conns)
return stats
def multi_pid_process_stats(pids):
stats = {'total_processes': len(pids)}
for pid in pids:
stats = Counter(stats) + Counter(stats_per_pid(pid))
return stats
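# NOTE (comment added for clarity): recursive_dict_sum below is not called
# anywhere in this script, and as written it invokes itself on its own result
# with no base case, so calling it would recurse until Python's recursion
# limit is hit.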
def recursive_dict_sum(dictionnary):
sum_dict = Counter(dictionnary) + Counter(dictionnary)
recursive_dict_sum(sum_dict)
return sum_dict
def graphite_printer(stats, graphite_scheme):
now = time.time()
for stat in stats:
print "%s.%s %s %d" % (graphite_scheme, stat, stats[stat], now)
def get_pid_from_pid_file(pid_file):
try:
file_handler = open(pid_file, 'r')
except Exception as e:
print 'could not read: %s' % pid_file
print e
sys.exit(1)
try:
    # read a single integer PID; the previous "[].append(...)" form always
    # evaluated to None because list.append() returns None
    pid = int(file_handler.read().rstrip())
except Exception as e:
    print 'It seems file %s does not use the standard pid file convention' % pid_file
print 'Pid file typically just contains the PID of the process'
print e
sys.exit(1)
return pid
def main():
parser = optparse.OptionParser()
parser.add_option('-n', '--process-name',
                    help = 'name of process to collect stats (incompatible with -p)',
dest = 'process_name',
metavar = 'PROCESS_NAME')
parser.add_option('-p', '--pid-file',
                    help = 'path to pid file for process to collect stats (incompatible with -n)',
dest = 'process_pid_file',
metavar = 'PROCESS_PID_FILE')
parser.add_option('-s', '--graphite_scheme',
help = 'graphite scheme to prepend, default to <process_stats>',
default = 'per_process_stats',
dest = 'graphite_scheme',
metavar = 'GRAPHITE_SCHEME')
(options, args) = parser.parse_args()
if options.process_name and options.process_pid_file:
print 'Specify a process name or a process pid file path, but not both'
sys.exit(1)
if not options.process_name and not options.process_pid_file:
print 'A process name or a process pid file path is needed'
sys.exit(1)
if options.process_name:
pids = find_pids_from_name(options.process_name)
graphite_printer(multi_pid_process_stats(pids), options.graphite_scheme)
if options.process_pid_file:
pid = get_pid_from_pid_file(options.process_pid_file)
graphite_printer(stats_per_pid(pid), options.graphite_scheme)
#
if __name__ == '__main__':
main()
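# Example invocation (comment added for clarity; the process name is made up):
#   python metrics-per-process.py -n nginx
# prints Graphite-style lines such as
#   per_process_stats.cpu.percent 0.0 1467072000
# one per collected stat, using the default scheme unless -s is given.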
| {
"content_hash": "3711a9d2815c47c4c2505ccbce71ae42",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 85,
"avg_line_length": 33.4,
"alnum_prop": 0.6762603824608847,
"repo_name": "jaimegago/sensu-plugins-process-checks",
"id": "f23606a0944be6e164556606e81ac98661cf09a3",
"size": "7656",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/metrics-per-process.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7656"
},
{
"name": "Ruby",
"bytes": "35791"
},
{
"name": "Shell",
"bytes": "2285"
}
],
"symlink_target": ""
} |
from unittest import mock
import pytest
from globus_sdk.authorizers import RefreshTokenAuthorizer
REFRESH_TOKEN = "refresh_token_1"
ACCESS_TOKEN = "access_token_1"
EXPIRES_AT = -1
@pytest.fixture(params=["simple", "with_new_refresh_token"])
def response(request):
r = mock.Mock()
r.by_resource_server = {
"simple": {"rs1": {"expires_at_seconds": -1, "access_token": "access_token_2"}},
"with_new_refresh_token": {
"rs1": {
"expires_at_seconds": -1,
"access_token": "access_token_2",
"refresh_token": "refresh_token_2",
}
},
}[request.param]
return r
@pytest.fixture
def client(response):
c = mock.Mock()
c.oauth2_refresh_token = mock.Mock(return_value=response)
return c
@pytest.fixture
def authorizer(client):
return RefreshTokenAuthorizer(
REFRESH_TOKEN, client, access_token=ACCESS_TOKEN, expires_at=EXPIRES_AT
)
def test_get_token_response(authorizer, client, response):
"""
Calls _get_token_response, confirms that the mock
AuthClient is used and the known data was returned.
"""
# get new_access_token
res = authorizer._get_token_response()
assert res == response
    # confirm the mock ConfidentialAppAuthClient was used as expected
client.oauth2_refresh_token.assert_called_once_with(REFRESH_TOKEN)
def test_multiple_resource_servers(authorizer, response):
"""
Sets the mock client to return multiple resource servers.
    Confirms ValueError is raised when _extract_token_data is called.
"""
response.by_resource_server["rs2"] = {
"expires_at_seconds": -1,
"access_token": "access_token_3",
}
with pytest.raises(ValueError) as excinfo:
authorizer._extract_token_data(response)
assert "didn't return exactly one token" in str(excinfo.value)
def test_conditional_refresh_token_update(authorizer, response):
"""
Call check_expiration_time (triggering a refresh)
    Confirm that the authorizer always updates its access token and only updates
refresh_token if one was present in the response
"""
authorizer.check_expiration_time() # trigger refresh
token_data = response.by_resource_server["rs1"]
if "refresh_token" in token_data: # if present, confirm refresh token was updated
assert authorizer.access_token == "access_token_2"
assert authorizer.refresh_token == "refresh_token_2"
else: # otherwise, confirm no change
assert authorizer.access_token == "access_token_2"
assert authorizer.refresh_token == "refresh_token_1"
| {
"content_hash": "629cc2d70f6cffb9d1809eb207daa2ca",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 88,
"avg_line_length": 32.50617283950617,
"alnum_prop": 0.6699582225598177,
"repo_name": "globusonline/globus-sdk-python",
"id": "6b8bc6d352ba69808dfe04e6cb078f50d3185712",
"size": "2633",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/unit/authorizers/test_refresh_token_authorizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23852"
}
],
"symlink_target": ""
} |
import re
import os
import shutil
from gppylib.db import dbconn
from test.behave_utils.utils import check_schema_exists, check_table_exists, drop_table_if_exists
from behave import given, when, then
CREATE_MULTI_PARTITION_TABLE_SQL = """
CREATE TABLE %s.%s (trans_id int, date date, amount decimal(9,2), region text)
WITH (appendonly=true, orientation=column)
DISTRIBUTED BY (trans_id)
PARTITION BY RANGE (date)
SUBPARTITION BY LIST (region)
SUBPARTITION TEMPLATE
( SUBPARTITION usa VALUES ('usa'),
SUBPARTITION asia VALUES ('asia'),
SUBPARTITION europe VALUES ('europe'),
DEFAULT SUBPARTITION other_regions)
(START (date '2011-01-01') INCLUSIVE
END (date '2012-01-01') EXCLUSIVE
EVERY (INTERVAL '5 month'),
DEFAULT PARTITION outlying_dates)
"""
CREATE_PARTITION_TABLE_SQL = """
CREATE TABLE %s.%s (id int, date date) WITH (appendonly=true, orientation=column)
DISTRIBUTED BY (id)
PARTITION BY RANGE (date)
( START (date '2008-01-01') INCLUSIVE
END (date '2008-01-04') EXCLUSIVE
EVERY (INTERVAL '1 day'),
DEFAULT PARTITION default_dates);
"""
@given('there is a regular "{storage_type}" table "{tablename}" with column name list "{col_name_list}" and column type list "{col_type_list}" in schema "{schemaname}"')
def impl(context, storage_type, tablename, col_name_list, col_type_list, schemaname):
schemaname_no_quote = schemaname
if '"' in schemaname:
schemaname_no_quote = schemaname[1:-1]
if not check_schema_exists(context, schemaname_no_quote, context.dbname):
raise Exception("Schema %s does not exist in database %s" % (schemaname_no_quote, context.dbname))
drop_table_if_exists(context, '.'.join([schemaname, tablename]), context.dbname)
create_table_with_column_list(context.conn, storage_type, schemaname, tablename, col_name_list, col_type_list)
check_table_exists(context, context.dbname, '.'.join([schemaname, tablename]), table_type=storage_type)
@given('there is a hard coded ao partition table "{tablename}" with 4 child partitions in schema "{schemaname}"')
def impl(context, tablename, schemaname):
if not check_schema_exists(context, schemaname, context.dbname):
raise Exception("Schema %s does not exist in database %s" % (schemaname, context.dbname))
drop_table_if_exists(context, '.'.join([schemaname, tablename]), context.dbname)
dbconn.execSQL(context.conn, CREATE_PARTITION_TABLE_SQL % (schemaname, tablename))
context.conn.commit()
check_table_exists(context, context.dbname, '.'.join([schemaname, tablename]), table_type='ao')
@given('there is a hard coded multi-level ao partition table "{tablename}" with 4 mid-level and 16 leaf-level partitions in schema "{schemaname}"')
def impl(context, tablename, schemaname):
if not check_schema_exists(context, schemaname, context.dbname):
raise Exception("Schema %s does not exist in database %s" % (schemaname, context.dbname))
drop_table_if_exists(context, '.'.join([schemaname, tablename]), context.dbname)
dbconn.execSQL(context.conn, CREATE_MULTI_PARTITION_TABLE_SQL % (schemaname, tablename))
context.conn.commit()
check_table_exists(context, context.dbname, '.'.join([schemaname, tablename]), table_type='ao')
@given('no state files exist for database "{dbname}"')
def impl(context, dbname):
analyze_dir = get_analyze_dir(dbname)
if os.path.exists(analyze_dir):
shutil.rmtree(analyze_dir)
@then('"{number}" analyze directories exist for database "{dbname}"')
def impl(context, number, dbname):
dirs_found = get_list_of_analyze_dirs(dbname)
if str(number) != str(len(dirs_found)):
raise Exception("number of directories expected, %s, didn't match number found: %s" % (
str(number), str(len(dirs_found))))
@given('a view "{view_name}" exists on table "{table_name}" in schema "{schema_name}"')
def impl(context, view_name, table_name, schema_name):
create_view_on_table(context.conn, schema_name, table_name, view_name)
@given('"{qualified_table}" appears in the latest state files')
@then('"{qualified_table}" should appear in the latest state files')
def impl(context, qualified_table):
found, filename = table_found_in_state_file(context.dbname, qualified_table)
if not found:
if filename == '':
assert False, "no state files found for database %s" % context.dbname
else:
assert False, "table %s not found in state file %s" % (qualified_table, os.path.basename(filename))
@given('"{expected_result}" should appear in the latest ao_state file in database "{dbname}"')
@then('"{expected_result}" should appear in the latest ao_state file in database "{dbname}"')
def impl(context, expected_result, dbname):
latest_file = get_latest_aostate_file(dbname)
with open(latest_file, 'r') as f:
for line in f:
if expected_result in line:
return True
raise Exception("couldn't find %s in %s" % (expected_result, latest_file))
@given('columns "{col_name_list}" of table "{qualified_table}" appear in the latest column state file')
@then('columns "{col_name_list}" of table "{qualified_table}" should appear in the latest column state file')
def impl(context, col_name_list, qualified_table):
found, column, filename = column_found_in_state_file(context.dbname, qualified_table, col_name_list)
if not found:
if filename == '':
assert False, "no column state file found for database %s" % context.dbname
else:
assert False, "column(s) %s of table %s not found in state file %s" % (
column, qualified_table, os.path.basename(filename))
@given('column "{col_name}" of table "{qualified_table}" does not appear in the latest column state file')
@then('column "{col_name}" of table "{qualified_table}" should not appear in the latest column state file')
def impl(context, col_name, qualified_table):
found, column, filename = column_found_in_state_file(context.dbname, qualified_table, col_name)
if found:
if filename == '':
assert False, "no column state file found for database %s" % context.dbname
else:
assert False, "unexpected column %s of table %s found in state file %s" % (
column, qualified_table, os.path.basename(filename))
@given('"{qualified_table}" appears in the latest report file')
@then('"{qualified_table}" should appear in the latest report file')
def impl(context, qualified_table):
found, filename = table_found_in_report_file(context.dbname, qualified_table)
if not found:
assert False, "table %s not found in report file %s" % (qualified_table, os.path.basename(filename))
@then('output should contain either "{output1}" or "{output2}"')
def impl(context, output1, output2):
pat1 = re.compile(output1)
pat2 = re.compile(output2)
if not pat1.search(context.stdout_message) and not pat2.search(context.stdout_message):
err_str = "Expected stdout string '%s' or '%s', but found:\n'%s'" % (output1, output2, context.stdout_message)
raise Exception(err_str)
@then('output should not contain "{output1}"')
def impl(context, output1):
pat1 = re.compile(output1)
if pat1.search(context.stdout_message):
err_str = "Unexpected stdout string '%s', found:\n'%s'" % (output1, context.stdout_message)
raise Exception(err_str)
@then('output should contain both "{output1}" and "{output2}"')
def impl(context, output1, output2):
pat1 = re.compile(output1)
pat2 = re.compile(output2)
if not pat1.search(context.stdout_message) or not pat2.search(context.stdout_message):
err_str = "Expected stdout string '%s' and '%s', but found:\n'%s'" % (output1, output2, context.stdout_message)
raise Exception(err_str)
@given('table "{qualified_table}" does not appear in the latest state files')
def impl(context, qualified_table):
found, filename = table_found_in_state_file(context.dbname, qualified_table)
if found:
delete_table_from_state_files(context.dbname, qualified_table)
@given('some data is inserted into table "{tablename}" in schema "{schemaname}" with column type list "{column_type_list}"')
@when('some data is inserted into table "{tablename}" in schema "{schemaname}" with column type list "{column_type_list}"')
def impl(context, tablename, schemaname, column_type_list):
insert_data_into_table(context.conn, schemaname, tablename, column_type_list)
@given('some ddl is performed on table "{tablename}" in schema "{schemaname}"')
def impl(context, tablename, schemaname):
perform_ddl_on_table(context.conn, schemaname, tablename)
@given('the user starts a transaction and runs "{query}" on "{dbname}"')
@when('the user starts a transaction and runs "{query}" on "{dbname}"')
def impl(context, query, dbname):
if 'long_lived_conn' not in context:
create_long_lived_conn(context, dbname)
dbconn.execSQL(context.long_lived_conn, 'BEGIN; %s' % query)
@given('the user commits transaction')
@when('the user commits transaction')
def impl(context):
dbconn.execSQL(context.long_lived_conn, 'END;')
@given('the user rollsback the transaction')
@when('the user rollsback the transaction')
def impl(context):
dbconn.execSQL(context.long_lived_conn, 'ROLLBACK;')
@then('the latest state file should have a mod count of {mod_count} for table "{table}" in "{schema}" schema for database "{dbname}"')
def impl(context, mod_count, table, schema, dbname):
mod_count_in_state_file = get_mod_count_in_state_file(dbname, schema, table)
if mod_count_in_state_file != mod_count:
raise Exception(
"mod_count %s does not match mod_count %s in state file for %s.%s" %
(mod_count, mod_count_in_state_file, schema, table))
def get_mod_count_in_state_file(dbname, schema, table):
file = get_latest_aostate_file(dbname)
comma_name = ','.join([schema, table])
with open(file) as fd:
for line in fd:
if comma_name in line:
return line.split(',')[2].strip()
return -1
def create_long_lived_conn(context, dbname):
context.long_lived_conn = dbconn.connect(dbconn.DbURL(dbname=dbname))
def table_found_in_state_file(dbname, qualified_table):
comma_name = ','.join(qualified_table.split('.'))
files = get_latest_analyze_state_files(dbname)
if len(files) == 0:
return False, ""
state_file = ""
for state_file in files:
found = False
with open(state_file) as fd:
for line in fd:
if comma_name in line:
found = True
continue
if not found:
return False, state_file
return True, state_file
def table_found_in_report_file(dbname, qualified_table):
report_file = get_latest_analyze_report_file(dbname)
with open(report_file) as fd:
for line in fd:
if qualified_table == line.strip('\n'):
return True, report_file
return False, report_file
def column_found_in_state_file(dbname, qualified_table, col_name_list):
comma_name = ','.join(qualified_table.split('.'))
files = get_latest_analyze_state_files(dbname)
if len(files) == 0:
return False, "", ""
for state_file in files:
if "col_state_file" not in state_file:
continue
with open(state_file) as fd:
for line in fd:
line = line.strip('\n')
if comma_name in line:
for column in col_name_list.split(','):
if column not in line.split(',')[2:]:
return False, column, state_file
return True, "", state_file
return False, col_name_list, state_file
def delete_table_from_state_files(dbname, qualified_table):
comma_name = ','.join(qualified_table.split('.'))
files = get_latest_analyze_state_files(dbname)
for filename in files:
lines = []
with open(filename) as fd:
for line in fd:
lines.append(line.strip('\n'))
        with open(filename, "w") as f:
            for line in lines:
                if comma_name not in line:
                    f.write(line + '\n')
def get_list_of_analyze_dirs(dbname):
analyze_dir = get_analyze_dir(dbname)
if not os.path.exists(analyze_dir):
return []
ordered_list = [os.path.join(analyze_dir, x) for x in sorted(os.listdir(analyze_dir), reverse=True)]
return filter(os.path.isdir, ordered_list)
def get_latest_analyze_dir(dbname):
analyze_dir = get_analyze_dir(dbname)
folders = get_list_of_analyze_dirs(dbname)
if len(folders) == 0:
return []
return os.path.join(analyze_dir, folders[0])
def get_analyze_dir(dbname):
master_data_dir = os.environ.get('MASTER_DATA_DIRECTORY')
analyze_dir = os.path.join(master_data_dir, 'db_analyze', dbname)
return analyze_dir
def get_latest_aostate_file(dbname):
for path in get_latest_analyze_state_files(dbname):
if 'ao_state' in path:
return path
return None
def get_latest_analyze_state_files(dbname):
"""
return the latest state files (absolute paths)
"""
state_files_dir = get_latest_analyze_dir(dbname)
if not state_files_dir:
return []
files = os.listdir(state_files_dir)
if len(files) != 4:
raise Exception("Missing or unexpected state files in folder %s" % state_files_dir)
ret = []
for f in files:
if 'report' not in f:
ret.append(os.path.join(state_files_dir, f))
return ret
def get_latest_analyze_report_file(dbname):
"""
return the latest report file (absolute path)
"""
report_file_dir = get_latest_analyze_dir(dbname)
if not report_file_dir:
return []
files = os.listdir(report_file_dir)
for f in files:
if 'report' in f:
return os.path.join(report_file_dir, f)
raise Exception("Missing report file in folder %s" % report_file_dir)
def create_table_with_column_list(conn, storage_type, schemaname, tablename, col_name_list, col_type_list):
col_name_list = col_name_list.strip().split(',')
col_type_list = col_type_list.strip().split(',')
col_list = ' (' + ','.join(['%s %s' % (x, y) for x, y in zip(col_name_list, col_type_list)]) + ') '
if storage_type.lower() == 'heap':
storage_str = ''
elif storage_type.lower() == 'ao':
storage_str = " with (appendonly=true) "
elif storage_type.lower() == 'co':
storage_str = " with (appendonly=true, orientation=column) "
else:
raise Exception("Invalid storage type")
query = 'CREATE TABLE %s.%s %s %s DISTRIBUTED RANDOMLY' % (schemaname, tablename, col_list, storage_str)
dbconn.execSQL(conn, query)
conn.commit()
def insert_data_into_table(conn, schemaname, tablename, col_type_list):
col_type_list = col_type_list.strip().split(',')
col_str = ','.join(["(random()*i)::%s" % x for x in col_type_list])
query = "INSERT INTO " + schemaname + '.' + tablename + " SELECT " + col_str + " FROM generate_series(1,100) i"
dbconn.execSQL(conn, query)
conn.commit()
def perform_ddl_on_table(conn, schemaname, tablename):
query = "ALTER TABLE " + schemaname + '.' + tablename + " ADD COLUMN tempcol int default 0"
dbconn.execSQL(conn, query)
query = "ALTER TABLE " + schemaname + '.' + tablename + " DROP COLUMN tempcol"
dbconn.execSQL(conn, query)
conn.commit()
def create_view_on_table(conn, schemaname, tablename, viewname):
query = "CREATE OR REPLACE VIEW " + schemaname + "." + viewname + \
" AS SELECT * FROM " + schemaname + "." + tablename
dbconn.execSQL(conn, query)
conn.commit()
| {
"content_hash": "a200c7d1b18d4e61b6adfc14ed34bdaf",
"timestamp": "",
"source": "github",
"line_count": 396,
"max_line_length": 169,
"avg_line_length": 39.88636363636363,
"alnum_prop": 0.6606521050965496,
"repo_name": "Chibin/gpdb",
"id": "2bc239dfb221a295ea3ae3e48226c72d6b6d5012",
"size": "15795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gpMgmt/test/behave/mgmt_utils/steps/analyzedb_mgmt_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3737"
},
{
"name": "Batchfile",
"bytes": "11369"
},
{
"name": "C",
"bytes": "33469761"
},
{
"name": "C++",
"bytes": "2705055"
},
{
"name": "CSS",
"bytes": "7407"
},
{
"name": "Csound Score",
"bytes": "164"
},
{
"name": "DTrace",
"bytes": "3746"
},
{
"name": "Fortran",
"bytes": "14777"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "440687"
},
{
"name": "HTML",
"bytes": "355087"
},
{
"name": "Java",
"bytes": "186576"
},
{
"name": "JavaScript",
"bytes": "23969"
},
{
"name": "Lex",
"bytes": "195903"
},
{
"name": "M4",
"bytes": "97129"
},
{
"name": "Makefile",
"bytes": "422102"
},
{
"name": "Objective-C",
"bytes": "42255"
},
{
"name": "PLSQL",
"bytes": "218011"
},
{
"name": "PLpgSQL",
"bytes": "4947989"
},
{
"name": "Perl",
"bytes": "3906788"
},
{
"name": "Perl 6",
"bytes": "8302"
},
{
"name": "Python",
"bytes": "6267140"
},
{
"name": "Roff",
"bytes": "32274"
},
{
"name": "Ruby",
"bytes": "26862"
},
{
"name": "SQLPL",
"bytes": "642650"
},
{
"name": "Shell",
"bytes": "558642"
},
{
"name": "XS",
"bytes": "8405"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "516996"
}
],
"symlink_target": ""
} |
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("GaussianNB" , "BinaryClass_10" , "oracle")
| {
"content_hash": "1b659318c2662ba056142c4a9cbf2c10",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 72,
"avg_line_length": 35,
"alnum_prop": 0.7785714285714286,
"repo_name": "antoinecarme/sklearn2sql_heroku",
"id": "4db90e1ee37ca5beb079e74cf3428a5b24c375d2",
"size": "140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/classification/BinaryClass_10/ws_BinaryClass_10_GaussianNB_oracle_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "507043"
},
{
"name": "Procfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "1021137"
},
{
"name": "R",
"bytes": "2521"
}
],
"symlink_target": ""
} |
import math
import os
import random
import re
import sys
# Complete the circularArrayRotation function below.
def circularArrayRotation(a, k, queries):
nn = len(a)
results = []
for query_idx in queries:
a_rot = [0 for ii in range(nn)]
# Create version of a after k rotations.
for ii in range(nn):
rot_ii = (ii + k) % nn
a_rot[rot_ii] = a[ii]
results.append(a_rot[query_idx])
return results
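# Illustrative sanity check (added sketch, not part of the original HackerRank stub):
# with a=[1, 2, 3], k=2 and queries=[0, 1, 2], two right rotations give [2, 3, 1],
# so the queried values are 2, 3 and 1 in that order.
def _demo_circular_array_rotation():
    assert circularArrayRotation([1, 2, 3], 2, [0, 1, 2]) == [2, 3, 1]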
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nkq = input().split()
n = int(nkq[0])
k = int(nkq[1])
q = int(nkq[2])
a = list(map(int, input().rstrip().split()))
queries = []
for _ in range(q):
queries_item = int(input())
queries.append(queries_item)
result = circularArrayRotation(a, k, queries)
fptr.write('\n'.join(map(str, result)))
fptr.write('\n')
fptr.close()
| {
"content_hash": "c5f584795c572082a04ce40b51360342",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 52,
"avg_line_length": 24.91891891891892,
"alnum_prop": 0.5704989154013015,
"repo_name": "ejspeiro/HackerRank",
"id": "2662dec42e2ce2612527053ea7d3c680996b58d9",
"size": "938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "all_domains/algorithms/implementation/circular_array_rotation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8454"
},
{
"name": "C++",
"bytes": "183382"
},
{
"name": "JavaScript",
"bytes": "1013"
},
{
"name": "Python",
"bytes": "34547"
},
{
"name": "SQL",
"bytes": "2110"
},
{
"name": "Shell",
"bytes": "632"
}
],
"symlink_target": ""
} |
"""
ks.time.util
~~~~~~~~~~~~
Misc. utility functions for dealing with time.
:copyright: (c) 2010 -- 2012 by Charlie Sharpsteen
:license: BSD
"""
import time
from datetime import datetime, timedelta
def from_isostring(datestring):
"""Takes a string in 'unambiguous format' and returns a datetime object.
Here, 'unambiguous format' is arbitrarily declared to be a subset of the
ISO 8601 format:
%Y-%m-%d %H:%M:%S
"""
return datetime.strptime(datestring, '%Y-%m-%d %H:%M:%S')
def to_posix(dt):
"""Converts a time object to seconds since the UNIX epoch."""
return time.mktime(dt.timetuple())
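if __name__ == "__main__":  # pragma: no cover
    # Illustrative usage sketch (added for clarity, not part of the original module):
    # chain the two helpers to turn an ISO-ish timestamp into POSIX seconds. The
    # numeric result depends on the machine's timezone because time.mktime()
    # interprets the datetime as local time.
    _dt = from_isostring('2012-01-31 13:45:00')
    print(_dt)            # 2012-01-31 13:45:00
    print(to_posix(_dt))  # e.g. 1328017500.0 on a UTC machine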
| {
"content_hash": "0f7b941ff33f376c8882aa8263247467",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 76,
"avg_line_length": 24.185185185185187,
"alnum_prop": 0.6447166921898928,
"repo_name": "Sharpie/python-kitchensink",
"id": "5529b1c73af700d7809394b5bf02e52f50f025b0",
"size": "677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ks/time/_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5346"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
import json
import os
import urllib
import re
from re import findall
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
from os.path import splitext
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
def processRequest(req):
    # Scrape the funcage.com GIF page and return a webhook response that points
    # at the most recent upload.
    baseurl = "http://www.funcage.com/gif/"
    site = urlopen(baseurl)
    htmltxt = site.read()
    lnb = htmltxt.decode()
    # The first <img src="./photos/..."> tag on the page is the newest item.
    txt = '<img src="./photos/(.+?)"'
    title1 = findall(txt, lnb)
    dcd = title1[0]
    # Build a small JSON document of the form {"file": "<absolute photo URL>"}
    # and hand it to makeWebhookResult for formatting.
    body = '"http://www.funcage.com/gif/photos/' + dcd + '"'
    obje = '{"file":' + body + '}'
    data = json.loads(obje)
    res = makeWebhookResult(data)
    return res
def makeWebhookResult(data):
joke = data.get('file')
# print(json.dumps(item, indent=4))
speech = joke
print("Response:")
print(speech)
if getext(joke) == ".gif":
kik_message = [
{
"type": "video",
"videoUrl": speech
}
]
else:
kik_message = [
{
"type": "picture",
"picUrl": speech
}
]
print(json.dumps(kik_message))
return {
"speech": speech,
"displayText": speech,
"data": {"kik": kik_message},
# "contextOut": [],
"source": "apiai-weather-webhook-sample"
}
def getext(joke):
parsed = urlparse(joke)
root, ext = splitext(parsed.path)
return ext
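# Illustrative note (added comment, not in the original script): makeWebhookResult
# switches the Kik message type on the extension returned by getext(), e.g.
#
#     getext("http://www.funcage.com/gif/photos/clip.gif")  # -> ".gif"  => "video" message
#     getext("http://www.funcage.com/gif/photos/pic.jpg")   # -> ".jpg"  => "picture" message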
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print("Starting app on port %d" % port)
app.run(debug=False, port=port, host='0.0.0.0')
| {
"content_hash": "33a2d0b4173e736c0c639c5edb4454cf",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 59,
"avg_line_length": 22.20689655172414,
"alnum_prop": 0.5590062111801242,
"repo_name": "andrewpx1/apiai-weather-webhook-sample-master",
"id": "d5f4b10028302e1ac7683a43eadf186a6784253a",
"size": "1955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pxgiffun.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "40524"
}
],
"symlink_target": ""
} |
import re
from marshmallow import ValidationError
# from marshmallow.validate import Validator
__author__ = 'quxl'
class Phone(object):
"""Validate an Phone in China.
:param str error: Error message to raise in case of a validation error. Can be
interpolated with `{input}`.
"""
PHONE_REGEX = re.compile(
r"^(0|86|17951)?(13[0-9]|15[012356789]|17[678]|18[0-9]|14[57])[0-9]{8}$")
default_message = '{input} is not a valid phone.'
def __init__(self, error=None):
self.error = error or self.default_message
def _format_error(self, value):
return self.error.format(input=value)
def __call__(self, value):
message = self._format_error(value)
if not value or len(value) != 11:
raise ValidationError(message)
if not self.PHONE_REGEX.match(value):
raise ValidationError(message)
return value
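# Illustrative usage sketch (added comment, not part of the original module): the
# validators can be attached to marshmallow fields or called directly, e.g.
#
#     from marshmallow import Schema, fields
#
#     class UserSchema(Schema):              # hypothetical schema for illustration
#         phone = fields.Str(validate=Phone())
#
#     Phone()('13812345678')                 # returns the value when it matches
#     Phone()('12345')                       # raises ValidationError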
class Telephone(object):
"""Validate an Telephone in China.
:param str error: Error message to raise in case of a validation error. Can be
interpolated with `{input}`.
"""
TELEPHONE_REGEX = re.compile(r'(([0\+]\d{2,3}-)?(0\d{2,3})-)?(\d{7,8})(-(\d{3,}))?$')
default_message = '{input} is not a valid telephone.'
def __init__(self, error=None):
self.error = error or self.default_message
def _format_error(self, value):
return self.error.format(input=value)
def __call__(self, value):
message = self._format_error(value)
if not self.TELEPHONE_REGEX.match(value):
raise ValidationError(message)
        return value
| {
"content_hash": "9dbc5d6471564d4417d81f7fb009b6b5",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 89,
"avg_line_length": 27.47457627118644,
"alnum_prop": 0.6162862430598396,
"repo_name": "snbway/flask-rest-framework",
"id": "dbffff8b527f86fb65ee6cbc0cd52ad7d0397b8f",
"size": "1638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest_framework_flask/validator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23041"
}
],
"symlink_target": ""
} |
"""Tests for wrapper_common.execute."""
import contextlib
import io
import signal
import sys
import unittest
from tools.wrapper_common import execute
_INVALID_UTF8 = b'\xa0\xa1'
def _cmd_filter(cmd_result, stdout, stderr):
# Concat the input to a native string literal, to make sure
# it doesn't trigger a unicode encode/decode error
return cmd_result, stdout + ' filtered', stderr + ' filtered'
class ExecuteTest(unittest.TestCase):
def test_execute_unicode(self):
bytes_out = u'\u201d '.encode('utf8') + _INVALID_UTF8
args = ['echo', '-n', bytes_out]
with contextlib.redirect_stdout(io.StringIO()) as mock_stdout, \
contextlib.redirect_stderr(io.StringIO()) as mock_stderr:
execute.execute_and_filter_output(
args,
filtering=_cmd_filter,
print_output=True,
raise_on_failure=False)
stdout = mock_stdout.getvalue()
stderr = mock_stderr.getvalue()
expected = bytes_out.decode('utf8', 'replace')
expected += ' filtered'
self.assertEqual(expected, stdout)
self.assertIn('filtered', stderr)
def test_execute_timeout(self):
args = ['sleep', '30']
result, stdout, stderr = execute.execute_and_filter_output(
args, timeout=1, raise_on_failure=False)
self.assertEqual(-signal.SIGKILL, result)
def test_execute_inputstr(self):
args = ['cat', '-']
result, stdout, stderr = execute.execute_and_filter_output(
args, inputstr=b'foo', raise_on_failure=False)
self.assertEqual(0, result)
self.assertEqual('foo', stdout)
@contextlib.contextmanager
def _mock_streams(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
mock_stdout = io.StringIO()
mock_stderr = io.StringIO()
try:
sys.stdout = mock_stdout
sys.stderr = mock_stderr
yield mock_stdout, mock_stderr
finally:
sys.stdout = orig_stdout
sys.stderr = orig_stderr
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "2cfefb446dd5ce7b35007267034fd152",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 68,
"avg_line_length": 28,
"alnum_prop": 0.661734693877551,
"repo_name": "bazelbuild/rules_apple",
"id": "3657fe13b24f466e351e0c2448f0e437fbb21bd3",
"size": "2562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/wrapper_common/execute_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "4138"
},
{
"name": "DTrace",
"bytes": "90"
},
{
"name": "Metal",
"bytes": "96"
},
{
"name": "Objective-C",
"bytes": "5997"
},
{
"name": "Python",
"bytes": "347646"
},
{
"name": "Shell",
"bytes": "352681"
},
{
"name": "Starlark",
"bytes": "1978907"
},
{
"name": "Swift",
"bytes": "8566"
}
],
"symlink_target": ""
} |
"""
test.t_utils.test_compat
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2014 by the RootForum.org team, see AUTHORS.
:license: MIT License, see LICENSE for details.
"""
from __future__ import unicode_literals
import os
import shutil
import tempfile
import stat
from unittest import TestCase
from magrathea.conf import get_conf
from magrathea.utils.compat import comp_makedirs, comp_open, CompConfigParser
from magrathea.utils.convert import to_bytes
class TestMagratheaUtilsCompat(TestCase):
"""
Unit tests for :py:mod:`magrathea.utils.compat`
"""
def test_01(self):
"""
Test Case 01:
Create a yet non-existing directory
Test is passed if directory is created.
"""
td = tempfile.mkdtemp()
target = os.path.join(td, 'testpath')
comp_makedirs(target)
self.assertTrue(os.path.exists(target) and os.path.isdir(target))
shutil.rmtree(td)
def test_02(self):
"""
Test Case 02:
Create an already existing directory with the same mode and ``exist_ok`` set to True
Test is passed if no exception is raised.
"""
td = tempfile.mkdtemp()
mode = stat.S_IMODE(os.stat(td).st_mode)
flag = True
try:
comp_makedirs(td, mode=mode, exist_ok=True)
except OSError:
flag = False
shutil.rmtree(td)
self.assertTrue(flag)
def test_03(self):
"""
Test Case 03:
Create an already existing directory with a different mode and ``exist_ok`` set to True
Test is passed if no exception is raised.
"""
td = tempfile.mkdtemp()
mode = stat.S_IMODE(os.stat(td).st_mode)
if mode == 0o700:
mode = 0o755
else:
mode = 0o700
flag = True
try:
comp_makedirs(td, mode=mode, exist_ok=True)
except OSError:
flag = False
shutil.rmtree(td)
self.assertTrue(flag)
def test_04(self):
"""
Test Case 04:
Create an already existing directory with ``exist_ok`` set to False
Test is passed if :py:exc:`OSError` is raised
"""
td = tempfile.mkdtemp()
with self.assertRaises(OSError):
comp_makedirs(td, exist_ok=False)
shutil.rmtree(td)
def test_05(self):
"""
Test Case 05:
Open a file using :py:func:`magrathea.utils.compat.comp_open`.
Test is passed if file content can be read and equals the given input.
"""
test_string = "String with Ünicøde characters"
fp, name = tempfile.mkstemp()
os.write(fp, to_bytes(test_string))
os.close(fp)
fd = comp_open(name, mode='r', encoding=get_conf('DEFAULT_CHARSET'))
result_string = fd.read()
fd.close()
os.unlink(name)
self.assertEqual(test_string, result_string)
def test_06(self):
"""
Test Case 06:
Use :py:func:`magrathea.utils.compat.comp_open` as :py:keyword:`with` statement context manager.
Test is passed if file content can be read, equals the given input and the file pointer is closed.
"""
test_string = "String with Ünicøde characters"
fp, name = tempfile.mkstemp()
os.write(fp, to_bytes(test_string))
os.close(fp)
with comp_open(name, mode='r', encoding=get_conf('DEFAULT_CHARSET')) as fd:
result_string = fd.read()
os.unlink(name)
self.assertTrue(fd.closed)
self.assertEqual(test_string, result_string)
def test_07(self):
"""
Test Case 07:
Try instantiating :py:class:`magrathea.utils.compat.CompConfigParser`.
Test is passed if instance proves being an instance of :py:class:`magrathea.utils.compat.CompConfigParser`.
"""
obj = CompConfigParser()
self.assertIsInstance(obj, CompConfigParser)
def test_08(self):
"""
Test Case 08:
Read simple configuration from file.
Test is passed if expected information is present within
:py:class:`magrathea.utils.compat.CompConfigParser` object.
"""
test_string = "[test]\nfoo = bar"
fp, name = tempfile.mkstemp()
os.write(fp, to_bytes(test_string))
os.close(fp)
obj = CompConfigParser()
obj.read(name)
os.unlink(name)
self.assertTrue(obj.has_section('test'))
self.assertTrue(obj.has_option('test', 'foo'))
self.assertEqual(obj.get('test', 'foo'), 'bar')
def test_09(self):
"""
Test Case 09:
Read simple configuration from string.
Test is passed if expected information is present within
:py:class:`magrathea.utils.compat.CompConfigParser` object.
"""
test_string = "[test]\nfoo = bar"
obj = CompConfigParser()
obj.read_string(test_string)
self.assertTrue(obj.has_section('test'))
self.assertTrue(obj.has_option('test', 'foo'))
self.assertEqual(obj.get('test', 'foo'), 'bar')
| {
"content_hash": "a2c3946f8c9d72f005e1f340896affe1",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 115,
"avg_line_length": 31.353658536585368,
"alnum_prop": 0.5929599377674056,
"repo_name": "RootForum/magrathea",
"id": "157f36b30ae7141635aec7dc2763b4856823c6e9",
"size": "5170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/t_utils/test_compat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "200494"
},
{
"name": "Shell",
"bytes": "6474"
}
],
"symlink_target": ""
} |
"""Support for IKEA Tradfri covers."""
from homeassistant.components.cover import ATTR_POSITION, CoverEntity
from .base_class import TradfriBaseDevice
from .const import ATTR_MODEL, CONF_GATEWAY_ID, DEVICES, DOMAIN, KEY_API
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Load Tradfri covers based on a config entry."""
gateway_id = config_entry.data[CONF_GATEWAY_ID]
tradfri_data = hass.data[DOMAIN][config_entry.entry_id]
api = tradfri_data[KEY_API]
devices = tradfri_data[DEVICES]
covers = [dev for dev in devices if dev.has_blind_control]
if covers:
async_add_entities(TradfriCover(cover, api, gateway_id) for cover in covers)
class TradfriCover(TradfriBaseDevice, CoverEntity):
"""The platform class required by Home Assistant."""
def __init__(self, device, api, gateway_id):
"""Initialize a cover."""
super().__init__(device, api, gateway_id)
self._unique_id = f"{gateway_id}-{device.id}"
self._refresh(device)
@property
def device_state_attributes(self):
"""Return the state attributes."""
attr = {ATTR_MODEL: self._device.device_info.model_number}
return attr
@property
def current_cover_position(self):
"""Return current position of cover.
None is unknown, 0 is closed, 100 is fully open.
"""
return 100 - self._device_data.current_cover_position
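    # Worked example of the inversion above (illustrative comment): if the
    # underlying pytradfri blind reports current_cover_position == 25, this
    # property returns 100 - 25 = 75, i.e. mostly open in Home Assistant's
    # convention (0 closed, 100 open). The setters below apply the same
    # 100 - x mapping in the opposite direction.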
async def async_set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
await self._api(self._device_control.set_state(100 - kwargs[ATTR_POSITION]))
async def async_open_cover(self, **kwargs):
"""Open the cover."""
await self._api(self._device_control.set_state(0))
async def async_close_cover(self, **kwargs):
"""Close cover."""
await self._api(self._device_control.set_state(100))
async def async_stop_cover(self, **kwargs):
"""Close cover."""
await self._api(self._device_control.trigger_blind())
@property
def is_closed(self):
"""Return if the cover is closed or not."""
return self.current_cover_position == 0
def _refresh(self, device):
"""Refresh the cover data."""
super()._refresh(device)
self._device = device
# Caching of BlindControl and cover object
self._device_control = device.blind_control
self._device_data = device.blind_control.blinds[0]
| {
"content_hash": "ddbe8ab9afa5e653e1e09c71bd9b751e",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 84,
"avg_line_length": 34.054794520547944,
"alnum_prop": 0.6468222043443282,
"repo_name": "tchellomello/home-assistant",
"id": "2d99de7756ac75407b7a93bd52fd6a27fdee9672",
"size": "2486",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/tradfri/cover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "26713364"
},
{
"name": "Shell",
"bytes": "4528"
}
],
"symlink_target": ""
} |
import mango
import mango.core
import sys
if sys.platform.startswith('linux'):
import DLFCN as dl
_flags = sys.getdlopenflags()
sys.setdlopenflags(dl.RTLD_NOW|dl.RTLD_GLOBAL)
from . import _mango_open_data_generator as _mango_open_data_generator_so
sys.setdlopenflags(_flags)
else:
from . import _mango_open_data_generator as _mango_open_data_generator_so
from ._mango_open_data_generator import *
import numpy as np
import numpy.random
import scipy as sp
import mango.mpi
def createCheckerDds(shape, checkShape, checkOrigin=(0,0,0), black=0, white=1, dtype="uint8", mtype=None, halo=(0,0,0), mpidims=(0,0,0), origin=(0,0,0)):
"""
Creates a 3D checker-board image.
:type shape: 3-sequence
:param shape: The global shape :samp:`(zSz,ySz,xSz)` of the returned :obj:`mango.Dds` image.
:type checkShape: 3-sequence
:param checkShape: The shape of the checks.
:type checkOrigin: 3-sequence
:param checkOrigin: The origin (relative to the :samp:`origin` parameter) of where the checks
begin to be stacked.
:type black: value
:param black: The value for the *black* checks.
:type white: value
:param white: The value for the *white* checks.
:type dtype: :obj:`numpy.dtype`
:param dtype: The element type of the returned array.
:type mtype: :obj:`mango.mtype`
:param mtype: The mango data type for the checker-board image.
:type halo: 3-sequence
:param halo: The halo size for the returned :obj:`mango.Dds` object.
:type mpidims: 3-sequence
:param mpidims: Cartesian MPI domain decomposition.
:type origin: 3-sequence
:param origin: The global origin index for the returned :obj:`mango.Dds` object.
:rtype: :obj:`mango.Dds`
:return: Checker-board :obj:`mango.Dds`
"""
while (len(shape) <= 2):
shape = (1,) + tuple(shape)
cbDds = mango.empty(shape, dtype=dtype, mtype=mtype, halo=halo, mpidims=mpidims, origin=origin)
checkOrigin = sp.array(checkOrigin) + cbDds.origin
cbArr = cbDds.subd.asarray()
sbeg = sp.array(cbDds.subd.origin)
send = sbeg + cbDds.subd.shape
coords = np.ogrid[sbeg[0]:send[0],sbeg[1]:send[1],sbeg[2]:send[2]]
vals = np.array([black,white],dtype=dtype)
cbArr[...] = \
vals[
(
(coords[0]-checkOrigin[0])//checkShape[0]
+
(coords[1]-checkOrigin[1])//checkShape[1]
+
(coords[2]-checkOrigin[2])//checkShape[2]
)
%
2
]
return cbDds
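# Illustrative usage sketch (added comment, not part of the original module):
# create a 64^3 checker-board image with 16^3 checks, black=0 and white=255,
# stored as unsigned 16-bit integers. Parameter names follow the docstring above.
#
#     cb = createCheckerDds((64, 64, 64), (16, 16, 16), black=0, white=255, dtype="uint16")
#     arr = cb.subd.asarray()   # per-process sub-domain view of the data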
def gaussian_noise(
shape,
mean=0.0,
stdd=1.0,
dtype=None,
mtype=None,
halo=(0,0,0),
mpidims=(0,0,0),
origin=(0,0,0),
subdshape=None,
subdorigin=None
):
"""
Generates image of Gaussian (Normal) distributed noise.
:type shape: 3-sequence
:param shape: The global shape :samp:`(zSz,ySz,xSz)` of the returned :obj:`mango.Dds` image.
:type mean: float
:param mean: The mean parameter of the normally distributed noise.
:type stdd: float
:param stdd: The standard-deviation parameter of the normally distributed noise.
:type dtype: :obj:`numpy.dtype`
:param dtype: The element type of the returned array.
:type mtype: :obj:`mango.mtype`
:param mtype: The mango data type for the return noise image.
:type halo: 3-sequence
:param halo: The halo size for the returned :obj:`mango.Dds` object.
:type mpidims: 3-sequence
:param mpidims: Cartesian MPI domain decomposition.
:type origin: 3-sequence
:param origin: The global origin index for the returned :obj:`mango.Dds` object.
:type subdshape: 3-sequence
:param subdshape: Explicitly specify the sub-domain shape for the current MPI process.
:type subdorigin: 3-sequence
:param subdorigin: Explicitly specify the sub-domain origin for the current MPI process.
:rtype: :obj:`mango.Dds`
:return: Noise :obj:`mango.Dds` image.
"""
dds = \
mango.empty(
shape=shape,
dtype=dtype,
mtype=mtype,
halo=halo,
mpidims=mpidims,
origin=origin,
subdshape=subdshape,
subdorigin=subdorigin
)
dds.subd.asarray()[...] = np.random.normal(loc=mean, scale=stdd, size=dds.subd.asarray().shape)
    return dds
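# Illustrative usage sketch (added comment, not part of the original module):
# a 128^3 image of normally distributed noise with mean 0.0 and standard
# deviation 0.05, stored as 32-bit floats.
#
#     noise = gaussian_noise(shape=(128, 128, 128), mean=0.0, stdd=0.05, dtype="float32")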
def gaussian_noise_like(
input,
mean=0.0,
stdd=1.0,
shape=None,
dtype=None,
mtype=None,
halo=None,
mpidims=None,
origin=None,
subdshape=None,
subdorigin=None
):
"""
Generates image of Gaussian (Normal) distributed noise.
:type input: :obj:`Dds`
:param input: The type/shape/MPI-layout determine the same attributes of the returned :obj:`Dds`.
:type shape: 3-sequence
:param shape: Overrides :samp:`dds.shape`.
:type dtype: scipy.dtype
:param dtype: Overrides :samp:`dds.dtype`.
:type mtype: mango.mtype
:param mtype: Overrides :samp:`dds.mtype`.
:type halo: 3-sequence
:param halo: Overrides :samp:`dds.halo`.
:type mpidims: 3-sequence
:param mpidims: Overrides :samp:`dds.mpi.shape`.
:type origin: 3-sequence
:param origin: Overrides :samp:`dds.origin`.
:type subdshape: 3-sequence
:param subdshape: Overrides :samp:`dds.subd.shape`.
:type subdorigin: 3-sequence
:param subdorigin: Overrides :samp:`dds.subd.origin`.
:rtype: :obj:`Dds`
:return: :obj:`Dds` array of gaussian noise initialised elements with the same type,
shape and MPI-layout as :samp:`input`.
"""
out = \
mango.empty_like(
input,
shape=shape,
dtype=dtype,
mtype=mtype,
halo=halo,
mpidims=mpidims,
origin=origin,
subdshape=subdshape,
subdorigin=subdorigin
)
out.subd.asarray()[...] = np.random.normal(loc=mean, scale=stdd, size=out.subd.asarray().shape)
return out
def chi_squared_noise(
shape,
dof=1.0,
dtype=None,
mtype=None,
halo=(0,0,0),
mpidims=(0,0,0),
origin=(0,0,0),
subdshape=None,
subdorigin=None
):
"""
Generates image of Chi-Squared distributed noise.
:type shape: 3-sequence
:param shape: The global shape :samp:`(zSz,ySz,xSz)` of the returned :obj:`mango.Dds` image.
:type dof: float
:param dof: The degrees-of-freedom parameter of the central-Chi-Squared distributed noise.
:type dtype: :obj:`numpy.dtype`
:param dtype: The element type of the returned array.
:type mtype: :obj:`mango.mtype`
:param mtype: The mango data type for the return noise image.
:type halo: 3-sequence
:param halo: The halo size for the returned :obj:`mango.Dds` object.
:type mpidims: 3-sequence
:param mpidims: Cartesian MPI domain decomposition.
:type origin: 3-sequence
:param origin: The global origin index for the returned :obj:`mango.Dds` object.
:type subdshape: 3-sequence
:param subdshape: Explicitly specify the sub-domain shape for the current MPI process.
:type subdorigin: 3-sequence
:param subdorigin: Explicitly specify the sub-domain origin for the current MPI process.
:rtype: :obj:`mango.Dds`
:return: Noise :obj:`mango.Dds` image.
"""
dds = \
mango.empty(
shape=shape,
dtype=dtype,
mtype=mtype,
halo=halo,
mpidims=mpidims,
origin=origin,
subdshape=subdshape,
subdorigin=subdorigin
)
dds.subd.asarray()[...] = np.random.chisquare(df=dof, size=dds.subd.asarray().shape)
    return dds
| {
"content_hash": "9c5bc110596f2edfd671b5554ce4c271",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 153,
"avg_line_length": 32.004149377593365,
"alnum_prop": 0.6299753662647478,
"repo_name": "pymango/pymango",
"id": "a6da7c64e681b8dca72d19a9966a185d9e1e6390",
"size": "7713",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "misc/python/mango/data/_factory.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CMake",
"bytes": "1621"
},
{
"name": "Python",
"bytes": "652240"
}
],
"symlink_target": ""
} |
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar, Union
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._digital_twins_endpoint_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DigitalTwinsEndpointOperations:
"""DigitalTwinsEndpointOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.digitaltwins.v2021_06_30_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> AsyncIterable["_models.DigitalTwinsEndpointResourceListResult"]:
"""Get DigitalTwinsInstance Endpoints.
:param resource_group_name: The name of the resource group that contains the
DigitalTwinsInstance.
:type resource_group_name: str
:param resource_name: The name of the DigitalTwinsInstance.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DigitalTwinsEndpointResourceListResult or the
result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.digitaltwins.v2021_06_30_preview.models.DigitalTwinsEndpointResourceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-06-30-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DigitalTwinsEndpointResourceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
api_version=api_version,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DigitalTwinsEndpointResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DigitalTwins/digitalTwinsInstances/{resourceName}/endpoints"} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
resource_name: str,
endpoint_name: str,
**kwargs: Any
) -> "_models.DigitalTwinsEndpointResource":
"""Get DigitalTwinsInstances Endpoint.
:param resource_group_name: The name of the resource group that contains the
DigitalTwinsInstance.
:type resource_group_name: str
:param resource_name: The name of the DigitalTwinsInstance.
:type resource_name: str
:param endpoint_name: Name of Endpoint Resource.
:type endpoint_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DigitalTwinsEndpointResource, or the result of cls(response)
:rtype: ~azure.mgmt.digitaltwins.v2021_06_30_preview.models.DigitalTwinsEndpointResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DigitalTwinsEndpointResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-06-30-preview") # type: str
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
endpoint_name=endpoint_name,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DigitalTwinsEndpointResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DigitalTwins/digitalTwinsInstances/{resourceName}/endpoints/{endpointName}"} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
resource_name: str,
endpoint_name: str,
endpoint_description: "_models.DigitalTwinsEndpointResource",
**kwargs: Any
) -> "_models.DigitalTwinsEndpointResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DigitalTwinsEndpointResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-06-30-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(endpoint_description, 'DigitalTwinsEndpointResource')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
endpoint_name=endpoint_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DigitalTwinsEndpointResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DigitalTwinsEndpointResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DigitalTwins/digitalTwinsInstances/{resourceName}/endpoints/{endpointName}"} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
resource_name: str,
endpoint_name: str,
endpoint_description: "_models.DigitalTwinsEndpointResource",
**kwargs: Any
) -> AsyncLROPoller["_models.DigitalTwinsEndpointResource"]:
"""Create or update DigitalTwinsInstance endpoint.
:param resource_group_name: The name of the resource group that contains the
DigitalTwinsInstance.
:type resource_group_name: str
:param resource_name: The name of the DigitalTwinsInstance.
:type resource_name: str
:param endpoint_name: Name of Endpoint Resource.
:type endpoint_name: str
:param endpoint_description: The DigitalTwinsInstance endpoint metadata and security metadata.
:type endpoint_description:
~azure.mgmt.digitaltwins.v2021_06_30_preview.models.DigitalTwinsEndpointResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DigitalTwinsEndpointResource or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.digitaltwins.v2021_06_30_preview.models.DigitalTwinsEndpointResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-06-30-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DigitalTwinsEndpointResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
endpoint_name=endpoint_name,
endpoint_description=endpoint_description,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DigitalTwinsEndpointResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DigitalTwins/digitalTwinsInstances/{resourceName}/endpoints/{endpointName}"} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
resource_name: str,
endpoint_name: str,
**kwargs: Any
) -> Optional["_models.DigitalTwinsEndpointResource"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.DigitalTwinsEndpointResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-06-30-preview") # type: str
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
endpoint_name=endpoint_name,
api_version=api_version,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DigitalTwinsEndpointResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('DigitalTwinsEndpointResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DigitalTwins/digitalTwinsInstances/{resourceName}/endpoints/{endpointName}"} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
resource_name: str,
endpoint_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.DigitalTwinsEndpointResource"]:
"""Delete a DigitalTwinsInstance endpoint.
:param resource_group_name: The name of the resource group that contains the
DigitalTwinsInstance.
:type resource_group_name: str
:param resource_name: The name of the DigitalTwinsInstance.
:type resource_name: str
:param endpoint_name: Name of Endpoint Resource.
:type endpoint_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DigitalTwinsEndpointResource or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.digitaltwins.v2021_06_30_preview.models.DigitalTwinsEndpointResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-06-30-preview") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DigitalTwinsEndpointResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
endpoint_name=endpoint_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DigitalTwinsEndpointResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DigitalTwins/digitalTwinsInstances/{resourceName}/endpoints/{endpointName}"} # type: ignore
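# Illustrative usage sketch (added comment, not part of the generated client): these
# operations are normally reached through a management client instance that exposes
# this group as an attribute; the client class and attribute names below are assumptions.
#
#     client = AzureDigitalTwinsManagementClient(credential, subscription_id)
#     async for endpoint in client.digital_twins_endpoint.list("my-rg", "my-instance"):
#         print(endpoint.name)
#
#     poller = await client.digital_twins_endpoint.begin_delete(
#         "my-rg", "my-instance", "my-endpoint")
#     deleted = await poller.result()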
| {
"content_hash": "4705c54ea8a5060c8cd248b179b678f4",
"timestamp": "",
"source": "github",
"line_count": 450,
"max_line_length": 229,
"avg_line_length": 46.373333333333335,
"alnum_prop": 0.6563638106191297,
"repo_name": "Azure/azure-sdk-for-python",
"id": "0bf487cefd9d1d198205a0cd9238519b0a13cf0d",
"size": "21368",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/digitaltwins/azure-mgmt-digitaltwins/azure/mgmt/digitaltwins/v2021_06_30_preview/aio/operations/_digital_twins_endpoint_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""Computations based on Chebyshev polynomial expansion
The kernel polynomial method (KPM) can be used to approximate various functions by expanding them
in a series of Chebyshev polynomials.
"""
import warnings
import numpy as np
import scipy
from . import _cpp
from . import results
from .model import Model
from .system import System
from .utils.time import timed
from .support.deprecated import LoudDeprecationWarning
__all__ = ['KernelPolynomialMethod', 'kpm', 'kpm_cuda',
'jackson_kernel', 'lorentz_kernel', 'dirichlet_kernel']
class SpatialLDOS:
"""Holds the results of :meth:`KPM.calc_spatial_ldos`
It's a product of a :class:`Series` and a :class:`StructureMap`.
"""
def __init__(self, data, energy, structure):
self.data = data
self.energy = energy
self.structure = structure
def structure_map(self, energy):
"""Return a :class:`StructureMap` of the spatial LDOS at the given energy
Parameters
----------
energy : float
Produce a structure map for LDOS data closest to this energy value.
"""
idx = np.argmin(abs(self.energy - energy))
return self.structure.with_data(self.data[idx])
def ldos(self, position, sublattice=""):
"""Return the LDOS as a function of energy at a specific position
Parameters
----------
position : array_like
sublattice : Optional[str]
"""
idx = self.structure.find_nearest(position, sublattice)
return results.Series(self.energy, self.data[:, idx],
labels=dict(variable="E (eV)", data="LDOS", columns="orbitals"))
class KernelPolynomialMethod:
"""The common interface for various KPM implementations
It should not be created directly but via specific functions
like :func:`kpm` or :func:`kpm_cuda`.
All implementations are based on: https://doi.org/10.1103/RevModPhys.78.275
"""
def __init__(self, impl):
self.impl = impl
@property
def model(self) -> Model:
"""The tight-binding model holding the Hamiltonian"""
return self.impl.model
@model.setter
def model(self, model):
self.impl.model = model
@property
def system(self) -> System:
"""The tight-binding system (shortcut for `KernelPolynomialMethod.model.system`)"""
return System(self.impl.system)
@property
def scaling_factors(self) -> tuple:
"""A tuple of KPM scaling factors `a` and `b`"""
return self.impl.scaling_factors
@property
def kernel(self):
"""The damping kernel"""
return self.impl.kernel
def report(self, shortform=False):
"""Return a report of the last computation
Parameters
----------
shortform : bool, optional
Return a short one line version of the report
"""
return self.impl.report(shortform)
def __call__(self, *args, **kwargs):
warnings.warn("Use .calc_greens() instead", LoudDeprecationWarning)
return self.calc_greens(*args, **kwargs)
def moments(self, num_moments, alpha, beta=None, op=None):
r"""Calculate KPM moments in the form of expectation values
The result is an array of moments where each value is equal to:
.. math::
\mu_n = <\beta|op \cdot T_n(H)|\alpha>
Parameters
----------
num_moments : int
The number of moments to calculate.
alpha : array_like
The starting state vector of the KPM iteration.
beta : Optional[array_like]
If not given, defaults to :math:`\beta = \alpha`.
op : Optional[csr_matrix]
Operator in the form of a sparse matrix. If omitted, an identity matrix
is assumed: :math:`\mu_n = <\beta|T_n(H)|\alpha>`.
Returns
-------
ndarray
"""
from scipy.sparse import csr_matrix
if beta is None:
beta = []
if op is None:
op = csr_matrix([])
else:
op = op.tocsr()
return self.impl.moments(num_moments, alpha, beta, op)
def calc_greens(self, i, j, energy, broadening):
"""Calculate Green's function of a single Hamiltonian element
Parameters
----------
i, j : int
Hamiltonian indices.
energy : ndarray
Energy value array.
broadening : float
Width, in energy, of the smallest detail which can be resolved.
Lower values result in longer calculation time.
Returns
-------
ndarray
Array of the same size as the input `energy`.
"""
return self.impl.calc_greens(i, j, energy, broadening)
def calc_ldos(self, energy, broadening, position, sublattice="", reduce=True):
"""Calculate the local density of states as a function of energy
Parameters
----------
energy : ndarray
Values for which the LDOS is calculated.
broadening : float
Width, in energy, of the smallest detail which can be resolved.
Lower values result in longer calculation time.
position : array_like
Cartesian position of the lattice site for which the LDOS is calculated.
Doesn't need to be exact: the method will find the actual site which is
closest to the given position.
sublattice : str
Only look for sites of a specific sublattice, closest to `position`.
The default value considers any sublattice.
reduce : bool
This option is only relevant for multi-orbital models. If true, the
            resulting LDOS will be summed over all the orbitals at the target site
and the result will be a 1D array. If false, the individual orbital
results will be preserved and the result will be a 2D array with
`shape == (energy.size, num_orbitals)`.
Returns
-------
:class:`~pybinding.Series`
"""
ldos = self.impl.calc_ldos(energy, broadening, position, sublattice, reduce)
return results.Series(energy, ldos.squeeze(), labels=dict(variable="E (eV)", data="LDOS",
columns="orbitals"))
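    # Hedged usage sketch (the `kpm` object, energy grid and position are
    # assumptions used only for illustration):
    #
    #     ldos = kpm.calc_ldos(energy=np.linspace(-1, 1, 500), broadening=0.05,
    #                          position=[0, 0], sublattice="B")
    #     # `ldos` is a results.Series of shape (energy.size,) when reduce=True.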
def calc_spatial_ldos(self, energy, broadening, shape, sublattice=""):
"""Calculate the LDOS as a function of energy and space (in the area of the given shape)
Parameters
----------
energy : ndarray
Values for which the LDOS is calculated.
broadening : float
Width, in energy, of the smallest detail which can be resolved.
Lower values result in longer calculation time.
shape : Shape
Determines the site positions at which to do the calculation.
sublattice : str
Only look for sites of a specific sublattice, within the `shape`.
The default value considers any sublattice.
Returns
-------
:class:`SpatialLDOS`
"""
ldos = self.impl.calc_spatial_ldos(energy, broadening, shape, sublattice)
smap = self.system[shape.contains(*self.system.positions)]
if sublattice:
smap = smap[smap.sub == sublattice]
return SpatialLDOS(ldos, energy, smap)
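    # Hedged usage sketch (shape and energies are illustrative; pb.circle is
    # assumed to be available as elsewhere in pybinding):
    #
    #     spatial = kpm.calc_spatial_ldos(energy=np.linspace(-1, 1, 100),
    #                                     broadening=0.05, shape=pb.circle(radius=2))
    #     smap = spatial.structure_map(energy=0.0)   # StructureMap near E = 0
    #     series = spatial.ldos(position=[0, 0])     # Series at the nearest site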
def calc_dos(self, energy, broadening, num_random=1):
"""Calculate the density of states as a function of energy
Parameters
----------
energy : ndarray
Values for which the DOS is calculated.
broadening : float
Width, in energy, of the smallest detail which can be resolved.
Lower values result in longer calculation time.
num_random : int
The number of random vectors to use for the stochastic calculation of KPM moments.
Larger numbers improve the quality of the result but also increase calculation time
linearly. Fortunately, result quality also improves with system size, so the DOS of
very large systems can be calculated accurately with only a small number of random
vectors.
Returns
-------
:class:`~pybinding.Series`
"""
dos = self.impl.calc_dos(energy, broadening, num_random)
return results.Series(energy, dos, labels=dict(variable="E (eV)", data="DOS"))
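    # Hedged usage sketch (grid and num_random values are illustrative):
    #
    #     dos = kpm.calc_dos(energy=np.linspace(-3, 3, 1000), broadening=0.05,
    #                        num_random=16)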
def deferred_ldos(self, energy, broadening, position, sublattice=""):
"""Same as :meth:`calc_ldos` but for parallel computation: see the :mod:`.parallel` module
Parameters
----------
energy : ndarray
Values for which the LDOS is calculated.
broadening : float
Width, in energy, of the smallest detail which can be resolved.
Lower values result in longer calculation time.
position : array_like
Cartesian position of the lattice site for which the LDOS is calculated.
Doesn't need to be exact: the method will find the actual site which is
closest to the given position.
sublattice : str
Only look for sites of a specific sublattice, closest to `position`.
The default value considers any sublattice.
Returns
-------
Deferred
"""
return self.impl.deferred_ldos(energy, broadening, position, sublattice)
def calc_conductivity(self, chemical_potential, broadening, temperature,
direction="xx", volume=1.0, num_random=1, num_points=1000):
"""Calculate Kubo-Bastin electrical conductivity as a function of chemical potential
The return value is in units of the conductance quantum (e^2 / hbar) not taking into
account spin or any other degeneracy.
The calculation is based on: https://doi.org/10.1103/PhysRevLett.114.116602.
Parameters
----------
chemical_potential : array_like
Values (in eV) for which the conductivity is calculated.
broadening : float
Width (in eV) of the smallest detail which can be resolved in the chemical potential.
Lower values result in longer calculation time.
temperature : float
Value of temperature for the Fermi-Dirac distribution.
direction : Optional[str]
Direction in which the conductivity is calculated. E.g., "xx", "xy", "zz", etc.
volume : Optional[float]
The volume of the system.
num_random : int
The number of random vectors to use for the stochastic calculation of KPM moments.
Larger numbers improve the quality of the result but also increase calculation time
            linearly. Fortunately, result quality also improves with system size, so the conductivity of
very large systems can be calculated accurately with only a small number of random
vectors.
num_points : Optional[int]
Number of points for integration.
Returns
-------
:class:`~pybinding.Series`
"""
data = self.impl.calc_conductivity(chemical_potential, broadening, temperature,
direction, num_random, num_points)
if volume != 1.0:
data /= volume
return results.Series(chemical_potential, data,
                              labels=dict(variable=r"$\mu$ (eV)", data=r"$\sigma (e^2/h)$"))
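    # Hedged usage sketch (all values illustrative; `volume` left at its
    # default so the result stays in units of the conductance quantum):
    #
    #     cond = kpm.calc_conductivity(chemical_potential=np.linspace(-1.5, 1.5, 300),
    #                                  broadening=0.1, temperature=0.01,
    #                                  direction="xy", num_random=8)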
class _ComputeProgressReporter:
def __init__(self):
from .utils.progressbar import ProgressBar
self.pbar = ProgressBar(0)
def __call__(self, delta, total):
if total == 1:
return # Skip reporting for short jobs
if delta < 0:
print("Computing KPM moments...")
self.pbar.size = total
self.pbar.start()
elif delta == total:
self.pbar.finish()
else:
self.pbar += delta
def kpm(model, energy_range=None, kernel="default", num_threads="auto", silent=False, **kwargs):
"""The default CPU implementation of the Kernel Polynomial Method
This implementation works on any system and is well optimized.
Parameters
----------
model : Model
Model which will provide the Hamiltonian matrix.
energy_range : Optional[Tuple[float, float]]
KPM needs to know the lowest and highest eigenvalue of the Hamiltonian, before
computing the expansion moments. By default, this is determined automatically
using a quick Lanczos procedure. To override the automatic boundaries pass a
`(min_value, max_value)` tuple here. The values can be overestimated, but note
that performance drops as the energy range becomes wider. On the other hand,
underestimating the range will produce `NaN` values in the results.
kernel : Kernel
The kernel in the *Kernel* Polynomial Method. Used to improve the quality of
the function reconstructed from the Chebyshev series. Possible values are
:func:`jackson_kernel` or :func:`lorentz_kernel`. The Jackson kernel is used
by default.
num_threads : int
The number of CPU threads to use for calculations. This is automatically set
to the number of logical cores available on the current machine.
silent : bool
Don't show any progress messages.
Returns
-------
:class:`~pybinding.chebyshev.KernelPolynomialMethod`
"""
if kernel != "default":
kwargs["kernel"] = kernel
if num_threads != "auto":
kwargs["num_threads"] = num_threads
if "progress_callback" not in kwargs:
kwargs["progress_callback"] = _ComputeProgressReporter()
if silent:
del kwargs["progress_callback"]
return KernelPolynomialMethod(_cpp.kpm(model, energy_range or (0, 0), **kwargs))
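# Hedged end-to-end sketch (the lattice/model construction below is an
# assumption about typical pybinding usage, not something defined in this
# module; the energy values are illustrative):
#
#     import numpy as np
#     import pybinding as pb
#     from pybinding.repository import graphene
#     model = pb.Model(graphene.monolayer(), pb.rectangle(10))
#     kpm = pb.kpm(model)                       # Jackson kernel, automatic bounds
#     dos = kpm.calc_dos(np.linspace(-9, 9, 500), broadening=0.1)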
def kpm_cuda(model, energy_range=None, kernel="default", **kwargs):
"""Same as :func:`kpm` except that it's executed on the GPU using CUDA (if supported)
See :func:`kpm` for detailed parameter documentation.
This method is only available if the C++ extension module was compiled with CUDA.
Parameters
----------
model : Model
energy_range : Optional[Tuple[float, float]]
kernel : Kernel
Returns
-------
:class:`~pybinding.chebyshev.KernelPolynomialMethod`
"""
try:
if kernel != "default":
kwargs["kernel"] = kernel
# noinspection PyUnresolvedReferences
return KernelPolynomialMethod(_cpp.kpm_cuda(model, energy_range or (0, 0), **kwargs))
except AttributeError:
raise Exception("The module was compiled without CUDA support.\n"
"Use a different KPM implementation or recompile the module with CUDA.")
def jackson_kernel():
"""The Jackson kernel -- a good general-purpose kernel, appropriate for most applications
Imposes Gaussian broadening `sigma = pi / N` where `N` is the number of moments. The
broadening value is user-defined for each function calculation (LDOS, Green's, etc.).
The number of moments is then determined based on the broadening -- it's not directly
set by the user.
"""
return _cpp.jackson_kernel()
def lorentz_kernel(lambda_value=4.0):
"""The Lorentz kernel -- best for Green's function
This kernel is most appropriate for the expansion of the Green’s function because it most
closely mimics the divergences near the true eigenvalues of the Hamiltonian. The Lorentzian
broadening is given by `epsilon = lambda / N` where `N` is the number of moments.
Parameters
----------
lambda_value : float
May be used to fine-tune the smoothness of the convergence. Usual values are
between 3 and 5. Lower values will speed up the calculation at the cost of
accuracy. If in doubt, leave it at the default value of 4.
"""
return _cpp.lorentz_kernel(lambda_value)
def dirichlet_kernel():
"""The Dirichlet kernel -- returns raw moments, least favorable choice
This kernel doesn't modify the moments at all. The resulting moments represent just
a truncated series which results in lots of oscillation in the reconstructed function.
Therefore, this kernel should almost never be used. It's only here in case the raw
moment values are needed for some other purpose. Note that `required_num_moments()`
returns `N = pi / sigma` for compatibility with the Jackson kernel, but there is
no actual broadening associated with the Dirichlet kernel.
"""
return _cpp.dirichlet_kernel()
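# Hedged sketch of selecting a non-default kernel (illustrative only; the
# kernel object is passed straight through to the C++ implementation):
#
#     greens_kpm = kpm(model, kernel=lorentz_kernel(5.0))   # better for Green's
#     raw_kpm = kpm(model, kernel=dirichlet_kernel())       # undamped moments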
class _PythonImpl:
"""Basic Python/SciPy implementation of KPM"""
def __init__(self, model, energy_range, kernel, **_):
self.model = model
self.energy_range = energy_range
self.kernel = kernel
self._stats = {}
@property
def stats(self):
class AttrDict(dict):
"""Allows dict items to be retrieved as attributes: d["item"] == d.item"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__dict__ = self
s = AttrDict(self._stats)
s.update({k: v.elapsed for k, v in s.items() if "_time" in k})
s["eps"] = s["nnz"] / s["moments_time"]
return s
def _scaling_factors(self):
"""Compute the energy bounds of the model and return the appropriate KPM scaling factors"""
def find_bounds():
if self.energy_range[0] != self.energy_range[1]:
return self.energy_range
from scipy.sparse.linalg import eigsh
h = self.model.hamiltonian
self.energy_range = [eigsh(h, which=x, k=1, tol=2e-3, return_eigenvectors=False)[0]
for x in ("SA", "LA")]
return self.energy_range
with timed() as self._stats["bounds_time"]:
emin, emax = find_bounds()
self._stats["energy_min"] = emin
self._stats["energy_max"] = emax
tolerance = 0.01
a = 0.5 * (emax - emin) * (1 + tolerance)
b = 0.5 * (emax + emin)
return a, b
def _rescale_hamiltonian(self, h, a, b):
size = h.shape[0]
with timed() as self._stats["rescale_time"]:
return (h - b * scipy.sparse.eye(size)) * (2 / a)
def _compute_diagonal_moments(self, num_moments, starter, h2):
"""Procedure for computing KPM moments when the two vectors are identical"""
r0 = starter.copy()
r1 = h2.dot(r0) * 0.5
moments = np.zeros(num_moments, dtype=h2.dtype)
moments[0] = np.vdot(r0, r0) * 0.5
moments[1] = np.vdot(r1, r0)
for n in range(1, num_moments // 2):
r0 = h2.dot(r1) - r0
r0, r1 = r1, r0
moments[2 * n] = 2 * (np.vdot(r0, r0) - moments[0])
moments[2 * n + 1] = 2 * np.vdot(r1, r0) - moments[1]
self._stats["num_moments"] = num_moments
self._stats["nnz"] = h2.nnz * num_moments / 2
self._stats["vector_memory"] = r0.nbytes + r1.nbytes
self._stats["matrix_memory"] = (h2.data.nbytes + h2.indices.nbytes + h2.indptr.nbytes
if isinstance(h2, scipy.sparse.csr_matrix) else 0)
return moments
@staticmethod
def _exval_starter(h2, index):
"""Initial vector for the expectation value procedure"""
r0 = np.zeros(h2.shape[0], dtype=h2.dtype)
r0[index] = 1
return r0
@staticmethod
def _reconstruct_real(moments, energy, a, b):
"""Reconstruct a real function from KPM moments"""
scaled_energy = (energy - b) / a
ns = np.arange(moments.size)
k = 2 / (a * np.pi)
return np.array([k / np.sqrt(1 - w**2) * np.sum(moments.real * np.cos(ns * np.arccos(w)))
for w in scaled_energy])
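    # Note on the form used above: with w = (E - b) / a and
    # T_n(w) = cos(n * arccos(w)), the return value is
    #     f(E) = 2 / (pi * a * sqrt(1 - w**2)) * sum_n moments[n] * T_n(w),
    # which matches the standard KPM series
    #     (mu_0 + 2 * sum_{n>=1} mu_n * T_n(w)) / (pi * a * sqrt(1 - w**2))
    # because _compute_diagonal_moments stores mu_0 at half its value.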
def _ldos(self, index, energy, broadening):
"""Calculate the LDOS at the given Hamiltonian index"""
a, b = self._scaling_factors()
num_moments = self.kernel.required_num_moments(broadening / a)
h2 = self._rescale_hamiltonian(self.model.hamiltonian, a, b)
starter = self._exval_starter(h2, index)
with timed() as self._stats["moments_time"]:
moments = self._compute_diagonal_moments(num_moments, starter, h2)
with timed() as self._stats["reconstruct_time"]:
moments *= self.kernel.damping_coefficients(num_moments)
return self._reconstruct_real(moments, energy, a, b)
def calc_ldos(self, energy, broadening, position, sublattice="", reduce=True):
"""Calculate the LDOS at the given position/sublattice"""
with timed() as self._stats["total_time"]:
system_index = self.model.system.find_nearest(position, sublattice)
ham_idx = self.model.system.to_hamiltonian_indices(system_index)
result_data = np.array([self._ldos(i, energy, broadening) for i in ham_idx]).T
if reduce:
return np.sum(result_data, axis=1)
else:
return result_data
def report(self, *_):
from .utils import with_suffix, pretty_duration
stats = self.stats.copy()
stats.update({k: with_suffix(stats[k]) for k in ("num_moments", "eps")})
stats.update({k: pretty_duration(v) for k, v in stats.items() if "_time" in k})
fmt = " ".join([
"{energy_min:.2f}, {energy_max:.2f} [{bounds_time}]",
"[{rescale_time}]",
"{num_moments} @ {eps}eps [{moments_time}]",
"[{reconstruct_time}]",
"| {total_time}"
])
return fmt.format_map(stats)
def _kpm_python(model, energy_range=None, kernel="default", **kwargs):
"""Basic Python/SciPy implementation of KPM"""
if kernel == "default":
kernel = jackson_kernel()
return KernelPolynomialMethod(_PythonImpl(model, energy_range or (0, 0), kernel, **kwargs))
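# Hedged usage sketch of the reference implementation (values illustrative;
# note that _PythonImpl only implements the LDOS path, unlike the C++ backends):
#
#     kpm_ref = _kpm_python(model, energy_range=(-10, 10))
#     ldos = kpm_ref.calc_ldos(np.linspace(-1, 1, 200), broadening=0.1,
#                              position=[0, 0])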
| {
"content_hash": "d5446e28698a990b4cec9b87f210846e",
"timestamp": "",
"source": "github",
"line_count": 572,
"max_line_length": 99,
"avg_line_length": 38.69755244755245,
"alnum_prop": 0.6101648972215947,
"repo_name": "MAndelkovic/pybinding",
"id": "74d5f316f6dc766d879d794a3415f48cb462847f",
"size": "22137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pybinding/chebyshev.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "427572"
},
{
"name": "CMake",
"bytes": "12854"
},
{
"name": "Cuda",
"bytes": "10135"
},
{
"name": "Python",
"bytes": "332184"
},
{
"name": "Shell",
"bytes": "431"
}
],
"symlink_target": ""
} |
"""Pytest configuration file.
This is a bit of a, ahem, alternative way of allowing pytest to find the pypyr
module, without having to go into your virtualenv or dealing with setup.py
first. This works because pytest treats the directory this file is in as the root.
What this file is actually for is per-directory fixture scopes:
http://doc.pytest.org/en/latest/example/simple.html#package-directory-level-fixtures-setups
"""
import logging
from pypyr.log.logger import set_root_logger
set_root_logger(logging.DEBUG)
| {
"content_hash": "ff7b3dfa553717c6782a45317c3ca53f",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 91,
"avg_line_length": 36.357142857142854,
"alnum_prop": 0.7897838899803536,
"repo_name": "pypyr/pypyr-cli",
"id": "ab0876a896b6662f8244aa219c2ff6a7da004195",
"size": "509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "819975"
},
{
"name": "Shell",
"bytes": "10150"
}
],
"symlink_target": ""
} |
"""Script to parse Windows Defender scan DetectionHistory files."""
import argparse
import logging
import sys
from dtformats import detection_history
from dtformats import output_writers
def Main():
"""The main program function.
Returns:
bool: True if successful or False if not.
"""
argument_parser = argparse.ArgumentParser(description=(
'Extracts information from a Windows Defender scan DetectionHistory '
'file.'))
argument_parser.add_argument(
'-d', '--debug', dest='debug', action='store_true', default=False,
help='enable debug output.')
argument_parser.add_argument(
'source', nargs='?', action='store', metavar='PATH', default=None, help=(
'path of the Windows Defender scan DetectionHistory file.'))
options = argument_parser.parse_args()
if not options.source:
print('Source file missing.')
print('')
argument_parser.print_help()
print('')
return False
logging.basicConfig(
level=logging.INFO, format='[%(levelname)s] %(message)s')
output_writer = output_writers.StdoutWriter()
try:
output_writer.Open()
except IOError as exception:
print(f'Unable to open output writer with error: {exception!s}')
print('')
return False
detection_history_file = (
detection_history.WindowsDefenderScanDetectionHistoryFile(
debug=options.debug, output_writer=output_writer))
detection_history_file.Open(options.source)
output_writer.WriteText('Windows Defender scan DetectionHistory information:')
# TODO: print information.
detection_history_file.Close()
output_writer.Close()
return True
if __name__ == '__main__':
if not Main():
sys.exit(1)
else:
sys.exit(0)
| {
"content_hash": "b5d607fdd463f62a9cf7fc0924e3ec59",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 80,
"avg_line_length": 25.043478260869566,
"alnum_prop": 0.6892361111111112,
"repo_name": "libyal/dtformats",
"id": "6508f69e9ee26af5bff15c866ef3fdfcfadd8a00",
"size": "1774",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scripts/detection_history.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "827"
},
{
"name": "Python",
"bytes": "700241"
},
{
"name": "Shell",
"bytes": "1139"
}
],
"symlink_target": ""
} |
"""
miscellaneous math.
"""
from numbers import Integral, Number
def is_number(x):
"""
Tests if `x` is a number.
Parameters
----------
x : object
The object to test the numericalness of.
Returns
-------
b : bool
True if `x` is a number, False otherwise.
"""
return isinstance(x, Number)
def is_integer(x):
"""
Tests if `x` is a integer.
Parameters
----------
x : object
The object to test the integerness of.
Returns
-------
b : bool
True if `x` is an integer, False otherwise.
"""
return isinstance(x, Integral)
def factorial(n):
"""
Computes n!
Parameters
----------
n : int
        A non-negative integer
Returns
-------
f : int
n!
Raises
------
TypeError
If `n` is not a number.
ValueError
        If `n` is not a non-negative integer.
"""
if not is_number(n):
raise TypeError("{0} is not a number.".format(n))
if not is_integer(n) or n < 0:
        raise ValueError("{0} is not a non-negative integer.".format(n))
if n == 0:
return 1
else:
return n * factorial(n-1)
def combinations(n, k):
"""
Returns the binomial coefficient of `n` choose `k`.
Parameters
----------
n : int
The size of the set to draw from.
k : int
The number of elements to draw.
Returns
-------
nck : int
`n` choose `k`
Raises
------
TypeError
If `n` and `k` are not numbers.
ValueError
        If `n` and `k` are not non-negative integers, or if `k` > `n`.
"""
nf = factorial(n)
kf = factorial(k)
if k > n:
raise ValueError("{0} is larger than {1}.".format(k, n))
nmkf = factorial(n-k)
return nf/(kf*nmkf)
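# Illustrative check (an assumption about intended use, not part of the module):
# combinations(5, 2) evaluates to 10.0 -- the true division above yields a
# float even though the docstring advertises an int; callers that need an
# exact integer can use int(combinations(n, k)) or nf // (kf * nmkf).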
| {
"content_hash": "8bd508c96a185a2e5365734204652ea7",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 68,
"avg_line_length": 18.75257731958763,
"alnum_prop": 0.5123694337548104,
"repo_name": "chebee7i/dit",
"id": "27e5706528beb2a47c5370b1de9644d4fec5dbe3",
"size": "1819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dit/math/misc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5938"
},
{
"name": "HTML",
"bytes": "265"
},
{
"name": "PHP",
"bytes": "614"
},
{
"name": "Python",
"bytes": "714621"
}
],
"symlink_target": ""
} |
"""Tests for pointnet."""
from absl.testing import parameterized
from lingvo import compat as tf
from lingvo.core import py_utils
from lingvo.core import test_utils
from lingvo.tasks.car import pointnet
class PointNetTest(test_utils.TestCase, parameterized.TestCase):
def _testOutShape(self, p, input_shape, expected_shape):
batch_size, num_points, _ = input_shape
g = tf.Graph()
with g.as_default():
net = p.Instantiate()
input_data = py_utils.NestedMap(
points=tf.random.uniform((batch_size, num_points, 3)),
features=tf.random.uniform(input_shape),
padding=tf.zeros((batch_size, num_points), dtype=tf.float32),
label=tf.random.uniform((batch_size,),
minval=0,
maxval=16,
dtype=tf.int32))
result = net.FPropDefaultTheta(input_data)
with self.session(graph=g):
self.evaluate(tf.global_variables_initializer())
np_result = self.evaluate(result)
self.assertEqual(np_result.shape, expected_shape)
@parameterized.parameters((128, 3), (128, 9), (256, 3))
def testPointNetClassifier(self, feature_dims, input_dims):
p = pointnet.PointNet().Classifier(
input_dims=input_dims, feature_dims=feature_dims)
# Network should produce a global feature of feature_dims.
self.assertEqual(p.output_dim, feature_dims)
self._testOutShape(p, (8, 128, input_dims), (8, feature_dims))
def testPointNetSegmentation(self):
p = pointnet.PointNet().Segmentation()
# Network takes batch_size=8 input and produce 128-dim pointwise feature.
self.assertEqual(p.output_dim, 128)
self._testOutShape(p, (8, 100, 3), (8, 100, 128))
def testPointNetSegmentationShapeNet(self):
p = pointnet.PointNet().SegmentationShapeNet()
self.assertEqual(p.output_dim, 128)
self._testOutShape(p, (8, 2000, 3), (8, 2000, 128))
@parameterized.parameters((128, 3), (128, 9), (256, 3))
def testPointNetPPClassifier(self, feature_dims, input_dims):
p = pointnet.PointNetPP().Classifier(
input_dims=input_dims, feature_dims=feature_dims)
# Network should produce a global feature of feature_dims.
self.assertEqual(p.output_dim, feature_dims)
self._testOutShape(p, (8, 1024, input_dims), (8, feature_dims))
if __name__ == '__main__':
test_utils.main()
| {
"content_hash": "b7537bb50097de8fca780dcf85eb37cd",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 77,
"avg_line_length": 39.85,
"alnum_prop": 0.6612296110414053,
"repo_name": "tensorflow/lingvo",
"id": "09962d90ad2998d33397817cfa0bb5e8bb4bf718",
"size": "3080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lingvo/tasks/car/pointnet_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5163"
},
{
"name": "C++",
"bytes": "556344"
},
{
"name": "Dockerfile",
"bytes": "8484"
},
{
"name": "Jupyter Notebook",
"bytes": "36721"
},
{
"name": "Python",
"bytes": "9574124"
},
{
"name": "Shell",
"bytes": "50408"
},
{
"name": "Starlark",
"bytes": "182688"
},
{
"name": "TeX",
"bytes": "37275"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import re
import textwrap
import yaml
from pre_commit.util import yaml_load
def _is_header_line(line: str) -> bool:
return line.startswith(('#', '---')) or not line.strip()
def _migrate_map(contents: str) -> str:
if isinstance(yaml_load(contents), list):
# Find the first non-header line
lines = contents.splitlines(True)
i = 0
# Only loop on non empty configuration file
while i < len(lines) and _is_header_line(lines[i]):
i += 1
header = ''.join(lines[:i])
rest = ''.join(lines[i:])
# If they are using the "default" flow style of yaml, this operation
# will yield a valid configuration
try:
trial_contents = f'{header}repos:\n{rest}'
yaml_load(trial_contents)
contents = trial_contents
except yaml.YAMLError:
contents = f'{header}repos:\n{textwrap.indent(rest, " " * 4)}'
return contents
def _migrate_sha_to_rev(contents: str) -> str:
return re.sub(r'(\n\s+)sha:', r'\1rev:', contents)
def migrate_config(config_file: str, quiet: bool = False) -> int:
with open(config_file) as f:
orig_contents = contents = f.read()
contents = _migrate_map(contents)
contents = _migrate_sha_to_rev(contents)
if contents != orig_contents:
with open(config_file, 'w') as f:
f.write(contents)
print('Configuration has been migrated.')
elif not quiet:
print('Configuration is already migrated.')
return 0
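# Hedged example of the two rewrites above (repo URL and hook id are
# illustrative):
#
#   before (top-level list, legacy "sha" key):
#       -   repo: https://github.com/pre-commit/pre-commit-hooks
#           sha: v2.3.0
#           hooks:
#           -   id: trailing-whitespace
#
#   after _migrate_map + _migrate_sha_to_rev:
#       repos:
#       -   repo: https://github.com/pre-commit/pre-commit-hooks
#           rev: v2.3.0
#           hooks:
#           -   id: trailing-whitespace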
| {
"content_hash": "2ff1ff960c1f6a8bb8498f172ade48b3",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 76,
"avg_line_length": 27.57894736842105,
"alnum_prop": 0.6036895674300254,
"repo_name": "pre-commit/pre-commit",
"id": "c3d0a509f6be4e3db6c2ec132901fb56b21e08bc",
"size": "1572",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pre_commit/commands/migrate_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "753"
},
{
"name": "Dart",
"bytes": "142"
},
{
"name": "Dockerfile",
"bytes": "508"
},
{
"name": "Go",
"bytes": "240"
},
{
"name": "JavaScript",
"bytes": "128"
},
{
"name": "Lua",
"bytes": "513"
},
{
"name": "Perl",
"bytes": "532"
},
{
"name": "PowerShell",
"bytes": "744"
},
{
"name": "Python",
"bytes": "511310"
},
{
"name": "R",
"bytes": "24268"
},
{
"name": "Ruby",
"bytes": "829"
},
{
"name": "Rust",
"bytes": "56"
},
{
"name": "Shell",
"bytes": "3952"
},
{
"name": "Swift",
"bytes": "181"
}
],
"symlink_target": ""
} |