text (stringlengths 4 – 1.02M) | meta (dict)
---|---|
import logging
class Logging(object):
    def log(self):
        return logging.getLogger('.'.join((self.__class__.__module__,
                                           self.__class__.__name__)))
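A minimal usage sketch for the mixin above (the Article class is hypothetical, not part of the original file): any class inheriting Logging gets a logger named "<module>.<ClassName>".
class Article(Logging):
    def save(self):
        # the logger name resolves to "<module>.Article"
        self.log().info("saving article")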
| {
"content_hash": "a6c0ca8ef9b5a7849fc9a1fb52a050c5",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 69,
"avg_line_length": 25,
"alnum_prop": 0.475,
"repo_name": "fakedrake/WikipediaBase",
"id": "f2b52055aca6c582d765e60f5412320ffcc98f74",
"size": "200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wikipediabase/log.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Emacs Lisp",
"bytes": "321"
},
{
"name": "Python",
"bytes": "156664"
}
],
"symlink_target": ""
} |
from google.cloud import bigquery_datatransfer_v1
def sample_get_transfer_run():
    # Create a client
    client = bigquery_datatransfer_v1.DataTransferServiceClient()
    # Initialize request argument(s)
    request = bigquery_datatransfer_v1.GetTransferRunRequest(
        name="name_value",
    )
    # Make the request
    response = client.get_transfer_run(request=request)
    # Handle the response
    print(response)
# [END bigquerydatatransfer_v1_generated_DataTransferService_GetTransferRun_sync]
| {
"content_hash": "9fb98a399910b7bd07cbe5ab1d1e144e",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 81,
"avg_line_length": 27.05263157894737,
"alnum_prop": 0.7354085603112841,
"repo_name": "googleapis/python-bigquery-datatransfer",
"id": "08f00c3b76d80e61b0119fb0832c0caec4557fb9",
"size": "1934",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/bigquerydatatransfer_v1_generated_data_transfer_service_get_transfer_run_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "584257"
},
{
"name": "Shell",
"bytes": "30705"
}
],
"symlink_target": ""
} |
"""
* The <code>LoadTest</code> is a test decorator that runs
* a test with a simulated number of concurrent users and
* iterations.
* <p>
* In its simplest form, a <code>LoadTest</code> is constructed
* with a test to decorate and the number of concurrent users.
* </p>
* <p>
* For example, to create a load test of 10 concurrent users
* with each user running <code>ExampleTest</code> once and
* all users started simultaneously, use:
* <blockquote>
* <pre>
* Test loadTest = new LoadTest(new TestSuite(ExampleTest.class), 10);
* </pre>
* </blockquote>
* or, to load test a single test method, use:
* <blockquote>
* <pre>
* Test loadTest = new LoadTest(new ExampleTest("testSomething"), 10);
* </pre>
* </blockquote>
* </p>
* <p>
* The load can be ramped by specifying a pluggable
* <code>Timer</code> instance which prescribes the delay
* between the addition of each concurrent user. A
* <code>ConstantTimer</code> has a constant delay, with
* a zero value indicating that all users will be started
* simultaneously. A <code>RandomTimer</code> has a random
* delay with a uniformly distributed variation.
* </p>
* <p>
* For example, to create a load test of 10 concurrent users
* with each user running <code>ExampleTest.testSomething()</code> once and
* with a one second delay between the addition of users, use:
* <blockquote>
* <pre>
* Timer timer = new ConstantTimer(1000);
* Test loadTest = new LoadTest(new ExampleTest("testSomething"), 10, timer);
* </pre>
* </blockquote>
* </p>
* <p>
* In order to simulate each concurrent user running a test for a
* specified number of iterations, a <code>LoadTest</code> can be
* constructed to decorate a <code>RepeatedTest</code>.
* Alternatively, a <code>LoadTest</code> convenience constructor
* specifying the number of iterations is provided which creates a
* <code>RepeatedTest</code>.
* </p>
* <p>
* For example, to create a load test of 10 concurrent users
* with each user running <code>ExampleTest.testSomething()</code> for 20 iterations,
* and with a one second delay between the addition of users, use:
* <blockquote>
* <pre>
* Timer timer = new ConstantTimer(1000);
* Test repeatedTest = new RepeatedTest(new ExampleTest("testSomething"), 20);
* Test loadTest = new LoadTest(repeatedTest, 10, timer);
* </pre>
* </blockquote>
* or, alternatively, use:
* <blockquote>
* <pre>
* Timer timer = new ConstantTimer(1000);
* Test loadTest = new LoadTest(new ExampleTest("testSomething"), 10, 20, timer);
* </pre>
* </blockquote>
* A <code>LoadTest</code> can be decorated as a <code>TimedTest</code>
* to test the elapsed time of the load test. For example, to decorate
* the load test constructed above as a timed test with a maximum elapsed
* time of 2 seconds, use:
* <blockquote>
* <pre>
* Test timedTest = new TimedTest(loadTest, 2000);
* </pre>
* </blockquote>
* </p>
* <p>
* By default, a <code>LoadTest</code> does not enforce test
* atomicity (as defined in transaction processing) if its decorated
* test spawns threads, either directly or indirectly. In other words,
* if a decorated test spawns a thread and then returns control without
* waiting for its spawned thread to complete, then the test is assumed
* to be transactionally complete.
* </p>
* <p>
* If threads are integral to the successful completion of
* a decorated test, meaning that the decorated test should not be
* treated as complete until all of its threads complete, then
* <code>setEnforceTestAtomicity(true)</code> should be invoked to
* enforce test atomicity. This effectively causes the load test to
* wait for the completion of all threads belonging to the same
* <code>ThreadGroup</code> as the thread running the decorated test.
* </p>
* @author <b>Mike Clark</b>
* @author Clarkware Consulting, Inc.
* @author Ervin Varga
**************************************
* Ported to Python by Grig Gheorghiu *
**************************************
"""
import time
from threading import Thread
from unittest import TestResult, TestCase
from Test import Test
from RepeatedTest import RepeatedTest
from ConstantTimer import ConstantTimer
from CustomExceptions import IllegalArgumentException
from ThreadBarrier import ThreadBarrier
from ThreadedTestGroup import ThreadedTestGroup
from ThreadedTest import ThreadedTest
try:
bool = True
except:
True = 1
False = 0
class LoadTest(Test):
def __init__(self, test, users, iterations=0, timer=None):
"""
* Constructs a <code>LoadTest</code> to decorate
* the specified test using the specified number
* of concurrent users starting simultaneously and
        * the number of iterations per user. If a Timer is
        * provided, then a delay is introduced between the addition of each user.
*
* @param test Test to decorate.
* @param users Number of concurrent users.
* @param iterations Number of iterations per user.
* @param timer Delay timer.
"""
if iterations:
test = RepeatedTest(test, iterations)
if timer is None:
timer = ConstantTimer(0)
if users < 1:
raise IllegalArgumentException("Number of users must be > 0")
if timer is None:
raise IllegalArgumentException("Delay timer is null")
if test is None:
raise IllegalArgumentException("Decorated test is null")
self.users = users
self.timer = timer
self.setEnforceTestAtomicity(False)
self.barrier = ThreadBarrier(users)
self.group = ThreadedTestGroup(self, "LoadTest:ThreadedTestGroup")
self.test = ThreadedTest(test, self.group, self.barrier)
def setEnforceTestAtomicity(self, isAtomic):
"""
* Indicates whether test atomicity should be enforced.
* <p>
* If threads are integral to the successful completion of
* a decorated test, meaning that the decorated test should not be
* treated as complete until all of its threads complete, then
* <code>setEnforceTestAtomicity(true)</code> should be invoked to
* enforce test atomicity. This effectively causes the load test to
* wait for the completion of all threads belonging to the same
* <code>ThreadGroup</code> as the thread running the decorated test.
*
* @param isAtomic <code>true</code> to enforce test atomicity;
* <code>false</code> otherwise.
"""
self.enforceTestAtomicity = isAtomic
def countTestCases(self):
"""
* Returns the number of tests in this load test.
*
* @return Number of tests.
"""
return self.users * self.test.countTestCases()
def run(self, result):
"""
* Runs the test.
*
* @param result Test result.
"""
self.group.setTestResult(result)
for i in range(self.users):
#if result.shouldStop():
# self.barrier.cancelThreads(self.users - i)
# break
self.test.run(result)
self.sleep(self.getDelay())
self.waitForTestCompletion()
self.cleanup()
def __call__(self, result):
self.run(result)
def waitForTestCompletion(self):
"""
// TODO: May require a strategy pattern
// if other algorithms emerge.
"""
if self.enforceTestAtomicity:
self.waitForAllThreadsToComplete()
else:
self.waitForThreadedTestThreadsToComplete()
def waitForThreadedTestThreadsToComplete(self):
while not self.barrier.isReached():
self.sleep(50)
def waitForAllThreadsToComplete(self):
while self.group.activeCount() > 0:
self.sleep(50)
def sleep(self, ms):
try:
time.sleep(ms*0.001)
except:
pass
def cleanup(self):
try:
self.group.destroy()
except:
pass
def __str__(self):
if self.enforceTestAtomicity:
return "LoadTest (ATOMIC): " + str(self.test)
else:
return "LoadTest (NON-ATOMIC): " + str(self.test)
def getDelay(self):
return self.timer.getDelay()
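A hedged Python sketch of the usage described in the module docstring above, using a trivial ExampleTest defined here purely for illustration (it is not part of this file).
from unittest import TestCase, TestResult

class ExampleTest(TestCase):
    def testSomething(self):
        self.assertTrue(True)

# 10 concurrent users, 20 iterations each, 1 second ramp between users
timer = ConstantTimer(1000)
load_test = LoadTest(ExampleTest("testSomething"), 10, 20, timer)
load_test.setEnforceTestAtomicity(True)
result = TestResult()
load_test.run(result)
print("%s -> errors: %d, failures: %d" % (load_test, len(result.errors), len(result.failures)))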
| {
"content_hash": "b5aaa82e32695c50353a89b81f5a3e4e",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 86,
"avg_line_length": 32.36401673640167,
"alnum_prop": 0.6989010989010989,
"repo_name": "nmondal/pyunitperf",
"id": "f430c355c12588ce9e6350af460fcba8e2a6956a",
"size": "7735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LoadTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "40037"
},
{
"name": "Python",
"bytes": "34416"
}
],
"symlink_target": ""
} |
"""Plot 7 Day Precipitation Totals"""
import datetime
import sys
from pandas import read_sql
from pyiem.plot import MapPlot
from pyiem.util import get_dbconnstr
def fmter(val):
"""Make pretty text"""
if val is None:
return 0
if 0 < val < 0.009:
return "T"
return "%.2f" % (val,)
def main(days, argv):
"""Go!"""
today = datetime.date.today()
routes = "ac"
if len(argv) == 4:
today = datetime.date(int(argv[1]), int(argv[2]), int(argv[3]))
routes = "a"
sixago = today - datetime.timedelta(days=(days - 1))
# Compute normal from the climate database
df = read_sql(
"""
select s.id, ST_x(s.geom) as lon, ST_y(s.geom) as lat,
sum(pday) as rainfall
from summary c JOIN stations s on (c.iemid = s.iemid)
WHERE day >= %s and day <= %s
and s.network = 'IA_ASOS' and pday >= 0 and pday < 30
GROUP by s.id, lon, lat
""",
get_dbconnstr("iem"),
params=(sixago, today),
index_col="id",
)
df["label"] = df["rainfall"].apply(fmter)
mp = MapPlot(
title=f"Iowa {days} Day Precipitation Total [inch] (ASOS)",
subtitle=("%s - %s inclusive")
% (sixago.strftime("%d %b %Y"), today.strftime("%d %b %Y")),
continentalcolor="white",
)
mp.plot_values(
df["lon"].values,
df["lat"].values,
df["label"].values,
"%s",
labels=df.index.values,
labelbuffer=5,
)
mp.drawcounties()
pqstr = (
"plot %s %s0000 summary/%sday/ia_precip.png "
"summary/%sday/ia_precip.png png"
) % (routes, today.strftime("%Y%m%d"), days, days)
mp.postprocess(pqstr=pqstr)
mp.close()
if __name__ == "__main__":
for _days in [7, 31, 91]:
main(_days, sys.argv)
| {
"content_hash": "c2f6179feb5ff67756d1134bdf27a3f0",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 71,
"avg_line_length": 26.420289855072465,
"alnum_prop": 0.5441579813494241,
"repo_name": "akrherz/iem",
"id": "cf85c5312082c62971c1d94a5f68094a75ad3cf1",
"size": "1823",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scripts/week/plot_obs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16912"
},
{
"name": "HTML",
"bytes": "1092923"
},
{
"name": "Hack",
"bytes": "7078"
},
{
"name": "JavaScript",
"bytes": "244253"
},
{
"name": "PHP",
"bytes": "3492474"
},
{
"name": "Python",
"bytes": "3279270"
},
{
"name": "Rich Text Format",
"bytes": "30075"
},
{
"name": "Shell",
"bytes": "72284"
}
],
"symlink_target": ""
} |
import Inline
info = {
"friendly_name": "Preformatted text (Block)",
"summary": "Prints child paragraphs in a monospace typeface, while still interpreting them as markup.",
}
def SublanguageHandler(args, doc, renderer):
    text = doc.reconstruct_child_text().as_string()
    (fragments, rest) = Inline.parse(text)
    renderer.add(Inline.TagFragment('pyle_pre', fragments, 'pre'))
| {
"content_hash": "3977032a2f5436857c01445937b746a3",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 107,
"avg_line_length": 35.63636363636363,
"alnum_prop": 0.7066326530612245,
"repo_name": "CymaticLabs/Unity3D.Amqp",
"id": "bc18d39c6915802641b3d5a6583ac4a6e82ada0a",
"size": "392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/rabbitmq-dotnet-client-rabbitmq_v3_4_4/docs/pyle2-fcfcf7e/sublanguages/pre.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "13572"
},
{
"name": "C#",
"bytes": "3448460"
},
{
"name": "CSS",
"bytes": "4291"
},
{
"name": "HTML",
"bytes": "511888"
},
{
"name": "JavaScript",
"bytes": "4843"
},
{
"name": "Makefile",
"bytes": "3989"
},
{
"name": "Perl",
"bytes": "3838"
},
{
"name": "Python",
"bytes": "987455"
},
{
"name": "Roff",
"bytes": "9846"
},
{
"name": "Shell",
"bytes": "25576"
},
{
"name": "XSLT",
"bytes": "62862"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2014, Samsung Electronics Co.,Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of Samsung Electronics Co.,Ltd..
"""
"""
jpeg4py - libjpeg-turbo cffi bindings and helper classes.
URL: https://github.com/ajkxyz/jpeg4py
Original author: Alexey Kazantsev <[email protected]>
"""
"""
Helper classes for libjpeg-turbo cffi bindings.
"""
import jpeg4py._cffi as jpeg
from jpeg4py._cffi import TJPF_RGB
import numpy
import os
class JPEGRuntimeError(RuntimeError):
def __init__(self, msg, code):
super(JPEGRuntimeError, self).__init__(msg)
self.code = code
class Base(object):
"""Base class.
Attributes:
lib_: cffi handle to loaded shared library.
"""
def __init__(self, lib_):
"""Constructor.
Parameters:
lib_: cffi handle to loaded shared library.
"""
if lib_ is None:
jpeg.initialize()
lib_ = jpeg.lib
self.lib_ = lib_
def get_last_error(self):
"""Returns last error string.
"""
return jpeg.ffi.string(self.lib_.tjGetErrorStr()).decode("utf-8")
class Handle(Base):
"""Stores tjhandle pointer.
Attributes:
handle_: cffi tjhandle pointer.
"""
def __init__(self, handle_, lib_):
"""Constructor.
Parameters:
handle_: cffi tjhandle pointer.
"""
self.handle_ = None
super(Handle, self).__init__(lib_)
self.handle_ = handle_
def release(self):
if self.handle_ is not None:
self.lib_.tjDestroy(self.handle_)
self.handle_ = None
class JPEG(Base):
"""Main class.
Attributes:
decompressor: Handle object for decompressor.
source: numpy array with source data,
            either an encoded raw JPEG which may be decoded/transformed,
            or a source image for a later encode.
width: image width.
height: image height.
subsampling: level of chrominance subsampling.
Static attributes:
decompressors: list of decompressors for caching purposes.
"""
decompressors = []
@staticmethod
def clear():
"""Clears internal caches.
"""
# Manually release cached JPEG decompressors
for handle in reversed(JPEG.decompressors):
handle.release()
del JPEG.decompressors[:]
def __init__(self, source, lib_=None):
"""Constructor.
Parameters:
source: source for JPEG operations (numpy array or file name).
"""
super(JPEG, self).__init__(lib_)
self.decompressor = None
self.width = None
self.height = None
self.subsampling = None
if hasattr(source, "__array_interface__"):
self.source = source
elif numpy.fromfile is not None:
self.source = numpy.fromfile(source, dtype=numpy.uint8)
else:
fin = open(source, "rb")
self.source = numpy.empty(os.path.getsize(source),
dtype=numpy.uint8)
fin.readinto(self.source)
fin.close()
def _get_decompressor(self):
if self.decompressor is not None:
return
try:
self.decompressor = JPEG.decompressors.pop(-1)
except IndexError:
d = self.lib_.tjInitDecompress()
if d == jpeg.ffi.NULL:
raise JPEGRuntimeError(
"tjInitDecompress() failed with error "
"string %s" % self.get_last_error(), 0)
self.decompressor = Handle(d, self.lib_)
def parse_header(self):
"""Parses JPEG header.
Fills self.width, self.height, self.subsampling.
"""
self._get_decompressor()
whs = jpeg.ffi.new("int[]", 3)
whs_base = int(jpeg.ffi.cast("size_t", whs))
whs_itemsize = int(jpeg.ffi.sizeof("int"))
n = self.lib_.tjDecompressHeader2(
self.decompressor.handle_,
jpeg.ffi.cast("unsigned char*",
self.source.__array_interface__["data"][0]),
self.source.nbytes,
jpeg.ffi.cast("int*", whs_base),
jpeg.ffi.cast("int*", whs_base + whs_itemsize),
jpeg.ffi.cast("int*", whs_base + whs_itemsize + whs_itemsize))
if n:
raise JPEGRuntimeError("tjDecompressHeader2() failed with error "
"%d and error string %s" %
(n, self.get_last_error()), n)
self.width = int(whs[0])
self.height = int(whs[1])
self.subsampling = int(whs[2])
def decode(self, dst=None, pixfmt=TJPF_RGB):
bpp = jpeg.tjPixelSize[pixfmt]
if dst is None:
if self.width is None:
self.parse_header()
sh = [self.height, self.width]
if bpp > 1:
sh.append(bpp)
dst = numpy.zeros(sh, dtype=numpy.uint8)
elif not hasattr(dst, "__array_interface__"):
raise ValueError("dst should be numpy array or None")
if len(dst.shape) < 2:
raise ValueError("dst shape length should 2 or 3")
if dst.nbytes < dst.shape[1] * dst.shape[0] * bpp:
raise ValueError(
"dst is too small to hold the requested pixel format")
self._get_decompressor()
n = self.lib_.tjDecompress2(
self.decompressor.handle_,
jpeg.ffi.cast("unsigned char*",
self.source.__array_interface__["data"][0]),
self.source.nbytes,
jpeg.ffi.cast("unsigned char*",
dst.__array_interface__["data"][0]),
dst.shape[1], dst.strides[0], dst.shape[0], pixfmt, 0)
if n:
raise JPEGRuntimeError("tjDecompress2() failed with error "
"%d and error string %s" %
(n, self.get_last_error()), n)
return dst
def __del__(self):
# Return decompressor to cache.
if self.decompressor is not None:
JPEG.decompressors.append(self.decompressor)
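A hedged usage sketch for the JPEG class above; the file name is a placeholder.
img = JPEG("photo.jpg")        # placeholder path; a numpy array also works
img.parse_header()             # fills width, height, subsampling
print("%dx%d, subsampling=%d" % (img.width, img.height, img.subsampling))
rgb = img.decode()             # uint8 array of shape (height, width, 3), pixfmt defaults to TJPF_RGB
JPEG.clear()                   # release the cached decompressors when done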
| {
"content_hash": "c25d6b09e438fe081cbbcbecd368c0e9",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 79,
"avg_line_length": 35.26851851851852,
"alnum_prop": 0.5992386453137306,
"repo_name": "ajkxyz/jpeg4py",
"id": "f98eee318c6822fe70e4a26249fec58f4c702419",
"size": "7618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/jpeg4py/_py.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "27727"
}
],
"symlink_target": ""
} |
r"""
Give Python the ability to decode some common, flawed encodings.
Python does not want you to be sloppy with your text. Its encoders and decoders
("codecs") follow the relevant standards whenever possible, which means that
when you get text that *doesn't* follow those standards, you'll probably fail
to decode it. Or you might succeed at decoding it for implementation-specific
reasons, which is perhaps worse.
There are some encodings out there that Python wishes didn't exist, which are
widely used outside of Python:
- "utf-8-variants", a family of not-quite-UTF-8 encodings, including the
ever-popular CESU-8 and "Java modified UTF-8".
- "Sloppy" versions of character map encodings, where bytes that don't map to
anything will instead map to the Unicode character with the same number.
Simply importing this module, or in fact any part of the `ftfy` package, will
make these new "bad codecs" available to Python through the standard Codecs
API. You never have to actually call any functions inside `ftfy.bad_codecs`.
However, if you want to call something because your code checker insists on it,
you can call ``ftfy.bad_codecs.ok()``.
A quick example of decoding text that's encoded in CESU-8:
>>> import ftfy.bad_codecs
>>> print(b'\xed\xa0\xbd\xed\xb8\x8d'.decode('utf-8-variants'))
😍
"""
from __future__ import unicode_literals
from encodings import normalize_encoding
import codecs
_CACHE = {}
# Define some aliases for 'utf-8-variants'. All hyphens get turned into
# underscores, because of `normalize_encoding`.
UTF8_VAR_NAMES = (
'utf_8_variants', 'utf8_variants',
'utf_8_variant', 'utf8_variant',
'utf_8_var', 'utf8_var',
'cesu_8', 'cesu8',
'java_utf_8', 'java_utf8'
)
def search_function(encoding):
"""
Register our "bad codecs" with Python's codecs API. This involves adding
a search function that takes in an encoding name, and returns a codec
for that encoding if it knows one, or None if it doesn't.
The encodings this will match are:
- Encodings of the form 'sloppy-windows-NNNN' or 'sloppy-iso-8859-N',
where the non-sloppy version is an encoding that leaves some bytes
unmapped to characters.
- The 'utf-8-variants' encoding, which has the several aliases seen
above.
"""
if encoding in _CACHE:
return _CACHE[encoding]
norm_encoding = normalize_encoding(encoding)
codec = None
if norm_encoding in UTF8_VAR_NAMES:
from ftfy.bad_codecs.utf8_variants import CODEC_INFO
codec = CODEC_INFO
elif norm_encoding.startswith('sloppy_'):
from ftfy.bad_codecs.sloppy import CODECS
codec = CODECS.get(norm_encoding)
if codec is not None:
_CACHE[encoding] = codec
return codec
def ok():
"""
A feel-good function that gives you something to call after importing
this package.
Why is this here? Pyflakes. Pyflakes gets upset when you import a module
and appear not to use it. It doesn't know that you're using it when
you use the ``unicode.encode`` and ``bytes.decode`` methods with certain
encodings.
"""
pass
codecs.register(search_function)
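A hedged sketch of the "sloppy" behaviour described in the module docstring: bytes the strict codec leaves unmapped decode to the Unicode character with the same number (0x81 is one such byte in windows-1252).
import ftfy.bad_codecs  # importing is enough to register the search function above
ftfy.bad_codecs.ok()
# strict cp1252 raises UnicodeDecodeError for 0x81; the sloppy variant returns U+0081
print(repr(b'\x81'.decode('sloppy-windows-1252')))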
| {
"content_hash": "9c758886ce5b2f83252f0e39efb629c1",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 79,
"avg_line_length": 34.12903225806452,
"alnum_prop": 0.7110901071203529,
"repo_name": "timokoola/finnkinotxt",
"id": "0984bd525265ee1ae8ba397ddc15bc4cdd2f497c",
"size": "3193",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ftfy/bad_codecs/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "52779"
},
{
"name": "JavaScript",
"bytes": "15800"
},
{
"name": "Python",
"bytes": "4304877"
},
{
"name": "TeX",
"bytes": "1527"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Series'
db.create_table('series_series', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=250)),
('description', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('series', ['Series'])
# Adding model 'SeriesNode'
db.create_table('series_seriesnode', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('series', self.gf('django.db.models.fields.related.ForeignKey')(related_name='nodes', to=orm['series.Series'])),
('order', self.gf('django.db.models.fields.IntegerField')(default=0)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal('series', ['SeriesNode'])
def backwards(self, orm):
# Deleting model 'Series'
db.delete_table('series_series')
# Deleting model 'SeriesNode'
db.delete_table('series_seriesnode')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'series.series': {
'Meta': {'object_name': 'Series'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
'series.seriesnode': {
'Meta': {'object_name': 'SeriesNode'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'series': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nodes'", 'to': "orm['series.Series']"})
}
}
    complete_apps = ['series']
| {
"content_hash": "170268b7a669d248ee13a764d97037d4",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 161,
"avg_line_length": 48.26229508196721,
"alnum_prop": 0.5737092391304348,
"repo_name": "armstrong/armstrong.apps.series",
"id": "cd35bddc74c4d8c53beadef5e5afef3254d4322d",
"size": "2968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "armstrong/apps/series/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17680"
},
{
"name": "Ruby",
"bytes": "43"
}
],
"symlink_target": ""
} |
'''
File created on Aug 7, 2012
@author: jhc02
'''
# Samtools module provides functions for various samtools operations
from subprocess import call
samtools_exec = '/usr/local/bio/samtools-0.1.19/samtools'
bcftools_exec = '/usr/local/bio/samtools-0.1.19/bcftools/bcftools'
def set_samtools_exec(samtools_location):
global samtools_exec
samtools_exec = samtools_location
def get_samtools_exec():
return samtools_exec
def import_sam(ref_genome_file, sam_file, bam_file = None):
if bam_file == None:
bam_file = sam_file.rsplit('.',1)[0] + '.bam'
print '\tSAM import...\n'
status = call([samtools_exec, 'import', ref_genome_file, sam_file, bam_file])
print '\tSAM import...DONE...Status code: ' + str(status) + '\n\n'
return (status, bam_file)
def sort(bam_input_file, sorted_output_file = None):
if sorted_output_file == None:
# Suffix is automatically added by sort
sorted_output_file = bam_input_file.rsplit('.',1)[0] + '_SORTED'
else:
sorted_output_file = bam_input_file.rsplit('.',1)[0]
print '\tSAM sort...\n'
status = call([samtools_exec, 'sort', bam_input_file, sorted_output_file])
print '\tSAM sort...DONE...Status code: ' + str(status) + '\n\n'
return (status, sorted_output_file + '.bam')
def index(input_file, output_file = None):
if output_file == None:
output_file = input_file.rsplit('.',1)[0] + '.bai'
print '\tSAM index...\n'
status = call([samtools_exec, 'index', input_file, output_file])
print '\tSAM index...DONE...Status code: ' + str(status) + '\n\n'
return (status, output_file)
def faidx(input_file, output_file = None):
if output_file == None:
output_file = input_file.rsplit('.',1)[0] + '.fai'
print '\tsamtools FA idx...\n'
status = call([samtools_exec, 'faidx', input_file, output_file])
print '\tsamtools faidx...DONE...Status code: ' + str(status) + '\n\n'
return (status, output_file)
def mpileup(input_file, output_file = None):
if output_file == None:
output_file = input_file.rsplit('.',1)[0] + '.bcf'
output_file_handle = open(output_file,'w')
print '\tsamtools mpileup...\n'
status = call([samtools_exec, 'mpileup', '-u', input_file], stdout=output_file_handle)
output_file_handle.close()
print '\tsamtools mpileup...DONE...Status code: ' + str(status) + '\n\n'
return (status, output_file)
def bcf2vcf(input_file, output_file = None):
if output_file == None:
output_file = input_file.rsplit('.',1)[0] + '.vcf'
output_file_handle = open(output_file,'w')
print '\tbcftools view...\n'
status = call([bcftools_exec, 'view', input_file], stdout=output_file_handle)
output_file_handle.close()
print '\tbcftools view...DONE...Status code: ' + str(status) + '\n\n'
return (status, output_file)
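A hedged sketch chaining the wrappers above into the usual SAM -> sorted, indexed BAM -> VCF flow; the file names are placeholders.
ref = "genome.fa"                             # placeholder reference FASTA
status, bam = import_sam(ref, "reads.sam")    # SAM -> BAM
status, sorted_bam = sort(bam)                # coordinate-sort the BAM
status, bai = index(sorted_bam)               # build the BAM index
status, bcf = mpileup(sorted_bam)             # pileup into an uncompressed BCF
status, vcf = bcf2vcf(bcf)                    # convert BCF -> VCF with bcftools view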
| {
"content_hash": "c9421f875fbee25f6be57a9b1162f8f4",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 90,
"avg_line_length": 38.58108108108108,
"alnum_prop": 0.6339754816112084,
"repo_name": "london-escience/libhpc-cf",
"id": "132f3854ed94f6348349879050ce00553e1a5324",
"size": "5044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libhpc/wrapper/bio/samtools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "122774"
}
],
"symlink_target": ""
} |
import re
def parseClangCompileParams(args):
className = ''
objectCompilation = ''
arch = ''
isysroot = ''
Lparams = []
Fparams = []
minOSParam=''
idx = 0
for arg in args:
# print('arg is %s' % arg)
if (re.match('.*\w+\.mm?$', arg)):
className = arg
# print('Found class name : ' + className)
if (re.match('.*\w+\.o$', arg)):
objectCompilation = arg
# print('Found object compilation name : ' + objectCompilation)
if (re.match('^-L.*', arg)):
Lparams = Lparams + [arg]
if (re.match('^-F.*', arg)):
Fparams = Fparams + [arg]
if (arg == '-arch'):
arch = args[idx+1]
if (arg == '-isysroot'):
isysroot = args[idx+1]
if (re.match('^-mi.*-min=.*', arg)):
minOSParam = arg
idx += 1
#print 'Class name : %s' % className
#print 'Object name : %s ' % objectCompilation
#print 'LParams %s' % Lparams
#print 'FParams %s' % Fparams
#print 'arch = %s ' % arch
#print 'isysroot = %s ' % isysroot
return {'class':className,
'object':objectCompilation,
'arch':arch,
'isysroot':isysroot,
'LParams':Lparams,
'FParams':Fparams,
'minOSParam':minOSParam
}
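A hedged sketch of calling the parser above on an illustrative argument list; the flags shown are invented for the example, not taken from a real build log.
args = ['-arch', 'armv7', '-isysroot', '/path/to/sdk',
        '-miphoneos-version-min=7.0', '-F/Frameworks', '-L/usr/lib',
        '-c', 'ViewController.m', '-o', 'ViewController.o']
params = parseClangCompileParams(args)
print(params['class'])       # ViewController.m
print(params['object'])      # ViewController.o
print(params['arch'])        # armv7
print(params['minOSParam'])  # -miphoneos-version-min=7.0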
| {
"content_hash": "26c1f659d54c4589f754bfae88696204",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 82,
"avg_line_length": 29.170212765957448,
"alnum_prop": 0.48067104303428154,
"repo_name": "xuvw/dyci-main",
"id": "9876444104f8a2f763f01eaf86c19c6b706c835b",
"size": "1371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Scripts/clangParams.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import math
# Project Coins
quarters = int(input("How many quarters do you have? "))
print()
dimes = int(input("How many dimes do you have? "))
print()
nickles = int(input("How many nickles do you have? "))
print()
pennies = int(input("How many pennies do you have? "))
print()
quarters_new = quarters * .25
dimes_new = dimes * .10
nickles_new = nickles * .05
pennies_new = pennies * .01
sum_of = quarters_new + dimes_new + nickles_new + pennies_new
print("You have",quarters,"quarters!")
print()
print("You have",dimes,"dimes!")
print()
print("You have",nickles,"nickles!")
print()
print("You have",pennies,"pennies!")
print()
print("You have ${0} worth of change!".format("%.2f" % sum_of))
# Project Pizza
print()
user_name = input("What is your name?" )
print("Welcome, {0}".format(user_name))
print()
cost = float(input("What is the cost of the pizza? "))
diameter = float(input("What is the diameter of the pizza? "))
radius = diameter / 2
area = math.pi * radius ** 2
price = cost / area
print("The cost per square inch of your pizza is ${0}".format("%.2f" % price))
# Project Coffee
name = input("What is your name? ")
print("Welcome, {0}!".format(name))
print()
cost = 10.50
pounds = int(input("How many pounds of coffee are you wanting to buy? "))
spp = .86 #means shipping per pound
shipping_flat = 1.50
tax_mi = .06
subtotal = cost * pounds
# assumed formula: flat shipping fee plus per-pound shipping added to the subtotal
grand_total = subtotal + shipping_flat + (spp * pounds)
print("The subtotal is {0}, the grand total is {1}, and the Michigan Tax is {2}".format(subtotal, grand_total, tax_mi))
| {
"content_hash": "c4c0850f0681c508bca42231da3503a0",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 119,
"avg_line_length": 14.724137931034482,
"alnum_prop": 0.6083138173302107,
"repo_name": "NoahFlowa/CTC_Projects",
"id": "10267dbe4df05dd563e50a3336098575b27c2c94",
"size": "1824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Osterhout_Python/Osterhout_Ales_ExamQs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31926"
}
],
"symlink_target": ""
} |
import os
import logging
from datetime import datetime, timedelta
import requests
from flask import Flask, request, session, url_for, redirect, abort, jsonify, render_template
app = Flask(__name__)
app.secret_key = os.environ['FLASK_SECRET_KEY']
logging.basicConfig(level='DEBUG')
GITLAB_HOST = os.environ['GITLAB_HOST']
GITLAB_APPID = os.environ['GITLAB_APPID']
GITLAB_APP_SECRET = os.environ['GITLAB_APP_SECRET']
# time tags value is hour for this tag
DATE_TAGS = {
'0.25D': 2,
'0.5D': 5,
'1D': 24,
'2D': 48,
}
DATE_FORMAT = '%Y-%m-%d'
@app.errorhandler(401)
def not_login_handler(error):
url = GITLAB_HOST + '/oauth/authorize?client_id={client_id}&redirect_uri={redirect_uri}&response_type=code'
auth_url = url_for('.index', _external=True)
url = url.format(client_id=GITLAB_APPID, redirect_uri=auth_url)
return redirect(url)
class GitlabToken(object):
def __init__(self, code=None, token_json=None):
logging.debug('instance gitlab token. code: %s, token_json:%s' % (code, token_json))
if code:
data = self._auth(code)
else:
data = token_json
self.token_json = data
self.access_token = data['access_token']
self.expires_at = datetime.now() + timedelta(seconds=7000)
self.refresh_token = data['refresh_token']
def __str__(self):
return "<access_token: %s, expires_at: %s>" % (self.access_token, self.expires_at)
def _auth(self, code):
url = GITLAB_HOST + '/oauth/token'
params = {
"client_id": GITLAB_APPID,
"client_secret": GITLAB_APP_SECRET,
"code": code,
"grant_type": "authorization_code",
"redirect_uri": url_for('.index', _external=True)
}
r = requests.post(url, params=params)
        logging.debug('result: %s %s %s %s', url, params, r.content, r.status_code)
if r.status_code != 200:
abort(400)
return r.json()
def _refresh_token(self):
url = GITLAB_HOST + '/oauth/token'
params = {
"refresh_token": self.refresh_token,
"grant_type": "refresh_token",
"scope": "api"
}
r = requests.post(url, params=params)
        logging.debug('result: %s %s %s %s', url, params, r.content, r.status_code)
if r.status_code != 200:
abort(400)
return r.json()
def is_valid(self):
return self.access_token and self.expires_at and datetime.now() < self.expires_at
def refresh(self):
data = self._refresh_token()
self.access_token = data['access_token']
self.expires_at = datetime.now() + timedelta(seconds=7000)
self.refresh_token = data['refresh_token']
    def get_token_or_refresh(self):
        if not self.is_valid():
            # refresh_token is a string attribute, so call refresh() to renew the access token
            self.refresh()
        return self.access_token
@classmethod
def get_instance(cls):
code = request.args.get('code')
token_json = session.get('token_json')
logging.debug('token: %s' % token_json)
if token_json:
token = GitlabToken(token_json=token_json)
elif code:
token = GitlabToken(code=code)
session['token_json'] = token.token_json
else:
abort(401)
return token
@app.route('/')
def index():
token = GitlabToken.get_instance()
url = GITLAB_HOST + '/api/v3/groups'
r = requests.get(url, headers={
"Authorization": "Bearer " + token.access_token
})
data = reversed(r.json())
logging.debug('groups: %s' % r.content.decode())
current_group_id = r.json()[0]['id'] if 'current_group_id' not in session else session['current_group_id']
return render_template('index.html', groups=data, current_group_id=int(current_group_id))
@app.route('/milestones')
def api_milestones():
token = GitlabToken.get_instance()
url = GITLAB_HOST + '/api/v3/projects'
r = requests.get(url, headers={
"Authorization": "Bearer " + token.access_token
})
milestones = []
for project in r.json():
url = GITLAB_HOST + '/api/v3/projects/%s/milestones' % project['id']
r = requests.get(url, headers={
"Authorization": "Bearer " + token.access_token
})
logging.debug('milestones: %s' % r.content)
if r.json():
milestones += r.json()
return jsonify(milestones)
@app.route('/api/calendar')
def api_calendar():
current_group_id = request.args.get('current_group_id')
session['current_group_id'] = current_group_id
events = []
token = GitlabToken.get_instance()
url = GITLAB_HOST + '/api/v3/groups/%s/issues?per_page=100&state=all' % current_group_id
logging.debug('url: %s' % url)
r = requests.get(url, headers={
"Authorization": "Bearer " + token.access_token
})
logging.debug('result issues: %s' % r.content.decode())
for issue in r.json():
data = {
"title": issue.get('title'),
"start": issue.get('created_at')[:10],
"allDay": True,
}
if issue.get('assignee'):
data['title'] += ' <i class="fa fa-user" aria-hidden="true"></i>%s' % issue['assignee']['name']
if issue.get('state') == 'closed':
data['backgroundColor'] = '#00a65a'
data['borderColor'] = '#00a65a'
due_date = issue.get('due_date')
if due_date:
due_date_time = datetime.strptime(due_date, DATE_FORMAT) + timedelta(hours=24)
data["end"] = due_date
labels = issue.get('labels')
if labels:
for label in labels:
date_tag = DATE_TAGS.get(label)
if date_tag:
fixed_start = due_date_time - timedelta(hours=date_tag)
fixed_start = fixed_start.strftime(DATE_FORMAT)
data['start'] = fixed_start
data['title'] += ' <i class="fa fa-clock-o" aria-hidden="true"></i>' + label
break
else:
data['backgroundColor'] = '#ad8d43'
data['borderColor'] = '#ad8d43'
else:
data['backgroundColor'] = '#ad8d43'
data['borderColor'] = '#ad8d43'
if issue.get('state') != 'closed':
if datetime.now() > due_date_time:
data['backgroundColor'] = '#f56954'
data['borderColor'] = '#f56954'
events.append(data)
return jsonify(events)
| {
"content_hash": "ecc2effa8c98969748933e1a2c3db20b",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 111,
"avg_line_length": 31.29383886255924,
"alnum_prop": 0.5623201575041648,
"repo_name": "beastbikes/GitlabCalendar",
"id": "4198c43d9cc9da543228911051332027d5e8deeb",
"size": "6603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "194369"
},
{
"name": "HTML",
"bytes": "31508"
},
{
"name": "JavaScript",
"bytes": "384946"
},
{
"name": "Python",
"bytes": "6769"
}
],
"symlink_target": ""
} |
""" This script compares the solution time in the presence and absence of
interpolation.
"""
# standard library
import time
import shutil
import glob
import sys
import os
# virtual environment
if not hasattr(sys, 'real_prefix'):
raise AssertionError('Please use a virtual environment for testing')
# PYTHONPATH
sys.path.insert(0, os.environ['ROBUPY'])
# project
import robupy
# Cleanup directory.
for file_ in glob.glob('*'):
# Skip setup files
if ('run' in file_) or ('ini' in file_):
continue
try:
os.unlink(file_)
except IsADirectoryError:
shutil.rmtree(file_)
# Process the initialization file into a dictionary to ease modification from
# baseline.
robupy_obj = robupy.read('data_one.robupy.ini')
init_dict = robupy_obj.get_attr('init_dict')
# Run solution without interpolation in a subdirectory
for is_interpolated in [True, False]:
# Set up directory structure.
if is_interpolated:
dir_ = 'with_interpolation'
else:
dir_ = 'without_interpolation'
os.mkdir(dir_), os.chdir(dir_)
# Modify request from baseline specification.
init_dict['INTERPOLATION']['apply'] = is_interpolated
init_dict['PROGRAM']['version'] = 'F2PY'
robupy_obj.unlock()
robupy_obj.set_attr('init_dict', init_dict)
robupy_obj.lock()
# Run solution to requested model and determine execution time.
print('\n Starting solution ' + dir_.replace('_', ' '))
start_time = time.time()
robupy.solve(robupy_obj)
execution_time = time.time() - start_time
print(' ... finished after {0:.3f} seconds'.format(execution_time))
os.chdir('../')
print('\n')
| {
"content_hash": "5f474f7e3dfc1a04380f5409bf7b4698",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 78,
"avg_line_length": 21.474358974358974,
"alnum_prop": 0.6680597014925374,
"repo_name": "peisenha/computational_coffee",
"id": "f5b10e6d536fd7be3448d85c24159cec5884d662",
"size": "1697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "interpolation/benchmarking/run.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1697"
}
],
"symlink_target": ""
} |
import unittest
import json
from unittest.mock import patch
from main import app
import routes.api.shared as shared_api
import routes.api.choropleth as choropleth_api
class TestChoroplethPlaces(unittest.TestCase):
@patch('routes.api.choropleth.place_api.parent_places')
@patch('routes.api.choropleth.place_api.get_place_type')
def test_get_choropleth_display_level_has_display_level(
self, mock_place_type, mock_parents):
dcid = "test_dcid1"
mock_place_type.return_value = "Country"
result = choropleth_api.get_choropleth_display_level(dcid)
assert result == (dcid, "AdministrativeArea1")
@patch('routes.api.choropleth.place_api.parent_places')
@patch('routes.api.choropleth.place_api.get_place_type')
def test_get_choropleth_display_level_equivalent_has_display_level(
self, mock_place_type, mock_parents):
dcid = "test_dcid2"
mock_place_type.return_value = "AdministrativeArea1"
result = choropleth_api.get_choropleth_display_level(dcid)
assert result == (dcid, "AdministrativeArea2")
@patch('routes.api.choropleth.place_api.parent_places')
@patch('routes.api.choropleth.place_api.get_place_type')
def test_get_choropleth_display_level_has_no_display_level(
self, mock_place_type, mock_parents):
dcid = "test_dcid3"
parent_dcid = "parent_dcid"
mock_place_type.return_value = "County"
mock_parents.return_value = mock_parents.return_value = {
dcid: [{
'dcid': parent_dcid,
'types': ['Country']
}]
}
result = choropleth_api.get_choropleth_display_level(dcid)
assert result == (None, None)
@patch('routes.api.choropleth.place_api.parent_places')
@patch('routes.api.choropleth.place_api.get_place_type')
def test_get_choropleth_display_level_parent_places(self, mock_place_type,
mock_parents):
dcid = "test_dcid4"
parent_dcid = "parent_dcid"
mock_place_type.return_value = "County"
mock_parents.return_value = {
dcid: [{
'dcid': parent_dcid,
'types': ['AdministrativeArea1']
}]
}
result = choropleth_api.get_choropleth_display_level(dcid)
assert result == (parent_dcid, "County")
@patch('routes.api.choropleth.place_api.parent_places')
@patch('routes.api.choropleth.place_api.get_place_type')
def test_get_choropleth_display_level_parent_has_equivalent(
self, mock_place_type, mock_parents):
dcid = "test_dcid5"
parent_dcid = "parent_dcid"
mock_place_type.return_value = "County"
mock_parents.return_value = {
dcid: [{
'dcid': parent_dcid,
'types': ['State']
}]
}
result = choropleth_api.get_choropleth_display_level(dcid)
assert result == (parent_dcid, "County")
class TestGetGeoJson(unittest.TestCase):
@staticmethod
def side_effect(*args):
return args[0]
@patch('routes.api.choropleth.dc.get_places_in')
@patch('routes.api.choropleth.coerce_geojson_to_righthand_rule')
@patch('routes.api.choropleth.dc.property_values')
@patch('routes.api.choropleth.place_api.get_display_name')
@patch('routes.api.choropleth.get_choropleth_display_level')
def test_get_geojson(self, mock_display_level, mock_display_name,
mock_geojson_values, mock_choropleth_helper,
mock_places):
dcid1 = "dcid1"
dcid2 = "dcid2"
mock_display_level.return_value = ("parentDcid", "State")
def get_places_in_(*args):
if args[0] == ["parentDcid"] and args[1] == "State":
return {"parentDcid": [dcid1, dcid2]}
else:
return {args[0]: []}
mock_places.side_effect = get_places_in_
mock_display_name.return_value = {dcid1: dcid1, dcid2: dcid2}
mock_geojson_values.return_value = {
dcid1: json.dumps({
"coordinates": [],
"type": "Polygon"
}),
dcid2: json.dumps({
"coordinates": [],
"type": "MultiPolygon"
})
}
mock_choropleth_helper.side_effect = self.side_effect
response = app.test_client().get('/api/choropleth/geojson?placeDcid=' +
dcid1)
assert response.status_code == 200
response_data = json.loads(response.data)
assert len(response_data['features']) == 2
assert len(response_data['properties']['current_geo']) == dcid1
@patch('routes.api.choropleth.dc.get_places_in')
@patch('routes.api.choropleth.coerce_geojson_to_righthand_rule')
@patch('routes.api.choropleth.dc.property_values')
@patch('routes.api.choropleth.place_api.get_display_name')
def test_get_geojson_with_place_type(self, mock_display_name,
mock_geojson_values,
mock_choropleth_helper, mock_places):
dcid1 = "dcid1"
dcid2 = "dcid2"
def get_places_in_(*args):
if args[0] == ["parentDcid"] and args[1] == "State":
return {"parentDcid": [dcid1, dcid2]}
else:
return {args[0]: []}
mock_places.side_effect = get_places_in_
mock_display_name.return_value = {dcid1: dcid1, dcid2: dcid2}
mock_geojson_values.return_value = {
dcid1: json.dumps({
"coordinates": [],
"type": "Polygon"
}),
dcid2: json.dumps({
"coordinates": [],
"type": "MultiPolygon"
})
}
mock_choropleth_helper.side_effect = self.side_effect
response = app.test_client().get(
f'/api/choropleth/geojson?placeDcid=${dcid1}&placeType=State')
assert response.status_code == 200
response_data = json.loads(response.data)
assert len(response_data['features']) == 2
assert len(response_data['properties']['current_geo']) == dcid1
class TestChoroplethDataHelpers(unittest.TestCase):
def test_get_choropleth_configs(self):
cc1 = {
'category': ['Test', 'Test1'],
'title': 'Test1',
'statsVars': ['StatVar1'],
'isOverview': True,
}
cc2 = {
'category': ['Test', 'Test2'],
'title': 'Test2',
'statsVars': ['StatVar2'],
'isChoropleth': False
}
cc3 = {
'category': ['Test', 'Test2'],
'title': 'Test2',
'statsVars': ['StatVar3'],
'isChoropleth': True
}
with app.app_context():
app.config['CHART_CONFIG'] = [cc1, cc2, cc3]
expected_chart_configs = [cc3]
actual_chart_configs = choropleth_api.get_choropleth_configs()
assert expected_chart_configs == actual_chart_configs
def test_get_choropleth_sv(self):
cc1 = {
'category': ['Test', 'Test2'],
'title': 'Test2',
'statsVars': ['StatVar4'],
'isChoropleth': True,
'relatedChart': {
'scale': True,
'denominator': 'Test_Denominator'
}
}
cc2 = {
'category': ['Test', 'Test2'],
'title': 'Test2',
'statsVars': ['StatVar5'],
'denominator': ['StatVar6'],
'isChoropleth': True
}
cc3 = {
'category': ['Test', 'Test2'],
'title': 'Test2',
'statsVars': ['StatVar7'],
'isChoropleth': True,
'relatedChart': {
'scale': True
}
}
cc4 = {
'category': ['Test', 'Test2'],
'title': 'Test2',
'statsVars': ['StatVar8'],
'isChoropleth': True,
'relatedChart': {
'scale': False
}
}
expected_sv_set = {'StatVar4', 'StatVar5', 'StatVar7', 'StatVar8'}
expected_denom_set = {'StatVar6', 'Count_Person', 'Test_Denominator'}
actual_sv_set, actual_denom_set = shared_api.get_stat_vars(
[cc1, cc2, cc3, cc4])
assert expected_sv_set == actual_sv_set
assert expected_denom_set == actual_denom_set
def test_get_denom_val(self):
test_denom_data = [
{
'date': '2017-01',
'value': 1
},
{
'date': '2018-01',
'value': 2
},
{
'date': '2020-01',
'value': 3
},
]
result_in_denom_data = choropleth_api.get_denom_val("2018-01",
test_denom_data)
assert result_in_denom_data == 2
result_earlier_than_denom_data = choropleth_api.get_denom_val(
"2016-01", test_denom_data)
assert result_earlier_than_denom_data == 1
result_later_than_denom_data = choropleth_api.get_denom_val(
"2021-01", test_denom_data)
assert result_later_than_denom_data == 3
result_denom_data_no_match = choropleth_api.get_denom_val(
"2019-01", test_denom_data)
assert result_denom_data_no_match == 2
result_denom_date_less_specific = choropleth_api.get_denom_val(
"2018-01-01", test_denom_data)
assert result_denom_date_less_specific == 2
result_denom_date_less_specific_no_match = choropleth_api.get_denom_val(
"2019-07-01", test_denom_data)
assert result_denom_date_less_specific_no_match == 3
result_denom_date_more_specific = choropleth_api.get_denom_val(
"2018", test_denom_data)
assert result_denom_date_more_specific == 2
result_denom_date_less_specific_no_match = choropleth_api.get_denom_val(
"2019", test_denom_data)
assert result_denom_date_less_specific_no_match == 2
def test_get_date_range(self):
test_single_date = {"2019"}
single_date_result = shared_api.get_date_range(test_single_date)
assert single_date_result == "2019"
test_multiple_dates = {"2019", "2018", "2017"}
multiple_date_result = shared_api.get_date_range(test_multiple_dates)
assert multiple_date_result == "2017 – 2019"
test_empty_dates = {}
empty_date_result = shared_api.get_date_range(test_empty_dates)
assert empty_date_result == ""
test_empty_valid_dates = {""}
empty_valid_date_result = shared_api.get_date_range(test_empty_valid_dates)
assert empty_valid_date_result == ""
class TestChoroplethData(unittest.TestCase):
@patch('routes.api.choropleth.dc.get_places_in')
@patch('routes.api.choropleth.point_api.point_within_core')
@patch('routes.api.choropleth.series_api.series_core')
@patch('routes.api.choropleth.get_choropleth_display_level')
@patch('routes.api.choropleth.get_choropleth_configs')
@patch('routes.api.shared.get_stat_vars')
def testRoute(self, mock_stat_vars, mock_configs, mock_display_level,
mock_denom_data, mock_num_data, mock_places_in):
test_dcid = 'test_dcid'
geo1 = 'dcid1'
geo2 = 'dcid2'
display_level = "AdministrativeArea1"
sv1 = 'StatVar1'
sv2 = 'StatVar2'
sv3 = 'StatVar3'
sv1_date1 = '2018'
sv1_date2 = '2019'
sv2_date = '2018'
sv1_val = 2
sv2_val1 = 4
sv2_val2 = 6
source1 = 'source1'
source2 = 'source2'
source3 = 'source3'
scaling_val = 100
denom_val = 2
cc1 = {
'category': ['Test', 'Test2'],
'title': 'Test2',
'statsVars': [sv1],
'isChoropleth': True
}
cc2 = {
'category': ['Test', 'Test2'],
'title': 'Test2',
'statsVars': [sv2],
'isChoropleth': True,
'relatedChart': {
'scale': True,
'denominator': sv3,
'scaling': scaling_val
}
}
sv_set = {sv1, sv2}
denoms_set = {sv3}
chart_configs = [cc1, cc2]
geos = [geo1, geo2]
mock_configs.return_value = [cc1, cc2]
mock_display_level.return_value = test_dcid, display_level
def places_in_side_effect(*args):
if args[0] == [test_dcid] and args[1] == display_level:
return {test_dcid: geos}
else:
return {}
mock_places_in.side_effect = places_in_side_effect
def stat_vars_side_effect(*args):
if args[0] == chart_configs:
return sv_set, denoms_set
else:
return {}, {}
mock_stat_vars.side_effect = stat_vars_side_effect
num_api_resp = {
'data': {
sv1: {
geo1: {
'date': sv1_date1,
'value': sv1_val,
'facet': "facet1",
},
geo2: {
'date': sv1_date2,
'value': sv1_val,
'facet': "facet1",
}
},
sv2: {
geo1: {
'date': sv2_date,
'value': sv2_val1,
'facet': "facet1",
},
geo2: {
'date': sv2_date,
'value': sv2_val2,
'facet': "facet2",
}
}
},
'facets': {
'facet1': {
'importName': 'importName1',
'provenanceUrl': source1
},
'facet2': {
'importName': 'importName2',
'provenanceUrl': source2
}
}
}
def num_data_side_effect(*args):
if args[0] == test_dcid and args[1] == display_level:
return num_api_resp
else:
return {}
mock_num_data.side_effect = num_data_side_effect
denom_api_resp = {
'data': {
sv3: {
geo1: {
'series': [{
'date': '2018',
'value': 2,
}],
'facet': 'facet3'
},
geo2: {}
},
},
'facets': {
'facet3': {
'importName': 'importName3',
'provenanceUrl': source3
},
}
}
def denom_data_side_effect(*args):
if args[0] == geos and args[1] == [sv3]:
return denom_api_resp
else:
return {}
mock_denom_data.side_effect = denom_data_side_effect
response = app.test_client().get('/api/choropleth/data/' + test_dcid)
assert response.status_code == 200
response_data = json.loads(response.data)
expected_data = {
sv1: {
'date':
f'{sv1_date1} – {sv1_date2}',
'data': {
geo1: sv1_val,
geo2: sv1_val
},
'numDataPoints':
2,
'exploreUrl':
"/tools/map#&pd=test_dcid&ept=AdministrativeArea1&sv=StatVar1",
'sources': [source1]
},
sv2: {
'date':
sv2_date,
'data': {
geo1: (sv2_val1 / denom_val) * scaling_val
},
'numDataPoints':
1,
'exploreUrl':
"/tools/map#&pd=test_dcid&ept=AdministrativeArea1&sv=StatVar2&pc=1",
'sources': [source1, source3]
}
}
assert response_data == expected_data
| {
"content_hash": "6bea105a26a8c3d10d28e5cbcf3ad1c7",
"timestamp": "",
"source": "github",
"line_count": 465,
"max_line_length": 84,
"avg_line_length": 32.27956989247312,
"alnum_prop": 0.5473017988007994,
"repo_name": "datacommonsorg/website",
"id": "3fc084aecd403ef700819848905a211193c53a0c",
"size": "15590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/tests/choropleth_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24960"
},
{
"name": "Dockerfile",
"bytes": "3378"
},
{
"name": "HCL",
"bytes": "24322"
},
{
"name": "HTML",
"bytes": "353238"
},
{
"name": "JavaScript",
"bytes": "65712"
},
{
"name": "Python",
"bytes": "596216"
},
{
"name": "SCSS",
"bytes": "107955"
},
{
"name": "Shell",
"bytes": "39019"
},
{
"name": "Smarty",
"bytes": "8285"
},
{
"name": "TypeScript",
"bytes": "1764442"
}
],
"symlink_target": ""
} |
'''
@author: sheng
@contact: [email protected]
@copyright: License according to the project license.
'''
NAME='guichou33'
SPELL='guǐchǒu'
CN='癸丑'
SEQ='50'
if __name__=='__main__':
pass
| {
"content_hash": "bc14cde3269cd02c77ee0ff7228acfef",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 53,
"avg_line_length": 14.923076923076923,
"alnum_prop": 0.6752577319587629,
"repo_name": "sinotradition/sinoera",
"id": "30549eec8bc785868da95b7c6da46d6465515c6b",
"size": "233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sinoera/ganzhi/guichou33.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "74484"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import image.models
import image.storage
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Albumn',
fields=[
('id', models.CharField(default=uuid.uuid4, max_length=64, primary_key=True, serialize=False, verbose_name='Activation key')),
('name', models.CharField(db_index=True, max_length=60, unique=True)),
('weight', models.IntegerField(default=0)),
('slug', models.SlugField(max_length=150, unique=True)),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=60, null=True)),
('image_key', models.CharField(default=uuid.uuid4, max_length=64, verbose_name='Activation key')),
('image', models.ImageField(storage=image.storage.OverwriteStorage(), upload_to=image.models.image_upload_path)),
('created', models.DateTimeField(auto_now_add=True)),
('active', models.BooleanField(default=True)),
('weight', models.IntegerField(default=0)),
('albumn', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='image.Albumn')),
],
),
]
| {
"content_hash": "5139273e751a1936cdbb40f9b171c769",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 142,
"avg_line_length": 41.69047619047619,
"alnum_prop": 0.597372929754426,
"repo_name": "vollov/django-blog",
"id": "cbf495bf84a1a39972d47b23aa0e243c9f3a3d28",
"size": "1822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "image/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2090"
},
{
"name": "HTML",
"bytes": "9094"
},
{
"name": "JavaScript",
"bytes": "6583"
},
{
"name": "Python",
"bytes": "38462"
}
],
"symlink_target": ""
} |
from optics.driver.abstract_driver_setting import AbstractDriverSetting
class SRWDriverSetting(AbstractDriverSetting):
def __init__(self):
from code_drivers.SRW.SRW_driver import SRWDriver
AbstractDriverSetting.__init__(self,
driver=SRWDriver())
| {
"content_hash": "20c3b32cf2ce54ffd7ce34bf3815d63b",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 71,
"avg_line_length": 43.714285714285715,
"alnum_prop": 0.6633986928104575,
"repo_name": "radiasoft/optics",
"id": "f2eeccd94cf15a462c3f0d2b249857c0fa50db20",
"size": "306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code_drivers/SRW/SRW_driver_setting.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "220945"
}
],
"symlink_target": ""
} |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class auditnslogpolicy_aaauser_binding(base_resource) :
""" Binding class showing the aaauser that can be bound to auditnslogpolicy.
"""
def __init__(self) :
self._boundto = ""
self._priority = 0
self._activepolicy = 0
self._name = ""
self.___count = 0
@property
def name(self) :
"""Name of the policy.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the policy.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def boundto(self) :
"""The entity name to which policy is bound.
"""
try :
return self._boundto
except Exception as e:
raise e
@boundto.setter
def boundto(self, boundto) :
"""The entity name to which policy is bound.
"""
try :
self._boundto = boundto
except Exception as e:
raise e
@property
def priority(self) :
try :
return self._priority
except Exception as e:
raise e
@property
def activepolicy(self) :
try :
return self._activepolicy
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(auditnslogpolicy_aaauser_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.auditnslogpolicy_aaauser_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
""" Use this API to fetch auditnslogpolicy_aaauser_binding resources.
"""
try :
obj = auditnslogpolicy_aaauser_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
""" Use this API to fetch filtered set of auditnslogpolicy_aaauser_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = auditnslogpolicy_aaauser_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
""" Use this API to count auditnslogpolicy_aaauser_binding resources configued on NetScaler.
"""
try :
obj = auditnslogpolicy_aaauser_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
""" Use this API to count the filtered set of auditnslogpolicy_aaauser_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = auditnslogpolicy_aaauser_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class auditnslogpolicy_aaauser_binding_response(base_response) :
def __init__(self, length=1) :
self.auditnslogpolicy_aaauser_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.auditnslogpolicy_aaauser_binding = [auditnslogpolicy_aaauser_binding() for _ in range(length)]
| {
"content_hash": "6ee9217af66d37f520b37ca94e0f504e",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 134,
"avg_line_length": 27.09036144578313,
"alnum_prop": 0.6931287525016678,
"repo_name": "mahabs/nitro",
"id": "f97702ab488e0d17adf487a302202507c657c141",
"size": "5111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/config/audit/auditnslogpolicy_aaauser_binding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "498"
},
{
"name": "Python",
"bytes": "10647176"
}
],
"symlink_target": ""
} |
from django import shortcuts
from django.views import generic
import models
class ArticleListView(generic.ListView):
paginate_by = 8
kind = None
def get_queryset(self):
if self.kind:
return models.Article.objects.published().filter(kind=self.kind)
else:
return models.Article.objects.published()
def get_context_data(self, **kwargs):
context = super(ArticleListView, self).get_context_data(**kwargs)
context['kind'] = self.kind
return context
class ArticleDetailView(generic.DetailView):
model = models.Article
def get(self, request, *args, **kwargs):
self.object = self.get_object()
if self.object.get_absolute_url() != self.request.path:
return shortcuts.redirect(self.object, permanent=True)
# See: super(ArticleDetailView, self).get(request, *args, **kwargs)
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
# TODO: Find a way to call super() without retrieving the object twice
# super(ArticleDetailView, self).get(request, *args, **kwargs)
def get_object(self, queryset=None):
if queryset is None:
queryset = self.get_queryset()
obj = shortcuts.get_object_or_404(queryset, pk=self.kwargs.get('id'),
status='p')
return obj
| {
"content_hash": "6298a3faf0ced388c4a82c3ce28820d4",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 78,
"avg_line_length": 28.54,
"alnum_prop": 0.6306937631394534,
"repo_name": "megaprojectske/megaprojects.co.ke",
"id": "509538ab1d5e79e77ec227f8d94b8ed975277d81",
"size": "1427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "megaprojects/articles/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "43714"
},
{
"name": "HTML",
"bytes": "49616"
},
{
"name": "Python",
"bytes": "222166"
},
{
"name": "Ruby",
"bytes": "1210"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name = 'twarkov',
version = '0.0.2',
description = 'Markov generator built for generating Tweets from timelines',
license = 'MIT',
author = 'Amanda Pickering',
author_email = '[email protected]',
install_requires = ['nltk', 'wsgiref'],
url = 'https://github.com/amandapickering/twarkov',
keywords = 'twitter markov generator bots',
packages = find_packages(),
)
| {
"content_hash": "03d162f9957020be66110ac0abe683c6",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 29.61111111111111,
"alnum_prop": 0.7054409005628518,
"repo_name": "amanda/twarkov",
"id": "7d6cc110a669df85939b4a07390e2fbf146dd9d6",
"size": "533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7197"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from hunts import models
admin.site.register(models.Business)
admin.site.register(models.Hunt)
admin.site.register(models.Follow)
admin.site.register(models.Comment)
| {
"content_hash": "6ad079704f417ad82607ed5a33836ff9",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 36,
"avg_line_length": 28.571428571428573,
"alnum_prop": 0.83,
"repo_name": "srohatgi/cloud",
"id": "9c53855753fe3ac015a52709c3f1c449cc92bdd4",
"size": "200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "huntnet/hunts/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "11300"
},
{
"name": "JavaScript",
"bytes": "734"
},
{
"name": "Makefile",
"bytes": "88"
},
{
"name": "Perl",
"bytes": "1301"
},
{
"name": "Python",
"bytes": "16411"
},
{
"name": "Ruby",
"bytes": "810998"
},
{
"name": "Shell",
"bytes": "1886"
}
],
"symlink_target": ""
} |
import copy
import re
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from openstack_plugin_common import (
get_resource_id,
use_external_resource,
delete_resource_and_runtime_properties,
validate_resource,
validate_ip_or_range_syntax,
OPENSTACK_ID_PROPERTY,
OPENSTACK_TYPE_PROPERTY,
OPENSTACK_NAME_PROPERTY,
COMMON_RUNTIME_PROPERTIES_KEYS
)
SECURITY_GROUP_OPENSTACK_TYPE = 'security_group'
# Runtime properties
RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS
NODE_NAME_RE = re.compile('^(.*)_.*$') # Anything before last underscore
def build_sg_data(args=None):
security_group = {
'description': None,
'name': get_resource_id(ctx, SECURITY_GROUP_OPENSTACK_TYPE),
}
args = args or {}
security_group.update(ctx.node.properties['security_group'], **args)
return security_group
def process_rules(client, sgr_default_values, cidr_field_name,
remote_group_field_name, min_port_field_name,
max_port_field_name):
rules_to_apply = ctx.node.properties['rules']
security_group_rules = []
for rule in rules_to_apply:
security_group_rules.append(
_process_rule(rule, client, sgr_default_values, cidr_field_name,
remote_group_field_name, min_port_field_name,
max_port_field_name))
return security_group_rules
def use_external_sg(client):
return use_external_resource(ctx, client,
SECURITY_GROUP_OPENSTACK_TYPE)
def set_sg_runtime_properties(sg, client):
ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] =\
client.get_id_from_resource(sg)
ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] =\
SECURITY_GROUP_OPENSTACK_TYPE
ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = \
client.get_name_from_resource(sg)
def delete_sg(client, **kwargs):
delete_resource_and_runtime_properties(ctx, client,
RUNTIME_PROPERTIES_KEYS)
def sg_creation_validation(client, cidr_field_name, **kwargs):
validate_resource(ctx, client, SECURITY_GROUP_OPENSTACK_TYPE)
ctx.logger.debug('validating CIDR for rules with a {0} field'.format(
cidr_field_name))
for rule in ctx.node.properties['rules']:
if cidr_field_name in rule:
validate_ip_or_range_syntax(ctx, rule[cidr_field_name])
def _process_rule(rule, client, sgr_default_values, cidr_field_name,
remote_group_field_name, min_port_field_name,
max_port_field_name):
ctx.logger.debug(
"Security group rule before transformations: {0}".format(rule))
sgr = copy.deepcopy(sgr_default_values)
if 'port' in rule:
rule[min_port_field_name] = rule['port']
rule[max_port_field_name] = rule['port']
del rule['port']
sgr.update(rule)
if (remote_group_field_name in sgr) and sgr[remote_group_field_name]:
sgr[cidr_field_name] = None
elif ('remote_group_node' in sgr) and sgr['remote_group_node']:
_, remote_group_node = _capabilities_of_node_named(
sgr['remote_group_node'])
sgr[remote_group_field_name] = remote_group_node[OPENSTACK_ID_PROPERTY]
del sgr['remote_group_node']
sgr[cidr_field_name] = None
elif ('remote_group_name' in sgr) and sgr['remote_group_name']:
sgr[remote_group_field_name] = \
client.get_id_from_resource(
client.cosmo_get_named(
SECURITY_GROUP_OPENSTACK_TYPE, sgr['remote_group_name']))
del sgr['remote_group_name']
sgr[cidr_field_name] = None
ctx.logger.debug(
"Security group rule after transformations: {0}".format(sgr))
return sgr
def _capabilities_of_node_named(node_name):
result = None
caps = ctx.capabilities.get_all()
for node_id in caps:
match = NODE_NAME_RE.match(node_id)
if match:
candidate_node_name = match.group(1)
if candidate_node_name == node_name:
if result:
raise NonRecoverableError(
"More than one node named '{0}' "
"in capabilities".format(node_name))
result = (node_id, caps[node_id])
if not result:
raise NonRecoverableError(
"Could not find node named '{0}' "
"in capabilities".format(node_name))
return result
| {
"content_hash": "6b7d78350601a1c08fef55f1b4a4be89",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 79,
"avg_line_length": 34.12781954887218,
"alnum_prop": 0.6276712932363957,
"repo_name": "szpotona/cloudify-openstack-plugin",
"id": "0fa21aa149fa99a30056caf65a940fab35fa9d33",
"size": "5177",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "openstack_plugin_common/security_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1033"
},
{
"name": "Python",
"bytes": "193595"
}
],
"symlink_target": ""
} |
import re
from datetime import datetime
import json
#libs
from sqlalchemy import create_engine
from sqlalchemy import Integer
from sqlalchemy import DateTime
from sqlalchemy import MetaData
from sqlalchemy import Column
from sqlalchemy import Table
from sqlalchemy import select
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.declarative import declared_attr
#from pydash import _
# Specify Constraint and Index Naming Conventions
optio_convention = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
def mk_table_name(name):
s = first_cap_re.sub(r'\1_\2', name)
n = all_cap_re.sub(r'\1_\2', s).lower()
n = n if n.endswith('s') else n + 's'
if n.endswith('ys'):
n = n[:-2] + 'ies'
# todo: find a better way to pluralize.
# this is easy to break.
# perhaps no need to cover all possible options as
# convention is perhaps ok to rely on in naming classes.
return n
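# Behaviour illustration (derived from the regexes above, not present in the
# original module):
#   mk_table_name('UserProfile') -> 'user_profiles'
#   mk_table_name('Category')    -> 'categories'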
class OptioBase:
'''
to_dict derived from:
- https://gist.github.com/alanhamlett/6604662
- https://github.com/jfinkels/flask-restless/blob/master/flask_restless/helpers.py#L261
'''
@declared_attr
def __tablename__(cls):
return mk_table_name(cls.__name__)
_changes = {}
created_at = Column(DateTime, nullable=False, default=datetime.utcnow)
def __init__(self):
pass
def _set_columns(self, **kwargs):
pass
def set_columns(self, **kwargs):
pass
@property
def changes(self):
return self._changes
def reset_changes(self):
self._changes = {}
def get_value(self, key):
val = getattr(self, key)
if isinstance(val, datetime):
            # datetime values are not JSON serializable; render them as strings
            val = str(val)
return val
def to_dict(self, show=[], hide=[], path=None, show_all=None):
'''
Return dictionary representation of this model
'''
tpl = '{}.{}'
# get hidden_fields
hidden = []
if hasattr(self, 'hidden_fields'):
hidden = self.hidden_fields
# get default fields
default = []
if hasattr(self, 'default_fields'):
default = self.default_fields
ret_data = {}
if not path:
path = self.__tablename__
show = [tpl.format(path, x.lower()) for x in show]
hide = [tpl.format(path, x.lower()) for x in hide]
columns = self.__table__.columns.keys()
relationships = self.__mapper__.relationships.keys()
properties = dir(self)
#load data from object columns
for key in columns:
check = tpl.format(path, key)
if check in hide or key in hidden:
continue
            if show_all or key == 'id' or check in show or key in default:
ret_data[key] = self.get_value(key)
#load data from related classes
for key in relationships:
check = tpl.format(path, key)
if check in hide or key in hidden:
continue
if show_all or check in show or key in default:
                hide.append(check)  # hide this path in nested calls to avoid circular serialization
is_list = self.__mapper__.relationships[key].uselist
is_object = self.__mapper__.relationships[key].query_class is not None
is_basic = not (is_list or is_object)
key_path = tpl.format(path, key.lower())
info = dict(check=check, is_list=is_list, is_object=is_object, is_basic=is_basic, key_path=key_path)
#one to many
if is_list:
ret_data[key] = []
for item in getattr(self, key):
ret_data[key].append(item.to_dict(
show=show, hide=hide,
path=key_path, show_all=show_all,
))
elif is_object:
ret_data[key] = getattr(self, key).to_dict(
show=show, hide=hide,
path=key_path, show_all=show_all
)
props = list(set(properties) - set(columns) - set(relationships))
for key in props:
if key.startswith('_'):
continue
check = tpl.format(path, key)
if check in hide or key in hidden:
continue
if show_all or check in show or key in default:
val = getattr(self, key)
try:
ret_data[key] = json.loads(json.dumps(val))
except:
print('Can not serialize:', check, type(val))
return ret_data
class SessionContext:
def __init__(self, session_class):
self.session_class = session_class
def __enter__(self):
self.session = self.session_class()
return self.session
def __exit__(self, type, value, traceback):
self.session.close()
class SQLAlchemy:
def __init__(self):
self.meta = MetaData(naming_convention=optio_convention)
self.Model = declarative_base(cls=OptioBase, name='Model', metadata=self.meta)
def configure(self, engine):
self.engine = engine
self.Session = sessionmaker(bind=engine)
# def session(self):
# return self.Session()
def schemata(self):
'''
dubious method. just playing around with information_schema
'''
m1 = MetaData()
schemata = Table('schemata', m1,
schema='information_schema',
autoload=True,
autoload_with=self.engine
)
q = select([schemata.c.schema_name])
#q = select([schemata.c.schema_name])#.\
#where(schemata.c.schema_name == sn)
for schema in self.engine.execute(q).fetchall():
yield schema[0]
def drop_all(self):
'''
https://kronosapiens.github.io/blog/2014/07/29/setting-up-unit-tests-with-flask/
http://www.mbeckler.org/blog/?p=218
'''
self.Model.metadata.drop_all(self.engine)
def create_all(self):
self.Model.metadata.create_all(self.engine)
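# Minimal usage sketch (illustrative only; the in-memory SQLite URL and
# `SomeModel` are placeholders, not part of this module):
#
#   db = SQLAlchemy()
#   db.configure(create_engine('sqlite://'))
#   db.create_all()
#   with SessionContext(db.Session) as session:
#       session.query(SomeModel).all()  # SomeModel subclasses db.Model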
| {
"content_hash": "e8f849f462576fc93e081423790a1d87",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 116,
"avg_line_length": 28.0875,
"alnum_prop": 0.5525886367007863,
"repo_name": "meantheory/optio",
"id": "c126e92c0cfb3340d2e4c279d36e334f99d16d31",
"size": "6749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "optio/sqlalchemy/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30368"
}
],
"symlink_target": ""
} |
import argparse
import cv2
import numpy as np
import os.path
def generateColormapsCv2(S):
names = {
"autumn",
"bone",
"cool",
"hot",
"hsv",
"jet",
"ocean",
"pink",
"rainbow",
"spring",
"summer",
"winter",
}
colormaps = {}
for name in names:
c = getattr(cv2, "COLORMAP_" + name.upper())
colormaps[name.lower()] = cv2.applyColorMap(S, c)[:,:,::-1]
return colormaps
def generateColormapsMatplotlib(S):
import matplotlib.cm
colormaps = {}
for name in dir(matplotlib.cm):
if name[-2:] == "_r":
continue
c = getattr(matplotlib.cm, name)
if isinstance(c, matplotlib.colors.LinearSegmentedColormap):
colormaps[name.lower()] = np.round(255.0 * c(S)[:,:,:3]).astype("uint8")
return colormaps
def generateColormaps(targetDir):
# load gray scale slope image (1x256 pixels)
S = cv2.imread(os.path.join(os.path.dirname(__file__), "utils-generateColormapImages-graySlope.png"), cv2.IMREAD_GRAYSCALE)
colormaps = {}
colormaps.update(generateColormapsCv2(S))
colormaps.update(generateColormapsMatplotlib(S))
for (name, C) in colormaps.items():
filenameOut = "{}.png".format(name.lower())
print("Saving colormap image '{}'".format(filenameOut))
cv2.imwrite(os.path.join(targetDir, filenameOut), C[:,:,::-1])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Applies various colormaps on the standard gray scale slope image and saves the colorized images (which can then be converted to JSON via 'utils-convertColormapImagesToJson.py')")
parser.add_argument("targetDir", help="directory where the resulting image files are saved to")
args = parser.parse_args()
generateColormaps(**vars(args))
| {
"content_hash": "1566002b3475ab502090da3526641930",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 228,
"avg_line_length": 31.64406779661017,
"alnum_prop": 0.6256025709694697,
"repo_name": "dhaase-de/dh-python-dh",
"id": "965148d68e51069a76292e37d6b8e6b4868973d0",
"size": "1887",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/utils-generateColormapImages.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "183301"
},
{
"name": "Shell",
"bytes": "6444"
}
],
"symlink_target": ""
} |
import sys
import traceback
from .exceptions import RetryException
import logging
logger = logging.getLogger('sqjobs.worker')
class Worker(object):
DEFAULT_TIMEOUT = 20 # seconds
def __init__(self, broker, queue_name, timeout=None):
self.broker = broker
self.queue_name = queue_name
self.timeout = timeout or self.DEFAULT_TIMEOUT
self.registered_jobs = {}
self.exception_handlers = []
def __repr__(self):
return 'Worker({connector})'.format(
connector=type(self.broker.connector).__name__
)
def register_job(self, job_class):
name = job_class._task_name()
if job_class.abstract:
logger.info('Job %s is abstract, ignoring it...', name)
return
if name in self.registered_jobs:
logger.warning('Job %s already registered, overwriting it...', name)
logger.info('Registering new job: %s', name)
self.registered_jobs[name] = job_class
def append_exception_handler(self, handler):
self.exception_handlers.append(handler)
def run(self):
logger.info('Running worker, %d jobs registered...', len(self.registered_jobs))
for payload in self.broker.jobs(self.queue_name, self.timeout):
try:
job_class = self.registered_jobs.get(payload['name'])
if not job_class:
logger.error('Unregistered task: %s', payload['name'])
continue
job, args, kwargs = self.broker.unserialize_job(job_class, self.queue_name, payload)
self._set_custom_retry_time_if_needed(job)
self._execute_job(job, args, kwargs)
except:
logger.exception('Error executing job')
def _set_custom_retry_time_if_needed(self, job):
if job.next_retry_time() is None: # Use default value of the queue
return
self.broker.set_retry_time(job, job.next_retry_time())
def _execute_job(self, job, args, kwargs):
try:
job.execute(*args, **kwargs)
self.broker.delete_job(job)
except RetryException:
job.on_retry()
return
except:
job.on_failure()
self._handle_exception(job, args, kwargs, *sys.exc_info())
return
job.on_success()
def _handle_exception(self, job, args, kwargs, *exc_info):
exception_message = ''.join(
traceback.format_exception_only(*exc_info[:2]) +
traceback.format_exception(*exc_info)
)
logger.error(exception_message, exc_info=True, extra={
'job_queue_name': job.queue_name,
'job_id': job.id,
'job_name': job.name,
'job_args': args,
'job_kwargs': kwargs,
})
for handler in reversed(self.exception_handlers):
logger.debug('Executing exception handler %s', handler)
handler(job, args, kwargs, *exc_info)
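# Minimal usage sketch (illustrative only; `broker` and `SendEmailJob` are
# placeholders for a configured broker and a job class defined elsewhere):
#
#   worker = Worker(broker, 'default', timeout=30)
#   worker.register_job(SendEmailJob)
#   worker.run()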
| {
"content_hash": "2635872322428b6a3bf783512b8405fc",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 100,
"avg_line_length": 31.957894736842107,
"alnum_prop": 0.5770750988142292,
"repo_name": "gnufede/sqjobs",
"id": "4f3e7b7fa0266309bac45468e21383bca8114e9f",
"size": "3036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sqjobs/worker.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "57752"
},
{
"name": "Shell",
"bytes": "168"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('bankaccounts', '0001_initial'),
('banktransactiontags', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='BankTransaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(max_length=255, verbose_name='Label')),
('date', models.DateField(verbose_name='Date', default=datetime.date.today)),
('amount', models.DecimalField(max_digits=10, verbose_name='Amount', decimal_places=2)),
('currency', models.CharField(editable=False, max_length=3, verbose_name='Currency')),
('status', models.CharField(max_length=32, default='active', verbose_name='Status', help_text='Depending on its value, determine whether it could alter the bank account balance or being used by statistics.', choices=[('active', 'Active'), ('ignored', 'Ignored'), ('inactive', 'Inactive')])),
('reconciled', models.BooleanField(verbose_name='Reconciled', help_text='Whether the bank transaction has been applied on the real bank account.', default=False)),
('payment_method', models.CharField(max_length=32, default='credit_card', verbose_name='Payment method', choices=[('credit_card', 'Credit card'), ('cash', 'Cash'), ('transfer', 'Transfer'), ('transfer_internal', 'Transfer internal'), ('check', 'Check')])),
('memo', models.TextField(blank=True, verbose_name='Memo')),
('scheduled', models.BooleanField(editable=False, default=False)),
('bankaccount', models.ForeignKey(to='bankaccounts.BankAccount', related_name='banktransactions', on_delete=models.CASCADE)),
('tag', models.ForeignKey(related_name='banktransactions', on_delete=django.db.models.deletion.SET_NULL, verbose_name='Tag', to='banktransactiontags.BankTransactionTag', blank=True, null=True)),
],
options={
'get_latest_by': 'date',
'db_table': 'banktransactions',
},
),
migrations.AlterIndexTogether(
name='banktransaction',
index_together=set([('bankaccount', 'reconciled'), ('bankaccount', 'date'), ('bankaccount', 'amount')]),
),
]
| {
"content_hash": "4b89810a9cddb75d7211f5686d8d9a28",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 307,
"avg_line_length": 61.829268292682926,
"alnum_prop": 0.6276134122287969,
"repo_name": "ychab/mymoney",
"id": "0a687e91b7e1d149c850259e91476505247b69e3",
"size": "2559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mymoney/apps/banktransactions/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "494"
},
{
"name": "HTML",
"bytes": "68172"
},
{
"name": "JavaScript",
"bytes": "5995"
},
{
"name": "Python",
"bytes": "392344"
},
{
"name": "Shell",
"bytes": "874"
}
],
"symlink_target": ""
} |
"""
Blocks World domain definition for Pyhop 1.1.
Author: Dana Nau <[email protected]>, November 15, 2012
This file should work correctly in both Python 2.7 and Python 3.2.
"""
import pyhop
"""Each Pyhop planning operator is a Python function. The 1st argument is
the current state, and the others are the planning operator's usual arguments.
This is analogous to how methods are defined for Python classes (where
the first argument is always the name of the class instance). For example,
the function pickup(state,b) implements the planning operator for the task
('pickup', b).
The blocks-world operators use three state variables:
- pos[b] = block b's position, which may be 'table', 'hand', or another block.
- clear[b] = False if a block is on b or the hand is holding b, else True.
- holding = name of the block being held, or False if the hand is empty.
"""
def pickup(state,b):
if state.pos[b] == 'table' and state.clear[b] == True and state.holding == False:
state.pos[b] = 'hand'
state.clear[b] = False
state.holding = b
return state
else: return False
def unstack(state,b,c):
if state.pos[b] == c and c != 'table' and state.clear[b] == True and state.holding == False:
state.pos[b] = 'hand'
state.clear[b] = False
state.holding = b
state.clear[c] = True
return state
else: return False
def putdown(state,b):
if state.pos[b] == 'hand':
state.pos[b] = 'table'
state.clear[b] = True
state.holding = False
return state
else: return False
def stack(state,b,c):
if state.pos[b] == 'hand' and state.clear[c] == True:
state.pos[b] = c
state.clear[b] = True
state.holding = False
state.clear[c] = False
return state
else: return False
"""
Below, 'declare_operators(pickup, unstack, putdown, stack)' tells Pyhop
what the operators are. Note that the operator names are *not* quoted.
"""
pyhop.declare_operators(pickup, unstack, putdown, stack)
| {
"content_hash": "8af24107d9286565300fbf46ff0efcae",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 96,
"avg_line_length": 33.278688524590166,
"alnum_prop": 0.6576354679802956,
"repo_name": "jhomble/electron435",
"id": "725fffe2f1fcba168ed8acdd62cf3abd989527d3",
"size": "2030",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "dananau-pyhop-195ab6320571/blocks_world_operators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "774"
},
{
"name": "CSS",
"bytes": "465554"
},
{
"name": "HTML",
"bytes": "284682"
},
{
"name": "JavaScript",
"bytes": "815770"
},
{
"name": "Matlab",
"bytes": "5019"
},
{
"name": "Python",
"bytes": "29692657"
},
{
"name": "Shell",
"bytes": "565"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
SITEURL = 'http://cms-big-data.github.io'
RELATIVE_URLS = False
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""
| {
"content_hash": "115ab6b367999c4f8f60d989d9e238e9",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 53,
"avg_line_length": 22.227272727272727,
"alnum_prop": 0.7382413087934561,
"repo_name": "cms-big-data/cms-big-data.github.io-source",
"id": "51b5d3b84a6bab71a3da92a139b52e4c5dd7a12f",
"size": "537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "publishconf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1301"
},
{
"name": "HTML",
"bytes": "196102"
},
{
"name": "Makefile",
"bytes": "3849"
},
{
"name": "Python",
"bytes": "4139"
},
{
"name": "Shell",
"bytes": "3125"
}
],
"symlink_target": ""
} |
from functools import reduce
from datetime import datetime
from operator import and_
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
from celery import states
from pytz import utc
from vms.models import Dc, Vm, Node, NodeStorage, Subnet, Image, VmTemplate, Iso, TaskLogEntry
from pdns.models import Domain
from gui.models import User, Role
from que import TT_AUTO
from api import serializers as s
from api.task.log import get_task_types
TASK_LOG_MODELS = (Dc, Vm, Node, NodeStorage, Subnet, Image, VmTemplate, Iso, Domain, User, Role)
TASK_STATES = (
('', _('Status (all)')),
(states.PENDING, _(states.PENDING)),
(states.SUCCESS, _(states.SUCCESS)),
(states.FAILURE, _(states.FAILURE)),
(states.REVOKED, _(states.REVOKED)),
)
# TODO: Switch to filtering based on object_type => add index(content_type_model) + remove content_type field
# TODO: That means that in the meantime we don't have filters on logs for dummy task models
# noinspection PyProtectedMember,PyUnresolvedReferences
TASK_OBJECT_TYPES = [('', _('Object type (all)'))] + \
[(m._meta.model_name, m._meta.verbose_name) for m in TASK_LOG_MODELS]
class TaskLogEntrySerializer(s.ModelSerializer):
"""
Serializes vms.models.TaskLogEntry
"""
username = s.Field(source='get_username')
object_name = s.Field(source='get_object_name')
object_alias = s.Field(source='get_object_alias')
object_type = s.Field(source='object_type')
class Meta:
model = TaskLogEntry
fields = ('time', 'task', 'status', 'username', 'msg', 'detail',
'object_name', 'object_alias', 'object_type', 'flag')
class TaskCancelSerializer(s.Serializer):
force = s.BooleanField(default=False)
class TaskLogFilterSerializer(s.Serializer):
_content_type = None
_object_pks = None
status = s.ChoiceField(label=_('Status'), required=False, choices=TASK_STATES)
object_type = s.ChoiceField(source='content_type', label=_('Object type'), required=False,
choices=TASK_OBJECT_TYPES)
object_name = s.CharField(label=_('Object name'), max_length=2048, required=False)
show_running = s.BooleanField(label=_('Show only running tasks'), required=False, default=False)
hide_auto = s.BooleanField(label=_('Hide automatic tasks'), required=False, default=False)
date_from = s.DateField(label=_('Since'), required=False)
date_to = s.DateField(label=_('Until'), required=False)
def validate(self, attrs):
object_type = attrs.get('content_type', None)
object_name = attrs.get('object_name', None)
# object_name depends on object_type
if object_name:
if not object_type:
self._errors['object_type'] = s.ErrorList([_('object_type attribute is required when '
'filtering by object_name.')])
return attrs
self._content_type = content_type = ContentType.objects.get(model=object_type)
model_class = content_type.model_class()
lookup_kwargs = model_class.get_log_name_lookup_kwargs(object_name)
filter_kwargs = {key + '__icontains': val for key, val in lookup_kwargs.items()}
self._object_pks = list(model_class.objects.filter(**filter_kwargs).values_list('pk', flat=True))
return attrs
def get_filters(self, pending_tasks=()):
if self._object_pks is not None and not self._object_pks: # Means that we want to return empty filter results
return False
tz = timezone.get_current_timezone()
data = self.object
query = []
date_from = data.get('date_from')
if date_from:
date_from = datetime.combine(date_from, datetime.min.time())
query.append(Q(time__gte=date_from.replace(tzinfo=utc).astimezone(tz)))
date_to = data.get('date_to')
if date_to:
date_to = datetime.combine(date_to, datetime.min.time())
query.append(Q(time__lte=date_to.replace(tzinfo=utc).astimezone(tz)))
if self._object_pks:
query.append(Q(object_pk__in=self._object_pks))
status = data.get('status')
if status:
query.append(Q(status=status))
if data.get('show_running'):
query.append(Q(task__in=pending_tasks))
object_type = data.get('content_type')
if object_type:
if self._content_type:
content_type = self._content_type
else:
content_type = ContentType.objects.get(model=object_type)
query.append(Q(content_type=content_type))
if data.get('hide_auto'):
query.append(~Q(task_type__in=get_task_types(tt=(TT_AUTO,))))
if query:
return reduce(and_, query)
else:
return None
| {
"content_hash": "95909fcfaffc7ab32180aa12ef603f92",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 118,
"avg_line_length": 39.2421875,
"alnum_prop": 0.6336850487756321,
"repo_name": "erigones/esdc-ce",
"id": "5b2041faf7e1807322390ad186b94cb4c2a0ff8c",
"size": "5023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/task/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "2728"
},
{
"name": "C",
"bytes": "8581"
},
{
"name": "CSS",
"bytes": "146461"
},
{
"name": "DTrace",
"bytes": "2250"
},
{
"name": "Erlang",
"bytes": "18842"
},
{
"name": "HTML",
"bytes": "473343"
},
{
"name": "JavaScript",
"bytes": "679240"
},
{
"name": "Jinja",
"bytes": "29584"
},
{
"name": "PLpgSQL",
"bytes": "17954"
},
{
"name": "Perl",
"bytes": "93955"
},
{
"name": "Python",
"bytes": "3124524"
},
{
"name": "Ruby",
"bytes": "56"
},
{
"name": "SCSS",
"bytes": "82814"
},
{
"name": "Shell",
"bytes": "281885"
}
],
"symlink_target": ""
} |
from django.template import TemplateSyntaxError
from django.template.base import Token
import re
__all__ = (
'parse_as_var', 'parse_token_kwargs'
)
kwarg_re = re.compile(r'^(?P<name>\w+)=')
def parse_as_var(parser, token):
"""
Parse the remainder of the token, to find a "as varname" statement.
:param parser: The "parser" object that ``@register.tag`` provides.
:type parser: :class:`~django.template.Parser`
:param token: The "token" object that ``@register.tag`` provides.
:type token: :class:`~django.template.Token` or splitted bits
"""
if isinstance(token, Token):
bits = token.split_contents()
else:
bits = token
as_var = None
if len(bits) > 2 and bits[-2] == 'as':
bits = bits[:]
as_var = bits.pop()
bits.pop() # as keyword
return bits, as_var
def parse_token_kwargs(parser, token, allowed_kwargs=None, compile_args=True, compile_kwargs=True):
"""
Allow the template tag arguments to be like a normal Python function, with *args and **kwargs.
:param parser: The "parser" object that ``@register.tag`` provides.
:type parser: :class:`~django.template.Parser`
:param token: The "token" object that ``@register.tag`` provides.
:type token: :class:`~django.template.Token` or splitted bits
:param compile_args: Whether the arguments should be compiled using :func:`parser.compile_filter <django.template.Parser.compile_filter>`.
:param compile_kwargs: Whether the keyword arguments should be compiled using :func:`parser.compile_filter <django.template.Parser.compile_filter>`.
:param allowed_kwargs: A list of allowed keyword arguments. A value of ``None`` disables the check.
:type allowed_kwargs: tuple
:return: The tag name, arguments and keyword arguments.
:rtype: tuple(tag_name, args, kwargs)
"""
if isinstance(token, Token):
bits = token.split_contents()
else:
bits = token
expect_kwarg = False
args = []
kwargs = {}
prev_bit = None
tag_name = bits[0]
for bit in bits[1::]:
kwarg_match = kwarg_re.match(bit)
if kwarg_match:
# Keyword argument
expect_kwarg = True
            (name, expr) = bit.split('=', 1)
kwargs[name] = parser.compile_filter(expr) if compile_kwargs else expr
else:
            # Still processing positional arguments.
if expect_kwarg:
raise TemplateSyntaxError("{0} tag may not have a non-keyword argument ({1}) after a keyword argument ({2}).".format(bits[0], bit, prev_bit))
args.append(parser.compile_filter(bit) if compile_args else bit)
prev_bit = bit
# Validate the allowed arguments, to make things easier for template developers
if allowed_kwargs is not None and kwargs:
if not allowed_kwargs:
raise TemplateSyntaxError("The option %s=... cannot be used in '%s'.\nNo keyword arguments are allowed.")
for name in kwargs:
if name not in allowed_kwargs:
raise TemplateSyntaxError("The option %s=... cannot be used in '%s'.\nPossible options are: %s." % (name, bits[0], ", ".join(allowed_kwargs)))
return tag_name, args, kwargs
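# Illustrative usage in a template tag module (hypothetical tag name; `register`
# would be a django.template.Library() instance defined in that module):
#
#   @register.tag
#   def media_block(parser, token):
#       bits, as_var = parse_as_var(parser, token)
#       tag_name, args, kwargs = parse_token_kwargs(
#           parser, bits, allowed_kwargs=('template',))
#       ...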
| {
"content_hash": "ca4673f146305b23272b00a6e7c6e088",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 158,
"avg_line_length": 37.264367816091955,
"alnum_prop": 0.6403454657618753,
"repo_name": "steventimberman/masterDebater",
"id": "c9c9c7b5f95700232747fde7159016cc13b687f0",
"size": "3242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/tag_parser/parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "673"
},
{
"name": "CSS",
"bytes": "83414"
},
{
"name": "HTML",
"bytes": "696030"
},
{
"name": "JavaScript",
"bytes": "176225"
},
{
"name": "Makefile",
"bytes": "148"
},
{
"name": "Python",
"bytes": "11809652"
},
{
"name": "Shell",
"bytes": "3230"
}
],
"symlink_target": ""
} |
"""Builds the MNIST network.
Implements the inference/loss/training pattern for model building.
1. inference() - Builds the model as far as is required for running the network
forward to make predictions.
2. loss() - Adds to the inference model the layers required to generate loss.
3. training() - Adds to the loss model the Ops required to generate and
apply gradients.
This file is used by the various "fully_connected_*.py" files and not meant to
be run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
# The MNIST dataset has 10 classes, representing the digits 0 through 9.
NUM_CLASSES = 10
# The MNIST images are always 28x28 pixels.
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
def inference(images, hidden1_units, hidden2_units):
"""Build the MNIST model up to where it may be used for inference.
Args:
images: Images placeholder, from inputs().
hidden1_units: Size of the first hidden layer.
hidden2_units: Size of the second hidden layer.
Returns:
softmax_linear: Output tensor with the computed logits.
"""
# Hidden 1
with tf.name_scope('hidden1'):
weights = tf.Variable(
tf.truncated_normal([IMAGE_PIXELS, hidden1_units],
stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
name='weights')
biases = tf.Variable(tf.zeros([hidden1_units]),
name='biases')
hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
# Hidden 2
with tf.name_scope('hidden2'):
weights = tf.Variable(
tf.truncated_normal([hidden1_units, hidden2_units],
stddev=1.0 / math.sqrt(float(hidden1_units))),
name='weights')
biases = tf.Variable(tf.zeros([hidden2_units]),
name='biases')
hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
# Linear
with tf.name_scope('softmax_linear'):
weights = tf.Variable(
tf.truncated_normal([hidden2_units, NUM_CLASSES],
stddev=1.0 / math.sqrt(float(hidden2_units))),
name='weights')
biases = tf.Variable(tf.zeros([NUM_CLASSES]),
name='biases')
logits = tf.matmul(hidden2, weights) + biases
return logits
def loss(logits, labels):
"""Calculates the loss from the logits and the labels.
Args:
logits: Logits tensor, float - [batch_size, NUM_CLASSES].
labels: Labels tensor, int32 - [batch_size].
Returns:
loss: Loss tensor of type float.
"""
labels = tf.to_int64(labels)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='xentropy')
return tf.reduce_mean(cross_entropy, name='xentropy_mean')
def training(loss, learning_rate):
"""Sets up the training Ops.
Creates a summarizer to track the loss over time in TensorBoard.
Creates an optimizer and applies the gradients to all trainable variables.
The Op returned by this function is what must be passed to the
`sess.run()` call to cause the model to train.
Args:
loss: Loss tensor, from loss().
learning_rate: The learning rate to use for gradient descent.
Returns:
train_op: The Op for training.
"""
# Add a scalar summary for the snapshot loss.
tf.summary.scalar('loss', loss)
# Create the gradient descent optimizer with the given learning rate.
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
# Create a variable to track the global step.
global_step = tf.Variable(0, name='global_step', trainable=False)
# Use the optimizer to apply the gradients that minimize the loss
# (and also increment the global step counter) as a single training step.
train_op = optimizer.minimize(loss, global_step=global_step)
return train_op
def evaluation(logits, labels):
"""Evaluate the quality of the logits at predicting the label.
Args:
logits: Logits tensor, float - [batch_size, NUM_CLASSES].
labels: Labels tensor, int32 - [batch_size], with values in the
range [0, NUM_CLASSES).
Returns:
A scalar int32 tensor with the number of examples (out of batch_size)
that were predicted correctly.
"""
# For a classifier model, we can use the in_top_k Op.
# It returns a bool tensor with shape [batch_size] that is true for
# the examples where the label is in the top k (here k=1)
# of all logits for that example.
correct = tf.nn.in_top_k(logits, labels, 1)
# Return the number of true entries.
return tf.reduce_sum(tf.cast(correct, tf.int32))
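# Minimal wiring sketch (illustrative only; the placeholder tensors and the
# hidden-layer sizes stand in for what the fully_connected_*.py drivers set up):
#
#   images_pl = tf.placeholder(tf.float32, shape=(None, IMAGE_PIXELS))
#   labels_pl = tf.placeholder(tf.int32, shape=(None,))
#   logits = inference(images_pl, 128, 32)
#   train_op = training(loss(logits, labels_pl), learning_rate=0.01)
#   eval_correct = evaluation(logits, labels_pl)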
| {
"content_hash": "a02832ce0a19598bad4ba5e77722d6bf",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 79,
"avg_line_length": 38.233333333333334,
"alnum_prop": 0.6857018308631212,
"repo_name": "miyamotok0105/deeplearning-sample",
"id": "8cef566c041003cc38b0167feac6255a36126f93",
"size": "5278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tensorflow1.0.0/mnist0.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "593"
},
{
"name": "Cuda",
"bytes": "2783"
},
{
"name": "Python",
"bytes": "103510"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User as DjangoUser
from anaf.core.models import Group, Perspective, ModuleSetting
class NewsViewsTest(TestCase):
username = "test"
password = "password"
def setUp(self):
self.group, created = Group.objects.get_or_create(name='test')
self.user, created = DjangoUser.objects.get_or_create(username=self.username, is_staff=True)
self.user.set_password(self.password)
self.user.save()
perspective, created = Perspective.objects.get_or_create(name='default')
perspective.set_default_user()
perspective.save()
ModuleSetting.set('default_perspective', perspective.id)
######################################
# Testing views when user is logged in
######################################
def test_news_index_login(self):
"Test index page with login at /news/all/"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('news_index'))
self.assertEquals(response.status_code, 200)
def test_news_top(self):
"Test index page with login at /news/top/"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('news_top'))
self.assertEquals(response.status_code, 200)
def test_news_my_activity(self):
"Test index page with login at /news/my/"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('news_my_activity'))
self.assertEquals(response.status_code, 200)
def test_news_watchlist(self):
"Test index page with login at /news/watchlist/"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('news_my_watchlist'))
self.assertEquals(response.status_code, 200)
######################################
# Testing views when user is not logged in
######################################
def test_news_index(self):
"Testing /news/"
response = self.client.get(reverse('news'))
# Redirects as unauthenticated
self.assertRedirects(response, "/accounts/login")
def test_news_top_out(self):
"Testing /news/top/"
response = self.client.get(reverse('news_top'))
self.assertRedirects(response, reverse('user_login'))
def test_news_my_activity_out(self):
"Testing /news/my/"
response = self.client.get(reverse('news_my_activity'))
self.assertRedirects(response, reverse('user_login'))
def test_news_watchlist_out(self):
"Testing /news/watchlist/"
response = self.client.get(reverse('news_my_watchlist'))
self.assertRedirects(response, reverse('user_login'))
| {
"content_hash": "bb60b7081e4d9e41114afef2eefa16f9",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 100,
"avg_line_length": 43.256410256410255,
"alnum_prop": 0.6052163604030824,
"repo_name": "tovmeod/anaf",
"id": "cea24b546b66e889e357adaec2bcf5fa4856db6c",
"size": "3374",
"binary": false,
"copies": "1",
"ref": "refs/heads/drf",
"path": "anaf/news/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "400736"
},
{
"name": "HTML",
"bytes": "1512873"
},
{
"name": "JavaScript",
"bytes": "2136807"
},
{
"name": "PHP",
"bytes": "25856"
},
{
"name": "Python",
"bytes": "2045934"
},
{
"name": "Shell",
"bytes": "18005"
},
{
"name": "TSQL",
"bytes": "147855"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import idiokit
from .. import events, bot, taskfarm
class Handler(object):
def __init__(self, log):
self.log = log
@idiokit.stream
def transform(self):
while True:
event = yield idiokit.next()
yield idiokit.send(event)
@idiokit.stream
def _collect_set():
result_set = set()
while True:
try:
value = yield idiokit.next()
except StopIteration:
break
result_set.add(value)
idiokit.stop(result_set)
class Transformation(bot.ServiceBot):
def __init__(self, *args, **keys):
bot.ServiceBot.__init__(self, *args, **keys)
self._rooms = taskfarm.TaskFarm(self._room)
self._srcs = taskfarm.TaskFarm(self._src)
self._dsts = taskfarm.TaskFarm(self._dst)
self._pipes = taskfarm.TaskFarm(self._pipe, grace_period=0.0)
def _pipe(self, src, dst, key):
return idiokit.pipe(
self._srcs.inc(src),
self.transform(*key),
events.events_to_elements(),
self._dsts.inc(dst))
def _src(self, src):
return idiokit.pipe(
self._rooms.inc(src),
events.stanzas_to_events())
def _dst(self, dst):
return idiokit.pipe(
self._rooms.inc(dst),
idiokit.consume())
@idiokit.stream
def _room(self, name):
room = yield idiokit.pipe(
self._delayed_log("Joining room " + repr(name)),
self.xmpp.muc.join(name, self.bot_name))
self.log.info("Joined room " + repr(name))
try:
yield room
finally:
self.log.info("Left room " + repr(name))
@idiokit.stream
def _delayed_log(self, logline, delay=1.0):
yield idiokit.sleep(delay)
self.log.info(logline)
yield idiokit.Event()
@idiokit.stream
def session(self, _, src_room, dst_room, **keys):
keyset = yield idiokit.pipe(
self.transform_keys(src_room=src_room, dst_room=dst_room, **keys),
_collect_set())
pipes = [self._pipes.inc(src_room, dst_room, key) for key in keyset]
yield idiokit.pipe(*pipes)
@idiokit.stream
def transform_keys(self, **keys):
yield idiokit.send(())
@idiokit.stream
def transform(self):
while True:
event = yield idiokit.next()
yield idiokit.send(event)
| {
"content_hash": "56a5e351b95c1fffd6c4d85cd93dd476",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 78,
"avg_line_length": 26.095744680851062,
"alnum_prop": 0.5658377496942519,
"repo_name": "abusesa/abusehelper",
"id": "0b8aa660d45fb81257f397afc4524476e06872dc",
"size": "2453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "abusehelper/core/transformation/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "408257"
}
],
"symlink_target": ""
} |
"""Transport implementation using requests package.
"""
import logging
from typing import cast, Callable, Dict, MutableMapping, Optional
import requests
from looker_sdk.rtl import transport
class RequestsTransport(transport.Transport):
"""RequestsTransport implementation of Transport."""
def __init__(
self, settings: transport.PTransportSettings, session: requests.Session
):
self.settings = settings
headers: Dict[str, str] = {transport.LOOKER_API_ID: settings.agent_tag}
if settings.headers:
headers.update(settings.headers)
session.headers.update(headers)
session.verify = settings.verify_ssl
self.session = session
self.logger = logging.getLogger(__name__)
@classmethod
def configure(cls, settings: transport.PTransportSettings) -> transport.Transport:
return cls(settings, requests.Session())
def request(
self,
method: transport.HttpMethod,
path: str,
query_params: Optional[MutableMapping[str, str]] = None,
body: Optional[bytes] = None,
authenticator: transport.TAuthenticator = None,
transport_options: Optional[transport.TransportOptions] = None,
) -> transport.Response:
headers = {}
timeout = self.settings.timeout
if authenticator:
headers.update(authenticator(transport_options or {}))
if transport_options:
if transport_options.get("headers"):
headers.update(transport_options["headers"])
if transport_options.get("timeout"):
timeout = transport_options["timeout"]
self.logger.info("%s(%s)", method.name, path)
try:
resp = self.session.request(
method.name,
path,
auth=NullAuth(),
params=query_params,
data=body,
headers=headers,
timeout=timeout,
)
except IOError as exc:
ret = transport.Response(
False,
bytes(str(exc), encoding="utf-8"),
transport.ResponseMode.STRING,
)
else:
ret = transport.Response(
resp.ok,
resp.content,
transport.response_mode(resp.headers.get("content-type")),
)
encoding = cast(
Optional[str], requests.utils.get_encoding_from_headers(resp.headers)
)
if encoding:
ret.encoding = encoding
return ret
class NullAuth(requests.auth.AuthBase):
"""A custom auth class which ensures requests does not override authorization
headers with netrc file credentials if present.
"""
def __call__(self, r):
return r
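# Minimal usage sketch (illustrative only; `settings` is a configured
# transport.PTransportSettings, the URL is a placeholder, and HttpMethod.GET is
# assumed to be a member of transport.HttpMethod):
#
#   xport = RequestsTransport.configure(settings)
#   resp = xport.request(transport.HttpMethod.GET,
#                        "https://example.looker.com:19999/versions")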
| {
"content_hash": "c01e62cf251fd27717aedd4f9840e86d",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 86,
"avg_line_length": 32.25,
"alnum_prop": 0.587737843551797,
"repo_name": "looker-open-source/sdk-codegen",
"id": "03667d84af7ae4a7f40391006d4990414be2e920",
"size": "3970",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/looker_sdk/rtl/requests_transport.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "1558559"
},
{
"name": "Go",
"bytes": "780579"
},
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "JavaScript",
"bytes": "46766"
},
{
"name": "Jupyter Notebook",
"bytes": "44336"
},
{
"name": "Kotlin",
"bytes": "1224618"
},
{
"name": "Nix",
"bytes": "132"
},
{
"name": "Python",
"bytes": "2119978"
},
{
"name": "Shell",
"bytes": "4961"
},
{
"name": "Swift",
"bytes": "1996724"
},
{
"name": "TypeScript",
"bytes": "2759848"
}
],
"symlink_target": ""
} |
import ddt
import six
from cinder.api.openstack import api_version_request
from cinder import exception
from cinder import test
@ddt.ddt
class APIVersionRequestTests(test.TestCase):
def test_init(self):
result = api_version_request.APIVersionRequest()
self.assertIsNone(result._ver_major)
self.assertIsNone(result._ver_minor)
def test_min_version(self):
self.assertEqual(
api_version_request.APIVersionRequest(
api_version_request._MIN_API_VERSION),
api_version_request.min_api_version())
def test_max_api_version(self):
self.assertEqual(
api_version_request.APIVersionRequest(
api_version_request._MAX_API_VERSION),
api_version_request.max_api_version())
@ddt.data(
('1.1', 1, 1),
('2.10', 2, 10),
('5.234', 5, 234),
('12.5', 12, 5),
('2.0', 2, 0),
('2.200', 2, 200)
)
@ddt.unpack
def test_valid_version_strings(self, version_string, major, minor):
request = api_version_request.APIVersionRequest(version_string)
self.assertEqual(major, request._ver_major)
self.assertEqual(minor, request._ver_minor)
def test_null_version(self):
v = api_version_request.APIVersionRequest()
self.assertTrue(v.is_null())
@ddt.data('2', '200', '2.1.4', '200.23.66.3', '5 .3', '5. 3',
'5.03', '02.1', '2.001', '', ' 2.1', '2.1 ')
def test_invalid_version_strings(self, version_string):
self.assertRaises(exception.InvalidAPIVersionString,
api_version_request.APIVersionRequest,
version_string)
def test_cmpkey(self):
request = api_version_request.APIVersionRequest('1.2')
self.assertEqual((1, 2), request._cmpkey())
def test_version_comparisons(self):
v1 = api_version_request.APIVersionRequest('2.0')
v2 = api_version_request.APIVersionRequest('2.5')
v3 = api_version_request.APIVersionRequest('5.23')
v4 = api_version_request.APIVersionRequest('2.0')
v_null = api_version_request.APIVersionRequest()
self.assertTrue(v1 < v2)
self.assertTrue(v1 <= v2)
self.assertTrue(v3 > v2)
self.assertTrue(v3 >= v2)
self.assertTrue(v1 != v2)
self.assertTrue(v1 == v4)
self.assertTrue(v1 != v_null)
self.assertTrue(v_null == v_null)
self.assertFalse(v1 == '2.0')
def test_version_matches(self):
v1 = api_version_request.APIVersionRequest('2.0')
v2 = api_version_request.APIVersionRequest('2.5')
v3 = api_version_request.APIVersionRequest('2.45')
v4 = api_version_request.APIVersionRequest('3.3')
v5 = api_version_request.APIVersionRequest('3.23')
v6 = api_version_request.APIVersionRequest('2.0')
v7 = api_version_request.APIVersionRequest('3.3')
v8 = api_version_request.APIVersionRequest('4.0')
v_null = api_version_request.APIVersionRequest()
self.assertTrue(v2.matches(v1, v3))
self.assertTrue(v2.matches(v1, v_null))
self.assertTrue(v1.matches(v6, v2))
self.assertTrue(v4.matches(v2, v7))
self.assertTrue(v4.matches(v_null, v7))
self.assertTrue(v4.matches(v_null, v8))
self.assertFalse(v1.matches(v2, v3))
self.assertFalse(v5.matches(v2, v4))
self.assertFalse(v2.matches(v3, v1))
self.assertTrue(v1.matches(v_null, v_null))
self.assertRaises(ValueError, v_null.matches, v1, v3)
def test_matches_versioned_method(self):
request = api_version_request.APIVersionRequest('2.0')
self.assertRaises(exception.InvalidParameterValue,
request.matches_versioned_method,
'fake_method')
def test_get_string(self):
v1_string = '3.23'
v1 = api_version_request.APIVersionRequest(v1_string)
self.assertEqual(v1_string, v1.get_string())
self.assertRaises(ValueError,
api_version_request.APIVersionRequest().get_string)
@ddt.data(('1', '0'), ('1', '1'))
@ddt.unpack
def test_str(self, major, minor):
request_input = '%s.%s' % (major, minor)
request = api_version_request.APIVersionRequest(request_input)
request_string = six.text_type(request)
self.assertEqual('API Version Request '
'Major: %s, Minor: %s' % (major, minor),
request_string)
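# Hedged sketch of the range check the tests above exercise: a requested
# version matches when it falls inside [min, max], and a null bound is
# treated as open-ended. The version strings below are placeholders.
def _example_matches():
    requested = api_version_request.APIVersionRequest('3.10')
    min_ver = api_version_request.APIVersionRequest('3.0')
    max_ver = api_version_request.APIVersionRequest()  # null => no upper bound
    return requested.matches(min_ver, max_ver)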
| {
"content_hash": "bd0c7f409d1d0d802a0af3734806e1e3",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 77,
"avg_line_length": 34.37593984962406,
"alnum_prop": 0.6034558180227472,
"repo_name": "scottdangelo/RemoveVolumeMangerLocks",
"id": "a087eb08bc4964894f1c4df6182382b7b3ba7764",
"size": "5229",
"binary": false,
"copies": "1",
"ref": "refs/heads/RemoveVolumeManagerLocks",
"path": "cinder/tests/unit/api/openstack/test_api_version_request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13128387"
},
{
"name": "Shell",
"bytes": "8222"
}
],
"symlink_target": ""
} |
"""Mixin for Matter Pressure Measurement cluster test suite."""
from mobly import asserts
class PressureMeasurementClusterTestSuite:
"""Mixin for Matter Pressure Measurement cluster test suite.
The mixin assumes self.endpoint is set.
"""
def test_measured_value_attribute(self):
"""Tests the MeasuredValue attribute."""
asserts.assert_is_instance(
self.endpoint.pressure_measurement.measured_value, int,
"MeasuredValue attribute must be the int type.")
def test_min_measured_value_attribute(self):
"""Tests the MinMeasuredValue attribute."""
asserts.assert_is_instance(
self.endpoint.pressure_measurement.min_measured_value, int,
"MinMeasuredValue attribute must be the int type.")
def test_max_measured_value_attribute(self):
"""Tests the MaxMeasuredValue attribute."""
asserts.assert_is_instance(
self.endpoint.pressure_measurement.max_measured_value, int,
"MaxMeasuredValue attribute must be the int type.")
| {
"content_hash": "cea76b06e534286eae28e54cf1589595",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 67,
"avg_line_length": 37.074074074074076,
"alnum_prop": 0.7282717282717283,
"repo_name": "google/gazoo-device",
"id": "47ec829caa2cc928fc00ad089b526f59000f6ef6",
"size": "1577",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gazoo_device/tests/functional_tests/mixins/pressure_measurement_cluster_suite.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3014778"
},
{
"name": "Shell",
"bytes": "19088"
}
],
"symlink_target": ""
} |
import os
from django.test import TestCase
from domain.models import Domain
from xformmanager.models import FormDefModel
from buildmanager.tests.util import setup_build_objects, create_build
from buildmanager.models import Project, ProjectBuild, BuildDownload, BuildForm
from xformmanager.storageutility import StorageUtility
class ReleaseTestCase(TestCase):
def setUp(self):
su = StorageUtility()
su.clear()
user, domain, project, build = setup_build_objects(jar_file_name="Test.jar")
self.domain = domain
self.user = user
self.project = project
self.build = build
def tearDown(self):
# clean up, in case some other tests left some straggling
# form data. Do this in setup and teardown because we want
# to start with a clean slate and leave a clean slate.
su = StorageUtility()
su.clear()
def testReleaseBrac(self):
# for convenience
build = create_build(self.user, self.domain, self.project, status="build",
jar_file_name="BracCHP.jar", build_number=2)
build.release(self.user)
def testRelease(self):
self.assertEqual(0, len(FormDefModel.objects.all()))
# the saving of the build should have auto-created these
try:
self.build.release(self.user)
self.fail("Releasing a released build should fail!")
except Exception:
pass
self.build.status = "build"
self.build.save()
self.build.release(self.user)
formdefs = FormDefModel.objects.all()
self.assertEqual(2, len(formdefs), "Releasing a build did not register xforms!")
# try to reset it and release again
self.build.status = "build"
self.build.save()
self.build.release(self.user)
formdefs = FormDefModel.objects.all()
self.assertEqual(2, len(formdefs), "Releasing a build twice registered extra xforms!")
bad_jars = ["ExtraMetaField.jar", "DuplicateMetaField.jar", "MissingMetaField.jar",
"NoXmlns.jar", "NoVersion.jar", "NoUiVersion.jar"]
build_number = 2
for bad_jar in bad_jars:
bad_build = create_build(self.user, self.domain, self.project, status="build",
jar_file_name=bad_jar, build_number=build_number)
build_number += 1
try:
                bad_build.release(self.user)
self.fail("Releasing a bad build: %s should fail!" % bad_jar)
except Exception:
pass
def testCrossDomainRelease(self):
self.assertEqual(0, len(FormDefModel.objects.all()))
self.build.status = "build"
self.build.save()
self.build.release(self.user)
self.assertEqual(2, FormDefModel.objects.count())
self.assertEqual(2, FormDefModel.objects.filter(domain=self.domain).count())
other_domain = Domain.objects.create(name="new_domain", is_active=True)
# create the same build but in the new domain
new_project = Project.objects.create(domain=other_domain, name="New Project",
description="New Description")
self.build.id = None
self.build.project = new_project
self.build.status = "build"
self.build.save()
self.build.release(self.user)
self.assertEqual(4, FormDefModel.objects.count())
self.assertEqual(2, FormDefModel.objects.filter(domain=self.domain).count())
self.assertEqual(2, FormDefModel.objects.filter(domain=other_domain).count()) | {
"content_hash": "88b5442bfd05cd0ffeeff0f69615c56b",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 94,
"avg_line_length": 42.42696629213483,
"alnum_prop": 0.6054025423728814,
"repo_name": "commtrack/temp-aquatest",
"id": "1ed1a25ed398974ea90013cad4bafeba6841bbfb",
"size": "3776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/buildmanager/tests/release.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "742874"
},
{
"name": "PHP",
"bytes": "2863"
},
{
"name": "Python",
"bytes": "3707591"
},
{
"name": "Shell",
"bytes": "490"
}
],
"symlink_target": ""
} |
from logging import getLogger
from django.db import connection
from uw_canvas.models import CanvasSection, CanvasRole
from restclients_core.exceptions import DataFailureException
from canvas_users.dao.canvas import (
get_user_by_sis_id, create_user, enroll_course_user)
from canvas_users.views import UserRESTDispatch
from canvas_users.models import AddUser, AddUsersImport
from multiprocessing import Process
import json
import sys
import os
logger = getLogger(__name__)
class ValidCanvasCourseUsers(UserRESTDispatch):
""" Exposes API to manage Canvas users
GET returns 200 with user details
"""
def post(self, request, *args, **kwargs):
try:
course_id = kwargs['canvas_course_id']
data = json.loads(request.body)
course_users = AddUser.objects.users_in_course(
course_id, data['section_id'], data['role_base'],
data['login_ids'])
return self.json_response({
'users': [user.json_data() for user in course_users]
})
except Exception as ex:
return self.error_response(
400, message='Validation Error {}'.format(ex))
class ImportCanvasCourseUsers(UserRESTDispatch):
""" Exposes API to manage Canvas users
"""
def get(self, request, *args, **kwargs):
course_id = kwargs['canvas_course_id']
try:
import_id = request.GET['import_id']
imp = AddUsersImport.objects.get(id=import_id)
if imp.import_error:
try:
msg = json.loads(imp.import_error)
except Exception:
msg = imp.import_error
return self.json_response({'error': msg}, status=400)
if imp.progress() == 100:
imp.delete()
return self.json_response(imp.json_data())
except AddUsersImport.DoesNotExist:
return self.error_response(400, message="Unknown import id")
except KeyError:
return self.error_response(400, message="Missing import id")
def post(self, request, *args, **kwargs):
try:
course_id = kwargs['canvas_course_id']
data = json.loads(request.body)
logins = [x['login'] for x in data['logins']]
users = []
for user in AddUser.objects.users_in_course(
course_id, data['section_id'], data['role_base'], logins):
if user.is_valid():
users.append(user)
role = CanvasRole(
role_id=data['role_id'],
label=data['role'],
base_role_type=data['role_base'])
section = CanvasSection(
section_id=data['section_id'],
sis_section_id=data['section_sis_id'],
course_id=course_id)
section_only = data['section_only']
notify_users = data['notify_users']
imp = AddUsersImport(
importer=self.blti.user_login_id,
importer_id=self.blti.canvas_user_id,
importing=len(users),
course_id=course_id,
role=role.label,
section_id=section.sis_section_id)
imp.save()
connection.close()
p = Process(target=self._api_import_users,
args=(imp.pk, users, role, section,
section_only, notify_users))
p.start()
return self.json_response(imp.json_data())
except KeyError as ex:
return self.error_response(
400, message='Incomplete Request: {}'.format(ex))
except Exception as ex:
return self.error_response(
400, message='Import Error: {}'.format(ex))
def _api_import_users(self, import_id, users, role,
section, section_only, notify_users):
try:
imp = AddUsersImport.objects.get(id=import_id)
imp.import_pid = os.getpid()
imp.save()
for u in users:
try:
canvas_user = get_user_by_sis_id(u.regid)
except DataFailureException as ex:
if ex.status == 404:
logger.info(
'CREATE USER "{}", login: {}, reg_id: {}'.format(
u.name, u.login, u.regid))
# add user as "admin" on behalf of importer
canvas_user = create_user(
name=u.name,
login_id=u.login,
sis_user_id=u.regid,
email=u.email)
else:
raise Exception('Cannot create user {}: {}'.format(
u.login, ex))
logger.info(
'{importer} ADDING {user} ({user_id}) TO {course_id}: '
'{sis_section_id} ({section_id}) AS {role} ({role_id}) '
'- O:{section_only}, N:{notify}'.format(
importer=imp.importer, user=canvas_user.login_id,
user_id=canvas_user.user_id,
course_id=section.course_id,
sis_section_id=section.sis_section_id,
section_id=section.section_id, role=role.label,
role_id=role.role_id, section_only=section_only,
notify=notify_users))
enroll_course_user(
as_user=imp.importer_id,
course_id=section.course_id,
section_id=section.section_id,
user_id=canvas_user.user_id,
role_type=role.base_role_type,
role_id=role.role_id,
section_only=section_only,
notify_users=notify_users)
imp.imported += 1
imp.save()
except DataFailureException as ex:
logger.info('Request failed: {}'.format(ex))
try:
msg = json.loads(ex.msg)
imp.import_error = json.dumps({
'url': ex.url, 'status': ex.status, 'msg': msg})
except Exception:
imp.import_error = '{}'.format(ex)
imp.save()
except Exception as ex:
logger.info('EXCEPTION: {}'.format(ex))
imp.import_error = '{}'.format(ex)
imp.save()
sys.exit(0)
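# Illustrative sketch of the JSON body that ImportCanvasCourseUsers.post()
# above reads. Only the key names come from the handler; every value below
# is a hypothetical placeholder, not a real Canvas identifier.
_EXAMPLE_IMPORT_REQUEST = {
    'logins': [{'login': 'student1'}, {'login': 'student2'}],
    'section_id': '10001',
    'section_sis_id': '2023-autumn-ABC-101-A',
    'role': 'Student',
    'role_id': '3',
    'role_base': 'StudentEnrollment',
    'section_only': False,
    'notify_users': False,
}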
| {
"content_hash": "d175f8364267e472889834aa248b31dd",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 78,
"avg_line_length": 38.011428571428574,
"alnum_prop": 0.5055622369212267,
"repo_name": "uw-it-aca/django-canvas-users",
"id": "5c4aaea3586f92c7a9e928f4f0f78c56e72095c0",
"size": "6741",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "canvas_users/views/api/course.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2164"
},
{
"name": "Dockerfile",
"bytes": "755"
},
{
"name": "HTML",
"bytes": "13420"
},
{
"name": "JavaScript",
"bytes": "35252"
},
{
"name": "Python",
"bytes": "35512"
},
{
"name": "Shell",
"bytes": "744"
}
],
"symlink_target": ""
} |
import warnings
try: # Python 3
import http.client as httplib
from urllib.parse import parse_qsl
from functools import partial
to_bytes = lambda value, *args, **kwargs: bytes(value, "utf-8", *args, **kwargs)
except ImportError: # Python 2
import httplib
from urlparse import parse_qsl
to_bytes = str
import textwrap
import codecs
import json
from . import exceptions
import base64
import hashlib
import hmac
try:
from rauth import OAuth1Session, OAuth1Service, OAuth2Session
except ImportError:
print("Please import Rauth:\n\n")
print("http://rauth.readthedocs.org/en/latest/\n")
raise
class Environments(object):
SANDBOX = 'sandbox'
PRODUCTION = 'production'
class QuickBooks(object):
company_id = 0
session = None
auth_client = None
sandbox = False
minorversion = None
verifier_token = None
invoice_link = False
sandbox_api_url_v3 = "https://sandbox-quickbooks.api.intuit.com/v3"
api_url_v3 = "https://quickbooks.api.intuit.com/v3"
current_user_url = "https://appcenter.intuit.com/api/v1/user/current"
_BUSINESS_OBJECTS = [
"Account", "Attachable", "Bill", "BillPayment",
"Class", "CreditMemo", "Customer", "CompanyCurrency",
"Department", "Deposit", "Employee", "Estimate", "ExchangeRate", "Invoice",
"Item", "JournalEntry", "Payment", "PaymentMethod", "Preferences",
"Purchase", "PurchaseOrder", "RefundReceipt",
"SalesReceipt", "TaxAgency", "TaxCode", "TaxService/Taxcode", "TaxRate", "Term",
"TimeActivity", "Transfer", "Vendor", "VendorCredit", "CreditCardPayment",
]
__instance = None
__use_global = False
def __new__(cls, **kwargs):
"""
If global is disabled, don't set global client instance.
"""
if QuickBooks.__use_global:
if QuickBooks.__instance is None:
QuickBooks.__instance = object.__new__(cls)
instance = QuickBooks.__instance
else:
instance = object.__new__(cls)
if 'refresh_token' in kwargs:
instance.refresh_token = kwargs['refresh_token']
if 'auth_client' in kwargs:
instance.auth_client = kwargs['auth_client']
if instance.auth_client.environment == Environments.SANDBOX:
instance.sandbox = True
else:
instance.sandbox = False
refresh_token = instance._start_session()
instance.refresh_token = refresh_token
if 'company_id' in kwargs:
instance.company_id = kwargs['company_id']
if 'minorversion' in kwargs:
instance.minorversion = kwargs['minorversion']
instance.invoice_link = kwargs.get('invoice_link', False)
if 'verifier_token' in kwargs:
instance.verifier_token = kwargs.get('verifier_token')
return instance
def _start_session(self):
if self.auth_client.access_token is None:
self.auth_client.refresh(refresh_token=self.refresh_token)
self.session = OAuth2Session(
client_id=self.auth_client.client_id,
client_secret=self.auth_client.client_secret,
access_token=self.auth_client.access_token,
)
return self.auth_client.refresh_token
@classmethod
def get_instance(cls):
return cls.__instance
@classmethod
def disable_global(cls):
"""
Disable use of singleton pattern.
"""
warnings.warn("disable_global deprecated", PendingDeprecationWarning)
QuickBooks.__use_global = False
QuickBooks.__instance = None
@classmethod
def enable_global(cls):
"""
Allow use of singleton pattern.
"""
warnings.warn("enable_global deprecated", PendingDeprecationWarning)
QuickBooks.__use_global = True
def _drop(self):
QuickBooks.__instance = None
@property
def api_url(self):
if self.sandbox:
return self.sandbox_api_url_v3
else:
return self.api_url_v3
def validate_webhook_signature(self, request_body, signature, verifier_token=None):
hmac_verifier_token_hash = hmac.new(
to_bytes(verifier_token or self.verifier_token),
request_body.encode('utf-8'),
hashlib.sha256
).digest()
decoded_hex_signature = base64.b64decode(signature)
return hmac_verifier_token_hash == decoded_hex_signature
def get_current_user(self):
"""Get data from the current user endpoint"""
url = self.current_user_url
result = self.get(url)
return result
def get_report(self, report_type, qs=None):
"""Get data from the report endpoint"""
if qs is None:
qs = {}
url = self.api_url + "/company/{0}/reports/{1}".format(self.company_id, report_type)
result = self.get(url, params=qs)
return result
def change_data_capture(self, entity_string, changed_since):
url = "{0}/company/{1}/cdc".format(self.api_url, self.company_id)
params = {"entities": entity_string, "changedSince": changed_since}
result = self.get(url, params=params)
return result
def make_request(self, request_type, url, request_body=None, content_type='application/json',
params=None, file_path=None, request_id=None):
if not params:
params = {}
if self.minorversion:
params['minorversion'] = self.minorversion
if request_id:
params['requestid'] = request_id
if self.invoice_link:
params['include'] = 'invoiceLink'
if not request_body:
request_body = {}
headers = {
'Content-Type': content_type,
'Accept': 'application/json',
'User-Agent': 'python-quickbooks V3 library'
}
if file_path:
attachment = open(file_path, 'rb')
url = url.replace('attachable', 'upload')
boundary = '-------------PythonMultipartPost'
headers.update({
'Content-Type': 'multipart/form-data; boundary=%s' % boundary,
'Accept-Encoding': 'gzip;q=1.0,deflate;q=0.6,identity;q=0.3',
'User-Agent': 'python-quickbooks V3 library',
'Accept': 'application/json',
'Connection': 'close'
})
binary_data = str(base64.b64encode(attachment.read()).decode('ascii'))
content_type = json.loads(request_body)['ContentType']
request_body = textwrap.dedent(
"""
--%s
Content-Disposition: form-data; name="file_metadata_01"
Content-Type: application/json
%s
--%s
Content-Disposition: form-data; name="file_content_01"
Content-Type: %s
Content-Transfer-Encoding: base64
%s
--%s--
"""
) % (boundary, request_body, boundary, content_type, binary_data, boundary)
# make sure request_body is not unicode (python 2 case)
request_body = str(request_body)
req = self.process_request(request_type, url, headers=headers, params=params, data=request_body)
if req.status_code == httplib.UNAUTHORIZED:
raise exceptions.AuthorizationException(
"Application authentication failed", error_code=req.status_code, detail=req.text)
try:
result = req.json()
except:
raise exceptions.QuickbooksException("Error reading json response: {0}".format(req.text), 10000)
if "Fault" in result:
self.handle_exceptions(result["Fault"])
elif not req.status_code == httplib.OK:
raise exceptions.QuickbooksException("Error returned with status code '{0}': {1}".format(
req.status_code, req.text), 10000)
else:
return result
def get(self, *args, **kwargs):
return self.make_request("GET", *args, **kwargs)
def post(self, *args, **kwargs):
return self.make_request("POST", *args, **kwargs)
def process_request(self, request_type, url, headers="", params="", data=""):
if self.session is None:
raise exceptions.QuickbooksException('No session manager')
headers.update({'Authorization': 'Bearer ' + self.session.access_token})
return self.session.request(
request_type, url, headers=headers, params=params, data=data)
def get_single_object(self, qbbo, pk):
url = "{0}/company/{1}/{2}/{3}/".format(self.api_url, self.company_id, qbbo.lower(), pk)
result = self.get(url, {})
return result
@staticmethod
def handle_exceptions(results):
"""
Error codes with description in documentation:
https://developer.intuit.com/app/developer/qbo/docs/develop/troubleshooting/error-codes#id1
"""
# Needs to handle multiple errors
for error in results["Error"]:
message = error["Message"]
detail = ""
if "Detail" in error:
detail = error["Detail"]
code = ""
if "code" in error:
code = int(error["code"])
if 0 < code <= 499:
raise exceptions.AuthorizationException(message, code, detail)
elif 500 <= code <= 599:
raise exceptions.UnsupportedException(message, code, detail)
elif 600 <= code <= 1999:
if code == 610:
raise exceptions.ObjectNotFoundException(message, code, detail)
raise exceptions.GeneralException(message, code, detail)
elif 2000 <= code <= 4999:
raise exceptions.ValidationException(message, code, detail)
elif 10000 <= code:
raise exceptions.SevereException(message, code, detail)
else:
raise exceptions.QuickbooksException(message, code, detail)
def create_object(self, qbbo, request_body, _file_path=None, request_id=None):
self.isvalid_object_name(qbbo)
url = "{0}/company/{1}/{2}".format(self.api_url, self.company_id, qbbo.lower())
results = self.post(url, request_body, file_path=_file_path, request_id=request_id)
return results
def query(self, select):
url = "{0}/company/{1}/query".format(self.api_url, self.company_id)
result = self.post(url, select, content_type='application/text')
return result
def isvalid_object_name(self, object_name):
if object_name not in self._BUSINESS_OBJECTS:
raise Exception("{0} is not a valid QBO Business Object.".format(object_name))
return True
def update_object(self, qbbo, request_body, _file_path=None, request_id=None):
url = "{0}/company/{1}/{2}".format(self.api_url, self.company_id, qbbo.lower())
result = self.post(url, request_body, file_path=_file_path, request_id=request_id)
return result
def delete_object(self, qbbo, request_body, _file_path=None, request_id=None):
url = "{0}/company/{1}/{2}".format(self.api_url, self.company_id, qbbo.lower())
result = self.post(url, request_body, params={'operation': 'delete'}, file_path=_file_path, request_id=request_id)
return result
def batch_operation(self, request_body):
url = "{0}/company/{1}/batch".format(self.api_url, self.company_id)
results = self.post(url, request_body)
return results
def misc_operation(self, end_point, request_body, content_type='application/json'):
url = "{0}/company/{1}/{2}".format(self.api_url, self.company_id, end_point)
results = self.post(url, request_body, content_type)
return results
def download_pdf(self, qbbo, item_id):
if self.session is None:
raise exceptions.QuickbooksException('No session')
url = "{0}/company/{1}/{2}/{3}/pdf".format(
self.api_url, self.company_id, qbbo.lower(), item_id)
headers = {
'Content-Type': 'application/pdf',
'Accept': 'application/pdf, application/json',
'User-Agent': 'python-quickbooks V3 library'
}
response = self.process_request("GET", url, headers=headers)
if response.status_code != httplib.OK:
if response.status_code == httplib.UNAUTHORIZED:
# Note that auth errors have different result structure which can't be parsed by handle_exceptions()
raise exceptions.AuthorizationException(
"Application authentication failed", error_code=response.status_code, detail=response.text)
try:
result = response.json()
except:
raise exceptions.QuickbooksException("Error reading json response: {0}".format(response.text), 10000)
self.handle_exceptions(result["Fault"])
else:
return response.content
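# Minimal usage sketch for the client above. The ``auth_client`` argument is
# assumed to be an OAuth2 helper exposing client_id / client_secret /
# access_token / refresh_token / environment, as the constructor expects; it
# is not defined in this module. The company id and minor version are
# hypothetical placeholders.
def _example_usage(auth_client, refresh_token):
    client = QuickBooks(
        auth_client=auth_client,
        refresh_token=refresh_token,
        company_id="1234567890",
        minorversion=62,
    )
    # Issue a raw query against the QBO query endpoint.
    return client.query("SELECT * FROM Invoice MAXRESULTS 5")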
| {
"content_hash": "4579d70e82bbd591d111bdb0700aae19",
"timestamp": "",
"source": "github",
"line_count": 381,
"max_line_length": 122,
"avg_line_length": 34.71653543307087,
"alnum_prop": 0.5941634535419974,
"repo_name": "sidecars/python-quickbooks",
"id": "dd7da2e81e20672d58613804e0357d417719d039",
"size": "13227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quickbooks/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "164"
},
{
"name": "Python",
"bytes": "2135934"
}
],
"symlink_target": ""
} |
from name_utilities import enum_key_for_css_keyword
import json5_generator
import template_expander
import gperf
class CSSValueKeywordsWriter(json5_generator.Writer):
_FILE_BASENAME = 'css_value_keywords'
def __init__(self, file_paths, output_dir):
json5_generator.Writer.__init__(self, file_paths, output_dir)
self._outputs = {
(self._FILE_BASENAME + '.h'): self.generate_header,
(self._FILE_BASENAME + '.cc'): self.generate_implementation,
}
self._value_keywords = self.json5_file.name_dictionaries
first_keyword_id = 1
for offset, keyword in enumerate(self._value_keywords):
keyword['lower_name'] = keyword['name'].original.lower()
keyword['enum_name'] = enum_key_for_css_keyword(keyword['name'])
keyword['enum_value'] = first_keyword_id + offset
if keyword['name'].original.startswith('-internal-'):
assert keyword['mode'] is None, 'Can\'t specify mode for ' \
'value keywords with the prefix "-internal-".'
keyword['mode'] = 'UASheet'
else:
assert keyword['mode'] != 'UASheet', 'UASheet mode only ' \
'value keywords should have the prefix "-internal-".'
self._keyword_count = len(self._value_keywords) + first_keyword_id
@template_expander.use_jinja(
'core/css/templates/css_value_keywords.h.tmpl')
def generate_header(self):
return {
'value_keywords':
self._value_keywords,
'value_keywords_count':
self._keyword_count,
'max_value_keyword_length':
max(
len(keyword['name'].original)
for keyword in self._value_keywords),
'header_guard':
self.make_header_guard(self._relative_output_dir +
self._FILE_BASENAME + '.h')
}
def _value_keywords_with_mode(self, mode):
return [
keyword for keyword in self._value_keywords
if keyword['mode'] == mode
]
@gperf.use_jinja_gperf_template(
'core/css/templates/css_value_keywords.cc.tmpl',
['-Q', 'CSSValueStringPool'])
def generate_implementation(self):
keyword_offsets = []
current_offset = 0
for keyword in self._value_keywords:
keyword_offsets.append(current_offset)
current_offset += len(keyword["name"].original) + 1
return {
'value_keywords':
self._value_keywords,
'value_keyword_offsets':
keyword_offsets,
'ua_sheet_mode_values_keywords':
self._value_keywords_with_mode('UASheet'),
'quirks_mode_or_ua_sheet_mode_values_keywords':
self._value_keywords_with_mode('QuirksOrUASheet'),
'gperf_path':
self.gperf_path,
}
if __name__ == "__main__":
json5_generator.Maker(CSSValueKeywordsWriter).main()
| {
"content_hash": "33e21a2e4da07992997d39f22ead3261",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 76,
"avg_line_length": 37.54320987654321,
"alnum_prop": 0.5695494902992436,
"repo_name": "chromium/chromium",
"id": "de0f837caa7dafa160ea7c7aa22096564138bdfc",
"size": "3064",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "third_party/blink/renderer/build/scripts/core/css/make_css_value_keywords.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import os
import pytest
import apistar
filenames = [
'testcases/openapi/api-with-examples.yaml',
# 'testcases/openapi/callback-example.yaml',
# 'testcases/openapi/link-example.yaml',
'testcases/openapi/petstore-expanded.yaml',
'testcases/openapi/petstore.yaml',
'testcases/openapi/uspto.yaml',
]
@pytest.mark.parametrize("filename", filenames)
def test_openapi(filename):
with open(filename, 'rb') as input_file:
content = input_file.read()
path, extension = os.path.splitext(filename)
encoding = {".json": "json", ".yaml": "yaml"}[extension]
apistar.validate(content, format='openapi', encoding=encoding)
| {
"content_hash": "49b58fa2f482870e3241317dbafce2e8",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 66,
"avg_line_length": 27.5,
"alnum_prop": 0.696969696969697,
"repo_name": "tomchristie/apistar",
"id": "a48a39ba738597c952f3b101e3a6d0ed74234e2e",
"size": "660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/schemas/test_openapi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "29222"
},
{
"name": "HTML",
"bytes": "18167"
},
{
"name": "JavaScript",
"bytes": "10517"
},
{
"name": "Python",
"bytes": "170674"
},
{
"name": "Shell",
"bytes": "1549"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from web.data import *
from web.models import User, SharedTask
from web.berg import getQRCodeUrl
import base64
import datetime
import hashlib
import hmac
import json
import random
import string
import uuid
# TODO: add AWS credentials here
AWS_ACCESS_KEY_ID = 'XXX'
AWS_SECRET_KEY = 'XXX'
def _generateRandomName(n):
C = random.choice(string.uppercase)
cs = [random.choice(string.lowercase) for i in xrange(n - 1)]
return C + ''.join(cs)
def generateRandomCode():
return hashlib.sha256(str(uuid.uuid4())).hexdigest()
def createRandomUser(code=None, name=None):
if code is None:
code = generateRandomCode()
if name is None:
name = _generateRandomName(10)
user = User(
code = code,
name = name,
cha = random.randrange(1, 8),
con = random.randrange(1, 8),
dex = random.randrange(1, 8),
int = random.randrange(1, 8),
str = random.randrange(1, 8),
wis = random.randrange(1, 8),
role = random.randrange(7))
user.save()
def createRandomUsers(n):
for i in xrange(n):
createRandomUser()
def renderViewUser(request, user, is_admin = False):
roles_met = [{
'role_name': role_name,
'task_name': TASKS[(user.role + role_id) % 7],
'completed_with': [],
'started_with': []
} for role_id, role_name in enumerate(ROLES)]
shared_tasks = SharedTask.objects.filter(user_1 = user)
for shared_task in shared_tasks:
user_met = shared_task.user_2
if shared_task.completed is None:
roles_met[user_met.role]['started_with'].append(user_met)
else:
roles_met[user_met.role]['completed_with'].append(user_met)
completed_all_tasks = True
for role_met in roles_met:
if not role_met['completed_with']:
completed_all_tasks = False
break
return render(request, 'view.html', {
'user': user,
'is_admin': is_admin,
'user_role': ROLES[user.role],
'roles': ROLES,
'story': ROLE_STORIES[user.role],
'qrcode_url': getQRCodeUrl(request, user, 300),
'roles_met': roles_met,
'completed_all_tasks': completed_all_tasks
})
def getPhotoCredentials(user):
key = 'images/{0}.jpg'.format(user.code)
now = datetime.datetime.utcnow()
expiration = now + datetime.timedelta(seconds=60)
policy_object = {
'expiration': expiration.isoformat() + 'Z',
'conditions': [
{'bucket': 'k4hoedown'},
{'acl': 'public-read'},
{'Cache-Control': 'max-age=1209600'},
{'Content-Type': 'image/jpeg'},
['starts-with', '$key', key],
['content-length-range', 0, 1024 * 1024]
]
}
policy_document = json.dumps(policy_object)
policy = base64.b64encode(policy_document)
signature_hmac = hmac.new(AWS_SECRET_KEY, policy, hashlib.sha1)
signature = base64.b64encode(signature_hmac.digest())
response = {
'AWSAccessKeyId': AWS_ACCESS_KEY_ID,
'acl': 'public-read',
'Cache-Control': 'max-age=1209600',
'Content-Type': 'image/jpeg',
'key': key,
'policy': policy,
'signature': signature
}
return response
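# Sketch of how a client could consume the policy fields returned by
# getPhotoCredentials() above. S3 browser-style POST uploads send the policy
# fields as ordinary form fields followed by the file itself; the bucket URL
# matches the 'k4hoedown' bucket named in the policy, while the use of
# ``requests`` and the file name are illustrative assumptions only.
def examplePhotoUpload(user, jpeg_bytes):
    import requests  # not a dependency of this module
    fields = getPhotoCredentials(user)
    return requests.post(
        'https://k4hoedown.s3.amazonaws.com/',
        data=fields,
        files={'file': ('photo.jpg', jpeg_bytes, 'image/jpeg')})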
| {
"content_hash": "8812e13472f6a8d57e604829662bc250",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 67,
"avg_line_length": 30.50467289719626,
"alnum_prop": 0.6084558823529411,
"repo_name": "savageinternet/hoedown",
"id": "1388f639ee33ff8376af07f795e024396ea04b60",
"size": "3264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "k4pop/web/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4399"
},
{
"name": "JavaScript",
"bytes": "64375"
},
{
"name": "Python",
"bytes": "27197"
}
],
"symlink_target": ""
} |
"""
All of these URLs serve a simple HTML page; the entire web-interface logic
lives in the JS. The exception is 'zoo.views.task_log': for speed, the rich
colored task log is rendered on the server using a template.
"""
from web.zoo.views import home, server, gallery, install, install_application
from web.zoo.views import upgrade, uninstall, task_list, task, task_log
from web.zoo.views import settings_view, console, icon, update, cancel_task
from web.helpers.http import CommonRequestHandler, IconRequestHandler
from web.helpers.http import CommonRequestHandlerOneParam, SettingsRequestHandler
application_urls = [(r'/', CommonRequestHandler, dict(callable_object=home, name="home", appname='front')),
(r'/server/$', CommonRequestHandler, dict(callable_object=server, name="server",
appname='front')),
(r'/gallery/$', CommonRequestHandler, dict(callable_object=gallery, name="gallery",
appname='front')),
(r'/install/application/$', CommonRequestHandler, dict(callable_object=install_application,
name='install',
appname='front')),
(r'/install/$', CommonRequestHandler, dict(callable_object=install, name='install',
appname='front')),
(r'/cancel_install/$', CommonRequestHandler, dict(callable_object=install,
name='cancel_install',
appname='front')),
(r'/upgrade/$', CommonRequestHandler, dict(callable_object=upgrade, name='upgrade',
appname='front')),
(r'/uninstall/$', CommonRequestHandler, dict(callable_object=uninstall, name='uninstall',
appname='front')),
(r'/task/$', CommonRequestHandler, dict(callable_object=task_list, name='task_list',
appname='front')),
(r'/task/(?P<param1>\d+)/$', CommonRequestHandlerOneParam, dict(callable_object=task,
name='task_id',
appname='front')),
(r'/cancel_task/(?P<param1>\d+)/$', CommonRequestHandlerOneParam, dict(callable_object=cancel_task,
name='cancel_task_id',
appname='front')),
(r'/task/(?P<param1>\d+)/log/$', CommonRequestHandlerOneParam, dict(callable_object=task_log,
name='task_id_log',
appname='front')),
(r'/settings/$', SettingsRequestHandler, dict(callable_object=settings_view, name='settings_view',
appname='front')),
(r'/console/$', CommonRequestHandler, dict(callable_object=console, name='console',
appname='front')),
(r'/update/$', CommonRequestHandler, dict(callable_object=update, name='update',
appname='front')),
(r'/product/(?P<product_name>[^/]+)/icon/', IconRequestHandler, dict(callable_object=icon,
name='icon',
appname='front'
))
]
| {
"content_hash": "29bc8272382af0f7c2b00b518d333334",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 119,
"avg_line_length": 79.81818181818181,
"alnum_prop": 0.41275626423690204,
"repo_name": "helicontech/zoo",
"id": "612e14a978c4c4c0d36b196b0889c0691c1a63ad",
"size": "4570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Zoocmd/web/zoo/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8232"
},
{
"name": "C",
"bytes": "1962566"
},
{
"name": "C++",
"bytes": "20316288"
},
{
"name": "CMake",
"bytes": "293719"
},
{
"name": "CSS",
"bytes": "16032"
},
{
"name": "HTML",
"bytes": "88920"
},
{
"name": "JavaScript",
"bytes": "389952"
},
{
"name": "Objective-C",
"bytes": "929638"
},
{
"name": "Python",
"bytes": "550176"
},
{
"name": "XSLT",
"bytes": "629"
}
],
"symlink_target": ""
} |
'''
Created on Feb 24, 2014
@author: Chunwei Yan @ PKU
@mail: [email protected]
'''
from __future__ import division
import sys
sys.path.append('..')
import unittest
import numpy
from models.recursive_autoencoder import BinaryAutoencoder
class TestBinaryAutoencoder(unittest.TestCase):
def setUp(self):
self.bae = BinaryAutoencoder(
len_vector = 8,
)
def test_get_cost_updates(self):
self.bae.get_cost_updates()
def test_hidden_fn(self):
x = [0.1 for i in range(16)]
x = numpy.array(x, dtype='float32')
print 'hidden:', self.bae.hidden_fn(x)
def test_train_fn(self):
x = [0.1 for i in range(16)]
x = numpy.array(x, dtype='float32')
lcount = 4
rcount = 12
for i in range(100):
print 'cost', self.bae.train_fn(x, lcount, rcount)
def test_predict_fn(self):
x = [0.1 for i in range(16)]
x = numpy.array(x, dtype='float32')
lcount = 4
rcount = 12
for i in range(100):
hidden, cost = self.bae.predict_fn(x, lcount, rcount)
print 'cost', cost
def test_train_iter(self):
print 'test train iter ...'
x = [0.1 for i in range(16)]
x = numpy.array(x, dtype='float32')
lcount = 4
rcount = 12
for i in range(100):
cost = self.bae.train_iter(x, lcount, rcount)
print 'cost', cost
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "a7d77dbd17e686b79aaab4491fe157d3",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 65,
"avg_line_length": 24.306451612903224,
"alnum_prop": 0.5600530856005309,
"repo_name": "Superjom/NeuralNetworks",
"id": "71af3ed0b6d547976a6f3c76ed81fd722da66c16",
"size": "1531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/binary_autoencoder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "112964"
},
{
"name": "Shell",
"bytes": "4358"
}
],
"symlink_target": ""
} |
from opensfm.actions import create_tracks
from . import command
import argparse
from opensfm.dataset import DataSet
class Command(command.CommandBase):
name = "create_tracks"
help = "Link matches pair-wise matches into tracks"
def run_impl(self, dataset: DataSet, args: argparse.Namespace) -> None:
create_tracks.run_dataset(dataset)
def add_arguments_impl(self, parser: argparse.ArgumentParser) -> None:
pass
| {
"content_hash": "783d166e21e0518a355fdc2465039c27",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 75,
"avg_line_length": 27.9375,
"alnum_prop": 0.727069351230425,
"repo_name": "mapillary/OpenSfM",
"id": "d5ee6e6ac9d403bc594892f50f726362f079c9ce",
"size": "447",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "opensfm/commands/create_tracks.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "396"
},
{
"name": "C++",
"bytes": "648986"
},
{
"name": "CMake",
"bytes": "78367"
},
{
"name": "CSS",
"bytes": "6426"
},
{
"name": "Dockerfile",
"bytes": "642"
},
{
"name": "HTML",
"bytes": "63144"
},
{
"name": "JavaScript",
"bytes": "1054984"
},
{
"name": "Python",
"bytes": "1141169"
},
{
"name": "Shell",
"bytes": "4006"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__all__ = [
"network_split_edge",
]
def network_split_edge(network, u, v, t=0.5):
"""Split and edge by inserting a node along its length.
Parameters
----------
    network : Network
        The network in which the edge is split.
    u : hashable
        The key of the first node of the edge.
    v : hashable
        The key of the second node of the edge.
t : float, optional
The position of the inserted node on the edge.
Returns
-------
hashable
The key of the inserted node.
Raises
------
ValueError
If `t` is not in the range 0-1.
Exception
If `u` and `v` are not neighbors.
"""
if not network.has_edge(u, v):
return
if t <= 0.0:
raise ValueError("t should be greater than 0.0.")
if t >= 1.0:
raise ValueError("t should be smaller than 1.0.")
# the split node
x, y, z = network.edge_point(u, v, t)
w = network.add_node(x=x, y=y, z=z)
network.add_edge(u, w)
network.add_edge(w, v)
if v in network.edge[u]:
del network.edge[u][v]
elif u in network.edge[v]:
del network.edge[v][u]
else:
raise Exception
# split half-edge UV
network.adjacency[u][w] = None
network.adjacency[w][v] = None
del network.adjacency[u][v]
# split half-edge VU
network.adjacency[v][w] = None
network.adjacency[w][u] = None
del network.adjacency[v][u]
# return the key of the split node
return w
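# Minimal usage sketch for the operation above, assuming a compas ``Network``
# exposing the same ``add_node`` / ``add_edge`` / ``edge_point`` interface the
# function body relies on; the import path is the conventional compas one.
def _example_split_edge():
    from compas.datastructures import Network
    network = Network()
    a = network.add_node(x=0.0, y=0.0, z=0.0)
    b = network.add_node(x=10.0, y=0.0, z=0.0)
    network.add_edge(a, b)
    # insert a node a quarter of the way along edge (a, b)
    return network_split_edge(network, a, b, t=0.25)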
| {
"content_hash": "234aae3c0351f456fa01858c63bc31ef",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 59,
"avg_line_length": 22.217391304347824,
"alnum_prop": 0.5772994129158513,
"repo_name": "compas-dev/compas",
"id": "04fb9a20944274e7f7aa7290075ee40b0a02dfee",
"size": "1533",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/compas/datastructures/network/operations/split.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3181804"
}
],
"symlink_target": ""
} |
"""
web storage for posting and getting data for a given pubkeyid.
The following requests are handled:
POST /PUBKEYID (content-type=application/json)
{"key": FULL_PUBLIC_KEY, # must match pubkeyid,
"message": json_blob,
"signature": signature for data blob,
}
GET /PUBKEYID (reply will have content-type=application/json)
same content as previously posted
A client will typically do the following to POST new information:
data = # some data structure
message = json.dumps(data)
signature = sign_data(message, key)
POST(URL + pubkeyid, data=json.dumps(
{"key": full_pubkey,
"message": message,
"signature": signature
}
)
and it will do the following to GET and verify information:
r = GET(URL + pubkeyid)
json = r.json()
message = json["message"]
key = json["key"]
verify_pubkeyid_belongs_to_pubkey(pubkeyid, key)
verify_signature(message, signature, key)
data = # the data structure posted above.
"""
import sys
import argparse
import logging
from simplejson import loads, dumps
from webob import Request, Response, exc
from friendsecure import crypto
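# Runnable sketch of the client flow described in the module docstring.
# ``requests`` is not a dependency of this module and the ``sign`` callable is
# a placeholder for whatever produces the detached signature client-side; only
# ``crypto.verify_message`` mirrors the server-side check implemented below.
def _example_client_roundtrip(base_url, pubkeyid, full_pubkey, data, sign):
    import requests
    message = dumps(data)
    requests.post(base_url + "/" + pubkeyid, data=dumps(
        {"key": full_pubkey, "message": message, "signature": sign(message)}))
    reply = requests.get(base_url + "/" + pubkeyid).json()["result"]
    assert crypto.verify_message(fingerprint=pubkeyid, **reply)
    return loads(reply["message"])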
def main(args=None):
from wsgiref import simple_server
parser = argparse.ArgumentParser(
usage="%prog [OPTIONS]")
parser.add_argument(
'-p', '--port', default='8080', type=int,
help='Port to serve on (default 8080)')
parser.add_argument(
'-H', '--host', default='0.0.0.0', type=str,
help='Host to serve on (default localhost; 0.0.0.0 to make public)')
if args is None:
args = sys.argv[1:]
args = parser.parse_args()
app = make_app()
server = simple_server.make_server(args.host, args.port, app)
print ('Serving on http://%s:%s' % (args.host, args.port))
server.serve_forever()
def make_app():
return JsonRpcApp()
class JsonRpcApp(object):
def __init__(self):
self._pubkeyid2data = {}
def __call__(self, environ, start_response):
req = Request(environ)
try:
meth = getattr(self, req.method, None)
if meth is None:
raise exc.HTTPMethodNotAllowed(
"method %r not allowed" % req.method)
#allowed='POST')
pubkeyid = req.path.split("/")[-1]
resp = meth(req, pubkeyid)
except ValueError, e:
resp = exc.HTTPBadRequest(str(e))
except exc.HTTPException, e:
resp = e
return resp(environ, start_response)
def POST(self, req, pubkeyid):
# XXX verify signature
json = loads(req.body)
self.verify_signed_json_presence(pubkeyid, json)
logging.warn("registering %s" %(json,))
self._pubkeyid2data[pubkeyid] = json
return Response(status=200)
def GET(self, req, pubkeyid):
# XXX also transport back signature
data = self._pubkeyid2data[pubkeyid]
resp = Response(
content_type='application/json',
body=dumps(dict(result=data, error=None)))
return resp
_JSON_KEYS = set(("key", "message", "signature"))
def verify_signed_json_presence(self, pubkeyid, json):
if not set(json.keys()) == self._JSON_KEYS:
raise exc.HTTPBadRequest(
"json must have these keys: %s" %(self._JSON_KEYS))
if not crypto.verify_message(fingerprint=pubkeyid, **json):
raise exc.HTTPBadRequest("bad message integrity")
#verify that pubkeyid fits to pubkey and that signature is a valid
#signature for data
| {
"content_hash": "9e44f96ee293d896cd299e2912a0c073",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 76,
"avg_line_length": 32.097345132743364,
"alnum_prop": 0.612351805900193,
"repo_name": "hpk42/p4p",
"id": "0514662ccaea849e66ae3c42f3a6c695fda6628b",
"size": "3627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "friendsecure/lookup_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30506"
}
],
"symlink_target": ""
} |
from functools import partial
import threading
from PyQt4.Qt import Qt
from PyQt4.Qt import QGridLayout, QInputDialog, QPushButton
from PyQt4.Qt import QVBoxLayout, QLabel, SIGNAL
from electrum_cesc_gui.qt.main_window import StatusBarButton
from electrum_cesc_gui.qt.util import *
from .plugin import TIM_NEW, TIM_RECOVER, TIM_MNEMONIC
from ..hw_wallet.qt import QtHandlerBase
from electrum_cesc.i18n import _
from electrum_cesc.plugins import hook, DeviceMgr
from electrum_cesc.util import PrintError, UserCancelled
from electrum_cesc.wallet import Wallet, BIP44_Wallet
PASSPHRASE_HELP_SHORT = _(
"Passphrases allow you to access new wallets, each "
"hidden behind a particular case-sensitive passphrase.")
PASSPHRASE_HELP = PASSPHRASE_HELP_SHORT + " " + _(
"You need to create a separate Electrum wallet for each passphrase "
"you use as they each generate different addresses. Changing "
"your passphrase does not lose other wallets, each is still "
"accessible behind its own passphrase.")
RECOMMEND_PIN = _(
"You should enable PIN protection. Your PIN is the only protection "
"for your cryptoescudos if your device is lost or stolen.")
PASSPHRASE_NOT_PIN = _(
"If you forget a passphrase you will be unable to access any "
"cryptoescudos in the wallet behind it. A passphrase is not a PIN. "
"Only change this if you are sure you understand it.")
CHARACTER_RECOVERY = (
"Use the recovery cipher shown on your device to input your seed words. "
"The cipher changes with every keypress.\n"
"After at most 4 letters the device will auto-complete a word.\n"
"Press SPACE or the Accept Word button to accept the device's auto-"
"completed word and advance to the next one.\n"
"Press BACKSPACE to go back a character or word.\n"
"Press ENTER or the Seed Entered button once the last word in your "
"seed is auto-completed.")
class CharacterButton(QPushButton):
def __init__(self, text=None):
QPushButton.__init__(self, text)
def keyPressEvent(self, event):
event.setAccepted(False) # Pass through Enter and Space keys
class CharacterDialog(WindowModalDialog):
def __init__(self, parent):
super(CharacterDialog, self).__init__(parent)
self.setWindowTitle(_("KeepKey Seed Recovery"))
self.character_pos = 0
self.word_pos = 0
self.loop = QEventLoop()
self.word_help = QLabel()
self.char_buttons = []
vbox = QVBoxLayout(self)
vbox.addWidget(WWLabel(CHARACTER_RECOVERY))
hbox = QHBoxLayout()
hbox.addWidget(self.word_help)
for i in range(4):
char_button = CharacterButton('*')
char_button.setMaximumWidth(36)
self.char_buttons.append(char_button)
hbox.addWidget(char_button)
self.accept_button = CharacterButton(_("Accept Word"))
self.accept_button.clicked.connect(partial(self.process_key, 32))
self.rejected.connect(partial(self.loop.exit, 1))
hbox.addWidget(self.accept_button)
hbox.addStretch(1)
vbox.addLayout(hbox)
self.finished_button = QPushButton(_("Seed Entered"))
self.cancel_button = QPushButton(_("Cancel"))
self.finished_button.clicked.connect(partial(self.process_key,
Qt.Key_Return))
self.cancel_button.clicked.connect(self.rejected)
buttons = Buttons(self.finished_button, self.cancel_button)
vbox.addSpacing(40)
vbox.addLayout(buttons)
self.refresh()
self.show()
def refresh(self):
self.word_help.setText("Enter seed word %2d:" % (self.word_pos + 1))
self.accept_button.setEnabled(self.character_pos >= 3)
self.finished_button.setEnabled((self.word_pos in (11, 17, 23)
and self.character_pos >= 3))
for n, button in enumerate(self.char_buttons):
button.setEnabled(n == self.character_pos)
if n == self.character_pos:
button.setFocus()
def is_valid_alpha_space(self, key):
# Auto-completion requires at least 3 characters
if key == ord(' ') and self.character_pos >= 3:
return True
# Firmware aborts protocol if the 5th character is non-space
if self.character_pos >= 4:
return False
return (key >= ord('a') and key <= ord('z')
or (key >= ord('A') and key <= ord('Z')))
def process_key(self, key):
self.data = None
if key == Qt.Key_Return and self.finished_button.isEnabled():
self.data = {'done': True}
elif key == Qt.Key_Backspace and (self.word_pos or self.character_pos):
self.data = {'delete': True}
elif self.is_valid_alpha_space(key):
self.data = {'character': chr(key).lower()}
if self.data:
self.loop.exit(0)
def keyPressEvent(self, event):
self.process_key(event.key())
if not self.data:
QDialog.keyPressEvent(self, event)
def get_char(self, word_pos, character_pos):
self.word_pos = word_pos
self.character_pos = character_pos
self.refresh()
if self.loop.exec_():
self.data = None # User cancelled
class QtHandler(QtHandlerBase):
charSig = pyqtSignal(object)
def __init__(self, win, pin_matrix_widget_class, device):
super(QtHandler, self).__init__(win, device)
win.connect(win, SIGNAL('pin_dialog'), self.pin_dialog)
self.charSig.connect(self.update_character_dialog)
self.pin_matrix_widget_class = pin_matrix_widget_class
self.character_dialog = None
def get_char(self, msg):
self.done.clear()
self.charSig.emit(msg)
self.done.wait()
data = self.character_dialog.data
if not data or 'done' in data:
self.character_dialog.accept()
self.character_dialog = None
return data
def get_pin(self, msg):
self.done.clear()
self.win.emit(SIGNAL('pin_dialog'), msg)
self.done.wait()
return self.response
def pin_dialog(self, msg):
# Needed e.g. when resetting a device
self.clear_dialog()
dialog = WindowModalDialog(self.top_level_window(), _("Enter PIN"))
matrix = self.pin_matrix_widget_class()
vbox = QVBoxLayout()
vbox.addWidget(QLabel(msg))
vbox.addWidget(matrix)
vbox.addLayout(Buttons(CancelButton(dialog), OkButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
self.response = str(matrix.get_value())
self.done.set()
def update_character_dialog(self, msg):
if not self.character_dialog:
self.character_dialog = CharacterDialog(self.top_level_window())
self.character_dialog.get_char(msg.word_pos, msg.character_pos)
self.done.set()
def request_trezor_init_settings(self, method, device):
wizard = self.win
vbox = QVBoxLayout()
next_enabled=True
label = QLabel(_("Enter a label to name your device:"))
name = QLineEdit()
hl = QHBoxLayout()
hl.addWidget(label)
hl.addWidget(name)
hl.addStretch(1)
vbox.addLayout(hl)
def clean_text(widget):
text = unicode(widget.toPlainText()).strip()
return ' '.join(text.split())
if method in [TIM_NEW, TIM_RECOVER]:
gb = QGroupBox()
hbox1 = QHBoxLayout()
gb.setLayout(hbox1)
# KeepKey recovery doesn't need a word count
if method == TIM_NEW or self.device == 'TREZOR':
vbox.addWidget(gb)
gb.setTitle(_("Select your seed length:"))
bg = QButtonGroup()
for i, count in enumerate([12, 18, 24]):
rb = QRadioButton(gb)
rb.setText(_("%d words") % count)
bg.addButton(rb)
bg.setId(rb, i)
hbox1.addWidget(rb)
rb.setChecked(True)
cb_pin = QCheckBox(_('Enable PIN protection'))
cb_pin.setChecked(True)
else:
text = QTextEdit()
text.setMaximumHeight(60)
if method == TIM_MNEMONIC:
msg = _("Enter your BIP39 mnemonic:")
else:
msg = _("Enter the master private key beginning with xprv:")
def set_enabled():
wizard.next_button.setEnabled(Wallet.is_xprv(clean_text(text)))
text.textChanged.connect(set_enabled)
next_enabled = False
vbox.addWidget(QLabel(msg))
vbox.addWidget(text)
pin = QLineEdit()
pin.setValidator(QRegExpValidator(QRegExp('[1-9]{0,10}')))
pin.setMaximumWidth(100)
hbox_pin = QHBoxLayout()
hbox_pin.addWidget(QLabel(_("Enter your PIN (digits 1-9):")))
hbox_pin.addWidget(pin)
hbox_pin.addStretch(1)
if method in [TIM_NEW, TIM_RECOVER]:
vbox.addWidget(WWLabel(RECOMMEND_PIN))
vbox.addWidget(cb_pin)
else:
vbox.addLayout(hbox_pin)
passphrase_msg = WWLabel(PASSPHRASE_HELP_SHORT)
passphrase_warning = WWLabel(PASSPHRASE_NOT_PIN)
passphrase_warning.setStyleSheet("color: red")
cb_phrase = QCheckBox(_('Enable passphrases'))
cb_phrase.setChecked(False)
vbox.addWidget(passphrase_msg)
vbox.addWidget(passphrase_warning)
vbox.addWidget(cb_phrase)
wizard.set_main_layout(vbox, next_enabled=next_enabled)
if method in [TIM_NEW, TIM_RECOVER]:
item = bg.checkedId()
pin = cb_pin.isChecked()
else:
item = ' '.join(str(clean_text(text)).split())
pin = str(pin.text())
return (item, unicode(name.text()), pin, cb_phrase.isChecked())
def qt_plugin_class(base_plugin_class):
class QtPlugin(base_plugin_class):
# Derived classes must provide the following class-static variables:
# icon_file
# pin_matrix_widget_class
def create_handler(self, window):
return QtHandler(window, self.pin_matrix_widget_class(), self.device)
@hook
def load_wallet(self, wallet, window):
if type(wallet) != self.wallet_class:
return
window.tzb = StatusBarButton(QIcon(self.icon_file), self.device,
partial(self.settings_dialog, window))
window.statusBar().addPermanentWidget(window.tzb)
wallet.handler = self.create_handler(window)
# Trigger a pairing
wallet.thread.add(partial(self.get_client, wallet))
def on_create_wallet(self, wallet, wizard):
assert type(wallet) == self.wallet_class
wallet.handler = self.create_handler(wizard)
wallet.thread = TaskThread(wizard, wizard.on_error)
# Setup device and create accounts in separate thread; wait until done
loop = QEventLoop()
exc_info = []
self.setup_device(wallet, on_done=loop.quit,
on_error=lambda info: exc_info.extend(info))
loop.exec_()
# If an exception was thrown, show to user and exit install wizard
if exc_info:
wizard.on_error(exc_info)
raise UserCancelled
@hook
def receive_menu(self, menu, addrs, wallet):
if type(wallet) == self.wallet_class and len(addrs) == 1:
def show_address():
wallet.thread.add(partial(self.show_address, wallet, addrs[0]))
menu.addAction(_("Show on %s") % self.device, show_address)
def settings_dialog(self, window):
device_id = self.choose_device(window)
if device_id:
SettingsDialog(window, self, device_id).exec_()
def choose_device(self, window):
'''This dialog box should be usable even if the user has
forgotten their PIN or it is in bootloader mode.'''
device_id = self.device_manager().wallet_id(window.wallet)
if not device_id:
info = self.device_manager().select_device(window.wallet, self)
device_id = info.device.id_
return device_id
def query_choice(self, window, msg, choices):
dialog = WindowModalDialog(window)
clayout = ChoicesLayout(msg, choices)
layout = clayout.layout()
layout.addStretch(1)
layout.addLayout(Buttons(CancelButton(dialog), OkButton(dialog)))
dialog.setLayout(layout)
if not dialog.exec_():
return None
return clayout.selected_index()
return QtPlugin
class SettingsDialog(WindowModalDialog):
'''This dialog doesn't require a device be paired with a wallet.
We want users to be able to wipe a device even if they've forgotten
their PIN.'''
def __init__(self, window, plugin, device_id):
title = _("%s Settings") % plugin.device
super(SettingsDialog, self).__init__(window, title)
self.setMaximumWidth(540)
devmgr = plugin.device_manager()
config = devmgr.config
handler = window.wallet.handler
thread = window.wallet.thread
# wallet can be None, needn't be window.wallet
wallet = devmgr.wallet_by_id(device_id)
hs_rows, hs_cols = (64, 128)
def invoke_client(method, *args, **kw_args):
unpair_after = kw_args.pop('unpair_after', False)
def task():
client = devmgr.client_by_id(device_id, handler)
if not client:
raise RuntimeError("Device not connected")
if method:
getattr(client, method)(*args, **kw_args)
if unpair_after:
devmgr.unpair_id(device_id)
return client.features
thread.add(task, on_success=update)
def update(features):
self.features = features
set_label_enabled()
bl_hash = features.bootloader_hash.encode('hex')
bl_hash = "\n".join([bl_hash[:32], bl_hash[32:]])
noyes = [_("No"), _("Yes")]
endis = [_("Enable Passphrases"), _("Disable Passphrases")]
disen = [_("Disabled"), _("Enabled")]
setchange = [_("Set a PIN"), _("Change PIN")]
version = "%d.%d.%d" % (features.major_version,
features.minor_version,
features.patch_version)
coins = ", ".join(coin.coin_name for coin in features.coins)
device_label.setText(features.label)
pin_set_label.setText(noyes[features.pin_protection])
passphrases_label.setText(disen[features.passphrase_protection])
bl_hash_label.setText(bl_hash)
label_edit.setText(features.label)
device_id_label.setText(features.device_id)
initialized_label.setText(noyes[features.initialized])
version_label.setText(version)
coins_label.setText(coins)
clear_pin_button.setVisible(features.pin_protection)
clear_pin_warning.setVisible(features.pin_protection)
pin_button.setText(setchange[features.pin_protection])
pin_msg.setVisible(not features.pin_protection)
passphrase_button.setText(endis[features.passphrase_protection])
language_label.setText(features.language)
def set_label_enabled():
label_apply.setEnabled(label_edit.text() != self.features.label)
def rename():
invoke_client('change_label', unicode(label_edit.text()))
def toggle_passphrase():
title = _("Confirm Toggle Passphrase Protection")
currently_enabled = self.features.passphrase_protection
if currently_enabled:
msg = _("After disabling passphrases, you can only pair this "
"Electrum wallet if it had an empty passphrase. "
"If its passphrase was not empty, you will need to "
"create a new wallet with the install wizard. You "
"can use this wallet again at any time by re-enabling "
"passphrases and entering its passphrase.")
else:
msg = _("Your current Electrum wallet can only be used with "
"an empty passphrase. You must create a separate "
"wallet with the install wizard for other passphrases "
"as each one generates a new set of addresses.")
msg += "\n\n" + _("Are you sure you want to proceed?")
if not self.question(msg, title=title):
return
invoke_client('toggle_passphrase', unpair_after=currently_enabled)
def change_homescreen():
from PIL import Image # FIXME
dialog = QFileDialog(self, _("Choose Homescreen"))
filename = dialog.getOpenFileName()
if filename:
im = Image.open(str(filename))
if im.size != (hs_cols, hs_rows):
raise Exception('Image must be 64 x 128 pixels')
im = im.convert('1')
pix = im.load()
img = ''
for j in range(hs_rows):
for i in range(hs_cols):
img += '1' if pix[i, j] else '0'
img = ''.join(chr(int(img[i:i + 8], 2))
for i in range(0, len(img), 8))
invoke_client('change_homescreen', img)
def clear_homescreen():
invoke_client('change_homescreen', '\x00')
def set_pin():
invoke_client('set_pin', remove=False)
def clear_pin():
invoke_client('set_pin', remove=True)
def wipe_device():
if wallet and sum(wallet.get_balance()):
title = _("Confirm Device Wipe")
msg = _("Are you SURE you want to wipe the device?\n"
"Your wallet still has cryptoescudos in it!")
if not self.question(msg, title=title,
icon=QMessageBox.Critical):
return
invoke_client('wipe_device', unpair_after=True)
def slider_moved():
mins = timeout_slider.sliderPosition()
timeout_minutes.setText(_("%2d minutes") % mins)
def slider_released():
config.set_session_timeout(timeout_slider.sliderPosition() * 60)
# Information tab
info_tab = QWidget()
info_layout = QVBoxLayout(info_tab)
info_glayout = QGridLayout()
info_glayout.setColumnStretch(2, 1)
device_label = QLabel()
pin_set_label = QLabel()
passphrases_label = QLabel()
version_label = QLabel()
device_id_label = QLabel()
bl_hash_label = QLabel()
bl_hash_label.setWordWrap(True)
coins_label = QLabel()
coins_label.setWordWrap(True)
language_label = QLabel()
initialized_label = QLabel()
rows = [
(_("Device Label"), device_label),
(_("PIN set"), pin_set_label),
(_("Passphrases"), passphrases_label),
(_("Firmware Version"), version_label),
(_("Device ID"), device_id_label),
(_("Bootloader Hash"), bl_hash_label),
(_("Supported Coins"), coins_label),
(_("Language"), language_label),
(_("Initialized"), initialized_label),
]
for row_num, (label, widget) in enumerate(rows):
info_glayout.addWidget(QLabel(label), row_num, 0)
info_glayout.addWidget(widget, row_num, 1)
info_layout.addLayout(info_glayout)
# Settings tab
settings_tab = QWidget()
settings_layout = QVBoxLayout(settings_tab)
settings_glayout = QGridLayout()
# Settings tab - Label
        label_msg = QLabel(_("Name this %s. If you have multiple devices "
"their labels help distinguish them.")
% plugin.device)
label_msg.setWordWrap(True)
label_label = QLabel(_("Device Label"))
label_edit = QLineEdit()
label_edit.setMinimumWidth(150)
label_edit.setMaxLength(plugin.MAX_LABEL_LEN)
label_apply = QPushButton(_("Apply"))
label_apply.clicked.connect(rename)
label_edit.textChanged.connect(set_label_enabled)
settings_glayout.addWidget(label_label, 0, 0)
settings_glayout.addWidget(label_edit, 0, 1, 1, 2)
settings_glayout.addWidget(label_apply, 0, 3)
settings_glayout.addWidget(label_msg, 1, 1, 1, -1)
# Settings tab - PIN
pin_label = QLabel(_("PIN Protection"))
pin_button = QPushButton()
pin_button.clicked.connect(set_pin)
settings_glayout.addWidget(pin_label, 2, 0)
settings_glayout.addWidget(pin_button, 2, 1)
pin_msg = QLabel(_("PIN protection is strongly recommended. "
"A PIN is your only protection against someone "
"stealing your cryptoescudos if they obtain physical "
"access to your %s.") % plugin.device)
pin_msg.setWordWrap(True)
pin_msg.setStyleSheet("color: red")
settings_glayout.addWidget(pin_msg, 3, 1, 1, -1)
# Settings tab - Homescreen
if plugin.device != 'KeepKey': # Not yet supported by KK firmware
homescreen_layout = QHBoxLayout()
homescreen_label = QLabel(_("Homescreen"))
homescreen_change_button = QPushButton(_("Change..."))
homescreen_clear_button = QPushButton(_("Reset"))
homescreen_change_button.clicked.connect(change_homescreen)
homescreen_clear_button.clicked.connect(clear_homescreen)
homescreen_msg = QLabel(_("You can set the homescreen on your "
"device to personalize it. You must "
"choose a %d x %d monochrome black and "
"white image.") % (hs_rows, hs_cols))
homescreen_msg.setWordWrap(True)
settings_glayout.addWidget(homescreen_label, 4, 0)
settings_glayout.addWidget(homescreen_change_button, 4, 1)
settings_glayout.addWidget(homescreen_clear_button, 4, 2)
settings_glayout.addWidget(homescreen_msg, 5, 1, 1, -1)
# Settings tab - Session Timeout
timeout_label = QLabel(_("Session Timeout"))
timeout_minutes = QLabel()
timeout_slider = QSlider(Qt.Horizontal)
timeout_slider.setRange(1, 60)
timeout_slider.setSingleStep(1)
timeout_slider.setTickInterval(5)
timeout_slider.setTickPosition(QSlider.TicksBelow)
timeout_slider.setTracking(True)
timeout_msg = QLabel(
_("Clear the session after the specified period "
"of inactivity. Once a session has timed out, "
"your PIN and passphrase (if enabled) must be "
"re-entered to use the device."))
timeout_msg.setWordWrap(True)
timeout_slider.setSliderPosition(config.get_session_timeout() // 60)
slider_moved()
timeout_slider.valueChanged.connect(slider_moved)
timeout_slider.sliderReleased.connect(slider_released)
settings_glayout.addWidget(timeout_label, 6, 0)
settings_glayout.addWidget(timeout_slider, 6, 1, 1, 3)
settings_glayout.addWidget(timeout_minutes, 6, 4)
settings_glayout.addWidget(timeout_msg, 7, 1, 1, -1)
settings_layout.addLayout(settings_glayout)
settings_layout.addStretch(1)
# Advanced tab
advanced_tab = QWidget()
advanced_layout = QVBoxLayout(advanced_tab)
advanced_glayout = QGridLayout()
# Advanced tab - clear PIN
clear_pin_button = QPushButton(_("Disable PIN"))
clear_pin_button.clicked.connect(clear_pin)
clear_pin_warning = QLabel(
_("If you disable your PIN, anyone with physical access to your "
"%s device can spend your cryptoescudos.") % plugin.device)
clear_pin_warning.setWordWrap(True)
clear_pin_warning.setStyleSheet("color: red")
advanced_glayout.addWidget(clear_pin_button, 0, 2)
advanced_glayout.addWidget(clear_pin_warning, 1, 0, 1, 5)
# Advanced tab - toggle passphrase protection
passphrase_button = QPushButton()
passphrase_button.clicked.connect(toggle_passphrase)
passphrase_msg = WWLabel(PASSPHRASE_HELP)
passphrase_warning = WWLabel(PASSPHRASE_NOT_PIN)
passphrase_warning.setStyleSheet("color: red")
advanced_glayout.addWidget(passphrase_button, 3, 2)
advanced_glayout.addWidget(passphrase_msg, 4, 0, 1, 5)
advanced_glayout.addWidget(passphrase_warning, 5, 0, 1, 5)
# Advanced tab - wipe device
wipe_device_button = QPushButton(_("Wipe Device"))
wipe_device_button.clicked.connect(wipe_device)
wipe_device_msg = QLabel(
_("Wipe the device, removing all data from it. The firmware "
"is left unchanged."))
wipe_device_msg.setWordWrap(True)
wipe_device_warning = QLabel(
_("Only wipe a device if you have the recovery seed written down "
"and the device wallet(s) are empty, otherwise the cryptoescudos "
"will be lost forever."))
wipe_device_warning.setWordWrap(True)
wipe_device_warning.setStyleSheet("color: red")
advanced_glayout.addWidget(wipe_device_button, 6, 2)
advanced_glayout.addWidget(wipe_device_msg, 7, 0, 1, 5)
advanced_glayout.addWidget(wipe_device_warning, 8, 0, 1, 5)
advanced_layout.addLayout(advanced_glayout)
advanced_layout.addStretch(1)
tabs = QTabWidget(self)
tabs.addTab(info_tab, _("Information"))
tabs.addTab(settings_tab, _("Settings"))
tabs.addTab(advanced_tab, _("Advanced"))
dialog_vbox = QVBoxLayout(self)
dialog_vbox.addWidget(tabs)
dialog_vbox.addLayout(Buttons(CloseButton(self)))
# Update information
invoke_client(None)
| {
"content_hash": "c9a94243d161afd13933429e8a488899",
"timestamp": "",
"source": "github",
"line_count": 636,
"max_line_length": 83,
"avg_line_length": 41.74842767295598,
"alnum_prop": 0.592912021693281,
"repo_name": "Marcdnd/electrum-cesc",
"id": "3091b3875aa32eada4e147d829f8925a3c8accf9",
"size": "26552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/trezor/qt_generic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3536"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "3354"
},
{
"name": "Makefile",
"bytes": "849"
},
{
"name": "NSIS",
"bytes": "6970"
},
{
"name": "PHP",
"bytes": "404"
},
{
"name": "Protocol Buffer",
"bytes": "2354"
},
{
"name": "Python",
"bytes": "2163404"
},
{
"name": "Shell",
"bytes": "7908"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import unittest
from ossimdb_to_json.db import Db as AvDb
from ossimdb_to_json.db import DbConnectionError
# Configure your testing database here.
HOST = 'X.X.X.X'
USER = 'user'
PWD = 'password'
DB = 'alienvault'
class DbTest(unittest.TestCase):
"""Test basic database connection and queries."""
@classmethod
def setUpClass(self):
self._connection = AvDb()
self._connection.connect(HOST, USER, PWD, DB)
@classmethod
def tearDownClass(self):
self._connection.close()
def test_can_fetch_data(self):
"""Test we can fetch data from database."""
query = "SELECT * FROM host_ip;"
data = self._connection.make_query(query)
self.assertNotEqual(data['count'], 0)
def test_queries_params_work(self):
"""Test we can fetch data from database using prepared statements."""
query = ("SELECT hex(host_id) AS id "
"FROM host_ip "
"WHERE hex(host_id) = %s;")
params = ("2949A99CA33F11E5881D000CE21AE882", )
data = self._connection.make_query(query, params)
self.assertNotEqual(data['count'], 0)
def test_throw_connect_exception(self):
"""Test we throw a valid exception when we can't connect to MySQL."""
connection = AvDb()
non_valid_host = "192.168.2.2"
self.assertRaises(DbConnectionError,
connection.connect,
non_valid_host,
USER,
PWD,
DB,
timeout=5)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "6c474f48a731f56cce6ac9831b47fa17",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 29.103448275862068,
"alnum_prop": 0.5740521327014217,
"repo_name": "rarguelloF/ossimdb-to-json",
"id": "ee348cc05f6dd3dc30a2eeea21209c9d989e3980",
"size": "1688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20746"
}
],
"symlink_target": ""
} |
"""RNN utils for RLlib.
The main trick here is that we add the time dimension at the last moment.
The non-LSTM layers of the model see their inputs as one flat batch. Before
the LSTM cell, we reshape the input to add the expected time dimension. During
postprocessing, we dynamically pad the experience batches so that this
reshaping is possible.
Note that this padding strategy only works out if we assume zero inputs don't
meaningfully affect the loss function. This happens to be true for all the
current algorithms: https://github.com/ray-project/ray/issues/2992
"""
import numpy as np
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
@DeveloperAPI
def add_time_dimension(padded_inputs, seq_lens):
"""Adds a time dimension to padded inputs.
Arguments:
padded_inputs (Tensor): a padded batch of sequences. That is,
for seq_lens=[1, 2, 2], then inputs=[A, *, B, B, C, C], where
A, B, C are sequence elements and * denotes padding.
seq_lens (Tensor): the sequence lengths within the input batch,
suitable for passing to tf.nn.dynamic_rnn().
Returns:
Reshaped tensor of shape [NUM_SEQUENCES, MAX_SEQ_LEN, ...].
"""
# Sequence lengths have to be specified for LSTM batch inputs. The
# input batch must be padded to the max seq length given here. That is,
# batch_size == len(seq_lens) * max(seq_lens)
padded_batch_size = tf.shape(padded_inputs)[0]
max_seq_len = padded_batch_size // tf.shape(seq_lens)[0]
# Dynamically reshape the padded batch to introduce a time dimension.
new_batch_size = padded_batch_size // max_seq_len
new_shape = ([new_batch_size, max_seq_len] +
padded_inputs.get_shape().as_list()[1:])
return tf.reshape(padded_inputs, new_shape)
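# Illustrative sketch (not part of the original module): if `padded_inputs` has shape
# [6, obs_dim] and `seq_lens` has 3 entries, max_seq_len is inferred as 6 // 3 = 2, so
#
#   out = add_time_dimension(padded_inputs, seq_lens)
#
# returns a tensor of shape [3, 2, obs_dim], i.e. [NUM_SEQUENCES, MAX_SEQ_LEN, ...].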
@DeveloperAPI
def chop_into_sequences(episode_ids,
unroll_ids,
agent_indices,
feature_columns,
state_columns,
max_seq_len,
dynamic_max=True,
shuffle=False,
_extra_padding=0):
"""Truncate and pad experiences into fixed-length sequences.
Arguments:
episode_ids (list): List of episode ids for each step.
unroll_ids (list): List of identifiers for the sample batch. This is
used to make sure sequences are cut between sample batches.
agent_indices (list): List of agent ids for each step. Note that this
has to be combined with episode_ids for uniqueness.
feature_columns (list): List of arrays containing features.
state_columns (list): List of arrays containing LSTM state values.
max_seq_len (int): Max length of sequences before truncation.
dynamic_max (bool): Whether to dynamically shrink the max seq len.
For example, if max len is 20 and the actual max seq len in the
data is 7, it will be shrunk to 7.
shuffle (bool): Whether to shuffle the sequence outputs.
_extra_padding (int): Add extra padding to the end of sequences.
Returns:
f_pad (list): Padded feature columns. These will be of shape
[NUM_SEQUENCES * MAX_SEQ_LEN, ...].
s_init (list): Initial states for each sequence, of shape
[NUM_SEQUENCES, ...].
seq_lens (list): List of sequence lengths, of shape [NUM_SEQUENCES].
Examples:
>>> f_pad, s_init, seq_lens = chop_into_sequences(
episode_ids=[1, 1, 5, 5, 5, 5],
unroll_ids=[4, 4, 4, 4, 4, 4],
agent_indices=[0, 0, 0, 0, 0, 0],
feature_columns=[[4, 4, 8, 8, 8, 8],
[1, 1, 0, 1, 1, 0]],
state_columns=[[4, 5, 4, 5, 5, 5]],
max_seq_len=3)
>>> print(f_pad)
[[4, 4, 0, 8, 8, 8, 8, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 0]]
>>> print(s_init)
[[4, 4, 5]]
>>> print(seq_lens)
[2, 3, 1]
"""
prev_id = None
seq_lens = []
seq_len = 0
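    # Build one comparable key per timestep from the episode id, agent index and the
    # unroll id (shifted into the high bits), so that a new sequence starts whenever
    # any of them changes (descriptive comment added for clarity).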
unique_ids = np.add(
np.add(episode_ids, agent_indices),
np.array(unroll_ids) << 32)
for uid in unique_ids:
if (prev_id is not None and uid != prev_id) or \
seq_len >= max_seq_len:
seq_lens.append(seq_len)
seq_len = 0
seq_len += 1
prev_id = uid
if seq_len:
seq_lens.append(seq_len)
assert sum(seq_lens) == len(unique_ids)
seq_lens = np.array(seq_lens)
# Dynamically shrink max len as needed to optimize memory usage
if dynamic_max:
max_seq_len = max(seq_lens) + _extra_padding
feature_sequences = []
for f in feature_columns:
f = np.array(f)
f_pad = np.zeros((len(seq_lens) * max_seq_len, ) + np.shape(f)[1:])
seq_base = 0
i = 0
for l in seq_lens:
for seq_offset in range(l):
f_pad[seq_base + seq_offset] = f[i]
i += 1
seq_base += max_seq_len
assert i == len(unique_ids), f
feature_sequences.append(f_pad)
initial_states = []
for s in state_columns:
s = np.array(s)
s_init = []
i = 0
for l in seq_lens:
s_init.append(s[i])
i += l
initial_states.append(np.array(s_init))
if shuffle:
permutation = np.random.permutation(len(seq_lens))
for i, f in enumerate(feature_sequences):
orig_shape = f.shape
f = np.reshape(f, (len(seq_lens), -1) + f.shape[1:])
f = f[permutation]
f = np.reshape(f, orig_shape)
feature_sequences[i] = f
for i, s in enumerate(initial_states):
s = s[permutation]
initial_states[i] = s
seq_lens = seq_lens[permutation]
return feature_sequences, initial_states, seq_lens
| {
"content_hash": "f83002f16e53cca7390fe06c106a5677",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 78,
"avg_line_length": 37.37888198757764,
"alnum_prop": 0.5744433366566966,
"repo_name": "stephanie-wang/ray",
"id": "6f4c97c93086f9d37e0091f79ef6514a45330509",
"size": "6018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/policy/rnn_sequencing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "29882"
},
{
"name": "C++",
"bytes": "2149909"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Dockerfile",
"bytes": "5499"
},
{
"name": "Go",
"bytes": "28481"
},
{
"name": "HTML",
"bytes": "30435"
},
{
"name": "Java",
"bytes": "738348"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "4058862"
},
{
"name": "Shell",
"bytes": "88736"
},
{
"name": "Starlark",
"bytes": "121207"
},
{
"name": "TypeScript",
"bytes": "64161"
}
],
"symlink_target": ""
} |
import sys
sys.dont_write_bytecode=True
def upcase(context, str):
return str.upper()
extensions = {('http://example.org/e', 'upcase'): upcase}
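# Hedged usage sketch (not part of the original example; assumes lxml as the XSLT
# engine and hypothetical style.xsl/input.xml files):
#
#   from lxml import etree
#   transform = etree.XSLT(etree.parse("style.xsl"), extensions=extensions)
#   result = transform(etree.parse("input.xml"))
#
# Within the stylesheet, declare xmlns:e="http://example.org/e" and call e:upcase().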
| {
"content_hash": "7aa541925616c31cfe29f899eef2c267",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 57,
"avg_line_length": 21.285714285714285,
"alnum_prop": 0.6912751677852349,
"repo_name": "hisashim/pyxsltp",
"id": "f395518dd036cb58ea133367683f2bc0fc49dd8d",
"size": "168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/examples/upcase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3152"
},
{
"name": "Python",
"bytes": "8365"
},
{
"name": "Shell",
"bytes": "2094"
},
{
"name": "XSLT",
"bytes": "1778"
}
],
"symlink_target": ""
} |
"""
===============================
aiohttp_middlewares.annotations
===============================
Type annotation shortcuts for ``aiohttp_middlewares`` library.
"""
from typing import (
Any,
Awaitable,
Callable,
Collection,
Dict,
Pattern,
Type,
Union,
)
from aiohttp import web
from aiohttp.web_middlewares import _Middleware as Middleware
from yarl import URL
# Make flake8 happy
(Middleware,)
DictStrAny = Dict[str, Any]
DictStrStr = Dict[str, str]
ExceptionType = Type[Exception]
# FIXME: Drop the Handler type definition once `aiohttp-middlewares` requires
# `aiohttp>=3.8.0` or later
Handler = Callable[[web.Request], Awaitable[web.StreamResponse]]
IntCollection = Collection[int]
StrCollection = Collection[str]
Url = Union[str, Pattern[str], URL]
UrlCollection = Collection[Url]
UrlDict = Dict[Url, StrCollection]
Urls = Union[UrlCollection, UrlDict]
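# Hedged usage sketch (not part of the original module): these aliases are intended
# for annotating middleware callables, e.g.
#
#   async def example_middleware(request: web.Request, handler: Handler) -> web.StreamResponse:
#       return await handler(request)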
| {
"content_hash": "dd523d59f4c245cdbb364a74224ec6d6",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 78,
"avg_line_length": 20.906976744186046,
"alnum_prop": 0.6829810901001112,
"repo_name": "playpauseandstop/aiohttp-middlewares",
"id": "506b4ccc3afeb5f68d0f40df79bc33bd69cac241",
"size": "899",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/aiohttp_middlewares/annotations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3649"
},
{
"name": "Python",
"bytes": "55620"
},
{
"name": "Shell",
"bytes": "603"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from django.conf import settings
from django.db.models import Q
from django.shortcuts import redirect
from django.utils.encoding import smart_str
import commonware.log
import jingo
from tower import ugettext as _
from mobility.decorators import mobile_template
import amo
import bandwagon.views
import browse.views
from addons.models import Addon, Category
from amo.decorators import json_view
from amo.helpers import locale_url, urlparams
from amo.urlresolvers import reverse
from amo.utils import MenuItem, sorted_groupby
from versions.compare import dict_from_int, version_int
from webapps.models import Webapp
from . import forms
from .client import (Client as SearchClient, SearchError,
CollectionsClient, PersonasClient, sphinx)
from .forms import SearchForm, SecondarySearchForm, ESSearchForm
DEFAULT_NUM_RESULTS = 20
log = commonware.log.getLogger('z.search')
def _get_versions(request, versions, version):
compats = []
url = request.get_full_path()
c = MenuItem()
(c.text, c.url) = (_('All Versions'), urlparams(url, lver=None, page=None))
if not version or version == 'any':
c.selected = True
compats.append(c)
seen = {}
exclude = request.APP.__dict__.get('exclude_versions', [])
versions.sort(reverse=True)
for v in versions:
# v is a version_int so we can get the major and minor:
v = dict_from_int(v)
if v['minor1'] == 99:
text = '%s.*' % v['major']
v_float = float('%s.99' % v['major'])
else:
text = '%s.%s' % (v['major'], v['minor1'])
v_float = float(text)
if seen.get(text):
continue
seen[text] = 1
if v_float < request.APP.min_display_version or v_float in exclude:
continue
c = MenuItem()
c.text = text
c.url = urlparams(url, lver=c.text, page=None)
if c.text == version:
c.selected = True
compats.append(c)
return compats
def _get_categories(request, categories, addon_type=None, category=None):
items = []
url = request.get_full_path()
i = MenuItem()
(i.text, i.url) = (_('All'), urlparams(url, atype=None, cat=None,
page=None))
if not addon_type and not category:
i.selected = True
items.append(i)
# Bucket the categories as addon_types so we can display them in a
# hierarchy.
bucket = defaultdict(list)
for cat in categories:
item = MenuItem()
(item.text, item.url) = (cat.name, urlparams(url, atype=None,
page=None, cat="%d,%d" % (cat.type, cat.id)))
if category == cat.id:
item.selected = True
bucket[cat.type].append(item)
for key in sorted(bucket):
children = bucket[key]
item = MenuItem()
item.children = children
(item.text, item.url) = (amo.ADDON_TYPES[key],
urlparams(url, atype=key, cat=None,
page=None))
if not category and addon_type == key:
item.selected = True
items.append(item)
return items
def _get_platforms(request, platforms, selected=None):
items = []
url = request.get_full_path()
if amo.PLATFORM_ALL.id in platforms:
platforms = amo.PLATFORMS.keys()
for platform in platforms:
if platform == amo.PLATFORM_ALL.id:
continue
item = MenuItem()
p = amo.PLATFORMS[platform]
(item.text, item.url) = (p.name,
urlparams(url, pid=(p.id or None), page=None))
if p.id == selected:
item.selected = True
items.append(item)
return items
def _get_tags(request, tags, selected):
items = []
url = request.get_full_path()
for tag in tags:
item = MenuItem()
(item.text, item.url) = (tag.tag_text.lower(),
urlparams(url, tag=tag.tag_text.encode('utf8').lower(),
page=None))
if tag.tag_text.lower() == selected:
item.selected = True
items.append(item)
return items
def _get_sort_menu(request, sort):
items = []
sorts = forms.sort_by
item = (None, _('Keyword Match'))
items.append(item)
for key, val in sorts:
if key == '':
continue
item = (key, val)
items.append(item)
return items
def _get_sorts(request, sort):
items = []
url = request.get_full_path()
sorts = forms.sort_by
item = MenuItem()
(item.text, item.url) = (_('Keyword Match'), urlparams(url, sort=None))
if not sort:
item.selected = True
items.append(item)
for key, val in sorts:
if key == '':
continue
item = MenuItem()
(item.text, item.url) = (val, urlparams(url, sort=key, page=None))
if sort == key:
item.selected = True
items.append(item)
return items
def _personas(request):
"""Handle the request for persona searches."""
form = SecondarySearchForm(request.GET)
if not form.is_valid():
log.error(form.errors)
query = form.data.get('q', '')
search_opts = {}
search_opts['limit'] = form.cleaned_data.get('pp', DEFAULT_NUM_RESULTS)
page = form.cleaned_data.get('page') or 1
search_opts['offset'] = (page - 1) * search_opts['limit']
try:
results = PersonasClient().query(query, **search_opts)
except SearchError:
return jingo.render(request, 'search/down.html', {}, status=503)
pager = amo.utils.paginate(request, results, search_opts['limit'])
categories, filter, _, _ = browse.views.personas_listing(request)
c = dict(pager=pager, form=form, categories=categories, query=query,
filter=filter)
return jingo.render(request, 'search/personas.html', c)
def _collections(request):
"""Handle the request for collections."""
form = SecondarySearchForm(request.GET)
form.is_valid()
query = form.cleaned_data.get('q', '')
search_opts = {}
search_opts['limit'] = form.cleaned_data.get('pp', DEFAULT_NUM_RESULTS)
page = form.cleaned_data.get('page') or 1
search_opts['offset'] = (page - 1) * search_opts['limit']
search_opts['sort'] = form.cleaned_data.get('sortby')
try:
results = CollectionsClient().query(query, **search_opts)
except SearchError:
return jingo.render(request, 'search/down.html', {}, status=503)
pager = amo.utils.paginate(request, results, search_opts['limit'])
c = dict(pager=pager, form=form, query=query, opts=search_opts,
filter=bandwagon.views.get_filter(request))
return jingo.render(request, 'search/collections.html', c)
class BaseAjaxSearch(object):
"""Generates a list of dictionaries of add-on objects based on
ID or name matches. Safe to be served to a JSON-friendly view.
Sample output:
[
{
"id": 1865,
"label": "Adblock Plus",
"url": "http://path/to/details/page",
"icon": "http://path/to/icon",
},
...
]
"""
def __init__(self, request, excluded_ids=[]):
self.request = request
self.excluded_ids = excluded_ids
self.types = getattr(self, 'types', amo.ADDON_SEARCH_TYPES)
self.limit = 10
self.key = 'q' # Name of search field.
# Mapping of JSON key => add-on property.
default_fields = {
'id': 'id',
'name': 'name',
'url': 'get_url_path',
'icon': 'icon_url'
}
self.fields = getattr(self, 'fields', default_fields)
self.items = self.build_list()
def queryset(self):
"""Get items based on ID or search by name."""
results = []
if self.key in self.request.GET:
q = self.request.GET[self.key]
if q.isdigit() or (not q.isdigit() and len(q) > 2):
if q.isdigit():
qs = Addon.objects.filter(id=int(q),
disabled_by_user=False)
else:
# Oh, how I wish I could elastically exclude terms.
qs = (Addon.search().query(or_=name_only_query(q))
.filter(is_disabled=False))
results = qs.filter(type__in=self.types,
status__in=amo.REVIEWED_STATUSES)
return results
def build_list(self):
"""Populate a list of dictionaries based on label => property."""
results = []
for item in self.queryset()[:self.limit]:
if item.id in self.excluded_ids:
continue
d = {}
for key, prop in self.fields.iteritems():
val = getattr(item, prop, '')
if callable(val):
val = val()
d[key] = unicode(val)
results.append(d)
return results
class SuggestionsAjax(BaseAjaxSearch):
# No personas. No webapps.
types = [amo.ADDON_ANY, amo.ADDON_EXTENSION, amo.ADDON_THEME,
amo.ADDON_DICT, amo.ADDON_SEARCH, amo.ADDON_LPAPP]
@json_view
def ajax_search(request):
"""This is currently used only to return add-ons for populating a
new collection. Personas are included by default, so this can be
used elsewhere.
"""
return BaseAjaxSearch(request).items
@json_view
def ajax_search_suggestions(request):
# TODO(cvan): Tests will come when I know this is what fligtar wants.
results = []
q = request.GET.get('q')
if q and (q.isdigit() or (not q.isdigit() and len(q) > 2)):
q_ = q.lower()
# Applications.
for a in amo.APP_USAGE:
if q_ in unicode(a.pretty).lower():
results.append({
'id': a.id,
'name': _(u'{0} Add-ons').format(a.pretty),
'url': locale_url(a.short),
'cls': 'app ' + a.short
})
# Categories.
cats = (Category.objects
.filter(Q(application=request.APP.id) |
Q(type=amo.ADDON_SEARCH))
.exclude(type=amo.ADDON_WEBAPP))
for c in cats:
if not c.name:
continue
name_ = unicode(c.name).lower()
word_matches = [w for w in q_.split() if name_ in w]
if q_ in name_ or word_matches:
results.append({
'id': c.id,
'name': unicode(c.name),
'url': c.get_url_path(),
'cls': 'cat'
})
# Add-ons.
results += SuggestionsAjax(request).items
return results
def name_only_query(q):
return dict(name__text={'query': q, 'boost': 3, 'analyzer': 'standard'},
name__fuzzy={'value': q, 'boost': 2, 'prefix_length': 4},
name__startswith={'value': q, 'boost': 1.5})
def name_query(q):
# 1. Prefer text matches first, using the standard text analyzer (boost=3).
# 2. Then try fuzzy matches ("fire bug" => firebug) (boost=2).
# 3. Then look for the query as a prefix of a name (boost=1.5).
# 4. Look for phrase matches inside the summary (boost=0.8).
# 5. Look for phrase matches inside the description (boost=0.3).
more = dict(summary__text={'query': q, 'boost': 0.8, 'type': 'phrase'},
description__text={'query': q, 'boost': 0.3, 'type': 'phrase'})
return dict(more, **name_only_query(q))
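# Illustrative sketch (not part of the original module): name_query('firebug') returns
# a single dict merging all five clauses, roughly
#
#   {'name__text': {'query': 'firebug', 'boost': 3, 'analyzer': 'standard'},
#    'name__fuzzy': {'value': 'firebug', 'boost': 2, 'prefix_length': 4},
#    'name__startswith': {'value': 'firebug', 'boost': 1.5},
#    'summary__text': {'query': 'firebug', 'boost': 0.8, 'type': 'phrase'},
#    'description__text': {'query': 'firebug', 'boost': 0.3, 'type': 'phrase'}}
#
# which the views below pass to the search query builder as .query(or_=name_query(q)).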
@mobile_template('search/{mobile/}results.html')
def app_search(request, template=None):
form = ESSearchForm(request.GET or {}, type=amo.ADDON_WEBAPP)
form.is_valid() # Let the form try to clean data.
query = form.cleaned_data
qs = (Webapp.search().query(or_=name_query(query['q']))
.filter(type=amo.ADDON_WEBAPP, status=amo.STATUS_PUBLIC,
is_disabled=False)
.facet(tags={'terms': {'field': 'tag'}},
categories={'terms': {'field': 'category', 'size': 100}}))
if query.get('tag'):
qs = qs.filter(tag=query['tag'])
if query.get('cat'):
qs = qs.filter(category=query['cat'])
if query.get('sort'):
mapping = {'downloads': '-weekly_downloads',
'rating': '-bayesian_rating',
'created': '-created',
'name': '-name_sort',
'hotness': '-hotness'}
qs = qs.order_by(mapping[query['sort']])
pager = amo.utils.paginate(request, qs)
facets = pager.object_list.facets
ctx = {
'is_pjax': request.META.get('HTTP_X_PJAX'),
'pager': pager,
'query': query,
'form': form,
'sorting': sort_sidebar(request, query, form),
'sort_opts': form.fields['sort'].choices,
}
if not ctx['is_pjax']:
ctx.update({
'categories': category_sidebar(request, query, facets),
'tags': tag_sidebar(request, query, facets),
})
return jingo.render(request, template, ctx)
@mobile_template('search/{mobile/}results.html')
def search(request, tag_name=None, template=None):
APP = request.APP
types = (amo.ADDON_EXTENSION, amo.ADDON_THEME, amo.ADDON_DICT,
amo.ADDON_SEARCH, amo.ADDON_LPAPP)
fixed = fix_search_query(request.GET)
if fixed is not request.GET:
return redirect(urlparams(request.path, **fixed), permanent=True)
form = ESSearchForm(request.GET or {})
form.is_valid() # Let the form try to clean data.
category = request.GET.get('cat')
query = form.cleaned_data
if category == 'collections':
return _collections(request)
elif category == 'personas' or query.get('atype') == amo.ADDON_PERSONA:
return _personas(request)
sort, extra_sort = split_choices(form.fields['sort'].choices, 'created')
qs = (Addon.search()
.filter(status__in=amo.REVIEWED_STATUSES, is_disabled=False,
app=APP.id)
.facet(tags={'terms': {'field': 'tag'}},
platforms={'terms': {'field': 'platform'}},
appversions={'terms':
{'field': 'appversion.%s.max' % APP.id}},
categories={'terms': {'field': 'category', 'size': 100}}))
if query.get('q'):
qs = qs.query(or_=name_query(query['q']))
if tag_name or query.get('tag'):
qs = qs.filter(tag=tag_name or query['tag'])
if query.get('platform') and query['platform'] in amo.PLATFORM_DICT:
ps = (amo.PLATFORM_DICT[query['platform']].id, amo.PLATFORM_ALL.id)
qs = qs.filter(platform__in=ps)
if query.get('appver'):
# Get a min version less than X.0.
low = version_int(query['appver'])
# Get a max version greater than X.0a.
high = version_int(query['appver'] + 'a')
qs = qs.filter(**{'appversion.%s.max__gte' % APP.id: high,
'appversion.%s.min__lte' % APP.id: low})
if query.get('atype') and query['atype'] in amo.ADDON_TYPES:
qs = qs.filter(type=query['atype'])
if query['atype'] == amo.ADDON_SEARCH:
# Search add-ons should not be searched by ADU, so replace 'Users'
# sort with 'Weekly Downloads'.
sort[1] = extra_sort[1]
del extra_sort[1]
else:
qs = qs.filter(type__in=types)
if query.get('cat'):
qs = qs.filter(category=query['cat'])
if query.get('sort'):
mapping = {'users': '-average_daily_users',
'rating': '-bayesian_rating',
'created': '-created',
'name': 'name_sort',
'downloads': '-weekly_downloads',
'updated': '-last_updated',
'hotness': '-hotness'}
qs = qs.order_by(mapping[query['sort']])
elif not query.get('q'):
# Sort by weekly downloads if there was no query so we get predictable
# results.
qs = qs.order_by('-weekly_downloads')
pager = amo.utils.paginate(request, qs)
ctx = {
'is_pjax': request.META.get('HTTP_X_PJAX'),
'pager': pager,
'query': query,
'form': form,
'sort_opts': sort,
'extra_sort_opts': extra_sort,
'sorting': sort_sidebar(request, query, form),
}
if not ctx['is_pjax']:
facets = pager.object_list.facets
ctx.update({
'categories': category_sidebar(request, query, facets),
'platforms': platform_sidebar(request, query, facets),
'versions': version_sidebar(request, query, facets),
'tags': tag_sidebar(request, query, facets),
})
return jingo.render(request, template, ctx)
class FacetLink(object):
def __init__(self, text, urlparams, selected=False, children=None):
self.text = text
self.urlparams = urlparams
self.selected = selected
self.children = children or []
def sort_sidebar(request, query, form):
sort = query.get('sort')
return [FacetLink(text, dict(sort=key), key == sort)
for key, text in form.fields['sort'].choices]
def category_sidebar(request, query, facets):
APP = request.APP
qatype, qcat = query.get('atype'), query.get('cat')
cats = [f['term'] for f in facets['categories']]
categories = (Category.objects.filter(id__in=cats)
# Search categories don't have an application.
.filter(Q(application=APP.id) | Q(type=amo.ADDON_SEARCH)))
if qatype in amo.ADDON_TYPES:
categories = categories.filter(type=qatype)
categories = [(atype, sorted(cats, key=lambda x: x.name))
for atype, cats in sorted_groupby(categories, 'type')]
rv = [FacetLink(_(u'All Add-ons'), dict(atype=None, cat=None), not qatype)]
for addon_type, cats in categories:
link = FacetLink(amo.ADDON_TYPES[addon_type],
dict(atype=addon_type, cat=None),
addon_type == qatype and not qcat)
link.children = [FacetLink(c.name, dict(atype=addon_type, cat=c.id),
c.id == qcat) for c in cats]
rv.append(link)
return rv
def version_sidebar(request, query, facets):
appver = query.get('appver')
app = unicode(request.APP.pretty)
exclude_versions = getattr(request.APP, 'exclude_versions', [])
# L10n: {0} is an application, such as Firefox. This means "any version of
# Firefox."
rv = [FacetLink(_(u'Any {0}').format(app), dict(appver=None), not appver)]
vs = [dict_from_int(f['term']) for f in facets['appversions']]
vs = set((v['major'], v['minor1'] if v['minor1'] != 99 else 0)
for v in vs)
versions = ['%s.%s' % v for v in sorted(vs, reverse=True)]
for version, floated in zip(versions, map(float, versions)):
if (floated not in exclude_versions
and floated > request.APP.min_display_version):
rv.append(FacetLink('%s %s' % (app, version), dict(appver=version),
appver == version))
return rv
def platform_sidebar(request, query, facets):
qplatform = query.get('platform')
app_platforms = request.APP.platforms.values()
ALL = app_platforms[0]
platforms = [facet['term'] for facet in facets['platforms']
if facet['term'] != ALL.id]
all_selected = not qplatform or qplatform == ALL.shortname
rv = [FacetLink(_(u'All Systems'), dict(platform=ALL.shortname),
all_selected)]
for platform in app_platforms[1:]:
if platform.id in platforms:
rv.append(FacetLink(platform.name,
dict(platform=platform.shortname),
platform.shortname == qplatform))
return rv
def tag_sidebar(request, query, facets):
qtag = query.get('tag')
rv = [FacetLink(_(u'All Tags'), dict(tag=None), not qtag)]
tags = [facet['term'] for facet in facets['tags']]
rv += [FacetLink(tag, dict(tag=tag), tag == qtag) for tag in tags]
return rv
def fix_search_query(query):
rv = dict((smart_str(k), v) for k, v in query.items())
changed = False
# Change old keys to new names.
keys = {
'lver': 'appver',
'pid': 'platform',
}
for old, new in keys.items():
if old in query:
rv[new] = rv.pop(old)
changed = True
# Change old parameter values to new values.
params = {
'sort': {
'newest': 'updated',
'weeklydownloads': 'users',
'averagerating': 'rating',
},
'platform': dict((str(p.id), p.shortname)
for p in amo.PLATFORMS.values())
}
for key, fixes in params.items():
if key in rv and rv[key] in fixes:
rv[key] = fixes[rv[key]]
changed = True
return rv if changed else query
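# Illustrative sketch (not part of the original module): a legacy querystring such as
# {'lver': '4.0', 'sort': 'newest'} is rewritten to {'appver': '4.0', 'sort': 'updated'},
# and the caller (see search() above) issues a permanent redirect to the fixed URL.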
def split_choices(choices, split):
"""Split a list of [(key, title)] pairs after key == split."""
index = [idx for idx, (key, title) in enumerate(choices)
if key == split]
if index:
index = index[0] + 1
return choices[:index], choices[index:]
else:
return choices, []
| {
"content_hash": "f55217d098fae317f1208a59b1d69596",
"timestamp": "",
"source": "github",
"line_count": 647,
"max_line_length": 79,
"avg_line_length": 33.19938176197836,
"alnum_prop": 0.5625698324022347,
"repo_name": "jbalogh/zamboni",
"id": "e7c5681371eaa6e996277ceca7d7cee5d3057aad",
"size": "21480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/search/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4145"
},
{
"name": "JavaScript",
"bytes": "1553612"
},
{
"name": "Python",
"bytes": "2860649"
},
{
"name": "Shell",
"bytes": "8095"
}
],
"symlink_target": ""
} |
import jsonschema
from unittest.mock import MagicMock
from unittest.mock import Mock
from unittest.mock import patch
from testtools.matchers import Is, Equals
from trove.cluster import models
from trove.cluster.models import Cluster, DBCluster
from trove.cluster.service import ClusterController
from trove.cluster.tasks import ClusterTasks
from trove.cluster import views
import trove.common.cfg as cfg
from trove.common import exception
from trove.common.strategies.cluster import strategy
from trove.common import utils
from trove.datastore import models as datastore_models
from trove.tests.unittests import trove_testtools
class TestClusterController(trove_testtools.TestCase):
def setUp(self):
super(TestClusterController, self).setUp()
self.controller = ClusterController()
self.locality = 'anti-affinity'
instances = [
{
"flavorRef": "7",
"volume": {
"size": 1
},
"availability_zone": "az",
"nics": [
{"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"}
]
}
] * 5
self.cluster = {
"cluster": {
"name": "products",
"datastore": {
"type": "mongodb",
"version": "2.4.10"
},
"instances": instances,
"locality": self.locality,
}
}
self.add_shard = {
"add_shard": {}
}
self.grow = {
"grow": [
{"flavorRef": "7"},
]
}
self.shrink = {
"shrink": [
{"id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"},
]
}
self.upgrade = {
"upgrade": {
"datastore_version": "2.4.10"
}
}
def test_get_schema_create(self):
schema = self.controller.get_schema('create', self.cluster)
self.assertIsNotNone(schema)
self.assertIn('cluster', schema['properties'])
def test_get_schema_action_add_shard(self):
schema = self.controller.get_schema('action', self.add_shard)
self.assertIsNotNone(schema)
self.assertIn('add_shard', schema['properties'])
def test_get_schema_action_grow(self):
schema = self.controller.get_schema('action', self.grow)
self.assertIsNotNone(schema)
self.assertIn('grow', schema['properties'])
def test_get_schema_action_shrink(self):
schema = self.controller.get_schema('action', self.shrink)
self.assertIsNotNone(schema)
self.assertIn('shrink', schema['properties'])
def test_get_schema_action_upgrade(self):
schema = self.controller.get_schema('action', self.upgrade)
self.assertIsNotNone(schema)
self.assertIn('upgrade', schema['properties'])
def test_get_schema_action_invalid(self):
schema = self.controller.get_schema('action', {'wow': {}})
self.assertIsNotNone(schema)
self.assertThat(len(schema.keys()), Is(0))
def test_validate_create(self):
body = self.cluster
schema = self.controller.get_schema('create', body)
validator = jsonschema.Draft4Validator(schema)
self.assertTrue(validator.is_valid(body))
def test_validate_add_shard(self):
body = self.add_shard
schema = self.controller.get_schema('action', body)
validator = jsonschema.Draft4Validator(schema)
self.assertTrue(validator.is_valid(body))
def test_validate_grow(self):
body = self.grow
schema = self.controller.get_schema('action', body)
validator = jsonschema.Draft4Validator(schema)
self.assertTrue(validator.is_valid(body))
def test_validate_shrink(self):
body = self.shrink
schema = self.controller.get_schema('action', body)
validator = jsonschema.Draft4Validator(schema)
self.assertTrue(validator.is_valid(body))
def test_validate_upgrade(self):
body = self.upgrade
schema = self.controller.get_schema('action', body)
validator = jsonschema.Draft4Validator(schema)
self.assertTrue(validator.is_valid(body))
def test_validate_create_blankname(self):
body = self.cluster
body['cluster']['name'] = " "
schema = self.controller.get_schema('create', body)
validator = jsonschema.Draft4Validator(schema)
self.assertFalse(validator.is_valid(body))
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
self.assertThat(len(errors), Is(1))
self.assertThat(errors[0].message,
Equals("' ' does not match '^.*[0-9a-zA-Z]+.*$'"))
def test_validate_create_blank_datastore(self):
body = self.cluster
body['cluster']['datastore']['type'] = ""
schema = self.controller.get_schema('create', body)
validator = jsonschema.Draft4Validator(schema)
self.assertFalse(validator.is_valid(body))
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
error_messages = [error.message for error in errors]
error_paths = [error.path.pop() for error in errors]
self.assertThat(len(errors), Is(2))
self.assertIn("'' is too short", error_messages)
self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages)
self.assertIn("type", error_paths)
def test_validate_create_bad_locality(self):
body = self.cluster
body['cluster']['locality'] = "$%^&"
schema = self.controller.get_schema('create', body)
validator = jsonschema.Draft4Validator(schema)
self.assertFalse(validator.is_valid(body))
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
error_messages = [error.message for error in errors]
error_paths = [error.path.pop() for error in errors]
self.assertThat(len(errors), Is(1))
self.assertIn("'$%^&' does not match '^.*[0-9a-zA-Z]+.*$'",
error_messages)
self.assertIn("locality", error_paths)
@patch.object(datastore_models, 'get_datastore_version')
def test_create_clusters_disabled(self, mock_get_datastore_version):
body = self.cluster
tenant_id = Mock()
context = trove_testtools.TroveTestContext(self)
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
datastore_version = Mock()
datastore_version.manager = 'mysql'
mock_get_datastore_version.return_value = (Mock(), datastore_version)
self.assertRaises(exception.ClusterDatastoreNotSupported,
self.controller.create,
req,
body,
tenant_id)
@patch.object(Cluster, 'create')
@patch.object(utils, 'get_id_from_href')
@patch.object(datastore_models, 'get_datastore_version')
def test_create_clusters(self,
mock_get_datastore_version,
mock_id_from_href,
mock_cluster_create):
body = self.cluster
tenant_id = Mock()
context = trove_testtools.TroveTestContext(self)
req = Mock()
req.environ = Mock()
req.environ.__getitem__ = Mock(return_value=context)
datastore_version = Mock()
datastore_version.manager = 'mongodb'
datastore = Mock()
mock_get_datastore_version.return_value = (datastore,
datastore_version)
instances = [
{
'volume_size': 1,
'volume_type': None,
'flavor_id': '1234',
'availability_zone': 'az',
'modules': None,
'region_name': None,
'nics': [
{'net-id': 'e89aa5fd-6b0a-436d-a75c-1545d34d5331'}
]
}
] * 5
mock_id_from_href.return_value = '1234'
mock_cluster = Mock()
mock_cluster.instances = []
mock_cluster.instances_without_server = []
mock_cluster.datastore_version.manager = 'mongodb'
mock_cluster_create.return_value = mock_cluster
self.controller.create(req, body, tenant_id)
mock_cluster_create.assert_called_with(context, 'products',
datastore, datastore_version,
instances, {},
self.locality, None, None)
@patch.object(Cluster, 'load')
def test_show_cluster(self,
mock_cluster_load):
tenant_id = Mock()
id = Mock()
context = trove_testtools.TroveTestContext(self)
req = Mock()
req.environ = Mock()
req.environ.__getitem__ = Mock(return_value=context)
mock_cluster = Mock()
mock_cluster.instances = []
mock_cluster.instances_without_server = []
mock_cluster.datastore_version.manager = 'mongodb'
mock_cluster_load.return_value = mock_cluster
mock_cluster.locality = self.locality
self.controller.show(req, tenant_id, id)
mock_cluster_load.assert_called_with(context, id)
@patch.object(Cluster, 'load')
@patch.object(Cluster, 'load_instance')
def test_show_cluster_instance(self,
mock_cluster_load_instance,
mock_cluster_load):
tenant_id = Mock()
cluster_id = Mock()
instance_id = Mock()
context = trove_testtools.TroveTestContext(self)
req = Mock()
req.environ = Mock()
req.environ.__getitem__ = Mock(return_value=context)
cluster = Mock()
mock_cluster_load.return_value = cluster
cluster.id = cluster_id
self.controller.show_instance(req, tenant_id, cluster_id, instance_id)
mock_cluster_load_instance.assert_called_with(context, cluster.id,
instance_id)
@patch.object(Cluster, 'load')
def test_delete_cluster(self, mock_cluster_load):
tenant_id = Mock()
cluster_id = Mock()
req = MagicMock()
cluster = Mock()
trove_testtools.patch_notifier(self)
mock_cluster_load.return_value = cluster
self.controller.delete(req, tenant_id, cluster_id)
cluster.delete.assert_called_with()
class TestClusterControllerWithStrategy(trove_testtools.TestCase):
def setUp(self):
super(TestClusterControllerWithStrategy, self).setUp()
self.controller = ClusterController()
self.cluster = {
"cluster": {
"name": "products",
"datastore": {
"type": "mongodb",
"version": "2.4.10"
},
"instances": [
{
"flavorRef": "7",
"volume": {
"size": 1
},
},
{
"flavorRef": "7",
"volume": {
"size": 1
},
},
{
"flavorRef": "7",
"volume": {
"size": 1
},
},
{
"flavorRef": "7",
"volume": {
"size": 1
},
},
{
"flavorRef": "7",
"volume": {
"size": 1
},
}
]
}
}
def tearDown(self):
super(TestClusterControllerWithStrategy, self).tearDown()
cfg.CONF.clear_override('cluster_support', group='mongodb')
cfg.CONF.clear_override('api_strategy', group='mongodb')
@patch.object(datastore_models, 'get_datastore_version')
@patch.object(models.Cluster, 'create')
def test_create_clusters_disabled(self,
mock_cluster_create,
mock_get_datastore_version):
cfg.CONF.set_override('cluster_support', False, group='mongodb')
body = self.cluster
tenant_id = Mock()
context = trove_testtools.TroveTestContext(self)
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
datastore_version = Mock()
datastore_version.manager = 'mongodb'
mock_get_datastore_version.return_value = (Mock(), datastore_version)
self.assertRaises(exception.TroveError, self.controller.create, req,
body, tenant_id)
@patch.object(views.ClusterView, 'data', return_value={})
@patch.object(datastore_models, 'get_datastore_version')
@patch.object(models.Cluster, 'create')
def test_create_clusters_enabled(self,
mock_cluster_create,
mock_get_datastore_version,
mock_cluster_view_data):
cfg.CONF.set_override('cluster_support', True, group='mongodb')
body = self.cluster
tenant_id = Mock()
context = trove_testtools.TroveTestContext(self)
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
datastore_version = Mock()
datastore_version.manager = 'mongodb'
mock_get_datastore_version.return_value = (Mock(), datastore_version)
mock_cluster = Mock()
mock_cluster.datastore_version.manager = 'mongodb'
mock_cluster_create.return_value = mock_cluster
self.controller.create(req, body, tenant_id)
@patch.object(models.Cluster, 'load')
def test_controller_action_multi_action(self,
mock_cluster_load):
body = {'do_stuff': {}, 'do_stuff2': {}}
tenant_id = Mock()
context = trove_testtools.TroveTestContext(self)
cluster_id = Mock()
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
cluster = Mock()
cluster.instances_without_server = [Mock()]
cluster.datastore_version.manager = 'test_dsv'
mock_cluster_load.return_value = cluster
self.assertRaisesRegex(exception.TroveError,
'should have exactly one action specified',
self.controller.action, req,
body, tenant_id, cluster_id)
@patch.object(models.Cluster, 'load')
def test_controller_action_no_strategy(self,
mock_cluster_load):
body = {'do_stuff2': {}}
tenant_id = Mock()
context = trove_testtools.TroveTestContext(self)
cluster_id = Mock()
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
db_info = DBCluster(ClusterTasks.NONE, id=cluster_id,
tenant_id=tenant_id)
cluster = Cluster(context, db_info, datastore='test_ds',
datastore_version='test_dsv')
mock_cluster_load.return_value = cluster
self.assertRaisesRegex(exception.TroveError,
'Action do_stuff2 not supported',
self.controller.action, req,
body, tenant_id, cluster_id)
@patch.object(strategy, 'load_api_strategy')
@patch.object(models.Cluster, 'load')
def test_controller_action_found(self,
mock_cluster_load,
mock_cluster_api_strategy):
body = {'grow': {}}
tenant_id = Mock()
context = trove_testtools.TroveTestContext(self)
cluster_id = 'test_uuid'
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
cluster = Mock()
cluster.instances_without_server = [Mock()]
cluster.datastore_version.manager = 'test_dsv'
mock_cluster_load.return_value = cluster
self.controller.action(req, body, tenant_id, cluster_id)
self.assertEqual(1, cluster.action.call_count)
| {
"content_hash": "3ab572368c80439fbd840815ffff62c6",
"timestamp": "",
"source": "github",
"line_count": 455,
"max_line_length": 79,
"avg_line_length": 37.03956043956044,
"alnum_prop": 0.5431673885955023,
"repo_name": "openstack/trove",
"id": "1d7453772b4f5835223bd33edf2f6516c2022ecc",
"size": "17494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trove/tests/unittests/cluster/test_cluster_controller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1166"
},
{
"name": "Python",
"bytes": "3667406"
},
{
"name": "Shell",
"bytes": "136049"
}
],
"symlink_target": ""
} |
project = "KumaROOT"
author = "Shota TAKAHASHI"
copyright = "2015 - 2022, Shota TAKAHASHI"
version = "1.6.0"
release = "1.6.0"
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx_rtd_theme",
"myst_parser",
]
templates_path = ["_templates"]
exclude_patterns = []
language = "ja"
# source_suffix = {
# '.rst': 'restructuredtext',
# '.md': 'markdown',
# }
# source_encoding = 'utf-8-sig'
numfig = True
# Custom settings for figure and table numbering
# The format strings are given as a dict
# Write the defaults first, then override them with custom values
numfig_format = {"figure": "Fig. %s", "table": "Table %s", "code-block": "Listing %s"}
numfig_format["figure"] = "図 %s"
numfig_format["table"] = "表 %s"
numfig_format["code-block"] = "コードサンプル %s"
# Scope of figure/table numbering
# 0: continuous numbering across all documents
# 1: numbered per section (x.1, x.2, x.3, ...)
# 2: numbered per subsection (x.x.1, x.x.2, x.x.3, ...)
# Default is 1
numfig_secnum_depth = 1
today_fmt = "%Y-%m-%d"
# -- Options for MyST Parser -------------------------------------------------
myst_enable_extensions = [
"amsmath",
"colon_fence",
"deflist",
"dollarmath",
"fieldlist",
"html_admonition",
"html_image",
# "linkify",
"replacements",
"smartquotes",
"strikethrough",
"substitution",
"tasklist",
]
# -- Options for Math -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-math
# math_number_all = False
# math_eqref_format = ""
# math_numfig = True
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = "sphinx_rtd_theme"
html_static_path = ["_static"]
html_theme_options = {
"analytics_id": "G-F2T33GE7N3", # Provided by Google in your dashboard
"prev_next_buttons_location": "both",
"style_external_links": True,
"style_nav_header_background": "darkorange",
}
html_logo = "./_static/quma.jpeg"
# -*- coding: utf-8 -*-
#
# KumaROOT documentation build configuration file, created by
# sphinx-quickstart on Sat Jul 11 17:44:03 2015.
#
#################################################
# Options for sphinx.ext.todo
##################################################
todo_include_todos = True
# htmlhelp_basename = 'KumaROOTdoc'
##################################################
## Options for LaTeX output
##################################################
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-latex-output
latex_engine = "lualatex"
latex_docclass = {
"howto": "ltjsreport",
"manual": "ltjsreport",
}
# latex_toplevel_sectioning = "chapter"
latex_elements = {
"papersize": "a4paper",
"pointsize": "12pt",
# "extraclassoptions": "tombow",
"preamble": "",
"polyglossia": "",
}
latex_elements["preamble"] += "\\setlength{\\footskip}{3\\zw}"
# latex_elements['preamble'] += '\\usepackage{pxjahyper}\n'
# latex_elements['preamble'] += '\\usepackage{graphics}\n'
# latex_elements['preamble'] += '\\hypersetup{bookmarksnumbered=true}\n'
# latex_elements['preamble'] += '\\hypersetup{bookmarksopen=true}\n'
# latex_elements['preamble'] += '\\hypersetup{bookmarksopenlevel=2}\n'
# latex_elements['preamble'] += '\\hypersetup{colorlinks=true}\n'
# latex_elements['preamble'] += '\\hypersetup{pdfpagemode=UseOutlines}\n'
# latex_elements['preamble'] += '\\renewcommand{\\familydefault}{\\sfdefault}\n'
# latex_elements['preamble'] += '\\renewcommand{\\kanjifamilydefault}{\\gtdefault}\n'
# latex_logo = "./_static/quma.jpeg"
# latex_use_parts = False
# latex_show_pagerefs = False
# latex_show_urls = False
# latex_appendices = []
# latex_domain_indices = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"https://docs.python.org/": None}
| {
"content_hash": "9e12b86feca143011d7d3c91ca46bb60",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 88,
"avg_line_length": 29.361702127659573,
"alnum_prop": 0.6007246376811595,
"repo_name": "shotakaha/kumaroot",
"id": "b327107f0fd0671463d51f24427d6536b2f2abc7",
"size": "4749",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "25723"
},
{
"name": "TeX",
"bytes": "27437"
}
],
"symlink_target": ""
} |
"""Simulation bootstrapper for contour plot"""
from formation_flight.formation import handlers as formation_handlers
from formation_flight.aircraft import handlers as aircraft_handlers
from formation_flight.aircraft import generators
from formation_flight.hub import builders
from formation_flight.hub import allocators
from formation_flight import visualization
from lib import sim, debug, sink
from lib.debug import print_line as p
from formation_flight import statistics
import config
import os
from lib.geo.point import Point
from lib.geo.waypoint import Waypoint
from formation_flight.aircraft.models import Aircraft
from lib.geo.route import Route
import numpy as np
hub = Waypoint('DUB')
hub.origins = [Waypoint('LHR')]
config.hubs = []
config.hubs.append(hub)
def execute():
for hub in config.hubs:
sim.init()
aircraft_handlers.init()
formation_handlers.init()
statistics.init()
visualization.init()
# Construct flight list
planes = [
Aircraft('FLT001', Route([Waypoint('LHR'), Waypoint('IAD')]), 0),
Aircraft('FLT001', Route([Waypoint('LHR'), Waypoint('IAD')]), 0),
Aircraft('FLT001', Route([Waypoint('LHR'), Waypoint('IAD')]), 0),
Aircraft('FLT002', Route([Waypoint('LHR'), Waypoint('JFK')]), 0),
#Aircraft('FLT003', Route([Waypoint('LHR'), Waypoint('SFO')]), 0),
Aircraft('FLT003', Route([Waypoint('LHR'), Waypoint('ORD')]), 0),
]
# Allocate hubs to flights
allocators.allocate(planes, config.hubs)
for flight in planes:
sim.events.append(sim.Event('aircraft-init', flight, 0))
sim.run()
        debug.print_dictionary(statistics.vars)
| {
"content_hash": "1dff05196fff846c79016ce5b4332d2d",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 78,
"avg_line_length": 29.728813559322035,
"alnum_prop": 0.6664766248574686,
"repo_name": "mauzeh/formation-flight",
"id": "fb1d5a334e13bd5d258381ec06f763f780991100",
"size": "1776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runs/singlehub/validation/run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "209702"
}
],
"symlink_target": ""
} |
import time
import unittest
import command
from command import CheckType
import config
import mle
import network_layer
import node
LEADER = 1
ROUTER1 = 2
class Cert_5_1_06_RemoveRouterId(unittest.TestCase):
def setUp(self):
self.simulator = config.create_default_simulator()
self.nodes = {}
for i in range(1, 3):
self.nodes[i] = node.Node(i, simulator=self.simulator)
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[ROUTER1].set_panid(0xface)
self.nodes[ROUTER1].set_mode('rsdn')
self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER1].enable_whitelist()
self.nodes[ROUTER1].set_router_selection_jitter(1)
def tearDown(self):
for node in list(self.nodes.values()):
node.stop()
node.destroy()
self.simulator.stop()
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
rloc16 = self.nodes[ROUTER1].get_addr16()
for addr in self.nodes[ROUTER1].get_addrs():
self.assertTrue(self.nodes[LEADER].ping(addr))
self.nodes[LEADER].release_router_id(rloc16 >> 10)
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
leader_messages = self.simulator.get_messages_sent_by(LEADER)
router1_messages = self.simulator.get_messages_sent_by(ROUTER1)
# 1 - All
leader_messages.next_mle_message(mle.CommandType.ADVERTISEMENT)
router1_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
leader_messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
router1_messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
leader_messages.next_mle_message(mle.CommandType.CHILD_ID_RESPONSE)
msg = router1_messages.next_coap_message("0.02")
msg.assertCoapMessageRequestUriPath("/a/as")
leader_messages.next_coap_message("2.04")
# 2 - N/A
# 3 - Router1
msg = router1_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
command.check_parent_request(msg, is_first_request=True)
msg = router1_messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST, sent_to_node=self.nodes[LEADER])
command.check_child_id_request(msg, tlv_request=CheckType.CONTAIN,
mle_frame_counter=CheckType.OPTIONAL, address_registration=CheckType.NOT_CONTAIN,
active_timestamp=CheckType.OPTIONAL, pending_timestamp=CheckType.OPTIONAL)
msg = router1_messages.next_coap_message(code="0.02")
command.check_address_solicit(msg, was_router=True)
# 4 - Router1
for addr in self.nodes[ROUTER1].get_addrs():
self.assertTrue(self.nodes[LEADER].ping(addr))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "423cb4ea7a597a91f552fdbf1cb71262",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 114,
"avg_line_length": 33.90526315789474,
"alnum_prop": 0.6584911518162061,
"repo_name": "erja-gp/openthread",
"id": "7646ec312864748e185400ab18d14f274986461a",
"size": "4825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/scripts/thread-cert/Cert_5_1_06_RemoveRouterId.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "15850"
},
{
"name": "C",
"bytes": "940119"
},
{
"name": "C#",
"bytes": "18077"
},
{
"name": "C++",
"bytes": "4306681"
},
{
"name": "Dockerfile",
"bytes": "6256"
},
{
"name": "M4",
"bytes": "63303"
},
{
"name": "Makefile",
"bytes": "133368"
},
{
"name": "Python",
"bytes": "2012919"
},
{
"name": "Ruby",
"bytes": "3397"
},
{
"name": "Shell",
"bytes": "74907"
}
],
"symlink_target": ""
} |
import re
import logging
from datetime import datetime, timedelta, date
from django.db import models
from django.db.models.query_utils import Q
from django.utils.translation import ugettext_lazy as _, ugettext
from django.utils.text import Truncator
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
from django.core.cache import cache
from django.utils.functional import cached_property
from django.conf import settings
from tagging.models import Tag, TaggedItem
from djangoratings.fields import RatingField
from committees.enums import CommitteeTypes
from events.models import Event
from links.models import Link
from mks.models import Knesset
from lobbyists.models import LobbyistCorporation
from itertools import groupby
from hebrew_numbers import gematria_to_int
from knesset_data_django.committees import members_extended
COMMITTEE_PROTOCOL_PAGINATE_BY = 120
logger = logging.getLogger("open-knesset.committees.models")
class Committee(models.Model):
name = models.CharField(max_length=256)
# comma separated list of names used as name aliases for harvesting
aliases = models.TextField(null=True, blank=True)
members = models.ManyToManyField('mks.Member', related_name='committees',
blank=True)
chairpersons = models.ManyToManyField('mks.Member',
related_name='chaired_committees',
blank=True)
replacements = models.ManyToManyField('mks.Member',
related_name='replacing_in_committees',
blank=True)
events = generic.GenericRelation(Event, content_type_field="which_type",
object_id_field="which_pk")
description = models.TextField(null=True, blank=True)
portal_knesset_broadcasts_url = models.URLField(max_length=1000,
blank=True)
type = models.CharField(max_length=10, default=CommitteeTypes.committee,
choices=CommitteeTypes.as_choices(),
db_index=True)
hide = models.BooleanField(default=False)
# Deprecated? In use? does not look in use
protocol_not_published = models.BooleanField(default=False)
knesset_id = models.IntegerField(null=True, blank=True)
knesset_type_id = models.IntegerField(null=True, blank=True)
knesset_parent_id = models.IntegerField(null=True, blank=True)
    # Deprecated? In use? does not look in use
last_scrape_time = models.DateTimeField(null=True, blank=True)
name_eng = models.CharField(max_length=256, null=True, blank=True)
name_arb = models.CharField(max_length=256, null=True, blank=True)
start_date = models.DateTimeField(null=True, blank=True)
end_date = models.DateTimeField(null=True, blank=True)
knesset_description = models.TextField(null=True, blank=True)
knesset_description_eng = models.TextField(null=True, blank=True)
knesset_description_arb = models.TextField(null=True, blank=True)
knesset_note = models.TextField(null=True, blank=True)
knesset_note_eng = models.TextField(null=True, blank=True)
knesset_portal_link = models.TextField(null=True, blank=True)
@property
def gender_presence(self):
        # returns a tuple of (female_presence, male_presence)
r = {'F': 0, 'M': 0}
for cm in self.meetings.all():
try:
results = groupby(cm.mks_attended.all(), lambda mk: mk.gender)
except ValueError:
continue
for i in results:
key, count = i[0], len(list(i[1]))
r[key] += count
return r['F'], r['M']
def __unicode__(self):
if self.type == 'plenum':
return "%s" % ugettext('Plenum')
else:
return "%s" % self.name
@models.permalink
def get_absolute_url(self):
if self.type == 'plenum':
return 'plenum', []
else:
return 'committee-detail', [str(self.id)]
@property
def annotations(self):
protocol_part_tn = ProtocolPart._meta.db_table
meeting_tn = CommitteeMeeting._meta.db_table
committee_tn = Committee._meta.db_table
annotation_tn = Annotation._meta.db_table
protocol_part_ct = ContentType.objects.get_for_model(ProtocolPart)
ret = Annotation.objects.select_related().filter(
content_type=protocol_part_ct)
return ret.extra(tables=[protocol_part_tn,
meeting_tn, committee_tn],
where=["%s.object_id=%s.id" % (
annotation_tn, protocol_part_tn),
"%s.meeting_id=%s.id" % (
protocol_part_tn, meeting_tn),
"%s.committee_id=%%s" % meeting_tn],
params=[self.id]).distinct()
def members_by_name(self, ids=None, current_only=False):
"""Return a queryset of all members, sorted by their name."""
members = members_extended(self, current_only=current_only, ids=ids)
return members.order_by('name')
def recent_meetings(self, limit=10, do_limit=True):
relevant_meetings = self.meetings.all().order_by('-date')
if do_limit:
more_available = relevant_meetings.count() > limit
return relevant_meetings[:limit], more_available
else:
return relevant_meetings
def future_meetings(self, limit=10, do_limit=True):
current_date = datetime.now()
relevant_events = self.events.filter(when__gt=current_date).order_by(
'when')
if do_limit:
more_available = relevant_events.count() > limit
return relevant_events[:limit], more_available
else:
return relevant_events
def protocol_not_yet_published_meetings(self, end_date, limit=10,
do_limit=True):
start_date = self.meetings.all().order_by(
'-date').first().date + timedelta(days=1) \
if self.meetings.count() > 0 \
else datetime.now()
relevant_events = self.events.filter(when__gt=start_date,
when__lte=end_date).order_by(
'-when')
if do_limit:
more_available = relevant_events.count() > limit
return relevant_events[:limit], more_available
else:
return relevant_events
not_header = re.compile(
r'(^אני )|((אלה|אלו|יבוא|מאלה|ייאמר|אומר|אומרת|נאמר|כך|הבאים|הבאות):$)|(\(.\))|(\(\d+\))|(\d\.)'.decode(
'utf8'))
def legitimate_header(line):
"""Returns true if 'line' looks like something should be a protocol part header"""
if re.match(r'^\<.*\>\W*$', line): # this is a <...> line.
return True
if not (line.strip().endswith(':')) or len(line) > 50 or not_header.search(
line):
return False
return True
class CommitteeMeetingManager(models.Manager):
def filter_and_order(self, *args, **kwargs):
qs = self.all()
# In dealing with 'tagged' we use an ugly workaround for the fact that generic relations
# don't work as expected with annotations.
# please read http://code.djangoproject.com/ticket/10461 before trying to change this code
if kwargs.get('tagged'):
if kwargs['tagged'] == ['false']:
qs = qs.exclude(tagged_items__isnull=False)
elif kwargs['tagged'] != ['all']:
qs = qs.filter(tagged_items__tag__name__in=kwargs['tagged'])
if kwargs.get('to_date'):
qs = qs.filter(time__lte=kwargs['to_date'] + timedelta(days=1))
if kwargs.get('from_date'):
qs = qs.filter(time__gte=kwargs['from_date'])
return qs.select_related('committee')
class CommitteesMeetingsOnlyManager(CommitteeMeetingManager):
def get_queryset(self):
return super(CommitteesMeetingsOnlyManager,
self).get_queryset().exclude(
committee__type=CommitteeTypes.plenum)
class CommitteeMeeting(models.Model):
committee = models.ForeignKey(Committee, related_name='meetings')
date_string = models.CharField(max_length=256)
date = models.DateField(db_index=True)
mks_attended = models.ManyToManyField('mks.Member',
related_name='committee_meetings')
votes_mentioned = models.ManyToManyField('laws.Vote',
related_name='committee_meetings',
blank=True)
protocol_text = models.TextField(null=True, blank=True)
# the date the protocol text was last downloaded and saved
protocol_text_update_date = models.DateField(blank=True, null=True)
# the date the protocol parts were last parsed and saved
protocol_parts_update_date = models.DateField(blank=True, null=True)
topics = models.TextField(null=True, blank=True)
src_url = models.URLField(max_length=1024, null=True, blank=True)
tagged_items = generic.GenericRelation(TaggedItem,
object_id_field="object_id",
content_type_field="content_type")
lobbyists_mentioned = models.ManyToManyField('lobbyists.Lobbyist',
related_name='committee_meetings',
blank=True)
lobbyist_corporations_mentioned = models.ManyToManyField(
'lobbyists.LobbyistCorporation',
related_name='committee_meetings', blank=True)
datetime = models.DateTimeField(db_index=True, null=True, blank=True)
knesset_id = models.IntegerField(null=True, blank=True)
objects = CommitteeMeetingManager()
committees_only = CommitteesMeetingsOnlyManager()
class Meta:
ordering = ('-date',)
verbose_name = _('Committee Meeting')
verbose_name_plural = _('Committee Meetings')
def title(self):
truncator = Truncator(self.topics)
return truncator.words(12)
def __unicode__(self):
cn = cache.get('committee_%d_name' % self.committee_id)
if not cn:
if self.committee.type == 'plenum':
cn = 'Plenum'
else:
cn = unicode(self.committee)
cache.set('committee_%d_name' % self.committee_id,
cn,
settings.LONG_CACHE_TIME)
if cn == 'Plenum':
return (u"%s" % (self.title())).replace(" ", u"\u00A0")
else:
return (u"%s - %s" % (cn,
self.title())).replace(" ", u"\u00A0")
@models.permalink
def get_absolute_url(self):
if self.committee.type == 'plenum':
return 'plenum-meeting', [str(self.id)]
else:
return 'committee-meeting', [str(self.id)]
def _get_tags(self):
tags = Tag.objects.get_for_object(self)
return tags
def _set_tags(self, tag_list):
Tag.objects.update_tags(self, tag_list)
tags = property(_get_tags, _set_tags)
def save(self, **kwargs):
super(CommitteeMeeting, self).save(**kwargs)
def create_protocol_parts(self, delete_existing=False, mks=None, mk_names=None):
from knesset_data_django.committees.meetings import create_protocol_parts
create_protocol_parts(self, delete_existing, mks, mk_names)
def redownload_protocol(self):
from knesset_data_django.committees.meetings import redownload_protocol
redownload_protocol(self)
def reparse_protocol(self, redownload=True, mks=None, mk_names=None):
from knesset_data_django.committees.meetings import reparse_protocol
reparse_protocol(self, redownload, mks, mk_names)
def update_from_dataservice(self, dataservice_object=None):
        # TODO: obviously broken, not sure what was here originally and where it moved
from committees.management.commands.scrape_committee_meetings import \
Command as ScrapeCommitteeMeetingCommand
from knesset_data.dataservice.committees import \
CommitteeMeeting as DataserviceCommitteeMeeting
if dataservice_object is None:
ds_meetings = [
ds_meeting for ds_meeting
in DataserviceCommitteeMeeting.get(self.committee.knesset_id,
self.date - timedelta(
days=1),
self.date + timedelta(
days=1))
if str(ds_meeting.id) == str(self.knesset_id)
]
if len(ds_meetings) != 1:
raise Exception(
                'could not find corresponding dataservice meeting')
dataservice_object = ds_meetings[0]
meeting_transformed = ScrapeCommitteeMeetingCommand().get_committee_meeting_fields_from_dataservice(
dataservice_object)
[setattr(self, k, v) for k, v in meeting_transformed.iteritems()]
self.save()
@property
def plenum_meeting_number(self):
res = None
parts = self.parts.filter(body__contains=u'ישיבה')
if parts.count() > 0:
r = re.search(u'ישיבה (.*)$', self.parts.filter(
body__contains=u'ישיבה').first().body)
if r:
res = gematria_to_int(r.groups()[0])
return res
def plenum_link_votes(self):
from laws.models import Vote
if self.plenum_meeting_number:
for vote in Vote.objects.filter(
meeting_number=self.plenum_meeting_number):
for part in self.parts.filter(header__contains=u'הצבעה'):
r = re.search(r' (\d+)$', part.header)
if r and vote.vote_number == int(r.groups()[0]):
url = part.get_absolute_url()
Link.objects.get_or_create(
object_pk=vote.pk,
content_type=ContentType.objects.get_for_model(
Vote),
url=url,
defaults={
'title': u'לדיון בישיבת המליאה'
}
)
def get_bg_material(self):
"""
returns any background material for the committee meeting, or [] if none
"""
import urllib2
from BeautifulSoup import BeautifulSoup
time = re.findall(r'(\d\d:\d\d)', self.date_string)[0]
date = self.date.strftime('%d/%m/%Y')
cid = self.committee.knesset_id
if cid is None: # missing this committee knesset id
return [] # can't get bg material
url = 'http://www.knesset.gov.il/agenda/heb/material.asp?c=%s&t=%s&d=%s' % (
cid, time, date)
data = urllib2.urlopen(url)
bg_links = []
if data.url == url: # if no bg material exists we get redirected to a different page
bgdata = BeautifulSoup(data.read()).findAll('a')
for i in bgdata:
bg_links.append(
{'url': 'http://www.knesset.gov.il' + i['href'],
'title': i.string})
return bg_links
@property
def bg_material(self):
return Link.objects.filter(object_pk=self.id,
content_type=ContentType.objects.get_for_model(
CommitteeMeeting).id)
def find_attending_members(self, mks=None, mk_names=None):
from knesset_data_django.committees.meetings import find_attending_members
find_attending_members(self, mks, mk_names)
@cached_property
def main_lobbyist_corporations_mentioned(self):
ret = []
for corporation in self.lobbyist_corporations_mentioned.all():
main_corporation = corporation.main_corporation
if main_corporation not in ret:
ret.append(main_corporation)
for lobbyist in self.main_lobbyists_mentioned:
latest_corporation = lobbyist.cached_data.get('latest_corporation')
if latest_corporation:
corporation = LobbyistCorporation.objects.get(
id=latest_corporation['id'])
if corporation not in ret and corporation.main_corporation == corporation:
ret.append(corporation)
return ret
@cached_property
def main_lobbyists_mentioned(self):
return self.lobbyists_mentioned.all()
class ProtocolPartManager(models.Manager):
def list(self):
return self.order_by("order")
class ProtocolPart(models.Model):
meeting = models.ForeignKey(CommitteeMeeting, related_name='parts')
order = models.IntegerField()
header = models.TextField(blank=True, null=True)
body = models.TextField(blank=True, null=True)
speaker = models.ForeignKey('persons.Person', blank=True, null=True,
related_name='protocol_parts')
objects = ProtocolPartManager()
type = models.TextField(blank=True, null=True, max_length=20)
annotatable = True
class Meta:
ordering = ('order', 'id')
def get_absolute_url(self):
if self.order == 1:
return self.meeting.get_absolute_url()
else:
page_num = 1 + (self.order - 1) / COMMITTEE_PROTOCOL_PAGINATE_BY
if page_num == 1: # this is on first page
return "%s#speech-%d-%d" % (self.meeting.get_absolute_url(),
self.meeting.id, self.order)
else:
return "%s?page=%d#speech-%d-%d" % (
self.meeting.get_absolute_url(),
page_num,
self.meeting.id, self.order)
def __unicode__(self):
return "%s %s: %s" % (self.meeting.committee.name, self.header,
self.body)
TOPIC_PUBLISHED, TOPIC_FLAGGED, TOPIC_REJECTED, \
TOPIC_ACCEPTED, TOPIC_APPEAL, TOPIC_DELETED = range(6)
PUBLIC_TOPIC_STATUS = (TOPIC_PUBLISHED, TOPIC_ACCEPTED)
class TopicManager(models.Manager):
''' '''
get_public = lambda self: self.filter(status__in=PUBLIC_TOPIC_STATUS)
by_rank = lambda self: self.extra(select={
'rank': '((100/%s*rating_score/(1+rating_votes+%s))+100)/2' % (
Topic.rating.range, Topic.rating.weight)
}).order_by('-rank')
def summary(self, order='-rank'):
return self.filter(status__in=PUBLIC_TOPIC_STATUS).extra(select={
'rank': '((100/%s*rating_score/(1+rating_votes+%s))+100)/2' % (
Topic.rating.range, Topic.rating.weight)
}).order_by(order)
class Topic(models.Model):
'''
Topic is used to hold the latest event about a topic and a committee
Fields:
title - the title
description - its description
created - the time a topic was first connected to a committee
modified - last time the status or the message was updated
editor - the user that entered the data
status - the current status
log - a text log that keeps text messages for status changes
committees - defined using a many to many from `Committee`
'''
creator = models.ForeignKey(User)
editors = models.ManyToManyField(User, related_name='editing_topics',
null=True, blank=True)
title = models.CharField(max_length=256,
verbose_name=_('Title'))
description = models.TextField(blank=True,
verbose_name=_('Description'))
status = models.IntegerField(choices=(
(TOPIC_PUBLISHED, _('published')),
(TOPIC_FLAGGED, _('flagged')),
(TOPIC_REJECTED, _('rejected')),
(TOPIC_ACCEPTED, _('accepted')),
(TOPIC_APPEAL, _('appeal')),
(TOPIC_DELETED, _('deleted')),
), default=TOPIC_PUBLISHED)
rating = RatingField(range=7, can_change_vote=True, allow_delete=True)
links = generic.GenericRelation(Link, content_type_field="content_type",
object_id_field="object_pk")
events = generic.GenericRelation(Event, content_type_field="which_type",
object_id_field="which_pk")
# no related name as `topics` is already defined in CommitteeMeeting as text
committees = models.ManyToManyField(Committee,
verbose_name=_('Committees'))
meetings = models.ManyToManyField(CommitteeMeeting, null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
log = models.TextField(default="", blank=True)
class Meta:
verbose_name = _('Topic')
verbose_name_plural = _('Topics')
@models.permalink
def get_absolute_url(self):
return 'topic-detail', [str(self.id)]
def __unicode__(self):
return "%s" % self.title
objects = TopicManager()
def set_status(self, status, message=''):
self.status = status
self.log = '\n'.join(
(u'%s: %s' % (self.get_status_display(), datetime.now()),
u'\t%s' % message,
self.log,)
)
self.save()
def can_edit(self, user):
return user.is_superuser or user == self.creator or \
user in self.editors.all()
from listeners import *
| {
"content_hash": "aed1ff0b82da61afac131fb146d1680f",
"timestamp": "",
"source": "github",
"line_count": 530,
"max_line_length": 108,
"avg_line_length": 41.3811320754717,
"alnum_prop": 0.5838956775487871,
"repo_name": "OriHoch/Open-Knesset",
"id": "b88b075cf2d0b48fa697a1cf6a2014dabe51d566",
"size": "22034",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "committees/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "350330"
},
{
"name": "HTML",
"bytes": "763338"
},
{
"name": "JavaScript",
"bytes": "220620"
},
{
"name": "Python",
"bytes": "4504481"
},
{
"name": "Shell",
"bytes": "383"
}
],
"symlink_target": ""
} |
import io
import os
import requests
from tacker import auth
import time
import zipfile
from oslo_config import cfg
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
def config_opts():
return [('connect_vnf_packages', VnfPackageRequest.OPTS),
('connect_grant', GrantRequest.OPTS)]
class UndefinedExternalSettingException(Exception):
pass
class FaliedDownloadContentException(Exception):
pass
class _Connect:
def __init__(self, retry_num=0, retry_wait=0, timeout=0):
self.retry_num = retry_num
self.retry_wait = retry_wait
self.timeout = timeout
def replace_placeholder_url(self, base_url, path, *params):
replace_placeholder_url = os.path.join(base_url, path)
try:
return replace_placeholder_url.format(*params)
except Exception:
return replace_placeholder_url
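    # Illustrative sketch (not part of the original module; the URL below is an
    # assumed example): with base_url 'http://nfvo.example/vnf_packages' and the
    # path template '{}/vnfd',
    #   _Connect().replace_placeholder_url(
    #       'http://nfvo.example/vnf_packages', '{}/vnfd', 'pkg-id')
    # returns 'http://nfvo.example/vnf_packages/pkg-id/vnfd'; if formatting
    # fails (e.g. the path has no placeholders), the joined URL is returned
    # unchanged.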
def request(self, *args, **kwargs):
return self.__request(
auth.auth_manager.get_auth_client().request,
*args,
timeout=self.timeout,
**kwargs)
def __request(self, request_function, *args, **kwargs):
response = None
for retry_cnt in range(self.retry_num + 1):
LOG.info("Connecting to <{ip}:{port}>, count=<{count}>".format(
ip=args[0], port=args[1], count=retry_cnt))
if 'headers' in kwargs:
LOG.debug("[REQ] HEADERS={}".format(kwargs['headers']))
if 'data' in kwargs:
LOG.debug("[REQ] BODY={}".format(kwargs['data']))
elif 'json' in kwargs:
LOG.debug("[REQ] BODY={}".format(kwargs['json']))
try:
response = request_function(*args, **kwargs)
if 200 <= response.status_code <= 299:
return response
LOG.error("Connected error. Failed http status=<{}>".format(
response.status_code))
except requests.exceptions.ConnectTimeout as e:
LOG.error("Connected error. details=<{}>".format(e))
if retry_cnt < self.retry_num:
time.sleep(self.retry_wait)
raise requests.exceptions.RequestException(response=response)
class VnfPackageRequest:
OPTS = [
cfg.StrOpt('base_url',
default=None,
help="vnf_packages base_url"),
cfg.ListOpt('pipeline',
default=None,
help="Get vnf_packages api pipeline"),
cfg.IntOpt('retry_num',
default=2,
help="Number of vnf_packages retry count"),
cfg.IntOpt('retry_wait',
default=30,
help="Number of vnf_packages retry wait"),
cfg.IntOpt('timeout',
default=20,
help="Number of vnf_packages connect timeout")
]
cfg.CONF.register_opts(OPTS, group='connect_vnf_packages')
_connector = _Connect(
cfg.CONF.connect_vnf_packages.retry_num,
cfg.CONF.connect_vnf_packages.retry_wait,
cfg.CONF.connect_vnf_packages.timeout)
@classmethod
def validate(cls):
"""Check config settings.
Raises:
UndefinedExternalSettingException: tacker.conf undefined setting.
"""
if (not cfg.CONF.connect_vnf_packages.base_url or
cfg.CONF.connect_vnf_packages.base_url.strip() == ''):
raise UndefinedExternalSettingException(
"Vnf package the external setting to 'base_url' undefined.")
@classmethod
def _write(cls, vnf_package_zip, response, filename=None):
def write_zip():
with zipfile.ZipFile(io.BytesIO(response.content)) as fin:
for info in fin.infolist():
vnf_package_zip.writestr(
info.filename, fin.read(info.filename))
def get_filename():
content_disposition = response.headers.get('Content-Disposition')
if not content_disposition:
return None
attribute = 'filename='
return content_disposition[content_disposition.find(
attribute) + len(attribute):]
if response.headers.get('Content-Type') == 'application/zip':
write_zip()
return
filename = get_filename() if (not filename) else filename
if filename:
vnf_package_zip.writestr(filename, response.content)
return
raise FaliedDownloadContentException(
"Failed response content, vnf_package_zip={}".format(
vnf_package_zip))
@classmethod
def download_vnf_packages(cls, vnf_package_id, artifact_paths=None):
"""Get vnf packages from the nfvo.
Args:
vnf_package_id (UUID): VNF Package ID
            artifact_paths (list, optional): artifact paths. Defaults to [].
        Returns:
            io.BytesIO: zip archive for vnf packages content.
        Raises:
            tacker.nfvo.nfvo_client.UndefinedExternalSettingException:
                tacker.conf undefined setting.
            requests.exceptions.RequestException:
                Failed to connect or download vnf packages.
            tacker.nfvo.nfvo_client.FaliedDownloadContentException:
                Failed to create the vnf package zip file from the content.
"""
cls.validate()
if not cfg.CONF.connect_vnf_packages.pipeline or len(
cfg.CONF.connect_vnf_packages.pipeline) == 0:
raise UndefinedExternalSettingException(
"Vnf package the external setting to 'pipeline' undefined.")
if artifact_paths is None:
artifact_paths = []
def download_vnf_package(pipeline_type, vnf_package_zip):
if pipeline_type == 'package_content':
cls._download_package_content(vnf_package_zip, vnf_package_id)
elif pipeline_type == 'vnfd':
cls._download_vnfd(
vnf_package_zip, vnf_package_id)
elif pipeline_type == 'artifacts':
cls._download_artifacts(vnf_package_zip, vnf_package_id,
artifact_paths)
else:
raise UndefinedExternalSettingException(
"Vnf package the external setting to 'pipeline=<{}>'"
" not supported.".format(pipeline_type))
zip_buffer = io.BytesIO()
with zipfile.ZipFile(zip_buffer,
mode='w',
compression=zipfile.ZIP_DEFLATED) as vnf_package_zip:
for pipeline_type in cfg.CONF.connect_vnf_packages.pipeline:
download_vnf_package(pipeline_type, vnf_package_zip)
zip_buffer.seek(0)
return zip_buffer
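    # Usage sketch (not part of the original module; the package id and
    # artifact path are made-up values for illustration):
    #   zip_buffer = VnfPackageRequest.download_vnf_packages(
    #       '11111111-2222-3333-4444-555555555555',
    #       artifact_paths=['Scripts/install.sh'])
    #   with open('/tmp/pkg.zip', 'wb') as f:
    #       f.write(zip_buffer.getvalue())
    # Which stages run (package_content / vnfd / artifacts) comes from the
    # 'connect_vnf_packages.pipeline' option in tacker.conf.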
@classmethod
def _download_package_content(cls, vnf_package_zip, vnf_package_id):
LOG.info("Processing: download vnf_package to package content.")
request_url = cls._connector.replace_placeholder_url(
cfg.CONF.connect_vnf_packages.base_url,
"{}/package_content",
vnf_package_id)
headers = {'Accept': 'application/zip'}
response = cls._connector.request('GET', request_url, headers=headers)
cls._write(vnf_package_zip, response)
@classmethod
def _download_vnfd(cls, vnf_package_zip, vnf_package_id):
LOG.info("Processing: download vnf_package to vnfd.")
request_url = cls._connector.replace_placeholder_url(
cfg.CONF.connect_vnf_packages.base_url,
"{}/vnfd",
vnf_package_id)
# zip format only.
headers = {'Accept': 'application/zip'}
response = cls._connector.request('GET', request_url, headers=headers)
cls._write(vnf_package_zip, response)
@classmethod
def _download_artifacts(
cls,
vnf_package_zip,
vnf_package_id,
artifact_paths):
LOG.info("Processing: download vnf_package to artifact.")
for artifact_path in artifact_paths:
request_url = cls._connector.replace_placeholder_url(
cfg.CONF.connect_vnf_packages.base_url,
"{}/artifacts/{}",
vnf_package_id,
artifact_path)
headers = {'Accept': 'application/zip'}
response = cls._connector.request(
'GET', request_url, headers=headers)
cls._write(vnf_package_zip, response, artifact_path)
@classmethod
def index(cls, **kwargs):
"""List vnf package.
Args:
kwargs:
any other parameter that can be passed
to requests.Session.request.
Returns:
requests.Response: individual vnf package.
"""
cls.validate()
LOG.info("Processing: index vnf_package.")
return cls._connector.request(
'GET', cfg.CONF.connect_vnf_packages.base_url, **kwargs)
@classmethod
def show(cls, vnf_package_id, **kwargs):
"""Individual vnf package.
Args:
vnf_package_id (UUID): VNF Package ID.
kwargs:
any other parameter that can be passed
to requests.Session.request.
Returns:
requests.Response: individual vnf package.
"""
cls.validate()
LOG.info("Processing: show vnf_package.")
request_url = cls._connector.replace_placeholder_url(
cfg.CONF.connect_vnf_packages.base_url, vnf_package_id)
return cls._connector.request('GET', request_url, **kwargs)
class GrantRequest:
OPTS = [
cfg.StrOpt('base_url',
default=None,
help="grant of base_url"),
cfg.IntOpt('retry_num',
default=2,
help="Number of grant retry count"),
cfg.IntOpt('retry_wait',
default=30,
help="Number of grant retry wait"),
cfg.IntOpt('timeout',
default=20,
help="Number of grant connect timeout")
]
cfg.CONF.register_opts(OPTS, group='connect_grant')
_connector = _Connect(
cfg.CONF.connect_grant.retry_num,
cfg.CONF.connect_grant.retry_wait,
cfg.CONF.connect_grant.timeout)
@classmethod
def validate(cls):
"""Check config settings.
Raises:
UndefinedExternalSettingException: tacker.conf undefined setting.
"""
if (not cfg.CONF.connect_grant.base_url or
cfg.CONF.connect_grant.base_url.strip() == ''):
raise UndefinedExternalSettingException(
"Grant the external setting to 'base_url' undefined.")
@classmethod
def grants(cls, **kwargs):
"""grants request.
Args:
kwargs:
any other parameter that can be passed
to requests.Session.request.
        Returns:
            requests.Response: response of the grant request.
        Raises:
            tacker.nfvo.nfvo_client.UndefinedExternalSettingException:
                tacker.conf undefined setting.
            requests.exceptions.RequestException:
                Failed to connect or the grant request failed.
"""
cls.validate()
return cls._connector.request(
'POST', cfg.CONF.connect_grant.base_url, **kwargs)
| {
"content_hash": "838ff62712451590f36f0a2a3d38e4fe",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 78,
"avg_line_length": 34.70238095238095,
"alnum_prop": 0.5722984562607204,
"repo_name": "openstack/tacker",
"id": "00be84129088bfb00c54d1056bd10dc73e218c23",
"size": "12233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tacker/vnfm/nfvo_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "10809"
},
{
"name": "Mako",
"bytes": "1046"
},
{
"name": "Python",
"bytes": "7648075"
},
{
"name": "Ruby",
"bytes": "2841"
},
{
"name": "Shell",
"bytes": "61750"
},
{
"name": "Smarty",
"bytes": "3624"
}
],
"symlink_target": ""
} |
import os
import sys
from glob import glob
from inspect import isclass
from importlib import import_module
from collections import namedtuple
import logging
class Workload(object):
"""
Base class for Android related workloads
"""
_availables = None
# Setup logger
logger = logging.getLogger('Workload')
logger.setLevel(logging.INFO)
_AW = namedtuple('AndroidWorkload',
['module_name', 'module', 'class_name', 'ctor'])
@staticmethod
def get(te, name='YouTube'):
"""
Get a reference to the specified Android workload
"""
if Workload._availables is None:
Workload.availables(te.target)
# Build list of case insensitive workload names
if name not in Workload._availables:
logging.warning('Workload [%s] not available on target', name)
return None
return Workload._availables[name].ctor(te)
@staticmethod
def availables(target):
"""
List the supported android workloads which are available on the target
"""
if Workload._availables:
return Workload._availables.keys()
Workload._availables = {}
# Add workloads dir to system path
workloads_dir = os.path.dirname(os.path.abspath(__file__))
workloads_dir = os.path.join(workloads_dir, 'workloads')
logging.debug('%14s - Workdir: %s', 'Workload', workloads_dir)
sys.path.insert(0, workloads_dir)
logging.debug('%14s - Syspath: %s', 'Workload', format(sys.path))
for filepath in glob(os.path.join(workloads_dir, '*.py')):
filename = os.path.splitext(os.path.basename(filepath))[0]
logging.debug('%14s - Filename: %s', 'Workload', filename)
# Ignore __init__ files
if filename.startswith('__'):
continue
# Import the module for inspection
module = import_module(filename)
for member in dir(module):
# Ignore the base class
if member == 'Workload':
continue
handler = getattr(module, member)
if handler and isclass(handler) and \
issubclass(handler, Workload):
class_name = handler.__name__
module_name = module.__name__
# Check if a package is required and is available on target
aw = Workload._AW(module_name, module, class_name, handler)
if (Workload._is_available(target, aw)):
# Keep track of classes which are 'Android.Workload'
Workload._availables[class_name] = aw
return Workload._availables.keys()
@staticmethod
def _is_available(target, aw):
try:
package = getattr(aw.ctor, 'package')
except AttributeError:
# Assume workloads not requiring a package
# are always available
return True
# Check for the package being available
count = target.execute('pm list packages | grep {} | wc -l'\
.format(package))
if int(count) >= 1:
return True
logging.warning('%14s - Package [%s] not installed',
'Workload', package)
logging.warning('%14s - Workload [%s] disabled',
'Workload', aw.class_name)
return False
def __init__(self, test_env):
"""
        Initialize workloads available on the specified test environment
        test_env: target test environment
"""
self.te = test_env
self.target = test_env.target
self.logger = self.target.logger
logging.debug('%14s - Building list of available workloads...', 'Workload')
wloads = Workload.availables(self.target)
logging.info('%14s - Workloads available on target:', 'Workload')
logging.info('%14s - %s', 'Workload', wloads)
def _adb(self, cmd):
return 'adb -s {} {}'.format(self.target.adb_name, cmd)
def run(self, exp_dir, **kwargs):
        raise RuntimeError('Not implemented')
# vim :set tabstop=4 shiftwidth=4 expandtab
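# Usage sketch (not part of the original module): 'te' is assumed to be an
# already constructed TestEnv and 'YouTube' an available workload name.
#   wl = Workload.get(te, name='YouTube')
#   if wl:
#       wl.run(exp_dir)   # exp_dir is the output directory for the experiment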
| {
"content_hash": "c17bef275cfbd987e2ff8feb1f2fb956",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 83,
"avg_line_length": 33.3046875,
"alnum_prop": 0.5740089139103918,
"repo_name": "JaviMerino/lisa",
"id": "8b097a162453ba1dea7edd25709374a9455db79d",
"size": "4900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libs/utils/android/workload.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "6979934"
},
{
"name": "Python",
"bytes": "334420"
}
],
"symlink_target": ""
} |
import subprocess
class LocalIP:
def __init__(self):
pass
def getIps(self):
ip_lines = self.getIpLines()
return self.parseIps(ip_lines)
def getIpLines(self):
# get ip lines
arg='ip route list'
p=subprocess.Popen(arg,shell=True,stdout=subprocess.PIPE)
data = p.communicate()
return data[0].splitlines()
def parseIps(self, ip_lines):
ips = []
it_lines = iter(ip_lines)
next(it_lines)
for ip_line in it_lines:
split_line = ip_line.split()
ip_type = split_line[split_line.index('dev')+1]
ip_addr= split_line[split_line.index('src')+1]
ips.append((ip_type, ip_addr))
return ips
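# Usage sketch (not part of the original module). parseIps() skips the first
# route line and, for every remaining line of `ip route list` output such as
#   "192.168.1.0/24 dev eth0 proto kernel scope link src 192.168.1.10"
# (an assumed example), pairs the value after 'dev' with the value after
# 'src', giving e.g. [('eth0', '192.168.1.10')].
if __name__ == '__main__':
    print(LocalIP().getIps())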
| {
"content_hash": "824c07fe3096824052aca441492acfb9",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 65,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.5636856368563685,
"repo_name": "AlbertPumarola/raspberrypi_scripts",
"id": "488ae6614eb59babc1d95d5b4f6d6d77ccbf7e1d",
"size": "738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/get_local_ips.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5483"
},
{
"name": "Shell",
"bytes": "176"
}
],
"symlink_target": ""
} |
import unittest
import pandas as pd
from banpei.sst import SST
class TestSST(unittest.TestCase):
def setUp(self):
self.raw_data = pd.read_csv('tests/test_data/periodic_wave.csv')
self.data = self.raw_data['y']
def test_detect_by_svd(self):
model = SST(w=50)
results = model.detect(self.data)
self.assertEqual(len(self.data), len(results))
def test_detect_by_lanczos(self):
model = SST(w=50)
results = model.detect(self.data, is_lanczos=True)
self.assertEqual(len(self.data), len(results))
def test_stream_detect(self):
model = SST(w=50)
result = model.stream_detect(self.data)
self.assertIsInstance(result, float)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "34ad0b6f68dd50ec2be394c71c55b5f8",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 72,
"avg_line_length": 26.655172413793103,
"alnum_prop": 0.6274256144890039,
"repo_name": "tsurubee/banpei",
"id": "cba90a2ba8604f89f0daa00646d546ade2e1fe65",
"size": "773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_sst.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11621"
}
],
"symlink_target": ""
} |
import unittest
from boundary import AlarmCreate
class TestCommand(unittest.TestCase):
def setUp(self):
self.alarm_create = AlarmCreate()
pass
def test_cli_description(self):
self.assertEqual('Creates a alarm definition in an Boundary account',
self.alarm_create.getDescription(), 'Check AlarmCreate description')
| {
"content_hash": "646c9f3dc1183c1cee8cde7eec3dded7",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 93,
"avg_line_length": 31.166666666666668,
"alnum_prop": 0.6818181818181818,
"repo_name": "wcainboundary/boundary-api-cli",
"id": "223b393021c10f077d6cd07cd12c74e7148ec761",
"size": "985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/boundary/alarm_create_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "347"
},
{
"name": "Python",
"bytes": "160179"
},
{
"name": "Shell",
"bytes": "42131"
}
],
"symlink_target": ""
} |
from django.contrib.auth.forms import UserCreationForm
from django import forms
from django.contrib.auth.models import User
class RegisterForm(UserCreationForm):
class Meta:
model = User
fields = ['email', 'first_name', 'last_name']
def save(self, commit=True):
user = super().save(False)
# username and email are the same in this scheme. Enforce it!
user.username = user.email
if commit:
user.save()
return user
| {
"content_hash": "bdcb862a327b084ed1abb152fa17803e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 69,
"avg_line_length": 28.27777777777778,
"alnum_prop": 0.6247544204322201,
"repo_name": "aabmass/pswebsite_django",
"id": "4363687875f67ca9051e2292875d72ba58f87eb7",
"size": "509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pswebsite/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "33715"
},
{
"name": "HTML",
"bytes": "41875"
},
{
"name": "JavaScript",
"bytes": "2505"
},
{
"name": "Python",
"bytes": "24484"
},
{
"name": "Shell",
"bytes": "214"
}
],
"symlink_target": ""
} |
import os
import kfp.compiler as compiler
import kfp.components as components
from kfp.azure import use_azure_secret
import kfp.dsl as dsl
import argparse
parser = argparse.ArgumentParser(description='Process inputs.')
parser.add_argument('--image_name', type=str, default='kubeflow_synapse_component')
parser.add_argument('--image_repo_name', type=str, default='kubeflowdemo')
args = parser.parse_args()
component_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), ".")
image_repo_name = args.image_repo_name # the container registry for the container operation and path in the ACR
image_name = args.image_name
file_path = os.path.join(component_root, "component.yaml")
# Loading the component.yaml file for deployment operation
run_job_operation = components.load_component_from_file(file_path)
# The run_job_image_name shall be the container image for the operation
# It shall be something like <your_acr_name>.azurecr.io/deploy/aml-deploy-model:latest
# If you are using a container registry other than Azure Container Registry, update the image name correspondingly
run_job_image_name = image_repo_name + '.azurecr.io/deploy/' + image_name + ':latest'
print(run_job_image_name)
def use_image(image_name):
def _use_image(task):
task.image = image_name
return task
return _use_image
@dsl.pipeline(
name='Azure Synapse Component Sample',
description='Run spark jobs in Azure Synapse'
)
def run_spark_job(
main_definition_file,
command_line_arguments
):
operation = run_job_operation(executor_size='Small',
executors=1,
main_class_name='""',
main_definition_file=main_definition_file,
name='kubeflowsynapsetest',
tenant_id='$(AZ_TENANT_ID)',
service_principal_id='$(AZ_CLIENT_ID)',
service_principal_password='$(AZ_CLIENT_SECRET)',
subscription_id='$(AZ_SUBSCRIPTION_ID)',
resource_group='kubeflow-demo-rg',
command_line_arguments=command_line_arguments,
spark_pool_name='kubeflowsynapse',
language='',
reference_files='',
configuration='',
tags='',
spark_pool_config_file='./src/spark_pool_config.yaml',
wait_until_job_finished=True,
waiting_timeout_in_seconds=3600,
workspace_name='kubeflow-demo'). \
apply(use_azure_secret()). \
apply(use_image(run_job_image_name))
if __name__ == '__main__':
compiler.Compiler().compile(run_spark_job, __file__ + '.tar.gz') | {
"content_hash": "9c4847e967e62f0a80998b7a0f13f122",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 114,
"avg_line_length": 45.515151515151516,
"alnum_prop": 0.5745672436750998,
"repo_name": "kubeflow/pipelines",
"id": "3bcb5419f476d568f7490c0753206df3429db413",
"size": "3004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "components/azure/azuresynapse/runsparkjob/sample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "799"
},
{
"name": "CSS",
"bytes": "2171"
},
{
"name": "Dockerfile",
"bytes": "49331"
},
{
"name": "Go",
"bytes": "1903937"
},
{
"name": "HTML",
"bytes": "3656"
},
{
"name": "JavaScript",
"bytes": "544297"
},
{
"name": "Jinja",
"bytes": "938"
},
{
"name": "Jupyter Notebook",
"bytes": "359548"
},
{
"name": "Makefile",
"bytes": "22164"
},
{
"name": "Mustache",
"bytes": "23652"
},
{
"name": "PowerShell",
"bytes": "3194"
},
{
"name": "Python",
"bytes": "5684887"
},
{
"name": "Shell",
"bytes": "264595"
},
{
"name": "Smarty",
"bytes": "8295"
},
{
"name": "Starlark",
"bytes": "553"
},
{
"name": "TypeScript",
"bytes": "4294958"
}
],
"symlink_target": ""
} |
"""
Necessary bindings for Pybuild files.
"""
from __future__ import absolute_import, division, print_function
from mybuild._compat import *
import inspect
from mybuild import core
from mybuild.util.deco import constructor_decorator
__author__ = "Eldar Abusalimov"
__date__ = "2013-07-29"
__all__ = ['module', 'project', 'option']
class PyDslModuleMeta(core.ModuleMetaBase):
"""Infers options from class constructor.
Adds an optional 'internal' keyword argument.
Produces real modules by default, however subclasses must still pass
    an 'option_types' keyword argument or provide a reasonable implementation
of '_prepare_optypes' method.
"""
def __init__(cls, name, bases, attrs, internal=False, **kwargs):
"""Keyword arguments are passed to '_prepare_optypes' method."""
super(PyDslModuleMeta, cls).__init__(name, bases, attrs,
option_types=(None if internal else
cls._prepare_optypes(**kwargs)))
def _prepare_optypes(cls):
"""Converts a constructor argspec into a list of Optype objects."""
func = cls.__dict__.get('__init__') # to avoid MRO lookup
try:
argspec = inspect.getargspec(inspect.unwrap(func))
except TypeError: # no constructor, or it is a wrapper descriptor
return []
else:
args, va, kw, dfls = argspec
dfls = dfls or []
if not args and not va:
raise TypeError('Module must accept at least one argument')
for arg in args:
if not isinstance(arg, str):
raise TypeError('Tuple parameter unpacking '
'is not supported: {arg}'.format(**locals()))
if args: # forget about the first arg (which is usually 'self')
if len(args) == len(dfls):
del dfls[0]
del args[0]
def to_optype(optype_or_default):
if isinstance(optype_or_default, core.Optype):
return optype_or_default
return core.Optype(optype_or_default)
head = [core.Optype() for _ in range(len(args) - len(dfls))]
tail = list(map(to_optype, dfls))
return [(name, optype.set(name=name))
for name, optype in zip(args, head + tail)]
def _instantiate(cls, optuple, *args, **kwargs):
instance = cls.__new__(cls, optuple, *args, **kwargs)
if isinstance(instance, cls):
# The following dirty hack is to be sure that Module.__init__ gets
# called with proper arguments and exactly once.
super(PyDslModuleBase, instance).__init__(optuple, *args, **kwargs)
# On the other hand, the real __init__ is invoked with keyword
# arguments holding option values and it is not required to call
# super constructor (which anyway does nothing, see
# PyDslModule.__init__).
instance.__init__(**optuple._asdict())
return instance
class PyDslModuleBase(extend(core.ModuleBase,
metaclass=PyDslModuleMeta, internal=True)):
"""
Example of a simple module without any options:
>>> @module
... def modname(self):
... pass
More examples:
>>> @module
... def m(self,
... foo = option(0, 'one', 'two'), # one of these, or any other
... bar = option.enum(38400, 115200), # enumeration of two values
... baz = option.bool(default=True), # boolean flag
... ):
... pass
>>> list(m._options)
['foo', 'bar', 'baz']
>>> class modclass(module):
... def __init__(self, opt = option.bool()):
... pass
"""
def __init__(_self, *args, **kwargs):
# Notice the absence of super constructor call,
# see PyDslModuleMeta._instantiate for explanations.
pass
def __repr__(self):
return repr(self._optuple)
# XXX
constrain = core.Module._constrain
consider = _consider = core.Module._discover
def new_module_type(name, *bases):
return new_type(name, bases, {}, metaclass=core.ModuleMetaBase, internal=True)
module = constructor_decorator(new_module_type('PyDslModule',
PyDslModuleBase, core.Module))
project = constructor_decorator(new_module_type('PyDslProject',
PyDslModuleBase, core.Project))
application = None
library = None
option = core.Optype
| {
"content_hash": "d6486fd4fe5b60479e7c85e389912dc5",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 82,
"avg_line_length": 32.42446043165467,
"alnum_prop": 0.5870867539383182,
"repo_name": "abusalimov/mybuild",
"id": "f6b84cbbf3657b4b4957cd0b410a28ad9e4a087d",
"size": "4507",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mybuild/binding/pydsl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "590"
},
{
"name": "Makefile",
"bytes": "165"
},
{
"name": "Python",
"bytes": "208966"
}
],
"symlink_target": ""
} |
"""
Find regions of first bed file that overlap regions in a second bed file. The
output preserves all fields from the input.
NOTE: -u and -d options are currently not functional!
usage: %prog bed_file_1 bed_file_2
-m, --mincols=N: Require this much overlap (default 1bp)
-u, --upstream_pad=N: upstream interval padding (default 0bp)
-d, --downstream_pad=N: downstream interval padding (default 0bp)
-v, --reverse: Print regions that DO NOT overlap
-b, --booleans: Just print '1' if interval overlaps or '0' otherwise
"""
import sys
from warnings import warn
from bx.bitset import *
from bx.bitset_builders import *
from bx.cookbook import doc_optparse
mincols = 1
upstream_pad = 0
downstream_pad = 0
options, args = doc_optparse.parse( __doc__ )
try:
if options.mincols: mincols = int( options.mincols )
if options.upstream_pad: upstream_pad = int( options.upstream_pad )
if options.downstream_pad: downstream_pad = int( options.downstream_pad )
reverse = bool( options.reverse )
booleans = bool( options.booleans )
in_fname, in2_fname = args
except:
doc_optparse.exit()
# Read first bed into some bitsets
bitsets = binned_bitsets_from_file( open( in2_fname ) )
# Read second BED and intersect
for line in open( in_fname ):
if line.startswith("#") or line.isspace():
continue
fields = line.split()
start, end = int( fields[1] ), int( fields[2] )
if start > end:
warn( "Bed interval start after end!" )
if fields[0] in bitsets and bitsets[fields[0]].count_range( start, end-start ) >= mincols:
if booleans:
if reverse:
print 0
else:
print 1
elif not reverse:
print line,
else:
if booleans:
if reverse:
print 1
else:
print 0
elif reverse:
print line,
| {
"content_hash": "864eef863e3a4d1ef3bc7520a7124157",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 94,
"avg_line_length": 29.106060606060606,
"alnum_prop": 0.631441957313899,
"repo_name": "bxlab/HiFive_Paper",
"id": "52e1bab5acf5b39ec4501e40df0201f67c652820",
"size": "1963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Scripts/HiCLib/bx-python-0.7.1/build/scripts-2.7/bed_intersect.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5096"
},
{
"name": "C",
"bytes": "107381"
},
{
"name": "C++",
"bytes": "182835"
},
{
"name": "CMake",
"bytes": "3353"
},
{
"name": "Forth",
"bytes": "152"
},
{
"name": "Makefile",
"bytes": "22978"
},
{
"name": "Perl",
"bytes": "25453"
},
{
"name": "Python",
"bytes": "4229513"
},
{
"name": "R",
"bytes": "43022"
},
{
"name": "Shell",
"bytes": "10798"
}
],
"symlink_target": ""
} |
import json
def break_into_words(keystrokes):
words = []
word = ""
for keystroke in keystrokes:
if keystroke == " " or keystroke == " ": #keeping residual "double option" because it could be either thumb that hits the "keyboard"
words.append(word)
word = ""
else:
word += keystroke
#don't lose the last word in a sentence that doesn't have a space after it
    if word != "":
words.append(word)
return words
def get_dictionary(file_name):
#reads in wordlist (dictionary) data structure and turns from text into data structure
with open(file_name, "r") as read_f:
dictionary_line = read_f.readline()
dictionary_structure = json.loads(dictionary_line) #can install yaml and do yaml.load() instead of json.loads(). will print prettier, no unicode
return dictionary_structure
def find_next_word(keystrokes, dictionary):
#takes keystrokes for next typed word and returns the list of possible words meant by keystroke combination
word_length = len(keystrokes)
if keystrokes in dictionary[word_length - 1].keys():
possible_words = dictionary[word_length - 1][keystrokes]
else:
possible_words = ["NOT FOUND"]
return possible_words
def parse_keystrokes(keystrokes):
dictionary = get_dictionary("RZ_edit_data_structure.txt")
# dictionary = get_dictionary("intersecting_words_data_structure.txt")
#words_data_structure.txt is a list (separated by wordlength) of key-value pairs of keystrokes and list of possibly intended words
#with each list sorted by frequency of use, according to Google 1-grams (see other files for how the 1-grams were pre-processed)
# keystrokes = raw_input("type words here: (q to quit) ")
words = break_into_words(keystrokes)
sentence = []
for word in words:
sentence.append(find_next_word(word, dictionary))
return sentence | {
"content_hash": "0d3f4a4c67686a10344154bf2fd539f0",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 153,
"avg_line_length": 41.93478260869565,
"alnum_prop": 0.6884396060134785,
"repo_name": "ctlewitt/Invisible-Keyboard",
"id": "81e69d8ec2fa241a69e0b3e0af5f3bcc1a96828d",
"size": "1942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invisiblekeyboard/parsing_keystrokes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "546"
},
{
"name": "Python",
"bytes": "24452"
}
],
"symlink_target": ""
} |
import string
#Open text file and remove punctuation and set to lower case
# 'sc' is the SparkContext provided by the pyspark shell / notebook environment
lines = sc.textFile("test1.txt")
punc = lines.map(lambda text: text.lower())
punc = punc.map(lambda text: text.translate({ord(c): None for c in string.punctuation}))
#Split lines into words
split = punc.flatMap(lambda line: line.split())
#Map each word to a count of 1
words = split.map(lambda w: (w,1))
#Then count each occurance of a word
word_count = words.reduceByKey(lambda w, c: w + c)
word_count.collect()
# In[ ]:
| {
"content_hash": "baa39efaf091e12a38853287999ea61e",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 88,
"avg_line_length": 25.2,
"alnum_prop": 0.7142857142857143,
"repo_name": "gjwajda/Computational-Tools-For-Big-Data",
"id": "c6ea865134a4bfc80fcb6e698a5fe8d54af65170",
"size": "533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Exercise9/exercise9_1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1026222"
},
{
"name": "Python",
"bytes": "58002"
},
{
"name": "Shell",
"bytes": "395"
}
],
"symlink_target": ""
} |
import argparse
import sys
import os
# Make utilities folder available
sys.path.append(os.path.abspath("../"))
from utilities.runner import exec_in_row
#==============================================================================
#Command line options==========================================================
#==============================================================================
parser = argparse.ArgumentParser()
parser.add_argument("spleen", type=str,
help="A directory containing spleen files")
parser.add_argument("heart", type=str,
help="A directory containing heart files")
parser.add_argument("liver", type=str,
help="A directory containing liver files")
parser.add_argument("gonad", type=str,
help="A directory containing gonad files")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
#==============================================================================
def get_filtered_alignments(infolder):
alns = []
for root, dirs, files in os.walk(infolder):
if "filter.sorted.bam" in files:
alns.append(os.path.join(root, "filter.sorted.bam"))
return alns
def get_samtools_params(aln):
samtools_params = ["samtools",
"index",
aln]
return samtools_params
def run_samtools_index(all_alns):
samtools_commands = []
for aln in all_alns:
samtools_commands.append(get_samtools_params(aln))
exec_in_row(samtools_commands)
def main():
spleen_alns = get_filtered_alignments(args.spleen)
gonad_alns = get_filtered_alignments(args.gonad)
liver_alns = get_filtered_alignments(args.liver)
heart_alns = get_filtered_alignments(args.heart)
run_samtools_index(spleen_alns)
run_samtools_index(gonad_alns)
run_samtools_index(liver_alns)
run_samtools_index(heart_alns)
if __name__ == "__main__":
main()
| {
"content_hash": "45d0eeaa8470a9fe81b9fe2e72ba5f46",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 79,
"avg_line_length": 32.916666666666664,
"alnum_prop": 0.5529113924050633,
"repo_name": "qfma/ohnolog-dc",
"id": "0a4fd72b35d1969a22e43846de2611465da591e6",
"size": "2129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "count-extraction/index-bam.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "467069"
},
{
"name": "Python",
"bytes": "71993"
},
{
"name": "R",
"bytes": "16709"
}
],
"symlink_target": ""
} |
import getopt
import fnmatch
import os
import re
import sys
# The default path to the Subversion configuration file.
SVN_CONFIG_FILENAME = os.path.expandvars('$HOME/.subversion/config')
# The name of Subversion's private directory in working copies.
SVN_WC_ADM_DIR_NAME = '.svn'
# The name this script was invoked as.
PROGNAME = os.path.basename(sys.argv[0])
def usage():
print("""This script reads the auto-properties defined in the file
'%s'
and applies them recursively to all the files and directories in the
current working copy. It may behave differently than the Subversion
command line; where the subversion command line may only apply a single
matching auto-property to a single pathname, this script will apply all
matching lines to a single pathname.
Usage:
%s [options] [WC_PATH]
where WC_PATH is the path to a working copy.
If WC_PATH is not specified, '.' is assumed.
Valid options are:
--help, -h : Print this help text.
--config ARG : Read the Subversion config file at path ARG
instead of '%s'.
""" % (SVN_CONFIG_FILENAME, PROGNAME, SVN_CONFIG_FILENAME))
def get_autoprop_lines(fd):
lines = []
reading_autoprops = 0
re_start_autoprops = re.compile('^\s*\[auto-props\]\s*')
re_end_autoprops = re.compile('^\s*\[\w+\]\s*')
for line in fd.xreadlines():
if reading_autoprops:
if re_end_autoprops.match(line):
reading_autoprops = 0
continue
else:
if re_start_autoprops.match(line):
reading_autoprops = 1
continue
if reading_autoprops:
lines += [line]
return lines
def process_autoprop_lines(lines):
result = []
for line in lines:
# Split the line on the = separating the fnmatch string from the
# properties.
try:
(fnmatch, props) = line.split('=', 1)
except ValueError:
continue
# Remove leading and trailing whitespace from the fnmatch and
# properties.
fnmatch = fnmatch.strip()
props = props.strip()
# Create a list of property name and property values. Remove all
# leading and trailing whitespce from the propery names and
# values.
props_list = []
for prop in props.split(';'):
prop = prop.strip()
if not len(prop):
continue
try:
(prop_name, prop_value) = prop.split('=', 1)
prop_name = prop_name.strip()
prop_value = prop_value.strip()
except ValueError:
prop_name = prop
prop_value = '*'
if len(prop_name):
props_list += [(prop_name, prop_value)]
result += [(fnmatch, props_list)]
return result
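# Illustrative example (not from the original script): an auto-props line such
# as
#   *.sh = svn:executable;svn:eol-style=native
# is turned by process_autoprop_lines() into
#   [('*.sh', [('svn:executable', '*'), ('svn:eol-style', 'native')])]
# i.e. a property listed without an '=value' part gets the placeholder
# value '*'.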
def filter_walk(autoprop_lines, dirname, filenames):
# Do not descend into a .svn directory.
try:
filenames.remove(SVN_WC_ADM_DIR_NAME)
except ValueError:
pass
filenames.sort()
# Find those filenames that match each fnmatch.
for autoprops_line in autoprop_lines:
fnmatch_str = autoprops_line[0]
prop_list = autoprops_line[1]
matching_filenames = fnmatch.filter(filenames, fnmatch_str)
matching_filenames = [f for f in matching_filenames \
if not os.path.islink(os.path.join(dirname, f))]
if not matching_filenames:
continue
for prop in prop_list:
command = ['svn', 'propset', prop[0], prop[1]]
for f in matching_filenames:
command += ["%s/%s" % (dirname, f)]
status = os.spawnvp(os.P_WAIT, 'svn', command)
if status:
print('Command %s failed with exit status %s' \
% (command, status))
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], 'h', ['help', 'config='])
except getopt.GetoptError as e:
usage()
return 1
config_filename = None
for (o, a) in opts:
if o == '-h' or o == '--help':
usage()
return 0
elif o == '--config':
config_filename = os.path.abspath(a)
if not config_filename:
config_filename = SVN_CONFIG_FILENAME
if len(args) == 0:
wc_path = '.'
elif len(args) == 1:
wc_path = args[0]
else:
usage()
print("Too many arguments: %s" % ' '.join(args))
return 1
try:
fd = file(config_filename)
except IOError:
print("Cannot open svn configuration file '%s' for reading: %s" \
% (config_filename, sys.exc_value.strerror))
return 1
autoprop_lines = get_autoprop_lines(fd)
fd.close()
autoprop_lines = process_autoprop_lines(autoprop_lines)
os.path.walk(wc_path, filter_walk, autoprop_lines)
if __name__ == '__main__':
sys.exit(main())
| {
"content_hash": "8322c903bcf465bb9e6952f9e4aa37c1",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 71,
"avg_line_length": 26.62130177514793,
"alnum_prop": 0.6374749944432097,
"repo_name": "YueLinHo/Subversion",
"id": "061c5c501465d6f1ec2a3ac2a11832898ccadd08",
"size": "5384",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/client-side/svn_apply_autoprops.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2407"
},
{
"name": "C",
"bytes": "23344368"
},
{
"name": "C++",
"bytes": "1110396"
},
{
"name": "CSS",
"bytes": "1216"
},
{
"name": "Emacs Lisp",
"bytes": "401177"
},
{
"name": "HTML",
"bytes": "404487"
},
{
"name": "Java",
"bytes": "1698548"
},
{
"name": "M4",
"bytes": "204671"
},
{
"name": "Makefile",
"bytes": "50827"
},
{
"name": "Objective-C",
"bytes": "534640"
},
{
"name": "PLSQL",
"bytes": "1622"
},
{
"name": "PLpgSQL",
"bytes": "4534"
},
{
"name": "Perl",
"bytes": "395610"
},
{
"name": "Python",
"bytes": "6205629"
},
{
"name": "Roff",
"bytes": "33424"
},
{
"name": "Ruby",
"bytes": "437540"
},
{
"name": "Shell",
"bytes": "196621"
},
{
"name": "Vim script",
"bytes": "1123"
},
{
"name": "XSLT",
"bytes": "24950"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import argparse
import errno
from io import open
import os
import subprocess
import yaml
"""Generate a changelog entry file in git project root.
Automatically stages the file and amends the previous commit if the `--amend` argument is used.
"""
def get_title():
return subprocess.check_output("git log --format='%s' -1", shell=True).decode('utf-8').strip()
def get_author():
return subprocess.check_output("git config user.name", shell=True).decode('utf-8').strip()
def get_branch_name():
return subprocess.check_output("git symbolic-ref --short HEAD", shell=True).decode('utf-8').strip()
def get_git_root():
return subprocess.check_output("git rev-parse --show-toplevel", shell=True).decode('utf-8').strip()
def get_yml_file_path():
path = os.path.join(get_git_root(), 'changelogs', 'unreleased')
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
file_name = get_branch_name().replace('/', '-') + '.yml'
file_path = os.path.join(path, file_name)
return file_path
def write_changelog(log_entry, rewrite=False):
file_path = get_yml_file_path()
if os.path.exists(file_path):
if rewrite:
print("File {PATH} already exists, and will be lost.".format(PATH=file_path))
else:
with open(file_path, 'r') as f:
old_title = yaml.load(f)['title']
if isinstance(old_title, list):
log_entry['title'] = old_title + [log_entry['title']]
else:
log_entry['title'] = [old_title, log_entry['title']]
print("Saving change into file: " + file_path)
yml_content = yaml.safe_dump(log_entry, allow_unicode=True, default_flow_style=False, encoding=None)
print(yml_content)
with open(file_path, 'w', encoding='utf-8') as f:
f.write(yml_content)
def commit_changes(yaml_file_path):
subprocess.call("git commit --amend", shell=True)
print("Changes have been committed to local git repository.")
def main(app_args=None):
if not app_args:
parser = argparse.ArgumentParser(description='Generate a changelog entry file in git project root.')
parser.add_argument('title', nargs='?', default=get_title())
parser.add_argument('--author', default=get_author())
parser.add_argument('--amend', action='store_true')
parser.add_argument('--rewrite', action='store_true')
app_args = parser.parse_args()
log_entry = {
'title': app_args.title,
'author': app_args.author
}
write_changelog(log_entry, rewrite=app_args.rewrite)
subprocess.call("git add {FILENAME}".format(FILENAME=get_yml_file_path()), shell=True)
if app_args.amend:
commit_changes(get_yml_file_path())
if __name__ == '__main__':
main()
| {
"content_hash": "1d6028f9a627c1caf09e8fa78b6b0cba",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 108,
"avg_line_length": 31.380434782608695,
"alnum_prop": 0.6328368548666435,
"repo_name": "istarion/changelog-helper",
"id": "46dcbb7d6747e18fea6e530c4a55c397ac3c6ccf",
"size": "2909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "changelog_helper/add_changelog.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11306"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from datetime import datetime, timedelta
from dateutil.parser import parse
from flask import jsonify
def fmt_date(date):
return date.strftime("%Y%m%d")
def datetime_iterator(from_date=None, to_date=None, delta=timedelta(days=1)):
from_date = from_date or datetime.now()
to_date = to_date or datetime.now()
if from_date > to_date:
while from_date >= to_date:
yield from_date
from_date = from_date - delta
else:
while from_date <= to_date:
yield from_date
from_date = from_date + delta
def date_iterator(from_date, to_date):
"""
Should Iterate through datetime and return formatted string
>>> map(fmt_date, date_iterator(parse("01-Feb-2003"), parse("3-Feb-2003")))
['20030201', '20030202', '20030203']
>>> map(fmt_date, date_iterator(parse("Jan 31 2011"), parse("Feb 2 2011")))
['20110131', '20110201', '20110202']
>>> map(fmt_date, date_iterator(parse("Feb 2 2011"), parse("Jan 31 2011")))
['20110202', '20110201', '20110131']
"""
return datetime_iterator(from_date, to_date, timedelta(days=1))
def week_iterator(from_date, to_date):
"""
>>> map(fmt_date, week_iterator(parse("1-Sep-2011"), parse("14-Sep-2011")))
['20110829', '20110905', '20110912']
"""
from_date = from_date - timedelta(days=from_date.weekday())
to_date = to_date - timedelta(days=to_date.weekday())
return datetime_iterator(from_date, to_date, timedelta(days=7))
def month_iterator(from_date, to_date):
"""
>>> map(fmt_date, month_iterator(parse("15-11-2005"), parse("20-3-2006")))
['20051101', '20051201', '20060101', '20060201', '20060301']
>>> map(fmt_date, month_iterator(parse("20-3-2006"), parse("15-11-2005")))
['20060301', '20060201', '20060101', '20051201', '20051101']
"""
from_date = from_date.replace(day=1)
to_date = to_date.replace(day=1)
if from_date > to_date:
while from_date >= to_date:
yield from_date
if from_date.month == 1:
from_date = from_date.replace(year=from_date.year - 1,
month=12)
else:
from_date = from_date.replace(month=from_date.month - 1)
else:
while from_date <= to_date:
yield from_date
if from_date.month == 12:
from_date = from_date.replace(year=from_date.year + 1,
month=1)
else:
from_date = from_date.replace(month=from_date.month + 1)
def year_iterator(from_date, to_date):
"""
>>> map(fmt_date, year_iterator(parse("01-Feb-2003"), parse("3-Feb-2005")))
['20030101', '20040101', '20050101']
"""
from_date = from_date.replace(day=1, month=1, tzinfo=None)
to_date = to_date.replace(day=1, month=1, tzinfo=None)
if from_date > to_date:
while from_date >= to_date:
yield from_date
from_date = from_date.replace(year=from_date.year - 1)
else:
while from_date <= to_date:
yield from_date
from_date = from_date.replace(year=from_date.year + 1)
def json_response(f):
def new_f(*args, **kwargs):
return jsonify(f(*args, **kwargs))
return new_f
def construct_key(*args):
"""
>>> construct_key()
''
>>> construct_key('Activity', [''])
'Activity'
>>> construct_key('Activity', ['Month', '20111101'], [])
'Activity:Month:20111101'
>>> construct_key('Activity', ['Month', '20111101'], ['Practice', 1])
'Activity:Month:20111101:Practice:1'
>>> construct_key('Activity', 'Month:20111101', 'Practice:1')
'Activity:Month:20111101:Practice:1'
>>> construct_key('Activity', ['Month', '20111101'], None)
'Activity:Month:20111101'
"""
def flatten_args(args):
flattened = []
for arg in args:
if type(arg) == list or type(arg) == tuple:
flattened.extend(flatten_args(arg))
elif arg is None or str(arg) == '':
continue
else:
flattened.append(str(arg))
return flattened
flattened_args = flatten_args(args)
if flattened_args == []:
return ''
return reduce(lambda x, y: x + ':' + y, flattened_args)
if __name__ == "__main__":
fmt_date(parse("Jan 31 2011")) # suppress unused 'parse' warning
| {
"content_hash": "b93a92803d9aa6fc2d7b7f6483ab6925",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 79,
"avg_line_length": 32.93382352941177,
"alnum_prop": 0.5733422638981915,
"repo_name": "practo/r5d4",
"id": "ec0c5dfcba4d342dae39e4cd48d3902033bbb26c",
"size": "4479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "r5d4/utility.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52496"
}
],
"symlink_target": ""
} |
import argparse
import json
class Inventory:
'''
    Ansible inventory, generated from the config file
'''
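    # Hedged usage note: Ansible treats this script as a dynamic inventory, so
    # it is normally run as an executable with either --list (all groups) or
    # --host <hostname> (per-host variables), for example:
    #
    #   ansible-playbook -i swift_dynamic_inventory.py site.yml
    #
    # The playbook name above is only an illustration.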
def __init__(self, fname):
self.__load_config__(fname)
def __load_config__(self, name):
with open(name) as f:
# this function is a bit ugly and should be rewritten
self.conf = json.loads(f.read())
def build_devices_dict(self):
# Devices dict should be of the form:
# {
# "object_devices": [ { "type": "object", "dev": "vdb", "region": "regionOne", "zone": "1", "weight": "100" },...]
# "container_devices": [ { "type": "container",... },... ]
# "account_devices": [ { "type": "account",... },... ]
# }
def add_device_to_list(d, l):
#if not d['name'] in [dev['name'] for dev in l]:
# l.append(d)
l.append(d)
def iterate_devices_in_node(device_type, node):
for d in node['swift_devices'][device_type]:
yield d
def append_ring_info_to_device(d,n):
            d['ip'] = n['ip']
            d['zone'] = n['zone']
            d['region'] = n['region']
object_devices = list()
container_devices = list()
account_devices = list()
for node in self.conf['groups']['swift-object']:
node_conf = self.conf[node]
for device in iterate_devices_in_node('object_devices', node_conf):
device['type'] = 'object'
append_ring_info_to_device(device, node_conf['rings_info'])
add_device_to_list(device, object_devices)
for node in self.conf['groups']['swift-md']:
node_conf = self.conf[node]
for device in iterate_devices_in_node('container_devices', node_conf):
device['type'] = 'container'
append_ring_info_to_device(device, node_conf['rings_info'])
add_device_to_list(device, container_devices)
for device in iterate_devices_in_node('account_devices', node_conf):
device['type'] = 'account'
append_ring_info_to_device(device, node_conf['rings_info'])
add_device_to_list(device, account_devices)
res = dict()
res['object_devices'] = object_devices
res['container_devices'] = container_devices
res['account_devices'] = account_devices
return res
def show_list(self):
g = {}
for group in ['keystone', 'swift-proxy', 'swift-md', 'swift-object',
'swift-ring-builder']:
g[group] = dict()
g[group]['hosts'] = self.conf['groups'][group]
g[group]['vars'] = dict()
if group == 'swift-ring-builder':
g[group]['vars'] = self.conf[group]
g[group]['vars'].update(self.conf['keystone']['vars'])
g[group]['vars'].update(self.conf['swift']['vars'])
return g
def show_host(self, name):
res = self.conf[name]
# For the swift-ring-builder host we
# need to dynamically construct:
# object_devices, container_devices, account_devices
# See swift-create-rings role
storage_devices = self.build_devices_dict()
res.update(storage_devices)
#for group_vars in ['keystone', 'swift']:
# res.update(self.conf[group_vars]['vars'])
return res
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--list', action='store_true')
parser.add_argument('--host')
args = parser.parse_args()
inventory = Inventory('cluster_config.json')
out = {}
if args.list:
out = inventory.show_list()
if args.host:
out = inventory.show_host(args.host)
print json.dumps(out)
if __name__ == '__main__':
main()
| {
"content_hash": "429658927177eb5f7f2ce2022150db99",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 126,
"avg_line_length": 34.792792792792795,
"alnum_prop": 0.5406525116519938,
"repo_name": "eranr/storlets-swift-install",
"id": "77a5802fa4759fed1ec09e1d7e57432583a8b546",
"size": "3882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "provisioning/swift_dynamic_inventory.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14480"
},
{
"name": "Shell",
"bytes": "1806"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('democracylab', '0006_auto_20200427_2336'),
('civictechprojects', '0029_projectcommit'),
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('deleted', models.BooleanField(default=False)),
('event_agenda', models.CharField(blank=True, max_length=4000)),
('event_date_created', models.DateTimeField(null=True)),
('event_date_end', models.DateTimeField()),
('event_date_modified', models.DateTimeField(auto_now_add=True, null=True)),
('event_date_start', models.DateTimeField()),
('event_description', models.CharField(blank=True, max_length=4000)),
('event_location', models.CharField(blank=True, max_length=200)),
('event_name', models.CharField(max_length=200)),
('event_rsvp_url', models.CharField(blank=True, max_length=2083)),
('event_short_description', models.CharField(blank=True, max_length=140)),
('is_searchable', models.BooleanField(default=False)),
('is_created', models.BooleanField(default=True)),
('event_creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='event_creator', to='democracylab.Contributor')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('deleted', models.BooleanField(default=False)),
('group_date_created', models.DateTimeField(null=True)),
('group_date_modified', models.DateTimeField(auto_now_add=True, null=True)),
('group_description', models.CharField(blank=True, max_length=4000)),
('group_location', models.CharField(blank=True, max_length=200)),
('group_name', models.CharField(max_length=200)),
('group_short_description', models.CharField(blank=True, max_length=140)),
('is_searchable', models.BooleanField(default=False)),
('is_created', models.BooleanField(default=True)),
('group_creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='group_creator', to='democracylab.Contributor')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ProjectRelationship',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('relationship_event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='relationships', to='civictechprojects.Event')),
('relationship_group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='relationships', to='civictechprojects.Group')),
('relationship_project', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='relationships', to='civictechprojects.Project')),
],
),
migrations.AddField(
model_name='projectfile',
name='file_event',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='files', to='civictechprojects.Event'),
),
migrations.AddField(
model_name='projectfile',
name='file_group',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='files', to='civictechprojects.Group'),
),
migrations.AddField(
model_name='projectlink',
name='link_event',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='links', to='civictechprojects.Event'),
),
migrations.AddField(
model_name='projectlink',
name='link_group',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='links', to='civictechprojects.Group'),
),
]
| {
"content_hash": "cb570b3b67f5fdbc70babb724d898358",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 190,
"avg_line_length": 55.31395348837209,
"alnum_prop": 0.6056338028169014,
"repo_name": "DemocracyLab/CivicTechExchange",
"id": "e5f4d7db913ceaedff9f16563f67566543551f60",
"size": "4831",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "civictechprojects/migrations/0030_auto_20200501_1755.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "5441"
},
{
"name": "HTML",
"bytes": "27002"
},
{
"name": "JavaScript",
"bytes": "868669"
},
{
"name": "Procfile",
"bytes": "120"
},
{
"name": "Python",
"bytes": "493545"
},
{
"name": "SCSS",
"bytes": "106029"
},
{
"name": "Shell",
"bytes": "20355"
}
],
"symlink_target": ""
} |
import os, sys, struct
from pyscarphase.proto import data_pb2 as data_pb
class DataReader:
'''
Read scarphase protobuf data file
<size of header>
<header>
<size of window 0>
<window 0>
<size of window 1>
<window 1>
...
'''
def __init__(self, filename, uuid=None):
self.messages = []
self.position = 0
self.eof = None
self.open(filename, uuid)
def __iter__(self):
return self
def __next__(self):
return self.next()
def open(self, filename, uuid=None):
self.file = open(filename, 'rb')
data = self.file.read(4)
if len(data) != 4:
raise EOFError()
size = struct.unpack('<i', data)[0]
data = self.file.read(size)
header = data_pb.Header()
header.ParseFromString(data)
if uuid and uuid != header.uuid:
raise Exception('UUID mismatch')
def read(self):
pass
def get(self, index):
# Change position
cur_index = self.tell()
self.seek(index)
# Get window
window = self.next()
# Restore
self.seek(cur_index)
#
return window
def __next(self, skip=True):
# Get message size
data = self.file.read(4)
# Check if end of file
if len(data) != 4:
self.eof = True
raise StopIteration()
# Add to message position list
if self.position == len(self.messages):
self.messages.append(self.file.tell() - 4)
#
self.position += 1
# Parse size
size = struct.unpack('<i', data)[0]
if skip:
self.file.seek(size, os.SEEK_CUR)
else:
# Read message
data = self.file.read(size)
# Parse message
window = data_pb.WindowData()
window.ParseFromString(data)
return window
def __read_all(self):
if not self.eof:
current_mpos, current_fpos = self.position, self.file.tell()
try:
# Go to current end
if len(self.messages) != 0:
current_end = len(self.messages) - 1
self.file.seek(self.messages[current_end])
# Find end
while True:
self.__next(skip=True)
except StopIteration:
self.position = current_mpos
self.file.seek(current_fpos)
def next(self):
return self.__next(skip=False)
def seek(self, position, whence=os.SEEK_SET):
if self.position == position:
return
if whence == os.SEEK_SET:
pass
elif whence == os.SEEK_CUR:
self.seek(self.position + position)
return
elif whence == os.SEEK_END:
if position > 0:
raise IndexError()
self.__read_all()
self.seek(len(self.messages) - 1 + position)
return
else:
pass
# If we know the offset already
if position < len(self.messages):
self.position = position
self.file.seek(self.messages[self.position])
# iterate through messages until we reach right offset
else:
if len(self.messages) > 0:
self.position = len(self.messages) - 1
self.file.seek(self.messages[self.position])
position = position - self.position
try:
while position > 0:
self.__next(skip=True)
position -= 1
except StopIteration:
raise IndexError()
def tell(self):
return self.position
class DataWriter:
'''
Read scarphase protobuf data file
<size of header>
<header>
<size of window 0>
<window 0>
<size of window 1>
<window 1>
...
'''
def __init__(self, filename, uuid=None):
self.open(filename, uuid)
def open(self, filename, uuid=None):
self.file = open(filename, 'wb')
self.uuid = uuid
header = data_pb.Header()
header.uuid = uuid
data = header.SerializeToString()
self.file.write(struct.pack('<i', len(data)))
self.file.write(data)
self.file.flush()
def write(self, window):
data = window.SerializeToString()
self.file.write(struct.pack('<i', len(data)))
self.file.write(data)
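# Minimal round-trip sketch (the file name and uuid are made-up examples):
#
#   writer = DataWriter('profile.data', uuid='1234')
#   writer.write(window)              # window is a data_pb.WindowData
#
#   reader = DataReader('profile.data', uuid='1234')
#   first = reader.get(0)             # random access by window index
#   for w in reader:                  # or iterate sequentially
#       pass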
| {
"content_hash": "f8763096cbc1d1b35c3d88ba517aa2bd",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 72,
"avg_line_length": 23.27,
"alnum_prop": 0.5030081650193382,
"repo_name": "uart/scarphase",
"id": "93b55d1552d34f88cd30890584598228fa17fba6",
"size": "6240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyscarphase/proto/data.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "69320"
},
{
"name": "CMake",
"bytes": "6608"
},
{
"name": "Protocol Buffer",
"bytes": "4528"
},
{
"name": "Python",
"bytes": "90992"
}
],
"symlink_target": ""
} |
import datetime as dt
import logging
import sys
from ..remittance import RemittanceException
def today_as_string():
now = dt.datetime.now()
return dt.datetime.strftime(now, '%Y-%m-%d')
class SageImportFile:
"""This is the code that glues a remittance document to a SageImport file. By using the methods you can
create a file ready for Sage Import.
Typically it will run all on the create of the class"""
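    # Hedged construction sketch (the argument values are assumptions, not real
    # data):
    #
    #   sif = SageImportFile(remittance, sqldata, name='remittance.csv',
    #                        file_dir='/tmp/sage', auto_run=True)
    #
    # With auto_run=True the constructor starts the file, parses the remittance
    # items into transactions and closes the file.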
def __init__(self, remittance, sqldata, name='', file_dir='', auto_run=True, modify_name_if_exists=True):
"""Typically all the code and action of creating the file is driven from constructor."""
self.sqldata = sqldata
self.logger = logging.getLogger('SageImportFile')
self.logger.info('-- Starting SageImportFile setup')
self.remittance = remittance
self.tran_date = self.remittance.payment_date
self.modify_name_if_exists = modify_name_if_exists
s = self.remittance.supplier.upper()
if s == 'FENWICK':
self.bank = '1262'
elif s == 'AIS':
self.bank = '1260'
else: # Generic contra account
self.logger.info('** Warning Supplier = {}'.format(s))
self.bank = '1205'
self.ran_ok = True
self.__running_bank_balance = 0
self.logger.info('-- Ending SageImportFile setup')
if auto_run:
self.start_file(name, file_dir)
try:
# Do it in this order so that the accumulated discount in the returns can be net off against all the sales.
self.parse()
finally:
self.close_file()
def check_for_transactions_on_this_day(self, tran_type, account):
test3 = self.sqldata[self.sqldata['TYPE'] == tran_type]
test2 = test3[test3['ACCOUNT_REF'] == account]
test = test2[test2['DATE'] == self.tran_date]
l = len(test)
if l == 0:
comment = 'Found no transactions on {} .'.format(
self.tran_date.strftime('%Y-%m-%d'), )
return False, 0, comment
else:
tn = test[:1]
try:
comment = 'Found {} transactions on {}. First was on {}: details {}: for {}.'.format(
l, self.tran_date.strftime('%Y-%m-%d'),
list(tn['DATE'])[0].strftime('%Y-%m-%d'),
list(tn['DETAILS'])[0],
list(tn['AMOUNT'])[0], )
except:
comment = 'Error looking for transaction see log'
self.logger.info('Exception {},\nProblem with test \n{},\ntran_date = {}'.format(sys.exc_info()[0],
test, self.tran_date))
return True, 0, comment
def check_accruals_for_stop_note(self, stop_note):
test3 = self.sqldata[self.sqldata['TYPE'] == 'JD']
test2 = test3[test3['ACCOUNT_REF'] == 2109]
test = test2[test2['DETAILS'].str.contains(stop_note)]
l = len(test)
if l == 0:
comment = 'Found no transactions to reverse for reference {} .'.format(stop_note)
return True, 0, comment
else:
tn = test[:1]
comment = 'Stopnote {}. Found {} transactions on {}. First was on {}: details {}: for {}.'.format(
stop_note, l, self.tran_date.strftime('%Y-%m-%d'),
list(tn['DATE'])[0].strftime('%Y-%m-%d'),
list(tn['DETAILS'])[0],
list(tn['AMOUNT'])[0], )
return False, 0, comment
def stopnote_check_write_row(self, tran_type, nominal, reference,
date, details, net_amount,
tax_code, account='', tax_amount=0.0,
exchange_rate=1, extra_ref='', user_name='H3', comment='', stop_note=''):
# Todo this should perhaps move to pySage50
r = self.check_accruals_for_stop_note(stop_note)
if r[0]:
# Error There are transactions when there should be none
self.ran_ok = False
tran_type = 'xx' + tran_type
comment = comment + ' ' + r[2]
else:
comment = comment + ' :Checked ' + r[2]
self.si.write_row(tran_type, nominal, reference,
date, details, net_amount,
tax_code, account=account, tax_amount=tax_amount,
exchange_rate=exchange_rate, extra_ref=extra_ref, user_name=user_name, comment=comment)
def start_file(self, name, file_dir):
self.si = self.sage_import = SageImport(home_directory=file_dir)
self.si.start_file(name, modify_name_if_exists=self.modify_name_if_exists)
def close_file(self):
self.si.close_file()
@property
def running_bank_balance(self):
return self.__running_bank_balance
@running_bank_balance.setter
def running_bank_balance(self, new_balance):
# For debugging
self.logger.info('Change in running bank balance = {:,}'.format(new_balance - self.__running_bank_balance))
self.__running_bank_balance = new_balance
def parse(self):
# Create all the transactions into the group account
# Create running balance
self.running_bank_balance = 0
for i in self.remittance.items:
try:
i.create_transactions(self)
except RemittanceException as err:
self.si.write_error_row("**Exception raised during item: {}".format(err))
self.logger.info('Calculated running bank balance = {}'.format(self.running_bank_balance))
try:
self.remittance.create_transactions(self) # create final transaction eg moving bank balance
except RemittanceException as err:
self.si.write_error_row("**Exception raised during creating final transaction: {}".format(err))
| {
"content_hash": "0bae89ed23b1521328afeb3fe522b1ab",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 123,
"avg_line_length": 43.97794117647059,
"alnum_prop": 0.559438221033272,
"repo_name": "drummonds/remittance",
"id": "57e7c9ec478b5bf23ba76a6694c17e996547e779",
"size": "5981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "remittance/ais/sage_import.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2298"
},
{
"name": "Python",
"bytes": "79736"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Image(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout"
_path_str = "layout.image"
_valid_props = {
"layer",
"name",
"opacity",
"sizex",
"sizey",
"sizing",
"source",
"templateitemname",
"visible",
"x",
"xanchor",
"xref",
"y",
"yanchor",
"yref",
}
# layer
# -----
@property
def layer(self):
"""
Specifies whether images are drawn below or above traces. When
`xref` and `yref` are both set to `paper`, image is drawn below
the entire plot area.
The 'layer' property is an enumeration that may be specified as:
- One of the following enumeration values:
['below', 'above']
Returns
-------
Any
"""
return self["layer"]
@layer.setter
def layer(self, val):
self["layer"] = val
# name
# ----
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the image.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# sizex
# -----
@property
def sizex(self):
"""
Sets the image container size horizontally. The image will be
sized based on the `position` value. When `xref` is set to
`paper`, units are sized relative to the plot width.
The 'sizex' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["sizex"]
@sizex.setter
def sizex(self, val):
self["sizex"] = val
# sizey
# -----
@property
def sizey(self):
"""
Sets the image container size vertically. The image will be
sized based on the `position` value. When `yref` is set to
`paper`, units are sized relative to the plot height.
The 'sizey' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["sizey"]
@sizey.setter
def sizey(self, val):
self["sizey"] = val
# sizing
# ------
@property
def sizing(self):
"""
Specifies which dimension of the image to constrain.
The 'sizing' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fill', 'contain', 'stretch']
Returns
-------
Any
"""
return self["sizing"]
@sizing.setter
def sizing(self, val):
self["sizing"] = val
# source
# ------
@property
def source(self):
"""
Specifies the URL of the image to be used. The URL must be
accessible from the domain where the plot code is run, and can
be either relative or absolute.
The 'source' property is an image URI that may be specified as:
- A remote image URI string
(e.g. 'http://www.somewhere.com/image.png')
- A data URI image string
(e.g. 'data:image/png;base64,iVBORw0KGgoAAAANSU')
- A PIL.Image.Image object which will be immediately converted
to a data URI image string
See http://pillow.readthedocs.io/en/latest/reference/Image.html
Returns
-------
str
"""
return self["source"]
@source.setter
def source(self, val):
self["source"] = val
# templateitemname
# ----------------
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this image is visible.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# x
# -
@property
def x(self):
"""
Sets the image's x position. When `xref` is set to `paper`,
units are sized relative to the plot height. See `xref` for
more info
The 'x' property accepts values of any type
Returns
-------
Any
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets the anchor for the x position
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
# xref
# ----
@property
def xref(self):
"""
        Sets the image's x coordinate axis. If set to an x axis id
(e.g. "x" or "x2"), the `x` position refers to an x data
coordinate If set to "paper", the `x` position refers to the
distance from the left of plot in normalized coordinates where
0 (1) corresponds to the left (right).
The 'xref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['paper']
- A string that matches one of the following regular expressions:
['^x([2-9]|[1-9][0-9]+)?$']
Returns
-------
Any
"""
return self["xref"]
@xref.setter
def xref(self, val):
self["xref"] = val
# y
# -
@property
def y(self):
"""
Sets the image's y position. When `yref` is set to `paper`,
units are sized relative to the plot height. See `yref` for
more info
The 'y' property accepts values of any type
Returns
-------
Any
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
"""
Sets the anchor for the y position.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
# yref
# ----
@property
def yref(self):
"""
        Sets the image's y coordinate axis. If set to a y axis id
(e.g. "y" or "y2"), the `y` position refers to a y data
coordinate. If set to "paper", the `y` position refers to the
distance from the bottom of the plot in normalized coordinates
where 0 (1) corresponds to the bottom (top).
The 'yref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['paper']
- A string that matches one of the following regular expressions:
['^y([2-9]|[1-9][0-9]+)?$']
Returns
-------
Any
"""
return self["yref"]
@yref.setter
def yref(self, val):
self["yref"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
layer
Specifies whether images are drawn below or above
traces. When `xref` and `yref` are both set to `paper`,
image is drawn below the entire plot area.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
opacity
Sets the opacity of the image.
sizex
Sets the image container size horizontally. The image
will be sized based on the `position` value. When
`xref` is set to `paper`, units are sized relative to
the plot width.
sizey
Sets the image container size vertically. The image
will be sized based on the `position` value. When
`yref` is set to `paper`, units are sized relative to
the plot height.
sizing
Specifies which dimension of the image to constrain.
source
Specifies the URL of the image to be used. The URL must
be accessible from the domain where the plot code is
run, and can be either relative or absolute.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
visible
Determines whether or not this image is visible.
x
Sets the image's x position. When `xref` is set to
`paper`, units are sized relative to the plot height.
See `xref` for more info
xanchor
Sets the anchor for the x position
xref
            Sets the image's x coordinate axis. If set to an x axis
id (e.g. "x" or "x2"), the `x` position refers to an x
data coordinate If set to "paper", the `x` position
refers to the distance from the left of plot in
normalized coordinates where 0 (1) corresponds to the
left (right).
y
Sets the image's y position. When `yref` is set to
`paper`, units are sized relative to the plot height.
See `yref` for more info
yanchor
Sets the anchor for the y position.
yref
            Sets the image's y coordinate axis. If set to a y axis
id (e.g. "y" or "y2"), the `y` position refers to a y
data coordinate. If set to "paper", the `y` position
refers to the distance from the bottom of the plot in
normalized coordinates where 0 (1) corresponds to the
bottom (top).
"""
def __init__(
self,
arg=None,
layer=None,
name=None,
opacity=None,
sizex=None,
sizey=None,
sizing=None,
source=None,
templateitemname=None,
visible=None,
x=None,
xanchor=None,
xref=None,
y=None,
yanchor=None,
yref=None,
**kwargs
):
"""
Construct a new Image object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.Image`
layer
Specifies whether images are drawn below or above
traces. When `xref` and `yref` are both set to `paper`,
image is drawn below the entire plot area.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
opacity
Sets the opacity of the image.
sizex
Sets the image container size horizontally. The image
will be sized based on the `position` value. When
`xref` is set to `paper`, units are sized relative to
the plot width.
sizey
Sets the image container size vertically. The image
will be sized based on the `position` value. When
`yref` is set to `paper`, units are sized relative to
the plot height.
sizing
Specifies which dimension of the image to constrain.
source
Specifies the URL of the image to be used. The URL must
be accessible from the domain where the plot code is
run, and can be either relative or absolute.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
visible
Determines whether or not this image is visible.
x
Sets the image's x position. When `xref` is set to
`paper`, units are sized relative to the plot height.
See `xref` for more info
xanchor
Sets the anchor for the x position
xref
            Sets the image's x coordinate axis. If set to an x axis
id (e.g. "x" or "x2"), the `x` position refers to an x
data coordinate If set to "paper", the `x` position
refers to the distance from the left of plot in
normalized coordinates where 0 (1) corresponds to the
left (right).
y
Sets the image's y position. When `yref` is set to
`paper`, units are sized relative to the plot height.
See `yref` for more info
yanchor
Sets the anchor for the y position.
yref
            Sets the image's y coordinate axis. If set to a y axis
id (e.g. "y" or "y2"), the `y` position refers to a y
data coordinate. If set to "paper", the `y` position
refers to the distance from the bottom of the plot in
normalized coordinates where 0 (1) corresponds to the
bottom (top).
Returns
-------
Image
"""
super(Image, self).__init__("images")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.Image
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Image`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("layer", None)
_v = layer if layer is not None else _v
if _v is not None:
self["layer"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("sizex", None)
_v = sizex if sizex is not None else _v
if _v is not None:
self["sizex"] = _v
_v = arg.pop("sizey", None)
_v = sizey if sizey is not None else _v
if _v is not None:
self["sizey"] = _v
_v = arg.pop("sizing", None)
_v = sizing if sizing is not None else _v
if _v is not None:
self["sizing"] = _v
_v = arg.pop("source", None)
_v = source if source is not None else _v
if _v is not None:
self["source"] = _v
_v = arg.pop("templateitemname", None)
_v = templateitemname if templateitemname is not None else _v
if _v is not None:
self["templateitemname"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xanchor", None)
_v = xanchor if xanchor is not None else _v
if _v is not None:
self["xanchor"] = _v
_v = arg.pop("xref", None)
_v = xref if xref is not None else _v
if _v is not None:
self["xref"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yanchor", None)
_v = yanchor if yanchor is not None else _v
if _v is not None:
self["yanchor"] = _v
_v = arg.pop("yref", None)
_v = yref if yref is not None else _v
if _v is not None:
self["yref"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
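# Hedged usage sketch (the URL and layout values are illustrative only):
#
#   import plotly.graph_objects as go
#
#   fig = go.Figure()
#   fig.update_layout(images=[go.layout.Image(
#       source="https://example.com/logo.png",
#       xref="paper", yref="paper",
#       x=0, y=1, sizex=0.2, sizey=0.2,
#       xanchor="left", yanchor="top",
#       layer="above")])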
| {
"content_hash": "8fd4ff91d19e26343142d552683b798f",
"timestamp": "",
"source": "github",
"line_count": 652,
"max_line_length": 84,
"avg_line_length": 31.302147239263803,
"alnum_prop": 0.5378019501200451,
"repo_name": "plotly/python-api",
"id": "af443856fc8fae8e4c2f4998093648b7b0180752",
"size": "20409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/layout/_image.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from m5.objects import *
from Benchmarks import *
from m5.util import *
class CowIdeDisk(IdeDisk):
image = CowDiskImage(child=RawDiskImage(read_only=True),
read_only=False)
def childImage(self, ci):
self.image.child.image_file = ci
class MemBus(CoherentXBar):
badaddr_responder = BadAddr()
default = Self.badaddr_responder.pio
def makeLinuxAlphaSystem(mem_mode, mdesc = None, ruby = False):
class BaseTsunami(Tsunami):
ethernet = NSGigE(pci_bus=0, pci_dev=1, pci_func=0)
ide = IdeController(disks=[Parent.disk0, Parent.disk2],
pci_func=0, pci_dev=0, pci_bus=0)
self = LinuxAlphaSystem()
if not mdesc:
# generic system
mdesc = SysConfig()
self.readfile = mdesc.script()
self.tsunami = BaseTsunami()
# Create the io bus to connect all device ports
self.iobus = NoncoherentXBar()
self.tsunami.attachIO(self.iobus)
self.tsunami.ide.pio = self.iobus.master
self.tsunami.ide.config = self.iobus.master
self.tsunami.ethernet.pio = self.iobus.master
self.tsunami.ethernet.config = self.iobus.master
if ruby:
# Store the dma devices for later connection to dma ruby ports.
# Append an underscore to dma_ports to avoid the SimObjectVector check.
self._dma_ports = [self.tsunami.ide.dma, self.tsunami.ethernet.dma]
else:
self.membus = MemBus()
# By default the bridge responds to all addresses above the I/O
# base address (including the PCI config space)
IO_address_space_base = 0x80000000000
self.bridge = Bridge(delay='50ns',
ranges = [AddrRange(IO_address_space_base, Addr.max)])
self.bridge.master = self.iobus.slave
self.bridge.slave = self.membus.master
self.tsunami.ide.dma = self.iobus.slave
self.tsunami.ethernet.dma = self.iobus.slave
self.system_port = self.membus.slave
self.mem_ranges = [AddrRange(mdesc.mem())]
self.disk0 = CowIdeDisk(driveID='master')
self.disk2 = CowIdeDisk(driveID='master')
self.disk0.childImage(mdesc.disk())
self.disk2.childImage(disk('linux-bigswap2.img'))
self.simple_disk = SimpleDisk(disk=RawDiskImage(image_file = mdesc.disk(),
read_only = True))
self.intrctrl = IntrControl()
self.mem_mode = mem_mode
self.terminal = Terminal()
self.kernel = binary('vmlinux')
self.pal = binary('ts_osfpal')
self.console = binary('console')
self.boot_osflags = 'root=/dev/hda1 console=ttyS0'
return self
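# Hedged example of how a top-level gem5 config script might call the helper
# above (the mem_mode string and disk image name are assumptions):
#
#   test_sys = makeLinuxAlphaSystem('timing', SysConfig(disk='linux-latest.img'))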
def makeSparcSystem(mem_mode, mdesc = None):
# Constants from iob.cc and uart8250.cc
iob_man_addr = 0x9800000000
uart_pio_size = 8
class CowMmDisk(MmDisk):
image = CowDiskImage(child=RawDiskImage(read_only=True),
read_only=False)
def childImage(self, ci):
self.image.child.image_file = ci
self = SparcSystem()
if not mdesc:
# generic system
mdesc = SysConfig()
self.readfile = mdesc.script()
self.iobus = NoncoherentXBar()
self.membus = MemBus()
self.bridge = Bridge(delay='50ns')
self.t1000 = T1000()
self.t1000.attachOnChipIO(self.membus)
self.t1000.attachIO(self.iobus)
self.mem_ranges = [AddrRange(Addr('1MB'), size = '64MB'),
AddrRange(Addr('2GB'), size ='256MB')]
self.bridge.master = self.iobus.slave
self.bridge.slave = self.membus.master
self.rom.port = self.membus.master
self.nvram.port = self.membus.master
self.hypervisor_desc.port = self.membus.master
self.partition_desc.port = self.membus.master
self.intrctrl = IntrControl()
self.disk0 = CowMmDisk()
self.disk0.childImage(disk('disk.s10hw2'))
self.disk0.pio = self.iobus.master
# The puart0 and hvuart are placed on the IO bus, so create ranges
# for them. The remaining IO range is rather fragmented, so poke
# holes for the iob and partition descriptors etc.
self.bridge.ranges = \
[
AddrRange(self.t1000.puart0.pio_addr,
self.t1000.puart0.pio_addr + uart_pio_size - 1),
AddrRange(self.disk0.pio_addr,
self.t1000.fake_jbi.pio_addr +
self.t1000.fake_jbi.pio_size - 1),
AddrRange(self.t1000.fake_clk.pio_addr,
iob_man_addr - 1),
AddrRange(self.t1000.fake_l2_1.pio_addr,
self.t1000.fake_ssi.pio_addr +
self.t1000.fake_ssi.pio_size - 1),
AddrRange(self.t1000.hvuart.pio_addr,
self.t1000.hvuart.pio_addr + uart_pio_size - 1)
]
self.reset_bin = binary('reset_new.bin')
self.hypervisor_bin = binary('q_new.bin')
self.openboot_bin = binary('openboot_new.bin')
self.nvram_bin = binary('nvram1')
self.hypervisor_desc_bin = binary('1up-hv.bin')
self.partition_desc_bin = binary('1up-md.bin')
self.system_port = self.membus.slave
return self
def makeArmSystem(mem_mode, machine_type, mdesc = None,
dtb_filename = None, bare_metal=False,
sdcard_image = "sdcard-1g-mxplayer.img"):
assert machine_type
if bare_metal:
self = ArmSystem()
else:
self = LinuxArmSystem()
if not mdesc:
# generic system
mdesc = SysConfig()
self.readfile = mdesc.script()
self.iobus = NoncoherentXBar()
self.membus = MemBus()
self.membus.badaddr_responder.warn_access = "warn"
self.bridge = Bridge(delay='50ns')
self.bridge.master = self.iobus.slave
self.bridge.slave = self.membus.master
self.mem_mode = mem_mode
if machine_type == "RealView_PBX":
self.realview = RealViewPBX()
elif machine_type == "RealView_EB":
self.realview = RealViewEB()
elif machine_type == "VExpress_ELT":
self.realview = VExpress_ELT()
elif machine_type == "VExpress_EMM":
self.realview = VExpress_EMM()
elif machine_type == "VExpress_EMM64":
self.realview = VExpress_EMM64()
else:
print "Unknown Machine Type"
sys.exit(1)
self.cf0 = CowIdeDisk(driveID='master')
self.cf2 = CowIdeDisk(driveID='master')
self.cf0.childImage(mdesc.disk())
self.cf2.childImage(disk(sdcard_image))
# Attach any PCI devices this platform supports
self.realview.attachPciDevices()
# default to an IDE controller rather than a CF one
try:
self.realview.ide.disks = [self.cf0, self.cf2]
except:
self.realview.cf_ctrl.disks = [self.cf0, self.cf2]
if bare_metal:
# EOT character on UART will end the simulation
self.realview.uart.end_on_eot = True
self.mem_ranges = [AddrRange(self.realview.mem_start_addr,
size = mdesc.mem())]
else:
if machine_type == "VExpress_EMM64":
self.kernel = binary('vmlinux-3.16-aarch64-vexpress-emm64-pcie')
elif machine_type == "VExpress_EMM":
self.kernel = binary('vmlinux-3.3-arm-vexpress-emm-pcie')
else:
self.kernel = binary('vmlinux.arm.smp.fb.2.6.38.8')
if dtb_filename:
self.dtb_filename = binary(dtb_filename)
self.machine_type = machine_type
# Ensure that writes to the UART actually go out early in the boot
boot_flags = 'earlyprintk=pl011,0x1c090000 console=ttyAMA0 ' + \
'lpj=19988480 norandmaps rw loglevel=8 ' + \
'mem=%s root=/dev/sda1' % mdesc.mem()
self.mem_ranges = []
size_remain = long(Addr(mdesc.mem()))
for region in self.realview._mem_regions:
if size_remain > long(region[1]):
self.mem_ranges.append(AddrRange(region[0], size=region[1]))
size_remain = size_remain - long(region[1])
else:
self.mem_ranges.append(AddrRange(region[0], size=size_remain))
size_remain = 0
break
warn("Memory size specified spans more than one region. Creating" \
" another memory controller for that range.")
if size_remain > 0:
fatal("The currently selected ARM platforms doesn't support" \
" the amount of DRAM you've selected. Please try" \
" another platform")
self.realview.setupBootLoader(self.membus, self, binary)
self.gic_cpu_addr = self.realview.gic.cpu_addr
self.flags_addr = self.realview.realview_io.pio_addr + 0x30
if mdesc.disk().lower().count('android'):
boot_flags += " init=/init "
self.boot_osflags = boot_flags
self.realview.attachOnChipIO(self.membus, self.bridge)
self.realview.attachIO(self.iobus)
self.intrctrl = IntrControl()
self.terminal = Terminal()
self.vncserver = VncServer()
self.system_port = self.membus.slave
return self
def makeLinuxMipsSystem(mem_mode, mdesc = None):
class BaseMalta(Malta):
ethernet = NSGigE(pci_bus=0, pci_dev=1, pci_func=0)
ide = IdeController(disks=[Parent.disk0, Parent.disk2],
pci_func=0, pci_dev=0, pci_bus=0)
self = LinuxMipsSystem()
if not mdesc:
# generic system
mdesc = SysConfig()
self.readfile = mdesc.script()
self.iobus = NoncoherentXBar()
self.membus = MemBus()
self.bridge = Bridge(delay='50ns')
self.mem_ranges = [AddrRange('1GB')]
self.bridge.master = self.iobus.slave
self.bridge.slave = self.membus.master
self.disk0 = CowIdeDisk(driveID='master')
self.disk2 = CowIdeDisk(driveID='master')
self.disk0.childImage(mdesc.disk())
self.disk2.childImage(disk('linux-bigswap2.img'))
self.malta = BaseMalta()
self.malta.attachIO(self.iobus)
self.malta.ide.pio = self.iobus.master
self.malta.ide.config = self.iobus.master
self.malta.ide.dma = self.iobus.slave
self.malta.ethernet.pio = self.iobus.master
self.malta.ethernet.config = self.iobus.master
self.malta.ethernet.dma = self.iobus.slave
self.simple_disk = SimpleDisk(disk=RawDiskImage(image_file = mdesc.disk(),
read_only = True))
self.intrctrl = IntrControl()
self.mem_mode = mem_mode
self.terminal = Terminal()
self.kernel = binary('mips/vmlinux')
self.console = binary('mips/console')
self.boot_osflags = 'root=/dev/hda1 console=ttyS0'
self.system_port = self.membus.slave
return self
def x86IOAddress(port):
IO_address_space_base = 0x8000000000000000
return IO_address_space_base + port
def connectX86ClassicSystem(x86_sys, numCPUs):
# Constants similar to x86_traits.hh
IO_address_space_base = 0x8000000000000000
pci_config_address_space_base = 0xc000000000000000
interrupts_address_space_base = 0xa000000000000000
    APIC_range_size = 1 << 12
x86_sys.membus = MemBus()
# North Bridge
x86_sys.iobus = NoncoherentXBar()
x86_sys.bridge = Bridge(delay='50ns')
x86_sys.bridge.master = x86_sys.iobus.slave
x86_sys.bridge.slave = x86_sys.membus.master
# Allow the bridge to pass through the IO APIC (two pages),
# everything in the IO address range up to the local APIC, and
# then the entire PCI address space and beyond
x86_sys.bridge.ranges = \
[
AddrRange(x86_sys.pc.south_bridge.io_apic.pio_addr,
x86_sys.pc.south_bridge.io_apic.pio_addr +
APIC_range_size - 1),
AddrRange(IO_address_space_base,
interrupts_address_space_base - 1),
AddrRange(pci_config_address_space_base,
Addr.max)
]
# Create a bridge from the IO bus to the memory bus to allow access to
# the local APIC (two pages)
x86_sys.apicbridge = Bridge(delay='50ns')
x86_sys.apicbridge.slave = x86_sys.iobus.master
x86_sys.apicbridge.master = x86_sys.membus.slave
x86_sys.apicbridge.ranges = [AddrRange(interrupts_address_space_base,
interrupts_address_space_base +
numCPUs * APIC_range_size
- 1)]
# connect the io bus
x86_sys.pc.attachIO(x86_sys.iobus)
x86_sys.system_port = x86_sys.membus.slave
def connectX86RubySystem(x86_sys):
# North Bridge
x86_sys.iobus = NoncoherentXBar()
# add the ide to the list of dma devices that later need to attach to
# dma controllers
x86_sys._dma_ports = [x86_sys.pc.south_bridge.ide.dma]
x86_sys.pc.attachIO(x86_sys.iobus, x86_sys._dma_ports)
def makeX86System(mem_mode, numCPUs = 1, mdesc = None, self = None,
Ruby = False):
if self == None:
self = X86System()
if not mdesc:
# generic system
mdesc = SysConfig()
self.readfile = mdesc.script()
self.mem_mode = mem_mode
# Physical memory
# On the PC platform, the memory region 0xC0000000-0xFFFFFFFF is reserved
# for various devices. Hence, if the physical memory size is greater than
# 3GB, we need to split it into two parts.
excess_mem_size = \
convert.toMemorySize(mdesc.mem()) - convert.toMemorySize('3GB')
if excess_mem_size <= 0:
self.mem_ranges = [AddrRange(mdesc.mem())]
else:
warn("Physical memory size specified is %s which is greater than " \
"3GB. Twice the number of memory controllers would be " \
"created." % (mdesc.mem()))
self.mem_ranges = [AddrRange('3GB'),
AddrRange(Addr('4GB'), size = excess_mem_size)]
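        # Illustrative arithmetic (assumed input): with mdesc.mem() == '4GB',
        # excess_mem_size is 1GB, so the ranges become [0, 3GB) and [4GB, 5GB).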
# Platform
self.pc = Pc()
# Create and connect the busses required by each memory system
if Ruby:
connectX86RubySystem(self)
else:
connectX86ClassicSystem(self, numCPUs)
self.intrctrl = IntrControl()
# Disks
disk0 = CowIdeDisk(driveID='master')
disk2 = CowIdeDisk(driveID='master')
disk0.childImage(mdesc.disk())
disk2.childImage(disk('linux-bigswap2.img'))
self.pc.south_bridge.ide.disks = [disk0, disk2]
# Add in a Bios information structure.
structures = [X86SMBiosBiosInformation()]
self.smbios_table.structures = structures
# Set up the Intel MP table
base_entries = []
ext_entries = []
for i in xrange(numCPUs):
bp = X86IntelMPProcessor(
local_apic_id = i,
local_apic_version = 0x14,
enable = True,
bootstrap = (i == 0))
base_entries.append(bp)
io_apic = X86IntelMPIOAPIC(
id = numCPUs,
version = 0x11,
enable = True,
address = 0xfec00000)
self.pc.south_bridge.io_apic.apic_id = io_apic.id
base_entries.append(io_apic)
isa_bus = X86IntelMPBus(bus_id = 0, bus_type='ISA')
base_entries.append(isa_bus)
pci_bus = X86IntelMPBus(bus_id = 1, bus_type='PCI')
base_entries.append(pci_bus)
connect_busses = X86IntelMPBusHierarchy(bus_id=0,
subtractive_decode=True, parent_bus=1)
ext_entries.append(connect_busses)
pci_dev4_inta = X86IntelMPIOIntAssignment(
interrupt_type = 'INT',
polarity = 'ConformPolarity',
trigger = 'ConformTrigger',
source_bus_id = 1,
source_bus_irq = 0 + (4 << 2),
dest_io_apic_id = io_apic.id,
dest_io_apic_intin = 16)
base_entries.append(pci_dev4_inta)
def assignISAInt(irq, apicPin):
assign_8259_to_apic = X86IntelMPIOIntAssignment(
interrupt_type = 'ExtInt',
polarity = 'ConformPolarity',
trigger = 'ConformTrigger',
source_bus_id = 0,
source_bus_irq = irq,
dest_io_apic_id = io_apic.id,
dest_io_apic_intin = 0)
base_entries.append(assign_8259_to_apic)
assign_to_apic = X86IntelMPIOIntAssignment(
interrupt_type = 'INT',
polarity = 'ConformPolarity',
trigger = 'ConformTrigger',
source_bus_id = 0,
source_bus_irq = irq,
dest_io_apic_id = io_apic.id,
dest_io_apic_intin = apicPin)
base_entries.append(assign_to_apic)
assignISAInt(0, 2)
assignISAInt(1, 1)
for i in range(3, 15):
assignISAInt(i, i)
self.intel_mp_table.base_entries = base_entries
self.intel_mp_table.ext_entries = ext_entries
def makeLinuxX86System(mem_mode, numCPUs = 1, mdesc = None,
Ruby = False):
self = LinuxX86System()
# Build up the x86 system and then specialize it for Linux
makeX86System(mem_mode, numCPUs, mdesc, self, Ruby)
# We assume below that there's at least 1MB of memory. We'll require 2
# just to avoid corner cases.
phys_mem_size = sum(map(lambda r: r.size(), self.mem_ranges))
assert(phys_mem_size >= 0x200000)
assert(len(self.mem_ranges) <= 2)
entries = \
[
# Mark the first megabyte of memory as reserved
X86E820Entry(addr = 0, size = '639kB', range_type = 1),
X86E820Entry(addr = 0x9fc00, size = '385kB', range_type = 2),
# Mark the rest of physical memory as available
X86E820Entry(addr = 0x100000,
size = '%dB' % (self.mem_ranges[0].size() - 0x100000),
range_type = 1),
# Reserve the last 16kB of the 32-bit address space for the
# m5op interface
X86E820Entry(addr=0xFFFF0000, size='64kB', range_type=2),
]
# In case the physical memory is greater than 3GB, we split it into two
# parts and add a separate e820 entry for the second part. This entry
# starts at 0x100000000, which is the first address after the space
# reserved for devices.
if len(self.mem_ranges) == 2:
entries.append(X86E820Entry(addr = 0x100000000,
size = '%dB' % (self.mem_ranges[1].size()), range_type = 1))
self.e820_table.entries = entries
# Command line
self.boot_osflags = 'earlyprintk=ttyS0 console=ttyS0 lpj=7999923 ' + \
'root=/dev/hda1'
self.kernel = binary('x86_64-vmlinux-2.6.22.9')
return self
def makeDualRoot(full_system, testSystem, driveSystem, dumpfile):
self = Root(full_system = full_system)
self.testsys = testSystem
self.drivesys = driveSystem
self.etherlink = EtherLink()
if hasattr(testSystem, 'realview'):
self.etherlink.int0 = Parent.testsys.realview.ethernet.interface
self.etherlink.int1 = Parent.drivesys.realview.ethernet.interface
elif hasattr(testSystem, 'tsunami'):
self.etherlink.int0 = Parent.testsys.tsunami.ethernet.interface
self.etherlink.int1 = Parent.drivesys.tsunami.ethernet.interface
else:
fatal("Don't know how to connect these system together")
if dumpfile:
self.etherdump = EtherDump(file=dumpfile)
self.etherlink.dump = Parent.etherdump
return self
| {
"content_hash": "53ab9a26fd993b07874dbbca92f0422b",
"timestamp": "",
"source": "github",
"line_count": 524,
"max_line_length": 79,
"avg_line_length": 36.48664122137404,
"alnum_prop": 0.6175009153198389,
"repo_name": "lokeshjindal15/gem5_transform",
"id": "014849dab8f83cc1e726563ba4ed1c15dca1829e",
"size": "21365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "configs/common/FSConfig.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "239800"
},
{
"name": "C",
"bytes": "968181"
},
{
"name": "C++",
"bytes": "13895468"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "HTML",
"bytes": "136898"
},
{
"name": "Hack",
"bytes": "2489"
},
{
"name": "Java",
"bytes": "3096"
},
{
"name": "Makefile",
"bytes": "17848"
},
{
"name": "Perl",
"bytes": "107185"
},
{
"name": "Protocol Buffer",
"bytes": "3246"
},
{
"name": "Python",
"bytes": "3968190"
},
{
"name": "Shell",
"bytes": "65461"
},
{
"name": "Visual Basic",
"bytes": "2884"
}
],
"symlink_target": ""
} |
import json
import scrapy
class DocumentItem(scrapy.Item):
# define the fields for your item here like:
name = scrapy.Field()
filename = scrapy.Field()
link = scrapy.Field()
category = scrapy.Field()
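# Hypothetical usage from a spider callback (field values are made up):
#
#   item = DocumentItem()
#   item['name'] = 'Sample worksheet'
#   item['filename'] = 'worksheet.pdf'
#   item['link'] = response.url
#   item['category'] = 'math'
#   yield item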
| {
"content_hash": "a6f4b0a3952fc35ca4e73cd4ac0c00b9",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 48,
"avg_line_length": 20.272727272727273,
"alnum_prop": 0.6771300448430493,
"repo_name": "JayvicWen/Crawler",
"id": "f115d20c7de1b353e6f2f0cdb291fbeead96e48b",
"size": "375",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "xuexiku/xuexiku/items.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53968"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import argparse
import doctest
import fnmatch
import os
import os.path
import subprocess
import sys
import pickle
# Things to do
# ============
#
# * Cross-platform build support? How can I solve the different exe suffix of Windows and Linux?
#   Actually, the .exe suffix also works in Linux but is awful. Maybe the "virtual" targets come in
#   handy here. Perhaps we can have a "test.exe" target which will build the "test" file under
#   Posix and "test.exe" under Windows. Who knows...
# * Generate a clean script and keep it updated
# * External documentation using AsciiDoc
# * Concurrent build??
# * Study on absolute paths
# {{{ Common data structures
# ==========================
class RedoException(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
class Graph(object):
"""
This class reprents a DAG where nodes are target
and arcs connect to targest t1->t2 if t1 depends
on t2
>>> g = Graph()
>>> g.store_dependency("a", "b")
>>> for x in g.get_transitive_dependencies("a"): print x
a
b
>>> g.store_dependency("c", "d")
>>> for x in g.get_transitive_dependencies("c"): print x
c
d
>>> g.store_dependency("c", "d")
>>> for x in g.get_transitive_dependencies("c"): print x
c
d
>>> g.clear_dependency_info_for("c")
>>> for x in g.get_transitive_dependencies("c"): print x
c
"""
def __init__(self):
self.store = {}
self.node_assoclist = {}
self.name_assoclist = {}
def _ensure_node(self, t):
t_idx = self.node_assoclist.get(t)
if t_idx == None:
t_idx = len(self.node_assoclist)
self.node_assoclist[t] = t_idx
self.name_assoclist[t_idx] = t
return t_idx
def store_dependency(self, t1, t2):
"""
        This method appends a dependency from t1 to t2
        if the dependency doesn't already exist
"""
idx_t1 = self._ensure_node(t1)
idx_t2 = self._ensure_node(t2)
deplist = self.store.get(idx_t1)
if deplist==None:
self.store[idx_t1]=[idx_t2]
else:
if idx_t2 not in deplist:
deplist.append(idx_t2)
def clear_dependency_info_for(self, t):
"""
This method will remove all the arcs from "t"
"""
idx_t = self._ensure_node(t)
if idx_t in self.store:
self.store[idx_t]=[]
def get_transitive_dependencies(self, t):
"""
        This method will iterate over the graph and find
all the dependencies of the passed target
"""
t_idx = self._ensure_node(t)
to_check = [t_idx]
checked = []
while 1:
if len(to_check)==0: break
current = to_check[0]
to_check = to_check[1:]
            if current in checked: continue
            checked.append(current)
yield self.name_assoclist[current]
deplist = self.store.get(current)
if deplist!=None: to_check += deplist
def to_tgf(self, file):
"""
        This method writes the graph (nodes, then arcs)
        to the given file in TGF format
        """
for key in self.node_assoclist.keys():
print (self.node_assoclist[key], key, file=file)
print ("#", file=file)
for source in self.store.keys():
for dest in self.store[source]:
print (source,dest, file=file)
class FileCache(object):
"""
This class will contain the latest modification
time of files
"""
def __init__(self):
self.store = {}
self.changed_status = {}
def reset_changed_cache(self):
"""
Reset the changed files cache
"""
self.changed_status = {}
def stamp(self, fileName, fileType):
"""
Memorize the timestamp of a file
"""
self.store[fileName] = {"timestamp":os.path.getmtime(fileName), "fileType":fileType}
def is_changed(self, fileName):
"""
        Compare a file against its stored timestamp. Return
        True if it has changed or was never stamped,
        False otherwise.
"""
if not self.is_known(fileName): raise RedoException("I don't know this target: " + fileName)
if not os.path.exists(fileName): return True
if fileName in self.changed_status:
return self.changed_status[fileName]
mt = os.path.getmtime(fileName)
if not (fileName in self.store):
result = True
else:
result = mt!=self.store[fileName]["timestamp"]
self.changed_status[fileName] = result
return result
def is_known(self, fileName):
"""
Return true if this fileName is known in this
cache file
"""
status = (fileName in self.store)
return status
def get_type(self, fileName):
"""
Return the file type of the fileName passed.
If this file isn't in the store return None
"""
dict = self.store[fileName]
if dict!=None:
return dict["fileType"]
else:
return None
def get_destinations(self):
"""
        Iterate through the destinations
"""
for target in self.store.keys():
if self.store[target]["fileType"]=="d": yield target
def get_files(self):
"""
        Iterate through the filenames
"""
return self.store.keys()
def test_get_store(self):
return self.store
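# Minimal usage sketch for FileCache (the file name is hypothetical):
#
#   cache = FileCache()
#   cache.stamp("hello.o", "d")       # remember the current mtime of a destination
#   cache.is_changed("hello.o")       # False until the file is modified again
#   list(cache.get_destinations())    # -> ["hello.o"]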
# }}}
# {{{ This functions will find the correct script for a target
# ============================================================
def generate_script_for__basenames(baseName):
"""
This function will generate all the possible basenames
for one target.
>>> for x in generate_script_for__basenames("testing.c.o"): print x
testing.c.o.do
default.c.o.do
default.c.do
default.do
"""
l = baseName.split(".")
yield ".".join(l) + ".do"
l[0]="default"
for x in range(len(l),0,-1):
yield ".".join(l[0:x]) + ".do"
def generate_scripts_for(fileName):
"""
This function will generate all the possible script
names for a target
>>> for x in generate_scripts_for("a/b/c/d/testing.c.o"): print x
a/b/c/d/testing.c.o.do
a/b/c/d/default.c.o.do
a/b/c/d/default.c.do
a/b/c/d/default.do
a/b/c/testing.c.o.do
a/b/c/default.c.o.do
a/b/c/default.c.do
a/b/c/default.do
a/b/testing.c.o.do
a/b/default.c.o.do
a/b/default.c.do
a/b/default.do
a/testing.c.o.do
a/default.c.o.do
a/default.c.do
a/default.do
testing.c.o.do
default.c.o.do
default.c.do
default.do
"""
(directory, baseName) = os.path.split(fileName)
last_directory = directory
while 1:
for currBase in generate_script_for__basenames(baseName):
if last_directory=="":
yield currBase
else:
yield last_directory + "/" + currBase
(next_directory, name) = os.path.split(last_directory)
if next_directory==last_directory: break
last_directory = next_directory
def find_script_for(target):
tests = []
for x in generate_scripts_for(target):
tests.append(x)
if os.path.exists(x):
return x
msg = "Cannot find script for target " + target + "\n"
msg += "Tryed: \n" + "\n".join(tests)
raise RedoException(msg)
# }}}
# {{{ Logging commands
# ====================
# {{{ Logging subsystem
# ~~~~~~~~~~~~~~~~~~~~~
class Logging(object):
def __init__(self):
self.logging_clean = True
self.logging_cmd = False
self.logging_target = True
self.logging_debug = False
def configure_from_logging_level(self, loglevel):
self.logging_clean = False
self.logging_cmd = False
self.logging_target = False
self.logging_debug = False
if loglevel >=1:
self.logging_target = True
self.logging_clean = True
if loglevel >=2:
self.logging_cmd = True
if loglevel >=3:
self.logging_debug = True
def format_command(self, cmdArgs):
"""
Get the readable format of a command argument
"""
def format_arg(x):
if " " in x or "\"" in x:
return "\"" + x.replace("\"", "\\\"") + "\""
else:
return x
if type([])==type(cmdArgs):
verboseCmd = " ".join(map(format_arg, cmdArgs))
else:
verboseCmd = cmdArgs
return verboseCmd
def clean(self, target):
if not self.logging_clean: return
print ("Cleaning", target)
def command(self, cmdArgs):
if not self.logging_cmd: return
verboseCmd = self.format_command(cmdArgs)
print (verboseCmd)
def error(self, exc):
print ("ERROR: ", str(exc), file=sys.stderr)
def target(self, depth, name):
if not self.logging_target: return
print (" "*depth, name)
def debug(self, msg):
if not self.logging_debug: return
print (">",msg)
# }}}
# {{{ Current logging subsystem
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
_default_logging_subsystem = Logging()
def get_logging_subsystem():
return _default_logging_subsystem
# }}}
# }}}
# {{{ Utilities passed to scripts
# ===============================
class Utilities(object):
def __init__(self):
self.logging = get_logging_subsystem()
def parse_makefile_dependency(self, deps):
"""
Parse the passed string as a makefile
dependency. Useful for parsing the output
of "gcc -M". This function will return a list
of every dependent file
"""
# let's hope in the utf8 encoding
if type(deps)==type(b""): deps = deps.decode("utf-8")
deps = deps.split("\n")[1:]
deps_collection = []
for dep in deps:
dep = dep.strip()
if len(dep)>0:
if dep[-1]=="\\": dep = dep[0:-1]
dep = dep.strip()
if os.path.exists(dep):
deps_collection.append(dep)
return deps_collection
def parse_dmd_dependency_file(self, depFile):
"""
Read a depFile generated by dmd -deps=depFile
directive from the DMD2 compiler
"""
dipendenze = []
f = open(depFile)
for linea in f:
linea = linea.strip()
inizio = linea.find("(")
fine = linea.find(")")
if inizio==-1 or fine==-1:
                raise Exception(linea)
linea = linea[inizio+1:fine]
linea = linea.replace("\\\\", "\\")
if linea not in dipendenze:
dipendenze.append(linea)
return dipendenze
def parse_ocamldep_output(self, depOutput, extension):
"""
This function will parse the output of the OcamlDep -modules call.
For example (extension=".cmi"):
mytest.ml: One Two Three ---> ["one.cmi", "two.cmi", "three.cmi"]
"""
if type(depOutput)==type(b""): depOutput = depOutput.decode("utf-8")
if depOutput.find("\n")!=-1: depOutput = depOutput[0:depOutput.find("\n")].strip()
return list(map(lambda x: x.strip().lower() + extension, depOutput.split(" ")[1:]))
def find_files(self, directory, pattern):
"""
Kinda-glob but recursive
"""
for root, dirs, files in os.walk(directory):
for basename in files:
if fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename
def find_executable(self, executable, path=None):
"""
Try to find 'executable' in the directories listed in 'path' (a
string listing directories separated by 'os.pathsep'; defaults to
os.environ['PATH']). Returns the complete filename or None if not
found
"""
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
extlist = ['']
if os.name == 'os2':
(base, ext) = os.path.splitext(executable)
# executable files on OS/2 can have an arbitrary extension, but
# .exe is automatically appended if no dot is present in the name
if not ext:
executable = executable + ".exe"
elif sys.platform == 'win32':
pathext = os.environ['PATHEXT'].lower().split(os.pathsep)
(base, ext) = os.path.splitext(executable)
if ext.lower() not in pathext:
extlist = pathext
for ext in extlist:
execname = executable + ext
if os.path.isfile(execname):
return execname
else:
for p in paths:
f = os.path.join(p, execname)
if os.path.isfile(f):
return f
else:
return None
def cmd(self, args):
"""
Run a command. The command and the output will be
        shown only if the command fails
"""
self.logging.command(args)
try:
if type(args)==type([]):
errorcode = subprocess.call(args)
else:
errorcode = subprocess.call(args, shell=True)
except Exception as e:
raise RedoException(str(e))
if errorcode!=0:
self.logging.error(self.logging.format_command(args))
raise RedoException("compilation failed with exit code " + str(errorcode))
def cmd_output(self, args):
"""
Run a command and capture the stdout which will be
returned as a string
"""
self.logging.command(args)
try:
if type(args)==type([]):
return subprocess.check_output(args)
else:
return subprocess.check_output(args, shell=True)
except Exception as e:
raise RedoException(str(e))
# }}}
# {{{ Redo commands
# =================
class Redo(object):
def __init__(self):
self.graph = Graph()
self.file_cache = FileCache()
self.contexts = []
self.logging = get_logging_subsystem()
self.utils = Utilities()
self.built_targets = []
self._current_db_version = 1
# Read and write graph to file
# ----------------------------
def write_status_to_file(self, fileName):
"""
Write the current build status to a file
"""
self.file_cache.reset_changed_cache()
self.built_targets = []
f = open(fileName, "wb")
pickle.dump(self._current_db_version, f)
pickle.dump(self.graph, f)
pickle.dump(self.file_cache, f)
f.close()
def read_status_from_file(self, fileName):
"""
Read the current build status to a file
"""
f = open(fileName, "rb")
dbver = pickle.load(f)
if dbver!=self._current_db_version:
raise RedoException("Wrong _redo.db version. Please regenerate it from scratch")
self.graph = pickle.load(f)
self.file_cache = pickle.load(f)
f.close()
self.rootdir = os.path.dirname(fileName)
# Script execution and contexts
# -----------------------------
def _create_context(self, scriptName, targetName):
context = {"target":targetName,
"basename":os.path.splitext(targetName)[0],
"redo":self,
"scriptname":scriptName
}
return context
def _exec_script(self, scriptName, targetName):
(scriptPath, scriptBasename) = os.path.split(scriptName)
cwd = os.getcwd()
if scriptPath != "": os.chdir(scriptPath)
ctx = self._create_context(scriptName, targetName)
self.contexts.append(ctx)
self.logging.target(len(self.contexts), targetName)
try:
exec(compile(open(scriptBasename).read(), scriptBasename, 'exec'), ctx)
finally:
self.contexts.pop()
os.chdir(cwd)
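    # Sketch of a "default.c.o.do" build script (an assumption about typical
    # usage, not shipped with this file). The names below come from the
    # context dictionary built in _create_context() above:
    #
    #   redo.if_changed(basename + ".c")
    #   redo.utils.cmd(["gcc", "-c", basename + ".c", "-o", target])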
def _current_context(self):
return self.contexts[-1]
# Redo commands
# -------------
def redo(self, targetName):
"""
This function will always rebuild the target
"targetName"
"""
targetName = os.path.abspath(targetName)
if targetName not in self.built_targets:
scriptName = find_script_for(targetName)
self.file_cache.stamp(scriptName, "s")
self.graph.store_dependency(targetName, scriptName)
self._exec_script(scriptName, targetName)
self.built_targets.append(targetName)
if os.path.exists(targetName):
self.file_cache.stamp(targetName, "d")
def if_changed(self, *targetNames):
"""
        This function will record, for the current target, a dependency
        on each name in "targetNames" and will rebuild those
        dependencies if they are out of date.
"""
self.graph.clear_dependency_info_for(self._current_context()["target"])
self.graph.store_dependency(self._current_context()["target"], self._current_context()["scriptname"])
for argument in targetNames:
self._if_changed_file(argument)
def _if_changed_file(self, argument):
"""
As if_changed but for only one file
"""
argument = os.path.abspath(argument)
if not self.file_cache.is_known(argument):
if os.path.exists(argument):
currentType = "s"
else:
currentType = "d"
else:
currentType = self.file_cache.get_type(argument)
current = self._current_context()["target"]
if argument != current:
self.graph.store_dependency(current, argument)
if currentType=="s":
self.file_cache.stamp(argument, currentType)
elif currentType=="d" and (not self.file_cache.is_known(argument)):
self.redo(argument)
elif currentType=="d" and self.file_cache.is_known(argument):
to_rebuild = False
to_rebuild_cause = ""
for dep in self.graph.get_transitive_dependencies(argument):
if self.file_cache.is_changed(dep):
to_rebuild = True
to_rebuild_cause = dep
break
if to_rebuild:
# print "target",argument,"must be rebuild because",to_rebuild_cause,"changed"
self.redo(argument)
def clean(self):
for target in self.file_cache.get_destinations():
if os.path.exists(target):
self.logging.clean(target)
os.unlink(target)
def tgf_graph(self):
"""
        This function outputs a graph description in TGF
format.
"""
self.graph.to_tgf(sys.stdout)
# }}}
# {{{ Redo database management
# ============================
def redo_database_default_name():
"Return the default name of the redo database"
return "_redo.db"
def find_redo_database():
"""
This function will search for a redo database in the current
directory and in all the parent directories
"""
thisDirectory = os.path.abspath(os.getcwd())
db_name = redo_database_default_name()
tests = []
curdir = thisDirectory
while 1:
curdb = os.path.join(curdir, db_name)
tests.append(curdb)
if os.path.exists(curdb):
return curdb
(n_curdir,_) = os.path.split(curdir)
if n_curdir==curdir: break
curdir = n_curdir
msg = "Cannot find redo database. You must create it using the \"init\" command. I tryed\n"
for x in tests: msg += x + "\n"
raise RedoException (msg)
# }}}
# {{{ Main commands
# =================
def main_test():
print ("testing...")
doctest.testmod()
def main_clean():
redo = Redo()
dbname = find_redo_database()
redo.read_status_from_file(dbname)
redo.clean()
redo.write_status_to_file(dbname)
def main_tgf():
redo = Redo()
dbname = find_redo_database()
redo.read_status_from_file(dbname)
redo.tgf_graph()
def main_init():
redo = Redo()
default_db = redo_database_default_name()
if not os.path.exists(default_db):
redo.write_status_to_file(default_db)
else:
get_logging_subsystem().error("Database file (" + default_db + ") already exists")
def main_redo(targetName):
redo = Redo()
dbname = find_redo_database()
redo.read_status_from_file(dbname)
try:
redo.redo(targetName)
finally:
redo.write_status_to_file(dbname)
def main_argparse():
# Main command parser
parser = argparse.ArgumentParser(prog=sys.argv[0])
parser.add_argument("--logging-level", dest="logging_level", type=int,
nargs=1, help="0 means quiet, 5 means really verbose. The default is 1",
default=1)
subparsers = parser.add_subparsers(help="sub-command help", dest="command_name")
# Parser for the "init" command
parser_init = subparsers.add_parser("init", help="create a new redo database file")
# Parser for the "clean" command
parser_init = subparsers.add_parser("clean", help="remove all the generated targets")
# Parser for the "tgf" command
parser_tgf = subparsers.add_parser("tgf", help="generate a tgf file from the build system graph")
# Parser for the "build" command
parser_build = subparsers.add_parser("build", help="build a target")
parser_build.add_argument("target", help="target to build")
# Parse the command line arguments
parameters = parser.parse_args(sys.argv[1:])
# Configuring the logging subsystem
if type(parameters.logging_level)==type([]):
log_level = parameters.logging_level[0]
else:
log_level = parameters.logging_level
get_logging_subsystem().configure_from_logging_level(log_level)
# Invoke the right command
if parameters.command_name == "init":
main_init()
elif parameters.command_name == "clean":
main_clean()
elif parameters.command_name == "tgf":
main_tgf()
elif parameters.command_name == "build":
main_redo(parameters.target)
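# Typical command-line session (the target name is hypothetical):
#
#   python redo.py init
#   python redo.py --logging-level 2 build hello
#   python redo.py tgf > build_graph.tgf
#   python redo.py clean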
if __name__=="__main__":
# Check the current python version.
# It must be at least 2.7 because we use "argparse"
# to parse the command line arguments
if sys.version_info.major<2 or (sys.version_info.major==2 and sys.version_info.minor<7):
print ("This software requires Python 2.7 or better! Please update your Python interpreter", file=stderr)
else:
try:
main_argparse()
except RedoException as e:
print (e, file=sys.stderr)
# }}}
| {
"content_hash": "b25c9cc12e0381b4a05f6c00f411ad8d",
"timestamp": "",
"source": "github",
"line_count": 771,
"max_line_length": 113,
"avg_line_length": 30.61348897535668,
"alnum_prop": 0.5505656060670254,
"repo_name": "leonardoce/predo",
"id": "0302ef8a2569f36b70157b16f564fc6774b4e5b7",
"size": "23625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "redo.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "461"
},
{
"name": "Python",
"bytes": "23625"
}
],
"symlink_target": ""
} |
from OpenGLCffi.GL import params
@params(api='gl', prms=['pname', 'param'])
def glPNTrianglesiATI(pname, param):
pass
@params(api='gl', prms=['pname', 'param'])
def glPNTrianglesfATI(pname, param):
pass
| {
"content_hash": "7fd9c39e3089526937f4206bab4e30da",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 42,
"avg_line_length": 19,
"alnum_prop": 0.6985645933014354,
"repo_name": "cydenix/OpenGLCffi",
"id": "f530dce22e8a9b5c6fbd51dfc6f654e10e6a20fa",
"size": "209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OpenGLCffi/GL/EXT/ATI/pn_triangles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1646"
},
{
"name": "C++",
"bytes": "188"
},
{
"name": "Python",
"bytes": "1853617"
}
],
"symlink_target": ""
} |
from sklearn.neural_network import MLPRegressor
from sklearn.externals import joblib # for saving parameters
from sklearn.preprocessing import StandardScaler # for feature scaling
import json
from sin_approx import get_sinusoid_params
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA # principle component analysis for dim. reduction
import timeit # timer
train_data = "/home/vince/Groundwater/NeuralNet/training_data.json"
def load_data(datafile):
"""
    Get training (or test) data from JSON files.
    Return X, y, where X is the 12-d previous-months input
    and y is the (1d) anomaly.
"""
with open(datafile, 'r') as f:
data = json.load(f)
X = data['X']
y = data['y']
X = np.asarray(X)
y = np.asarray(y)
return (X, y) # load as np arrays
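# Assumed layout of the JSON files read above (keys inferred from load_data):
#
#   {"X": [[x_1, ..., x_12], ...],   # one row of 12 previous monthly values
#    "y": [anomaly_1, ...]}          # one target anomaly per row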
def create_and_train(X, y, alpha=1e-5):
"""
Create the network with certain parameters. then train it with the given data.
"""
net = MLPRegressor(
solver='adam',
alpha=alpha,
activation='relu',
hidden_layer_sizes=(500,500),
random_state=1)
net.fit(X, y)
return net
def fit_sines(prev_anomolies):
"""
    Calculate sinusoid parameters based on the previous anomalies.
We'll use the slope from these to train the network.
returns a list of lists with the sine parameters
[[amplitude, phase, mean, slope], ...]
"""
fits = []
for i in prev_anomolies:
fit = get_sinusoid_params(i)
fits.append(fit)
return fits
def artificial_data(X, y):
"""
Generate a new row of data with similar
characteristics, but gaussian noise added so
that we can get more data points.
"""
# Generate arrays of random fractions (~ .9 to 1.1)
x_rand_perc = 0.1 * np.random.standard_normal(X.shape) + 1 # gaussian with mean 1, std 0.1
y_rand_perc = 0.1 * np.random.standard_normal(y.shape) + 1
# Multiply random fractions by original to get new dataset
X_new = np.multiply(X, x_rand_perc)
y_new = np.multiply(y, y_rand_perc)
return (X_new, y_new)
def gen_art_data(X, y, n):
"""
Add artificial data to the given X and y datasets until
there are n samples.
"""
print(" Total samples availible = %s" % len(y))
target_samples = n # how many training examples we'd like to have
original_X = X
original_y = y
while (len(y) < target_samples):
newx, newy = artificial_data(original_X, original_y) # only base generated data on the originals
X = np.concatenate((X, newx))
y = np.concatenate((y, newy))
print(" generating artificial data ... %s / %s samples ready" % (len(y), target_samples))
print(" Total samples with artificial = %s" % len(y))
return (X, y)
def choose_alpha():
"""
Make plot of testing and training r^2 values for
different values of the regularization parameter.
"""
test = []
train = []
alphas = [1e-2, 3e-2, 1e-1, 3e-1, 1]
for alpha in alphas:
print("")
print(" ALPHA = %s" % alpha)
print("")
(R_train, R_test) = main(save_data=False, alpha=alpha)
train.append(R_train)
test.append(R_test)
plt.semilogx(alphas, train, 'ro', label='training set')
plt.semilogx(alphas, test, 'bx', label='test set')
plt.xlabel('alpha')
plt.ylabel('R^2 score')
plt.legend()
plt.show()
def main(save_data=True, alpha=1e-5):
"""
Train the network and print basic R^2 scores
for training and test sets. Then save the parameters
"""
print("===> Loading Data")
X, y = load_data("training_data.json")
X_test, y_test = load_data("testing_data.json") # for cross validation
# perform feature scaling
print("===> Scaling features")
scaler = StandardScaler()
scaler.fit(X) # Don't cheat - fit only on training data
X = scaler.transform(X)
X_test = scaler.transform(X_test) # apply same transformation to test data
# generate artificial data
#train_samples = 500
#test_samples = 70
#print("===> Generating artificial training data")
#X, y = gen_art_data(X, y, train_samples)
#print("===> Generating artificial testing data")
#X_test, y_test = gen_art_data(X_test, y_test, test_samples)
# dimensionality reduction/principle component analysis
#print("===> Reducing Dimensions")
#print(" old training dimensions (n_samples, n_features): (%s, %s) " % (X.shape[0], X.shape[1]))
#pca = PCA()
#pca.fit(X)
#X = pca.transform(X) # apply the dim. reduction to both training and test sets
#X_test = pca.transform(X_test)
#print(" new training dimensions (n_samples, n_features): (%s, %s) " % (X.shape[0], X.shape[1]))
print("===> Creating and Training Network")
start_time = timeit.default_timer() # time the training process
rgr = create_and_train(X, y, alpha)
end_time = timeit.default_timer()
print(" %s seconds to train" % (end_time - start_time))
print(" %s iterations completed" % (rgr.n_iter_))
print("===> Evaluating Performance")
training_R = rgr.score(X, y)
testing_R = rgr.score(X_test, y_test)
print("")
print("Training data R^2 score: %s" % training_R)
print("Testing data R^2 score: %s" % testing_R)
print("")
if save_data: # so you can switch this off sometimes
print("===> Saving parameters")
joblib.dump(rgr, 'parameters.pkl')
return (training_R, testing_R)
if __name__=="__main__":
#main(alpha=0.1)
choose_alpha()
| {
"content_hash": "f51929fc6b137e426be6f23c805dc452",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 105,
"avg_line_length": 32.72093023255814,
"alnum_prop": 0.6229566453447051,
"repo_name": "vincekurtz/gracenet",
"id": "8ffefb3a76bd8c24e95d8546d2ecd734267a4493",
"size": "6012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "train.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36922"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('studentgroups', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='mslstudentgroup',
old_name='msl_description',
new_name='description',
),
migrations.RenameField(
model_name='mslstudentgroup',
old_name='msl_image',
new_name='logo',
),
migrations.RenameField(
model_name='mslstudentgroup',
old_name='msl_image_url',
new_name='logo_url',
),
migrations.AddField(
model_name='mslstudentgroup',
name='link',
field=models.URLField(default=''),
preserve_default=False,
),
migrations.AlterField(
model_name='mslstudentgroup',
name='group',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='msl_group', to='studentgroups.StudentGroup'),
),
]
| {
"content_hash": "dcd97c242bc90343e245d9196034f44a",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 143,
"avg_line_length": 29.05,
"alnum_prop": 0.5679862306368331,
"repo_name": "sussexstudent/falmer",
"id": "24293d9fe33f93bc8ad02af4a4f342826f1577f2",
"size": "1235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "falmer/studentgroups/migrations/0002_auto_20170703_1626.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2142"
},
{
"name": "Dockerfile",
"bytes": "1035"
},
{
"name": "HTML",
"bytes": "8269"
},
{
"name": "JavaScript",
"bytes": "817"
},
{
"name": "Python",
"bytes": "513792"
},
{
"name": "Shell",
"bytes": "8120"
}
],
"symlink_target": ""
} |
import click
from urls import IMAGES
from base_request import DigitalOcean, print_table, CONTEXT_SETTINGS
@click.group()
def image_actions_group():
"""
image actions command group
"""
pass
def validate(dic, option_list):
"""
image actions command validation
"""
for key in dic.viewkeys():
if key in option_list:
for option in option_list:
if option != key:
if dic[option] and dic[key]:
raise click.UsageError('Invalid option combination --%s \
cannot be used with --%s' % (option, key))
if (dic['transfer'] and not dic['region']) or (dic['region'] and not dic['transfer']):
raise click.UsageError('--transfer option requires --region')
if (dic['action'] and not dic['action_id']) or (dic['action_id'] and not dic['action']):
raise click.UsageError('--action option requires --action-id')
return True
def run_command(token, proxy, image_id, params, record, tablefmt='fancy_grid'):
"""
run command and process result
"""
method = 'POST'
url = IMAGES + str(image_id) + '/actions'
result = DigitalOcean.do_request(method, url, token=token, proxy=proxy, params=params)
if result['has_error']:
click.echo()
click.echo('Error: %s' %(result['error_message']))
else:
headers = ['Fields', 'Values']
table = [['Id', result['action']['id']], ['Status', result['action']['status']],
['Type', result['action']['type']], ['Started at', result['action']['started_at']],
['Completed at', result['action']['completed_at']],
['Resource Id', result['action']['resource_id']],
['Resource Type', result['action']['resource_type']],
['Region', result['action']['region']]]
data = {'headers': headers, 'table_data': table}
cmd = 'Command: docli image-actions -a %d -i %d' % (image_id, result['action']['id'])
print_table(tablefmt, data, record)
click.echo()
click.echo('To get status update of above action execute following command.')
click.echo(cmd)
@image_actions_group.command(name='image-actions', context_settings=CONTEXT_SETTINGS)
@click.option('--transfer', '-T', type=int, help='transfer given image id to region', metavar='<3812352>')
@click.option('--region', '-r', type=click.Choice(['nyc1', 'nyc2', 'nyc3', 'ams1', 'ams2', 'ams3', 'sfo1', 'sgp1', 'lon1', 'fra1']), help='transfer image to given region', metavar='<nyc1>')
@click.option('--convert', '-c', type=int, help='convert given image id', metavar='<3812352>')
@click.option('--action', '-a', type=int, help='get action details for given image id', metavar='<3812352>')
@click.option('--action-id', '-i', type=int, help='get action details from given action id', metavar='<3812352>')
@click.option('--token', '-t', type=str, help='digital ocean authentication token', metavar='<token>')
@click.option('--tablefmt', '-f', type=click.Choice(['fancy_grid', 'simple', 'plain', 'grid', 'pipe', 'orgtbl', 'psql', 'rst', 'mediawiki', 'html', 'latex', 'latex_booktabs', 'tsv']), help='output table format', default='fancy_grid', metavar='<format>')
@click.option('--proxy', '-p', help='proxy url to be used for this call', metavar='<http://ip:port>')
@click.pass_context
def image_actions(ctx, transfer, region, convert, action, action_id, token, tablefmt, proxy):
"""
Image actions are commands that can be given to a DigitalOcean image.
"""
if (not ctx.params['transfer'] and not ctx.params['region']
and not ctx.params['convert'] and not ctx.params['action']
and not ctx.params['action_id']):
return click.echo(ctx.get_help())
option_list = ['transfer','convert','action']
if validate(ctx.params, option_list):
if transfer:
params = {"type":"transfer","region":region}
record = 'image transfer'
return run_command(token, proxy, transfer, params, record)
if convert:
params = {"type":"convert"}
record = 'image convert'
return run_command(token, proxy, convert, params, record)
if action:
method = 'GET'
url = IMAGES + str(action) + '/' + str(action_id)
result = DigitalOcean.do_request(method, url, token=token, proxy=proxy)
if result['has_error']:
click.echo()
click.echo('Error: %s' %(result['error_message']))
else:
record = 'image action'
headers = ['Fields', 'Values']
dic = result['action']
table = [['Id', dic['id']], ['Status', dic['status']],
['Type', click.style(dic['type'], fg='blue')],
['Started at', dic['started_at']],
['Completed at', dic['completed_at']],
['Resource id', dic['resource_id']],
['Resource type', dic['resource_type']],
['Region', dic['region']]]
data = {'headers': headers, 'table_data': table}
print_table(tablefmt, data, record) | {
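# Example invocations (image ids, action id and region are illustrative only):
#
#   docli image-actions --transfer 3812352 --region nyc3
#   docli image-actions --convert 3812352
#   docli image-actions --action 3812352 --action-id 98765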
"content_hash": "9b2da9d19a185e0fcc574992b34f57fb",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 253,
"avg_line_length": 41.026785714285715,
"alnum_prop": 0.6498367791077257,
"repo_name": "yspanchal/docli",
"id": "3d9f64adcfe0b56393800dfb485565b2c9859545",
"size": "4620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docli/commands/image_actions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "71317"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
# Create your views here.
def hello_world(request):
return HttpResponse("Hello World!")
def profile(request):
template = loader.get_template('myProfile/profile.html')
context = {
'context': 42
}
return HttpResponse(template.render(context, request))
| {
"content_hash": "97e2e7379b9bbe1da1d941bd1ad5043c",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 60,
"avg_line_length": 25.933333333333334,
"alnum_prop": 0.7300771208226221,
"repo_name": "kenjones21/kenWeaver",
"id": "b9abbbb07ae51f84275a5ccb3a054e84126fbbe6",
"size": "389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/myProfile/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3361"
},
{
"name": "HTML",
"bytes": "29961"
},
{
"name": "JavaScript",
"bytes": "43557"
},
{
"name": "Shell",
"bytes": "2054"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20, verbose_name='标题')),
('text', models.TextField(verbose_name='正文')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('status', models.CharField(choices=[('draft', '草稿'), ('published', '已发布')], max_length=10, verbose_name='文章状态')),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category', models.CharField(max_length=10, verbose_name='分类')),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=10, verbose_name='昵称')),
('content', models.TextField(verbose_name='评论内容')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='评论时间')),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Article', verbose_name='文章')),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag', models.CharField(max_length=15, verbose_name='标签')),
],
),
migrations.AddField(
model_name='article',
name='category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='blog.Category', verbose_name='分类'),
),
migrations.AddField(
model_name='article',
name='tag',
field=models.ManyToManyField(blank=True, to='blog.Tag', verbose_name='标签'),
),
]
| {
"content_hash": "ddcfc71fffc2feaec04e091a904aedda",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 132,
"avg_line_length": 41.389830508474574,
"alnum_prop": 0.5581490581490581,
"repo_name": "WolfWW/django-simple-blog",
"id": "d68a8958a12136b0b2dfba0b81ec0de0032e6c82",
"size": "2587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23215"
},
{
"name": "HTML",
"bytes": "5570"
},
{
"name": "JavaScript",
"bytes": "12365"
},
{
"name": "Python",
"bytes": "19622"
}
],
"symlink_target": ""
} |
import h5py
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate
filename = 'contour_0000100.h5'
h5file = h5py.File( filename, mode = "r" )
def get_source_current( h5file ):
time_step = h5file["/TimeGrid"].attrs["time_step_size"][0]
charge = h5file["/ParticleSources/cathode_emitter"].attrs["charge"][0]
particles_per_step = h5file[
"/ParticleSources/cathode_emitter"].attrs["particles_to_generate_each_step"][0]
current = particles_per_step * charge / time_step
return current
def get_source_geometry( h5file ):
axis_start_x = \
h5file["/ParticleSources/cathode_emitter"].attrs["cylinder_axis_start_x"][0]
axis_start_z = \
h5file["/ParticleSources/cathode_emitter"].attrs["cylinder_axis_start_z"][0]
radius = h5file["/ParticleSources/cathode_emitter"].attrs["cylinder_radius"][0]
return ( axis_start_x, axis_start_z, radius )
def get_source_particle_parameters( h5file ):
mass = h5file["/ParticleSources/cathode_emitter"].attrs["mass"][0]
charge = h5file["/ParticleSources/cathode_emitter"].attrs["charge"][0]
momentum_z = h5file["/ParticleSources/cathode_emitter"].attrs["mean_momentum_z"][0]
return ( mass, charge, momentum_z )
def beam_radius( u, r_0 ):
return r_0 * np.exp( u ** 2 )
def beam_z( u, m, v, q, I, r_0 ):
coeff = np.sqrt( m * v**3 / q / I ) * r_0
subint = lambda t: np.exp( t * t )
low_lim = 0
up_lim = u
integral_value = scipy.integrate.quad( subint, low_lim, up_lim )[0]
return coeff * integral_value
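# beam_radius() and beam_z() together trace the analytic envelope compared
# against the simulation below: r(u) = r_0 * exp(u^2) and
# z(u) = sqrt(m * v**3 / (q * I)) * r_0 * integral_0^u exp(t^2) dt,
# so each value of u yields one (z, r) point along the beam contour. (The
# physical interpretation as the standard space-charge spreading relation is
# an assumption; the formulas simply restate what the code computes.)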
beam_axis_x_pos, emitter_z_pos, r_0 = get_source_geometry( h5file )
I = get_source_current( h5file )
m, q, p = get_source_particle_parameters( h5file )
v = p / m
u_min = 0; u_max = 2; num_u_points = 100 # for u = 1, r = r(0) * 2.71812
u = np.linspace( u_min, u_max, num_u_points )
r_an = [ beam_radius( x, r_0 ) for x in u ]
r_an_upper = r_an + beam_axis_x_pos
r_an_lower = beam_axis_x_pos - r_an
z_an = [ beam_z( x, m = m, v = v, q = q, I = I, r_0 = r_0 ) for x in u ]
z_an = z_an + emitter_z_pos
r_num = h5file["/ParticleSources/cathode_emitter/position_x"]
z_num = h5file["/ParticleSources/cathode_emitter/position_z"]
z_volume_size = h5file["/SpatialMesh"].attrs["z_volume_size"][0]
x_volume_size = h5file["/SpatialMesh"].attrs["x_volume_size"][0]
plt.xlabel( "Z [cm]" )
plt.ylabel( "X [cm]" )
plt.ylim( 0, x_volume_size )
plt.xlim( 0, z_volume_size )
plt.plot( z_num, r_num, '.', label = "num" )
plt.plot( z_an, r_an_upper, label = "theory", color = "g" )
plt.plot( z_an, r_an_lower, color = "g" )
plt.legend()
plt.savefig( "beam_contour.png" )
h5file.close()
| {
"content_hash": "8a56c685207434b28e0edd127c26b34c",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 87,
"avg_line_length": 38.11594202898551,
"alnum_prop": 0.6433460076045627,
"repo_name": "epicf/ef",
"id": "4b090a5fc9c89686597cbb18c452353ed43516d1",
"size": "2630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/axially_symmetric_beam_contour/beam_contour.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "23971"
},
{
"name": "C++",
"bytes": "275501"
},
{
"name": "Makefile",
"bytes": "1534"
},
{
"name": "Python",
"bytes": "75815"
}
],
"symlink_target": ""
} |
"""Unit tests for datasets."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import ml_collections
from scenic.projects.vivit.data import video_tfrecord_dataset
class VideoTFRecordDatsetTest(parameterized.TestCase):
"""Unit tests for video_tfrecord_dataset.py."""
@parameterized.named_parameters(
('1 test clip', 1, False, 0),
('1x3 test clips', 1, True, 0),
('4 test clips, prefetch', 4, False, 1),
('4x3 test clips, prefetch', 4, True, 1))
def test_dataset_builder(self, num_test_clips, do_three_spatial_crops,
prefetch_to_device):
"""Tests dataset builder."""
num_shards = jax.local_device_count()
batch_size = num_shards * 3
eval_batch_size = num_shards * 2
dataset_configs = ml_collections.ConfigDict()
dataset_configs.prefetch_to_device = prefetch_to_device
dataset_configs.num_frames = 8
dataset_configs.num_test_clips = num_test_clips
dataset_configs.do_three_spatial_crops = do_three_spatial_crops
dataset_configs.base_dir = '/path/to/dataset_root/'
dataset_configs.tables = {
'train': 'something-something-v2-train.rgb.tfrecord@128',
'validation': 'something-something-v2-validation.rgb.tfrecord@128',
'test': 'something-something-v2-validation.rgb.tfrecord@128'
}
dataset_configs.examples_per_subset = {
'train': 168913,
'validation': 24777,
'test': 24777
}
dataset_configs.num_classes = 174
    print('Please set the correct dataset base directory and run '
          'this test again.')
return
dataset = video_tfrecord_dataset.get_dataset(
batch_size=batch_size,
eval_batch_size=eval_batch_size,
num_shards=num_shards,
dataset_configs=dataset_configs)
self.assertIsNotNone(dataset.train_iter)
self.assertIsNotNone(dataset.valid_iter)
self.assertIsNotNone(dataset.test_iter)
train_batch = next(dataset.train_iter)
eval_batch = next(dataset.valid_iter)
test_batch = next(dataset.test_iter)
# Check shapes.
num_spatial_crops = 3 if do_three_spatial_crops else 1
expected_shape = jnp.array((num_shards, batch_size // num_shards) +
dataset.meta_data['input_shape'][1:])
expected_shape_eval = jnp.array(
(num_shards, eval_batch_size // num_shards) +
dataset.meta_data['input_shape'][1:])
expected_shape_test = jnp.array(
(num_shards,
eval_batch_size * num_test_clips * num_spatial_crops // num_shards) +
dataset.meta_data['input_shape'][1:])
self.assertTrue(
jnp.array_equal(train_batch['inputs'].shape, expected_shape))
self.assertTrue(
jnp.array_equal(eval_batch['inputs'].shape, expected_shape_eval))
self.assertTrue(
jnp.array_equal(test_batch['inputs'].shape, expected_shape_test))
# Check number of examples.
self.assertEqual(dataset.meta_data['num_train_examples'], 168913)
self.assertEqual(dataset.meta_data['num_eval_examples'], 24777)
self.assertEqual(dataset.meta_data['num_test_examples'],
24777 * num_test_clips * num_spatial_crops)
if __name__ == '__main__':
absltest.main()
| {
"content_hash": "990dcd27ea11c86ddb2d719223f2eaee",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 78,
"avg_line_length": 36.70786516853933,
"alnum_prop": 0.6580961126415672,
"repo_name": "google-research/scenic",
"id": "add6ac88f1004090a8e1a6f52a26dc60226c5fca",
"size": "3267",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scenic/projects/vivit/data/tests/test_video_tfrecord_dataset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1717873"
},
{
"name": "Python",
"bytes": "3692184"
}
],
"symlink_target": ""
} |
"""Fit and plot primaray beam from FHD output"""
from multiprocessing import Pool, Array
import matplotlib
import numpy as np
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.modeling import fitting, models
from opstats.utils.settings import MWA_FREQ_EOR_ALL_80KHZ
CENPIX = 3740
DEGPIX = 0.0160428
ANG = np.arange(-CENPIX, CENPIX) * DEGPIX
freqs = MWA_FREQ_EOR_ALL_80KHZ
shared_array_base = Array('d', freqs.size)
shared_array = np.frombuffer(shared_array_base.get_obj())
beam_dir = '/data3/piyanat/runs/fhd_uvlt50/output_data/'
beamxx_files = [
beam_dir + 'vis_interp_delta_21cm_l128_0.000h_{:.3f}MHz_Beam_XX.fits'
.format(f) for f in freqs
]
beamyy_files = [
beam_dir + 'vis_interp_delta_21cm_l128_0.000h_{:.3f}MHz_Beam_YY.fits'
.format(f) for f in freqs
]
def make_ibeam_cross(xx_beam, yy_beam):
"""Combine XX and YY beam into Stokes I beam and return the cross section.
Assume perfect array feed, i.e. I = (XX + YY) / 2. 2 in the denominator
    is there to renormalize the beam peak to 1.
"""
return (xx_beam[:, CENPIX] + yy_beam[:, CENPIX]) / 2
def plot_fit(x, y, func, outname):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y, 'k-', label='data')
ax.plot(x, func(x), 'k:', label='fit')
ax.set_xlim(x[0], x[-1])
ax.set_xlabel('Angular Distance [deg]')
ax.set_ylabel('Response')
ax.legend()
fig.savefig(outname)
plt.close()
def fit_beam(i):
xx_beam_file, yy_beam_file = beamxx_files[i], beamyy_files[i]
ibeam_cross = make_ibeam_cross(fits.getdata(xx_beam_file),
fits.getdata(yy_beam_file))
g_init = models.Gaussian1D(1, 0, 20)
fitter = fitting.LevMarLSQFitter()
g_fit = fitter(g_init, ANG, ibeam_cross)
figout = 'mwa_pbfit_' + xx_beam_file.split('_')[-3] + '.pdf'
plot_fit(ANG, ibeam_cross, g_fit, figout)
fwhm = g_fit.stddev.value * 2 * np.sqrt(2 * np.log(2))
shared_array[i] = fwhm
print(freqs[i], ':', fwhm)
pool = Pool()
pool.map(fit_beam, range(freqs.size))
pool.close()
pool.join()
np.savetxt('mwa_pbfit.csv', np.vstack((freqs, shared_array)).T,
delimiter=',', header='Frequency [MHz], PB FWHM [deg]')
| {
"content_hash": "c709d131df6bb9c88e995bb849f5515d",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 78,
"avg_line_length": 30.78082191780822,
"alnum_prop": 0.6444147752558967,
"repo_name": "piyanatk/sim",
"id": "aef4ed58f55de2f20486ac9efdb379d135f72295",
"size": "2247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/archive/fit_pb.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from server_tracking import __version__
setup(
name='server-side-tracking',
version=__version__,
packages=find_packages(),
url='',
license='MIT',
author='Matthias Erll',
author_email='[email protected]',
install_requires=['requests', 'six'],
description='Server-side tracking in Google Analytics for web applications.'
)
| {
"content_hash": "fe4ceff90810c85ddd0ad5502c51f57d",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 80,
"avg_line_length": 26.8,
"alnum_prop": 0.6840796019900498,
"repo_name": "merll/server-side-tracking",
"id": "93ee2bd90df3e3f0651a8724c2c5954785328886",
"size": "426",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63740"
}
],
"symlink_target": ""
} |
import json
import re
import warnings
from os import path
import neo4j
import neo4j.api
from neo4j._async_compat.util import Util
from .. import (
fromtestkit,
test_subtest_skips,
totestkit,
)
from .._warning_check import warning_check
from ..exceptions import MarkdAsDriverException
class FrontendError(Exception):
pass
def load_config():
config_path = path.join(path.dirname(__file__), "..", "test_config.json")
with open(config_path, "r") as fd:
config = json.load(fd)
skips = config["skips"]
features = [k for k, v in config["features"].items() if v is True]
import ssl
if ssl.HAS_TLSv1_3:
features += ["Feature:TLS:1.3"]
return skips, features
SKIPPED_TESTS, FEATURES = load_config()
def _get_skip_reason(test_name):
for skip_pattern, reason in SKIPPED_TESTS.items():
if skip_pattern[0] == skip_pattern[-1] == "'":
match = skip_pattern[1:-1] == test_name
else:
match = re.match(skip_pattern, test_name)
if match:
return reason
def StartTest(backend, data):
test_name = data["testName"]
reason = _get_skip_reason(test_name)
if reason is not None:
if reason.startswith("test_subtest_skips."):
backend.send_response("RunSubTests", {})
else:
backend.send_response("SkipTest", {"reason": reason})
else:
backend.send_response("RunTest", {})
def StartSubTest(backend, data):
test_name = data["testName"]
subtest_args = data["subtestArguments"]
subtest_args.mark_all_as_read(recursive=True)
reason = _get_skip_reason(test_name)
assert reason and reason.startswith("test_subtest_skips.") or print(reason)
func = getattr(test_subtest_skips, reason[19:])
reason = func(**subtest_args)
if reason is not None:
backend.send_response("SkipTest", {"reason": reason})
else:
backend.send_response("RunTest", {})
def GetFeatures(backend, data):
backend.send_response("FeatureList", {"features": FEATURES})
def NewDriver(backend, data):
auth_token = data["authorizationToken"]["data"]
data["authorizationToken"].mark_item_as_read_if_equals(
"name", "AuthorizationToken"
)
scheme = auth_token["scheme"]
if scheme == "basic":
auth = neo4j.basic_auth(
auth_token["principal"], auth_token["credentials"],
realm=auth_token.get("realm", None)
)
elif scheme == "kerberos":
auth = neo4j.kerberos_auth(auth_token["credentials"])
elif scheme == "bearer":
auth = neo4j.bearer_auth(auth_token["credentials"])
else:
auth = neo4j.custom_auth(
auth_token["principal"], auth_token["credentials"],
auth_token["realm"], auth_token["scheme"],
**auth_token.get("parameters", {})
)
auth_token.mark_item_as_read("parameters", recursive=True)
kwargs = {}
if data["resolverRegistered"] or data["domainNameResolverRegistered"]:
kwargs["resolver"] = resolution_func(
backend, data["resolverRegistered"],
data["domainNameResolverRegistered"]
)
for timeout_testkit, timeout_driver in (
("connectionTimeoutMs", "connection_timeout"),
("maxTxRetryTimeMs", "max_transaction_retry_time"),
("connectionAcquisitionTimeoutMs", "connection_acquisition_timeout"),
):
if data.get(timeout_testkit) is not None:
kwargs[timeout_driver] = data[timeout_testkit] / 1000
for k in ("sessionConnectionTimeoutMs", "updateRoutingTableTimeoutMs"):
if k in data:
data.mark_item_as_read_if_equals(k, None)
if data.get("maxConnectionPoolSize"):
kwargs["max_connection_pool_size"] = data["maxConnectionPoolSize"]
if data.get("fetchSize"):
kwargs["fetch_size"] = data["fetchSize"]
if "encrypted" in data:
kwargs["encrypted"] = data["encrypted"]
if "trustedCertificates" in data:
if data["trustedCertificates"] is None:
kwargs["trusted_certificates"] = neo4j.TrustSystemCAs()
elif not data["trustedCertificates"]:
kwargs["trusted_certificates"] = neo4j.TrustAll()
else:
cert_paths = ("/usr/local/share/custom-ca-certificates/" + cert
for cert in data["trustedCertificates"])
kwargs["trusted_certificates"] = neo4j.TrustCustomCAs(*cert_paths)
data.mark_item_as_read_if_equals("livenessCheckTimeoutMs", None)
driver = neo4j.GraphDatabase.driver(
data["uri"], auth=auth, user_agent=data["userAgent"], **kwargs
)
key = backend.next_key()
backend.drivers[key] = driver
backend.send_response("Driver", {"id": key})
def VerifyConnectivity(backend, data):
driver_id = data["driverId"]
driver = backend.drivers[driver_id]
driver.verify_connectivity()
backend.send_response("Driver", {"id": driver_id})
def GetServerInfo(backend, data):
driver_id = data["driverId"]
driver = backend.drivers[driver_id]
server_info = driver.get_server_info()
backend.send_response("ServerInfo", {
"address": ":".join(map(str, server_info.address)),
"agent": server_info.agent,
"protocolVersion": ".".join(map(str, server_info.protocol_version)),
})
def CheckMultiDBSupport(backend, data):
driver_id = data["driverId"]
driver = backend.drivers[driver_id]
with warning_check(
neo4j.ExperimentalWarning,
"Feature support query, based on Bolt protocol version and Neo4j "
"server version will change in the future."
):
available = driver.supports_multi_db()
backend.send_response("MultiDBSupport", {
"id": backend.next_key(), "available": available
})
def resolution_func(backend, custom_resolver=False, custom_dns_resolver=False):
    # This solution (putting custom resolution together with DNS resolution
    # into one function) only works because the Python driver calls the custom
# resolver function for every connection, which is not true for all
# drivers. Properly exposing a way to change the DNS lookup behavior is not
# possible without changing the driver's code.
assert custom_resolver or custom_dns_resolver
def resolve(address):
addresses = [":".join(map(str, address))]
if custom_resolver:
key = backend.next_key()
backend.send_response("ResolverResolutionRequired", {
"id": key,
"address": addresses[0]
})
if not backend.process_request():
# connection was closed before end of next message
return []
if key not in backend.custom_resolutions:
raise RuntimeError(
"Backend did not receive expected "
"ResolverResolutionCompleted message for id %s" % key
)
addresses = backend.custom_resolutions.pop(key)
if custom_dns_resolver:
dns_resolved_addresses = []
for address in addresses:
key = backend.next_key()
address = address.rsplit(":", 1)
backend.send_response("DomainNameResolutionRequired", {
"id": key,
"name": address[0]
})
if not backend.process_request():
# connection was closed before end of next message
return []
if key not in backend.dns_resolutions:
raise RuntimeError(
"Backend did not receive expected "
"DomainNameResolutionCompleted message for id %s" % key
)
dns_resolved_addresses += list(map(
lambda a: ":".join((a, *address[1:])),
backend.dns_resolutions.pop(key)
))
addresses = dns_resolved_addresses
return list(map(neo4j.Address.parse, addresses))
return resolve
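# Message flow sketch for the resolver hooks above (an informal summary, not a
# protocol definition):
#
#   backend -> frontend: ResolverResolutionRequired {id, address}
#   frontend -> backend: ResolverResolutionCompleted {requestId, addresses}
#   backend -> frontend: DomainNameResolutionRequired {id, name}
#   frontend -> backend: DomainNameResolutionCompleted {requestId, addresses}
#
# The two *Completed handlers below stash the returned addresses under the
# request id so resolve() can pick them up after process_request() returns.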
def ResolverResolutionCompleted(backend, data):
backend.custom_resolutions[data["requestId"]] = data["addresses"]
def DomainNameResolutionCompleted(backend, data):
backend.dns_resolutions[data["requestId"]] = data["addresses"]
def NewBookmarkManager(backend, data):
bookmark_manager_id = backend.next_key()
bmm_kwargs = {}
data.mark_item_as_read("initialBookmarks", recursive=True)
bmm_kwargs["initial_bookmarks"] = data.get("initialBookmarks")
if data.get("bookmarksSupplierRegistered"):
bmm_kwargs["bookmarks_supplier"] = bookmarks_supplier(
backend, bookmark_manager_id
)
if data.get("bookmarksConsumerRegistered"):
bmm_kwargs["bookmarks_consumer"] = bookmarks_consumer(
backend, bookmark_manager_id
)
with warning_check(
neo4j.ExperimentalWarning,
"The bookmark manager feature is experimental. It might be changed or "
"removed any time even without prior notice."
):
bookmark_manager = neo4j.GraphDatabase.bookmark_manager(
**bmm_kwargs
)
backend.bookmark_managers[bookmark_manager_id] = bookmark_manager
backend.send_response("BookmarkManager", {"id": bookmark_manager_id})
def BookmarkManagerClose(backend, data):
bookmark_manager_id = data["id"]
del backend.bookmark_managers[bookmark_manager_id]
backend.send_response("BookmarkManager", {"id": bookmark_manager_id})
def bookmarks_supplier(backend, bookmark_manager_id):
def supplier(database):
key = backend.next_key()
backend.send_response("BookmarksSupplierRequest", {
"id": key,
"bookmarkManagerId": bookmark_manager_id,
"database": database
})
if not backend.process_request():
# connection was closed before end of next message
return []
if key not in backend.bookmarks_supplies:
raise RuntimeError(
"Backend did not receive expected "
"BookmarksSupplierCompleted message for id %s" % key
)
return backend.bookmarks_supplies.pop(key)
return supplier
def BookmarksSupplierCompleted(backend, data):
backend.bookmarks_supplies[data["requestId"]] = \
neo4j.Bookmarks.from_raw_values(data["bookmarks"])
def bookmarks_consumer(backend, bookmark_manager_id):
def consumer(database, bookmarks):
key = backend.next_key()
backend.send_response("BookmarksConsumerRequest", {
"id": key,
"bookmarkManagerId": bookmark_manager_id,
"database": database,
"bookmarks": list(bookmarks.raw_values)
})
if not backend.process_request():
# connection was closed before end of next message
return []
if key not in backend.bookmarks_consumptions:
raise RuntimeError(
"Backend did not receive expected "
"BookmarksConsumerCompleted message for id %s" % key
)
del backend.bookmarks_consumptions[key]
return consumer
def BookmarksConsumerCompleted(backend, data):
backend.bookmarks_consumptions[data["requestId"]] = True
def DriverClose(backend, data):
key = data["driverId"]
driver = backend.drivers[key]
driver.close()
backend.send_response("Driver", {"id": key})
def CheckDriverIsEncrypted(backend, data):
key = data["driverId"]
driver = backend.drivers[key]
backend.send_response("DriverIsEncrypted", {
"encrypted": driver.encrypted
})
class SessionTracker:
""" Keeps some extra state about the tracked session
"""
def __init__(self, session):
self.session = session
self.state = ""
self.error_id = ""
def NewSession(backend, data):
driver = backend.drivers[data["driverId"]]
access_mode = data["accessMode"]
if access_mode == "r":
access_mode = neo4j.READ_ACCESS
elif access_mode == "w":
access_mode = neo4j.WRITE_ACCESS
else:
raise ValueError("Unknown access mode:" + access_mode)
config = {
"default_access_mode": access_mode,
"database": data["database"],
}
if data.get("bookmarks") is not None:
config["bookmarks"] = neo4j.Bookmarks.from_raw_values(
data["bookmarks"]
)
if data.get("bookmarkManagerId") is not None:
config["bookmark_manager"] = backend.bookmark_managers[
data["bookmarkManagerId"]
]
for (conf_name, data_name) in (
("fetch_size", "fetchSize"),
("impersonated_user", "impersonatedUser"),
):
if data_name in data:
config[conf_name] = data[data_name]
if "bookmark_manager" in config:
with warning_check(
neo4j.ExperimentalWarning,
"The 'bookmark_manager' config key is experimental. It might be "
"changed or removed any time even without prior notice."
):
session = driver.session(**config)
else:
session = driver.session(**config)
key = backend.next_key()
backend.sessions[key] = SessionTracker(session)
backend.send_response("Session", {"id": key})
def SessionRun(backend, data):
session = backend.sessions[data["sessionId"]].session
query, params = fromtestkit.to_query_and_params(data)
result = session.run(query, parameters=params)
key = backend.next_key()
backend.results[key] = result
backend.send_response("Result", {"id": key, "keys": result.keys()})
def SessionClose(backend, data):
key = data["sessionId"]
session = backend.sessions[key].session
session.close()
del backend.sessions[key]
backend.send_response("Session", {"id": key})
def SessionBeginTransaction(backend, data):
key = data["sessionId"]
session = backend.sessions[key].session
tx_kwargs = fromtestkit.to_tx_kwargs(data)
tx = session.begin_transaction(**tx_kwargs)
key = backend.next_key()
backend.transactions[key] = tx
backend.send_response("Transaction", {"id": key})
def SessionReadTransaction(backend, data):
transactionFunc(backend, data, True)
def SessionWriteTransaction(backend, data):
transactionFunc(backend, data, False)
def transactionFunc(backend, data, is_read):
key = data["sessionId"]
session_tracker = backend.sessions[key]
session = session_tracker.session
tx_kwargs = fromtestkit.to_tx_kwargs(data)
@neo4j.unit_of_work(**tx_kwargs)
def func(tx):
txkey = backend.next_key()
backend.transactions[txkey] = tx
session_tracker.state = ''
backend.send_response("RetryableTry", {"id": txkey})
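        # Keep serving frontend requests until the client settles this retryable
        # transaction via RetryablePositive ('+') or RetryableNegative ('-').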
cont = True
while cont:
cont = backend.process_request()
if session_tracker.state == '+':
cont = False
elif session_tracker.state == '-':
if session_tracker.error_id:
raise backend.errors[session_tracker.error_id]
else:
raise FrontendError("Client said no")
if is_read:
session.execute_read(func)
else:
session.execute_write(func)
backend.send_response("RetryableDone", {})
def RetryablePositive(backend, data):
key = data["sessionId"]
session_tracker = backend.sessions[key]
session_tracker.state = '+'
def RetryableNegative(backend, data):
key = data["sessionId"]
session_tracker = backend.sessions[key]
session_tracker.state = '-'
session_tracker.error_id = data.get('errorId', '')
def SessionLastBookmarks(backend, data):
key = data["sessionId"]
session = backend.sessions[key].session
bookmarks = session.last_bookmarks()
backend.send_response("Bookmarks",
{"bookmarks": list(bookmarks.raw_values)})
def TransactionRun(backend, data):
key = data["txId"]
tx = backend.transactions[key]
cypher, params = fromtestkit.to_cypher_and_params(data)
result = tx.run(cypher, parameters=params)
key = backend.next_key()
backend.results[key] = result
backend.send_response("Result", {"id": key, "keys": result.keys()})
def TransactionCommit(backend, data):
key = data["txId"]
tx = backend.transactions[key]
try:
commit = tx.commit
except AttributeError as e:
raise MarkdAsDriverException(e)
# raise DriverError("Type does not support commit %s" % type(tx))
commit()
backend.send_response("Transaction", {"id": key})
def TransactionRollback(backend, data):
key = data["txId"]
tx = backend.transactions[key]
try:
rollback = tx.rollback
except AttributeError as e:
raise MarkdAsDriverException(e)
# raise DriverError("Type does not support rollback %s" % type(tx))
rollback()
backend.send_response("Transaction", {"id": key})
def TransactionClose(backend, data):
key = data["txId"]
tx = backend.transactions[key]
try:
close = tx.close
except AttributeError as e:
raise MarkdAsDriverException(e)
# raise DriverError("Type does not support close %s" % type(tx))
close()
backend.send_response("Transaction", {"id": key})
def ResultNext(backend, data):
result = backend.results[data["resultId"]]
try:
record = Util.next(Util.iter(result))
except StopIteration:
backend.send_response("NullRecord", {})
return
backend.send_response("Record", totestkit.record(record))
def ResultSingle(backend, data):
result = backend.results[data["resultId"]]
backend.send_response("Record", totestkit.record(
result.single(strict=True)
))
def ResultSingleOptional(backend, data):
result = backend.results[data["resultId"]]
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
record = result.single(strict=False)
if record:
record = totestkit.record(record)
backend.send_response("RecordOptional", {
"record": record, "warnings": list(map(str, warning_list))
})
def ResultPeek(backend, data):
result = backend.results[data["resultId"]]
record = result.peek()
if record is not None:
backend.send_response("Record", totestkit.record(record))
else:
backend.send_response("NullRecord", {})
def ResultList(backend, data):
result = backend.results[data["resultId"]]
records = Util.list(result)
backend.send_response("RecordList", {
"records": [totestkit.record(r) for r in records]
})
def ResultConsume(backend, data):
result = backend.results[data["resultId"]]
summary = result.consume()
from neo4j import ResultSummary
assert isinstance(summary, ResultSummary)
backend.send_response("Summary", {
"serverInfo": {
"address": ":".join(map(str, summary.server.address)),
"agent": summary.server.agent,
"protocolVersion":
".".join(map(str, summary.server.protocol_version)),
},
"counters": None if not summary.counters else {
"constraintsAdded": summary.counters.constraints_added,
"constraintsRemoved": summary.counters.constraints_removed,
"containsSystemUpdates": summary.counters.contains_system_updates,
"containsUpdates": summary.counters.contains_updates,
"indexesAdded": summary.counters.indexes_added,
"indexesRemoved": summary.counters.indexes_removed,
"labelsAdded": summary.counters.labels_added,
"labelsRemoved": summary.counters.labels_removed,
"nodesCreated": summary.counters.nodes_created,
"nodesDeleted": summary.counters.nodes_deleted,
"propertiesSet": summary.counters.properties_set,
"relationshipsCreated": summary.counters.relationships_created,
"relationshipsDeleted": summary.counters.relationships_deleted,
"systemUpdates": summary.counters.system_updates,
},
"database": summary.database,
"notifications": summary.notifications,
"plan": summary.plan,
"profile": summary.profile,
"query": {
"text": summary.query,
"parameters": {k: totestkit.field(v)
for k, v in summary.parameters.items()},
},
"queryType": summary.query_type,
"resultAvailableAfter": summary.result_available_after,
"resultConsumedAfter": summary.result_consumed_after,
})
def ForcedRoutingTableUpdate(backend, data):
driver_id = data["driverId"]
driver = backend.drivers[driver_id]
database = data["database"]
bookmarks = data["bookmarks"]
with driver._pool.refresh_lock:
driver._pool.update_routing_table(
database=database, imp_user=None, bookmarks=bookmarks
)
backend.send_response("Driver", {"id": driver_id})
def GetRoutingTable(backend, data):
driver_id = data["driverId"]
database = data["database"]
driver = backend.drivers[driver_id]
routing_table = driver._pool.routing_tables[database]
response_data = {
"database": routing_table.database,
"ttl": routing_table.ttl,
}
for role in ("routers", "readers", "writers"):
addresses = routing_table.__getattribute__(role)
response_data[role] = list(map(str, addresses))
backend.send_response("RoutingTable", response_data)
| {
"content_hash": "947d89528a6fec6722e5e6da203247c9",
"timestamp": "",
"source": "github",
"line_count": 633,
"max_line_length": 79,
"avg_line_length": 34.11058451816746,
"alnum_prop": 0.6281956280103742,
"repo_name": "neo4j/neo4j-python-driver",
"id": "d226d1525a9be7370f14c15e5f01a4aed84f4cf7",
"size": "22235",
"binary": false,
"copies": "1",
"ref": "refs/heads/5.0",
"path": "testkitbackend/_sync/requests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2068"
},
{
"name": "Python",
"bytes": "1654566"
},
{
"name": "Shell",
"bytes": "4165"
}
],
"symlink_target": ""
} |
__title__ = 'crossword'
__version__ = '0.1.2'
__author__ = 'Simeon Visser'
__email__ = '[email protected]'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014 Simeon Visser'
from crossword.core import Crossword
from crossword.exceptions import CrosswordException
from crossword.format_ipuz import from_ipuz, to_ipuz
from crossword.format_puz import from_puz, to_puz
| {
"content_hash": "2bfcc2e8531a8b6a9f24f91e2b420c97",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 52,
"avg_line_length": 33.81818181818182,
"alnum_prop": 0.7338709677419355,
"repo_name": "century-arcade/xd",
"id": "63b7acb8b458116b81ced9fb1572770fcb596748",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crossword/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4533"
},
{
"name": "CSS",
"bytes": "10745"
},
{
"name": "HTML",
"bytes": "614"
},
{
"name": "Makefile",
"bytes": "4220"
},
{
"name": "Python",
"bytes": "230827"
},
{
"name": "Shell",
"bytes": "23831"
}
],
"symlink_target": ""
} |
from fun_views.requests import make_req_data
from fun_views.views.generic.redirect import redirect_view
from tests.utils.defaults_for_tests import (default_query_string,
default_response, default_url)
from tests.utils.requests import call_view, FakeRequest
this_req_data = make_req_data(FakeRequest())
this_req_data.request.META = {
'QUERY_STRING': default_query_string
}
def this_get_url(req_data):
assert req_data == this_req_data
return default_url
def this_get_query_string(req_data):
assert req_data == this_req_data
return default_query_string
def test_functions_are_called_as_expected():
for this_use_query_string in (True, False):
for this_permanent in (True, False):
this_url = "{}?{}".format(default_url, default_query_string) \
if this_use_query_string \
else default_url
def this_get_use_query_string(req_data, query_string):
assert req_data == this_req_data
assert query_string == default_query_string
return this_use_query_string
def this_get_permanent(req_data, url):
assert req_data == this_req_data
assert url == this_url
return this_permanent
def this_do_redirect(req_data, url, permanent):
assert req_data == this_req_data
assert url == this_url
assert permanent == this_permanent
return default_response
assert default_response == call_view(
redirect_view(
get_url=this_get_url,
get_query_string=this_get_query_string,
get_use_query_string=this_get_use_query_string,
get_permanent=this_get_permanent,
do_redirect=this_do_redirect
),
this_req_data
)
def test_literals_called_as_expected():
for this_use_query_string in (True, False):
for this_permanent in (True, False):
this_expected_url = "{}?{}".format(default_url, default_query_string) \
if this_use_query_string \
else default_url
def this_do_redirect(req_data, url, permanent):
assert req_data == this_req_data
assert url == this_expected_url
assert permanent == this_permanent
return default_response
assert default_response == call_view(
redirect_view(
url=default_url,
query_string=default_query_string,
use_query_string=this_use_query_string,
permanent=this_permanent,
do_redirect=this_do_redirect
),
this_req_data
)
| {
"content_hash": "cbe8a7d9dcc1ba59c83af6792fd4ac5e",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 83,
"avg_line_length": 36.4,
"alnum_prop": 0.5521978021978022,
"repo_name": "keithasaurus/django_fun_views",
"id": "a2c464fa6ade4948d8dbe4a8b9038d17970bde90",
"size": "2912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/views/generic/test_redirect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "80355"
}
],
"symlink_target": ""
} |
import os
import re
import sys
import math
import signal
import socket
import timeit
import platform
import threading
__version__ = '0.3.4'
# Some global variables we use
user_agent = None
source = None
shutdown_event = None
scheme = 'http'
# Used for bound_interface
socket_socket = socket.socket
try:
import xml.etree.cElementTree as ET
except ImportError:
try:
import xml.etree.ElementTree as ET
except ImportError:
from xml.dom import minidom as DOM
ET = None
# Begin import game to handle Python 2 and Python 3
try:
from urllib2 import urlopen, Request, HTTPError, URLError
except ImportError:
from urllib.request import urlopen, Request, HTTPError, URLError
try:
from httplib import HTTPConnection, HTTPSConnection
except ImportError:
e_http_py2 = sys.exc_info()
try:
from http.client import HTTPConnection, HTTPSConnection
except ImportError:
e_http_py3 = sys.exc_info()
raise SystemExit('Your python installation is missing required HTTP '
'client classes:\n\n'
'Python 2: %s\n'
'Python 3: %s' % (e_http_py2[1], e_http_py3[1]))
try:
from Queue import Queue
except ImportError:
from queue import Queue
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
try:
from urlparse import parse_qs
except ImportError:
try:
from urllib.parse import parse_qs
except ImportError:
from cgi import parse_qs
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
from argparse import ArgumentParser as ArgParser
except ImportError:
from optparse import OptionParser as ArgParser
try:
import builtins
except ImportError:
def print_(*args, **kwargs):
"""The new-style print function taken from
https://pypi.python.org/pypi/six/
"""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
else:
print_ = getattr(builtins, 'print')
del builtins
class SpeedtestCliServerListError(Exception):
    """Internal exception class used to signal that the next URL for
    retrieving speedtest.net server details should be tried
    """
def bound_socket(*args, **kwargs):
"""Bind socket to a specified source IP address"""
global source
sock = socket_socket(*args, **kwargs)
sock.bind((source, 0))
return sock
def distance(origin, destination):
"""Determine distance between 2 sets of [lat,lon] in km"""
lat1, lon1 = origin
lat2, lon2 = destination
radius = 6371 # km
dlat = math.radians(lat2 - lat1)
dlon = math.radians(lon2 - lon1)
a = (math.sin(dlat / 2) * math.sin(dlat / 2) +
math.cos(math.radians(lat1)) *
math.cos(math.radians(lat2)) * math.sin(dlon / 2) *
math.sin(dlon / 2))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = radius * c
return d
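# Worked example (coordinates and result are approximate): London (51.5074, -0.1278)
# to Paris (48.8566, 2.3522) gives a distance(...) of roughly 343 km.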
def build_user_agent():
"""Build a Mozilla/5.0 compatible User-Agent string"""
global user_agent
if user_agent:
return user_agent
ua_tuple = (
'Mozilla/5.0',
'(%s; U; %s; en-us)' % (platform.system(), platform.architecture()[0]),
'Python/%s' % platform.python_version(),
'(KHTML, like Gecko)',
'speedtest-cli/%s' % __version__
)
user_agent = ' '.join(ua_tuple)
return user_agent
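# Note for build_request below: URLs given as '://host/path' are scheme-relative;
# the module-level `scheme` ('http' by default) is prepended before the Request
# object is created.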
def build_request(url, data=None, headers={}):
"""Build a urllib2 request object
This function automatically adds a User-Agent header to all requests
"""
if url[0] == ':':
schemed_url = '%s%s' % (scheme, url)
else:
schemed_url = url
headers['User-Agent'] = user_agent
return Request(schemed_url, data=data, headers=headers)
def catch_request(request):
"""Helper function to catch common exceptions encountered when
establishing a connection with a HTTP/HTTPS request
"""
try:
uh = urlopen(request)
return uh, False
except (HTTPError, URLError, socket.error):
e = sys.exc_info()[1]
return None, e
class FileGetter(threading.Thread):
"""Thread class for retrieving a URL"""
def __init__(self, url, start):
self.url = url
self.result = None
self.starttime = start
threading.Thread.__init__(self)
def run(self):
self.result = [0]
try:
if (timeit.default_timer() - self.starttime) <= 10:
request = build_request(self.url)
f = urlopen(request)
while 1 and not shutdown_event.isSet():
self.result.append(len(f.read(10240)))
if self.result[-1] == 0:
break
f.close()
except IOError:
pass
def downloadSpeed(files, quiet=False):
"""Function to launch FileGetter threads and calculate download speeds"""
start = timeit.default_timer()
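    # Producer/consumer pattern: the producer starts one FileGetter thread per
    # URL (the Queue(6) below bounds how many are queued at once) while the
    # consumer joins finished threads and accumulates the bytes each downloaded.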
def producer(q, files):
for file in files:
thread = FileGetter(file, start)
thread.start()
q.put(thread, True)
if not quiet and not shutdown_event.isSet():
sys.stdout.write('.')
sys.stdout.flush()
finished = []
def consumer(q, total_files):
while len(finished) < total_files:
thread = q.get(True)
while thread.isAlive():
thread.join(timeout=0.1)
finished.append(sum(thread.result))
del thread
q = Queue(6)
prod_thread = threading.Thread(target=producer, args=(q, files))
cons_thread = threading.Thread(target=consumer, args=(q, len(files)))
start = timeit.default_timer()
prod_thread.start()
cons_thread.start()
while prod_thread.isAlive():
prod_thread.join(timeout=0.1)
while cons_thread.isAlive():
cons_thread.join(timeout=0.1)
return (sum(finished) / (timeit.default_timer() - start))
class FilePutter(threading.Thread):
"""Thread class for putting a URL"""
def __init__(self, url, start, size):
self.url = url
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
data = chars * (int(round(int(size) / 36.0)))
self.data = ('content1=%s' % data[0:int(size) - 9]).encode()
del data
self.result = None
self.starttime = start
threading.Thread.__init__(self)
def run(self):
try:
if ((timeit.default_timer() - self.starttime) <= 10 and
not shutdown_event.isSet()):
request = build_request(self.url, data=self.data)
f = urlopen(request)
f.read(11)
f.close()
self.result = len(self.data)
else:
self.result = 0
except IOError:
self.result = 0
def uploadSpeed(url, sizes, quiet=False):
"""Function to launch FilePutter threads and calculate upload speeds"""
start = timeit.default_timer()
def producer(q, sizes):
for size in sizes:
thread = FilePutter(url, start, size)
thread.start()
q.put(thread, True)
if not quiet and not shutdown_event.isSet():
sys.stdout.write('.')
sys.stdout.flush()
finished = []
def consumer(q, total_sizes):
while len(finished) < total_sizes:
thread = q.get(True)
while thread.isAlive():
thread.join(timeout=0.1)
finished.append(thread.result)
del thread
q = Queue(6)
prod_thread = threading.Thread(target=producer, args=(q, sizes))
cons_thread = threading.Thread(target=consumer, args=(q, len(sizes)))
start = timeit.default_timer()
prod_thread.start()
cons_thread.start()
while prod_thread.isAlive():
prod_thread.join(timeout=0.1)
while cons_thread.isAlive():
cons_thread.join(timeout=0.1)
return (sum(finished) / (timeit.default_timer() - start))
def getAttributesByTagName(dom, tagName):
"""Retrieve an attribute from an XML document and return it in a
consistent format
Only used with xml.dom.minidom, which is likely only to be used
with python versions older than 2.5
"""
elem = dom.getElementsByTagName(tagName)[0]
return dict(list(elem.attributes.items()))
def getConfig():
"""Download the speedtest.net configuration and return only the data
we are interested in
"""
request = build_request('://www.speedtest.net/speedtest-config.php')
uh, e = catch_request(request)
if e:
print_('Could not retrieve speedtest.net configuration: %s' % e)
sys.exit(1)
configxml = []
while 1:
configxml.append(uh.read(10240))
if len(configxml[-1]) == 0:
break
if int(uh.code) != 200:
return None
uh.close()
try:
try:
root = ET.fromstring(''.encode().join(configxml))
config = {
'client': root.find('client').attrib,
'times': root.find('times').attrib,
'download': root.find('download').attrib,
'upload': root.find('upload').attrib}
except AttributeError: # Python3 branch
root = DOM.parseString(''.join(configxml))
config = {
'client': getAttributesByTagName(root, 'client'),
'times': getAttributesByTagName(root, 'times'),
'download': getAttributesByTagName(root, 'download'),
'upload': getAttributesByTagName(root, 'upload')}
except SyntaxError:
print_('Failed to parse speedtest.net configuration')
sys.exit(1)
del root
del configxml
return config
def closestServers(client, all=False):
"""Determine the 5 closest speedtest.net servers based on geographic
distance
"""
urls = [
'://www.speedtest.net/speedtest-servers-static.php',
'://c.speedtest.net/speedtest-servers-static.php',
'://www.speedtest.net/speedtest-servers.php',
'://c.speedtest.net/speedtest-servers.php',
]
errors = []
servers = {}
for url in urls:
try:
request = build_request(url)
uh, e = catch_request(request)
if e:
errors.append('%s' % e)
raise SpeedtestCliServerListError
serversxml = []
while 1:
serversxml.append(uh.read(10240))
if len(serversxml[-1]) == 0:
break
if int(uh.code) != 200:
uh.close()
raise SpeedtestCliServerListError
uh.close()
try:
try:
root = ET.fromstring(''.encode().join(serversxml))
elements = root.getiterator('server')
except AttributeError: # Python3 branch
root = DOM.parseString(''.join(serversxml))
elements = root.getElementsByTagName('server')
except SyntaxError:
raise SpeedtestCliServerListError
for server in elements:
try:
attrib = server.attrib
except AttributeError:
attrib = dict(list(server.attributes.items()))
d = distance([float(client['lat']),
float(client['lon'])],
[float(attrib.get('lat')),
float(attrib.get('lon'))])
attrib['d'] = d
if d not in servers:
servers[d] = [attrib]
else:
servers[d].append(attrib)
del root
del serversxml
del elements
except SpeedtestCliServerListError:
continue
# We were able to fetch and parse the list of speedtest.net servers
if servers:
break
if not servers:
print_('Failed to retrieve list of speedtest.net servers:\n\n %s' %
'\n'.join(errors))
sys.exit(1)
closest = []
for d in sorted(servers.keys()):
for s in servers[d]:
closest.append(s)
if len(closest) == 5 and not all:
break
else:
continue
break
del servers
return closest
def getBestServer(servers):
"""Perform a speedtest.net latency request to determine which
speedtest.net server has the lowest latency
"""
results = {}
for server in servers:
cum = []
url = '%s/latency.txt' % os.path.dirname(server['url'])
urlparts = urlparse(url)
for i in range(0, 3):
try:
if urlparts[0] == 'https':
h = HTTPSConnection(urlparts[1])
else:
h = HTTPConnection(urlparts[1])
headers = {'User-Agent': user_agent}
start = timeit.default_timer()
h.request("GET", urlparts[2], headers=headers)
r = h.getresponse()
total = (timeit.default_timer() - start)
except (HTTPError, URLError, socket.error):
cum.append(3600)
continue
text = r.read(9)
if int(r.status) == 200 and text == 'test=test'.encode():
cum.append(total)
else:
cum.append(3600)
h.close()
avg = round((sum(cum) / 6) * 1000, 3)
results[avg] = server
fastest = sorted(results.keys())[0]
best = results[fastest]
best['latency'] = fastest
return best
def ctrl_c(signum, frame):
"""Catch Ctrl-C key sequence and set a shutdown_event for our threaded
operations
"""
global shutdown_event
shutdown_event.set()
raise SystemExit('\nCancelling...')
def version():
"""Print the version"""
raise SystemExit(__version__)
def speedtest():
"""Run the full speedtest.net test"""
global shutdown_event, source, scheme
shutdown_event = threading.Event()
signal.signal(signal.SIGINT, ctrl_c)
description = (
'Command line interface for testing internet bandwidth using '
'speedtest.net.\n'
'------------------------------------------------------------'
'--------------\n'
'https://github.com/sivel/speedtest-cli')
#parser = ArgParser(description=description)
# Give optparse.OptionParser an `add_argument` method for
# compatibility with argparse.ArgumentParser
#try:
# parser.add_argument = parser.add_option
#except AttributeError:
# pass
#parser.add_argument('--bytes', dest='units', action='store_const',
# const=('byte', 1), default=('bit', 8),
# help='Display values in bytes instead of bits. Does '
units = ('bit', 8)
# 'not affect the image generated by --share')
#parser.add_argument('--share', action='store_true',
# help='Generate and provide a URL to the speedtest.net '
# 'share results image')
#parser.add_argument('--simple', action='store_true',
# help='Suppress verbose output, only show basic '
# 'information')
#parser.add_argument('--list', action='store_true',
# help='Display a list of speedtest.net servers '
# 'sorted by distance')
#parser.add_argument('--server', help='Specify a server ID to test against')
#parser.add_argument('--mini', help='URL of the Speedtest Mini server')
#parser.add_argument('--source', help='Source IP address to bind to')
#parser.add_argument('--timeout', default=10, type=int,
# help='HTTP timeout in seconds. Default 10')
#parser.add_argument('--secure', action='store_true',
# help='Use HTTPS instead of HTTP when communicating '
# 'with speedtest.net operated servers')
#parser.add_argument('--version', action='store_true',
# help='Show the version number and exit')
#options = parser.parse_args()
#if isinstance(options, tuple):
# args = options[0]
#else:
# args = options
#del options
# Print the version and exit
#if args.version:
# version()
timeout = 10
socket.setdefaulttimeout(timeout)
# Pre-cache the user agent string
build_user_agent()
# If specified bind to a specific IP address
#if args.source:
# source = args.source
# socket.socket = bound_socket
#if args.secure:
# scheme = 'https'
#if not args.simple:
# print_('Retrieving speedtest.net configuration...')
try:
config = getConfig()
except URLError:
print_('Cannot retrieve speedtest configuration')
sys.exit(1)
#if not args.simple:
# print_('Retrieving speedtest.net server list...')
#if args.list or args.server:
# servers = closestServers(config['client'], True)
# if args.list:
# serverList = []
# for server in servers:
# line = ('%(id)4s) %(sponsor)s (%(name)s, %(country)s) '
# '[%(d)0.2f km]' % server)
# serverList.append(line)
# print_('\n'.join(serverList).encode('utf-8', 'ignore'))
# sys.exit(0)
#else:
servers = closestServers(config['client'])
#if not args.simple:
# print_('Testing from %(isp)s (%(ip)s)...' % config['client'])
#if args.server:
# try:
# best = getBestServer(filter(lambda x: x['id'] == args.server,
# servers))
# except IndexError:
# print_('Invalid server ID')
# sys.exit(1)
#elif args.mini:
# name, ext = os.path.splitext(args.mini)
# if ext:
# url = os.path.dirname(args.mini)
# else:
# url = args.mini
# urlparts = urlparse(url)
# try:
# request = build_request(args.mini)
# f = urlopen(request)
# except:
# print_('Invalid Speedtest Mini URL')
# sys.exit(1)
# else:
# text = f.read()
# f.close()
# extension = re.findall('upload_extension: "([^"]+)"', text.decode())
# if not extension:
# for ext in ['php', 'asp', 'aspx', 'jsp']:
# try:
# request = build_request('%s/speedtest/upload.%s' %
# (args.mini, ext))
# f = urlopen(request)
# except:
# pass
# else:
# data = f.read().strip()
# if (f.code == 200 and
# len(data.splitlines()) == 1 and
# re.match('size=[0-9]', data)):
# extension = [ext]
# break
# if not urlparts or not extension:
# print_('Please provide the full URL of your Speedtest Mini server')
# sys.exit(1)
# servers = [{
# 'sponsor': 'Speedtest Mini',
# 'name': urlparts[1],
# 'd': 0,
# 'url': '%s/speedtest/upload.%s' % (url.rstrip('/'), extension[0]),
# 'latency': 0,
# 'id': 0
# }]
# try:
# best = getBestServer(servers)
# except:
# best = servers[0]
#else:
#if not args.simple:
# print_('Selecting best server based on latency...')
best = getBestServer(servers)
#if not args.simple:
print_(('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
'%(latency)s ms' % best).encode('utf-8', 'ignore'))
#else:
# print_('Ping: %(latency)s ms' % best)
sizes = [350, 500, 750, 1000, 1500, 2000, 2500, 3000, 3500, 4000]
urls = []
for size in sizes:
for i in range(0, 4):
urls.append('%s/random%sx%s.jpg' %
(os.path.dirname(best['url']), size, size))
#if not args.simple:
# print_('Testing download speed', end='')
dlspeed = downloadSpeed(urls, None)
#if not args.simple:
# print_()
print_('Download: %0.2f M%s/s' %
((dlspeed / 1000 / 1000) * units[1], units[0]))
sizesizes = [int(.25 * 1000 * 1000), int(.5 * 1000 * 1000)]
sizes = []
for size in sizesizes:
for i in range(0, 25):
sizes.append(size)
#if not args.simple:
# print_('Testing upload speed', end='')
ulspeed = uploadSpeed(best['url'], sizes, None)
#if not args.simple:
# print_()
print_('Upload: %0.2f M%s/s' %
((ulspeed / 1000 / 1000) * units[1], units[0]))
#if args.share and args.mini:
# print_('Cannot generate a speedtest.net share results image while '
# 'testing against a Speedtest Mini server')
#elif args.share:
# dlspeedk = int(round((dlspeed / 1000) * 8, 0))
# ping = int(round(best['latency'], 0))
# ulspeedk = int(round((ulspeed / 1000) * 8, 0))
# Build the request to send results back to speedtest.net
# We use a list instead of a dict because the API expects parameters
# in a certain order
# apiData = [
# 'download=%s' % dlspeedk,
# 'ping=%s' % ping,
# 'upload=%s' % ulspeedk,
# 'promo=',
# 'startmode=%s' % 'pingselect',
# 'recommendedserverid=%s' % best['id'],
# 'accuracy=%s' % 1,
# 'serverid=%s' % best['id'],
# 'hash=%s' % md5(('%s-%s-%s-%s' %
# (ping, ulspeedk, dlspeedk, '297aae72'))
# .encode()).hexdigest()]
# headers = {'Referer': 'http://c.speedtest.net/flash/speedtest.swf'}
# request = build_request('://www.speedtest.net/api/api.php',
# data='&'.join(apiData).encode(),
# headers=headers)
# f, e = catch_request(request)
# if e:
# print_('Could not submit results to speedtest.net: %s' % e)
# sys.exit(1)
# response = f.read()
# code = f.code
# f.close()
#
# if int(code) != 200:
# print_('Could not submit results to speedtest.net')
# sys.exit(1)
# qsargs = parse_qs(response.decode())
# resultid = qsargs.get('resultid')
# if not resultid or len(resultid) != 1:
# print_('Could not submit results to speedtest.net')
# sys.exit(1)#
# print_('Share results: %s://www.speedtest.net/result/%s.png' %
# (scheme, resultid[0]))
return dlspeed*units[1], ulspeed*units[1]
#def main():
# try:
# speedtest()
# except KeyboardInterrupt:
# print_('\nCancelling...')
#if __name__ == '__main__':
# main()
# vim:ts=4:sw=4:expandtab
| {
"content_hash": "a4828ee0515d66c2c3ea4b1b1643e9f3",
"timestamp": "",
"source": "github",
"line_count": 783,
"max_line_length": 80,
"avg_line_length": 31.30779054916986,
"alnum_prop": 0.5374887819205352,
"repo_name": "nabw/InternetControl",
"id": "07abea08f812c06af1b0910420e530d0447e1ae7",
"size": "25191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "speedtest_cli.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "28289"
}
],
"symlink_target": ""
} |
import argparse
import json
import logging
import requests
from time import sleep
class HueButtonNotPressed(Exception):
pass
###############################################################################
## START LIGHT SECTION ##
###############################################################################
class LightObject(object):
'''
Hue Light Object
	This is the object that stores all the data of a single light.
	This object should not be used directly because it is
	deleted and recreated with each hue update. Use the Light
	wrapper object for any long-lived reference.
'''
def __init__(self, raw_data, bridge, light_id):
self._light_id = int(light_id)
self._name = raw_data['name']
self._uniqueid = raw_data['uniqueid']
self._manufacturername = raw_data['manufacturername']
self._swversion = raw_data['swversion']
self._modelid = raw_data['modelid']
self._reachable = raw_data['state']['reachable']
self._on = raw_data['state']['on']
self._hue = raw_data['state']['hue']
self._sat = raw_data['state']['sat']
self._effect = raw_data['state']['effect']
self._xy = raw_data['state']['xy']
self._colormode = raw_data['state']['colormode']
self._alert = raw_data['state']['alert']
self._bri = raw_data['state']['bri']
self._reachable = raw_data['state']['reachable']
self._type = raw_data['type']
self._bridge = bridge
def __del__(self):
pass
def __repr__(self):
return '<{0}.{1} Object "{2}" at ID {3}>'.format(
self.__class__.__module__,
self.__class__.__name__,
self._name,
hex(int(self._light_id)))
def __str__(self):
return 'Light ' + self._name + " ID: " + str(self._light_id) + ' ' + self.str_state()
def str_state(self):
return '{on : ' + str(self.on) + ', bri : ' + str(self.bri) + '}'
@property
def light_id(self):
return self._light_id
@property
def name(self):
return self._name
@property
def hue(self):
return self._hue
@property
def sat(self):
return self._sat
@property
def bri(self):
return self._bri
@property
def xy(self):
return self._xy
@property
def on(self):
return self._on
@on.setter
def on(self, value):
self.set_light({'on':value})
def ON(self):
self.on = True
def OFF(self):
self.on = False
def set_light(self, value):
light_id = self._light_id
logging.info('Setting Light: ' + str(light_id))
self._bridge.send_request('lights/' + str(light_id) + '/state', data=value, method='PUT')
class Light(object):
def __init__(self, light):
self._bridge = light._bridge
self._light_id = light.light_id
def __repr__(self):
return repr(self._bridge.get_light(self._light_id))
def __str__(self):
return str(self._bridge.get_light(self._light_id))
@property
def on(self):
light = self._bridge.get_light(self._light_id)
return light.on
@on.setter
def on(self, value):
light = self._bridge.get_light(self._light_id)
if value:
light.ON()
else:
light.OFF()
def update(self):
self._bridge.update()
def ON(self):
light = self._bridge.get_light(self._light_id)
light.ON()
def OFF(self):
light = self._bridge.get_light(self._light_id)
light.OFF()
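# Note: LightObject instances are discarded and rebuilt on every Bridge update,
# so the Light wrapper above stores only the bridge reference and light_id and
# re-fetches the current LightObject for each operation, keeping it valid
# across updates.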
###############################################################################
## END LIGHT SECTION ##
###############################################################################
###############################################################################
## START GROUP SECTION ##
###############################################################################
class GroupObject(object):
'''
Hue Group Object
	This is the object that stores all the data of a single group.
	This object should not be used directly because it is
	deleted and recreated with each hue update. Use the Group
	wrapper object for any long-lived reference.
'''
def __init__(self, raw_data, bridge, group_id):
self._group_id = int(group_id)
self._name = raw_data['name']
self._lights = raw_data['lights']
self._on = raw_data['action']['on']
self._hue = raw_data['action']['hue']
self._sat = raw_data['action']['sat']
self._effect = raw_data['action']['effect']
self._xy = raw_data['action']['xy']
self._colormode = raw_data['action']['colormode']
self._alert = raw_data['action']['alert']
self._bri = raw_data['action']['bri']
self._type = raw_data['type']
self._bridge = bridge
def __del__(self):
pass
def __repr__(self):
return '<{0}.{1} Object "{2}" at ID {3}>'.format(
self.__class__.__module__,
self.__class__.__name__,
self._name,
hex(int(self._group_id)))
def __str__(self):
return 'Group ' + self._name + " ID: " + str(self._group_id) + ' ' + self.str_state()
def str_state(self):
return '{on : ' + str(self.on) + ', bri : ' + str(self.bri) + '}'
@property
def group_id(self):
return self._group_id
@property
def name(self):
return self._name
@property
def hue(self):
return self._hue
@property
def sat(self):
return self._sat
@property
def bri(self):
return self._bri
@property
def xy(self):
return self._xy
@property
def on(self):
return self._on
@on.setter
def on(self, value):
self.set_group({'on':value})
def ON(self):
self.on = True
def OFF(self):
self.on = False
def set_group(self, value):
group_id = self._group_id
logging.info('Setting Group: ' + str(group_id))
self._bridge.send_request('groups/' + str(group_id) + '/action', data=value, method='PUT')
class Group(object):
def __init__(self, group):
self._bridge = group._bridge
self._group_id = group.group_id
def __repr__(self):
return repr(self._bridge.get_group(self._group_id))
def __str__(self):
return str(self._bridge.get_group(self._group_id))
@property
def on(self):
group = self._bridge.get_group(self._group_id)
return group.on
@on.setter
def on(self, value):
group = self._bridge.get_group(self._group_id)
if value:
group.ON()
else:
group.OFF()
def update(self):
self._bridge.update()
def ON(self):
group = self._bridge.get_group(self._group_id)
group.ON()
def OFF(self):
group = self._bridge.get_group(self._group_id)
group.OFF()
###############################################################################
## END GROUP SECTION ##
###############################################################################
###############################################################################
## START BRIDGE SECTION ##
###############################################################################
class Bridge(object):
'''
Hue Bridge
This is the core of zPyHue. There should only need to be one
bridge object. The bridge object manages all the other objects
	and is able to look them up as needed. It also loads the config
	file and settings.
'''
def __init__(self, config_file=None):
self._ip = None
self._username = None
self._name = None
self._rCount = 0
self._lights = []
self._groups = []
if config_file:
self._ip = '10.10.202.104'
self._username = '15946c956413d2e011d7763a649433cf'
#pass #TODO: Add config file parser
if not self._ip:
self.get_ip()
if not self._username:
self.register()
def send_request(self, path, data=None, method='GET', return_json=True, no_username=False):
if data:
data = json.dumps(data)
logging.info('Data: ' + data)
url = ''
if (no_username or not self._username):
url = 'http://' + str(self._ip) + '/' + path
else:
url = 'http://' + str(self._ip) + '/api/' + self._username + '/' + path
logging.info('Request URL: ' + url + ' Method: ' + method)
if method == 'POST':
r = requests.post(url, data=data)
if return_json:
return r.json()
return r
if method == 'PUT':
r = requests.put(url, data=data)
if return_json:
return r.json()
return r
if method == 'GET':
if data:
r = requests.get(url, data=data)
else:
r = requests.get(url)
if return_json:
return r.json()
return r
def get_ip(self):
data = requests.get('http://www.meethue.com/api/nupnp')
try:
self._ip = data.json()[0]['internalipaddress']
except:
logging.error('Problem parsing IP Address of Bridge')
exit()
if not self._ip:
logging.error('Problem parsing IP Address of Bridge')
exit()
logging.info('IP address: ' + str(self._ip))
def register(self):
request_data = {'devicetype':'zPyHue'}
response = self.send_request('api', request_data, method='POST', no_username=True)[0]
logging.info('Response: ' + str(response))
if 'error' in response:
if response['error']['type'] == 101:
logging.info('Please press the hue button.')
				sleep(3)
				self._rCount += 1  # count retries so the timeout below can actually trigger
				if (self._rCount < 30):
					self.register()
				else:
					raise HueButtonNotPressed("Hue button was not pressed in the last 90 seconds")
if 'success' in response:
self._username = response['success']['username']
logging.info('Success! username: ' + str(self._username))
def get_all(self):
'''Returns all from /api/username'''
return self.send_request('api/' + str(self._username), no_username=True)
def get_lights(self):
'''Get all lights'''
lights = self.send_request('lights')
self._lights = []
for light in lights:
self._lights.append(LightObject(lights[light], self, light))
def get_light(self, light_id):
#self.get_lights()
if isinstance(light_id, int):
for light in self._lights:
if light.light_id == light_id:
return light
for light in self._lights:
if light.name == light_id:
return light
def get_light_control(self, light_id):
return Light(self.get_light(light_id))
def get_all_light_controls(self):
#self.get_lights()
all_lights = {}
for light in self._lights:
all_lights[light._name] = self.get_light_control(light._name)
return all_lights
def get_groups(self):
'''Get all groups'''
groups = self.send_request('groups')
self._groups = []
for group in groups:
self._groups.append(GroupObject(groups[group], self, group))
def get_group(self, group_id):
#self.get_groups()
if isinstance(group_id, int):
for group in self._groups:
if group.group_id == group_id:
return group
for group in self._groups:
if group.name == group_id:
return group
def get_group_control(self, group_id):
return Group(self.get_group(group_id))
def get_all_group_controls(self):
		self.get_groups()
all_groups = {}
for group in self._groups:
all_groups[group._name] = self.get_group_control(group._name)
return all_groups
def update(self):
self.get_lights()
self.get_groups()
###############################################################################
## END BRIDGE SECTION ##
###############################################################################
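# Illustrative sketch (not part of the original module): a minimal end-to-end
# flow through Bridge and the Light/Group control wrappers. The light name
# 'Stairs' and the group name 'c.All Hue' are taken from main() below and may
# not exist on other bridges.
def _example_usage():
	bridge = Bridge()  # discovers the bridge IP and registers a username if needed
	bridge.update()  # populate the LightObject/GroupObject caches
	stairs = bridge.get_light_control('Stairs')
	stairs.ON()  # re-fetches the current LightObject before switching it on
	all_hue = bridge.get_group_control('c.All Hue')
	all_hue.on = not all_hue.on  # toggle the whole group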
def setup(args):
'''Basic config setup'''
if args.debug:
logging.basicConfig(level=logging.DEBUG)
logging.info('Debug Enabled')
return True
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug', required=False, help='Allow Debugging', action='store_true')
parser.add_argument('-c', '--config', required=False, help='Mock Config File', action='store_true')
args = parser.parse_args()
if (setup(args)):
logging.info('Setup Finished')
myBridge = Bridge(config_file=args.config)
myBridge.update()
all_lights = myBridge.get_all_light_controls()
all_groups = myBridge.get_all_group_controls()
all_groups['c.All Hue'].on = not all_groups['c.All Hue'].on
stairs = all_lights['Stairs']
stairs.update()
stairs.on = not stairs.on
print stairs
if __name__ == '__main__':
main()
| {
"content_hash": "3616e62fe40d0e1bd75ddaf6f274674f",
"timestamp": "",
"source": "github",
"line_count": 502,
"max_line_length": 100,
"avg_line_length": 23.4601593625498,
"alnum_prop": 0.5772267979960941,
"repo_name": "zpriddy/zPyHue",
"id": "697e061f121160461a026415869741aa6bc12ad9",
"size": "11873",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zPyHue.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11873"
}
],
"symlink_target": ""
} |
"""ANGLE implementation of //build/skia_gold_common/skia_gold_properties.py."""
import os
import subprocess
import sys
d = os.path.dirname
THIS_DIR = d(os.path.abspath(__file__))
ANGLE_SRC_DIR = d(d(d(d(THIS_DIR))))
sys.path.insert(0, os.path.join(ANGLE_SRC_DIR, 'build'))
CHROMIUM_SRC_DIR = d(d(ANGLE_SRC_DIR))
sys.path.insert(0, os.path.join(CHROMIUM_SRC_DIR, 'build'))
from skia_gold_common import skia_gold_properties
class ANGLESkiaGoldProperties(skia_gold_properties.SkiaGoldProperties):
@staticmethod
def _GetGitOriginMasterHeadSha1():
try:
return subprocess.check_output(['git', 'rev-parse', 'origin/master'],
shell=_IsWin(),
cwd=ANGLE_SRC_DIR).strip()
except subprocess.CalledProcessError:
return None
def _IsWin():
return sys.platform == 'win32'
| {
"content_hash": "88f2dd8cdf2826a48cbae40caf3518f5",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 81,
"avg_line_length": 29.451612903225808,
"alnum_prop": 0.6308871851040526,
"repo_name": "ppy/angle",
"id": "752de9a0e4c8ef563bb9651cd6da81b1b6a85c4d",
"size": "1080",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/tests/restricted_traces/skia_gold/angle_skia_gold_properties.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "17281"
},
{
"name": "C",
"bytes": "562758"
},
{
"name": "C++",
"bytes": "7776807"
},
{
"name": "Lex",
"bytes": "26383"
},
{
"name": "Objective-C",
"bytes": "18506"
},
{
"name": "Objective-C++",
"bytes": "25649"
},
{
"name": "PostScript",
"bytes": "989"
},
{
"name": "Python",
"bytes": "61989"
},
{
"name": "Shell",
"bytes": "1461"
},
{
"name": "Yacc",
"bytes": "61666"
}
],
"symlink_target": ""
} |
import csv
import timeit
import datetime
import plugin_interface as plugintypes
class PluginCSVCollect(plugintypes.IPluginExtended):
def __init__(self, file_name="collect.csv", delim=",", verbose=False):
now = datetime.datetime.now()
self.time_stamp = '%d-%d-%d_%d-%d-%d' % (
now.year, now.month, now.day, now.hour, now.minute, now.second)
self.file_name = self.time_stamp
self.start_time = timeit.default_timer()
self.delim = delim
self.verbose = verbose
def activate(self):
if len(self.args) > 0:
if 'no_time' in self.args:
self.file_name = self.args[0]
else:
self.file_name = self.args[0] + '_' + self.file_name;
if 'verbose' in self.args:
self.verbose = True
self.file_name = self.file_name + '.csv'
print "Will export CSV to:", self.file_name
# Open in append mode
with open(self.file_name, 'a') as f:
f.write('%' + self.time_stamp + '\n')
def deactivate(self):
print "Closing, CSV saved to:", self.file_name
return
def show_help(self):
print "Optional argument: [filename] (default: collect.csv)"
def __call__(self, sample):
t = timeit.default_timer() - self.start_time
# print timeSinceStart|Sample Id
if self.verbose:
print("CSV: %f | %d" % (t, sample.id))
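		# Row layout: elapsed_seconds, sample_id, channel_data..., aux_data...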
row = ''
row += str(t)
row += self.delim
row += str(sample.id)
row += self.delim
for i in sample.channel_data:
row += str(i)
row += self.delim
for i in sample.aux_data:
row += str(i)
row += self.delim
		# remove the trailing delimiter before terminating the row
		row = row[:-1]
		row += '\n'
with open(self.file_name, 'a') as f:
f.write(row)
| {
"content_hash": "482befd5eab15466bce83e02e0cc3fa7",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 75,
"avg_line_length": 30.721311475409838,
"alnum_prop": 0.5341515474919958,
"repo_name": "neurotechuoft/Wall-EEG",
"id": "d688428eda8bb358535ac3b581a6a100af364e6d",
"size": "1874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Code/OpenBCIPy/src/plugins/csv_collect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "237350"
}
],
"symlink_target": ""
} |
from django.test import TestCase, RequestFactory
from bambu_analytics import track_event, events
class AnalyticsTrackingTestCase(TestCase):
def setUp(self):
self.factory = RequestFactory()
def test_track_page(self):
request = self.factory.get('/')
track_event(request, events.PAGE)
def test_track_event(self):
request = self.factory.get('/')
track_event(request, events.EVENT,
category = 'test',
action = 'event'
)
def test_track_transaction(self):
request = self.factory.get('/')
track_event(request, events.TRANSACTION,
transaction_id = 1,
store = 'test',
amount = 1.0,
tax = 0,
postage = 0,
city = 'Birmingham',
state = 'West Midlands',
country = 'United Kingdom'
)
def test_track_transaction_item(self):
request = self.factory.get('/')
track_event(request, events.TRANSACTION_ITEM,
transaction_id = 1,
sku = '12345',
product = 'Test Product',
category = u'test',
amount = 1.0,
quantity = 1
) | {
"content_hash": "fceb3b62df7fcff9ef637ad28a711f69",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 53,
"avg_line_length": 29.75609756097561,
"alnum_prop": 0.5360655737704918,
"repo_name": "iamsteadman/bambu-analytics",
"id": "e53fe0ac93f85c4bc6d3604ccfaf1796e9b24936",
"size": "1220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bambu_analytics/test_tracking.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5177"
},
{
"name": "JavaScript",
"bytes": "1169"
},
{
"name": "Python",
"bytes": "14945"
}
],
"symlink_target": ""
} |
import logging
import abc
from sft.common.commands.base import ProgramFinished
from sft.common.config import Config
from prompt_toolkit.shortcuts import prompt, create_eventloop
from prompt_toolkit import AbortAction
from .cli.commands import Dispatcher
from .cli import is_cli_input_disabled
LOG = logging.getLogger(__name__)
class ClientBase(metaclass=abc.ABCMeta):
"""Basic client functionality."""
prompt = 'SFT: '
def __init__(self):
super().__init__()
self._conf = Config()
self.cmd_dispatcher = Dispatcher()
def run(self):
self._initialize()
try:
# LOG.debug('Client loop started')
while True:
self._prompt_loop()
except KeyboardInterrupt as e:
pass
except ProgramFinished as e:
pass
finally:
self._terminate()
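    # Used as the prompt_toolkit eventloop inputhook (see _prompt_loop below):
    # it keeps driving the subclass's _main_loop while CLI input is disabled or
    # until the user has finished entering a command.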
def _functional_loop(self, context):
while is_cli_input_disabled() or not context.input_is_ready():
self._main_loop()
def _prompt_loop(self):
print()
text = prompt(
self.prompt, patch_stdout=True, on_abort=AbortAction.RAISE_EXCEPTION,
eventloop=create_eventloop(inputhook=self._functional_loop))
self.cmd_dispatcher.onecmd(text)
while is_cli_input_disabled():
self._main_loop()
def _initialize(self):
# LOG.debug('Initializing client')
pass
@abc.abstractmethod
def _main_loop(self):
pass
def _terminate(self):
# LOG.debug('Terminating client')
pass
| {
"content_hash": "bebb4f5c14d1e448dac9e86eac39e4ec",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 81,
"avg_line_length": 25.238095238095237,
"alnum_prop": 0.6094339622641509,
"repo_name": "AlexeiBuzuma/LocalComputeNetworks",
"id": "695d003ba1d54d7eec090381b2baec706890de1f",
"size": "1590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sft/client/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "95802"
}
],
"symlink_target": ""
} |