max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
1094 EXPERIENCIAS.py | castrolimoeiro/Uri-exercise | 0 | 4100 | n = int(input())
coelho = rato = sapo = contador = 0
for i in range(0, n):
q, t = input().split(' ')
t = t.upper()
q = int(q)
if 1 <= q <= 15:
contador += q
if t == 'C':
coelho += q
elif t == 'R':
rato += q
elif t == 'S':
sapo += q
porccoelho = (coelho * 100) / contador
porcrato = (rato * 100) / contador
porcsapo = (sapo * 100) / contador
print(f'Total: {contador} cobaias')
print(f'Total de coelhos: {coelho}')
print(f'Total de ratos: {rato}')
print(f'Total de sapos: {sapo}')
print(f'Percentual de coelhos: {porccoelho:.2f} %')
print(f'Percentual de ratos: {porcrato:.2f} %')
print(f'Percentual de sapos: {porcsapo:.2f} %')
| 3.390625 | 3 |
gravur/common/amountinput.py | F483/gravur | 3 | 4101 | <gh_stars>1-10
# coding: utf-8
# Copyright (c) 2015 <NAME> <<EMAIL>>
# License: MIT (see LICENSE file)
from kivy.uix.boxlayout import BoxLayout
from gravur.common.labelbox import LabelBox # NOQA
from gravur.utils import load_widget
@load_widget
class AmountInput(BoxLayout):
pass
| 1.382813 | 1 |
tibanna/top.py | 4dn-dcic/tibanna | 62 | 4102 | <filename>tibanna/top.py<gh_stars>10-100
import datetime
class Top(object):
"""class TopSeries stores the information of a series of top commands
::
echo -n 'Timestamp: '; date +%F-%H:%M:%S
top -b -n1 [-i] [-c]
over short intervals to monitor the same set of processes over time.
An example input content looks like below, or a series of these.
The initialization works at any time interval and can be used as a generic
class, but the class is designed for the output of the regular top commands above
run at about 1-minute intervals, which is performed by awsf3 on an AWSEM instance
through cron jobs (some can be skipped, but there should be no more than 1 per minute).
This top output can be obtained through ``tibanna log -j <job_id> -t`` or through
API ``API().log(job_id=<job_id>, top=True)``.
::
Timestamp: 2020-12-18-18:55:37
top - 18:55:37 up 4 days, 2:37, 0 users, load average: 5.59, 5.28, 5.76
Tasks: 7 total, 1 running, 6 sleeping, 0 stopped, 0 zombie
%Cpu(s): 6.6 us, 0.1 sy, 0.0 ni, 93.2 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
KiB Mem : 12971188+total, 10379019+free, 20613644 used, 5308056 buff/cache
KiB Swap: 0 total, 0 free, 0 used. 10834606+avail Mem
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
712 root 20 0 36.464g 8.223g 19572 S 100.0 6.6 125:55.12 java -Xmx32g -Xms32g -jar juicer_tools.jar addNorm -w 1000 -d -F out.hic
17919 ubuntu 20 0 40676 3828 3144 R 6.2 0.0 0:00.01 top -b -n1 -c -i -w 10000
The default timestamp from top output does not contain dates, which can screw up multi-day processes,
which are common for bioinformatics pipelines. So, an extra timestamp is added before each top command.
To parse top output content, simply create an object. This will create processes attribute,
which is a raw parsed result organized by time stamps.
::
top = Top(top_output_content)
To reorganize the contents by commands, run digest. By default, the max number of commands is 16,
and if there are more than 16 unique commands, they will be collapsed into prefixes.
::
top.digest()
To write a csv / tsv file organized by both timestamps (rows) and commands (columns),
use :func: write_to_csv.
::
top.write_to_csv(...)
"""
# assume this format for timestamp
timestamp_format = '%Y-%m-%d-%H:%M:%S'
# These commands are excluded when parsing the top output
# Currently only 1-, 2- or 3-word prefixes work.
exclude_list = ['top', 'docker', 'dockerd', '/usr/bin/dockerd', 'cron',
'docker-untar', 'containerd', 'goofys-latest', 'cwltool',
'/usr/bin/containerd-shim-runc-v2', 'goofys', 'nodejs --eval',
'/usr/bin/python3 /usr/local/bin/cwltool', 'containerd-shim',
'/usr/bin/python3 /bin/unattended-upgrade',
'/usr/bin/python3 /usr/local/bin/awsf3',
'/usr/bin/python3 /usr/local/bin/aws s3',
'java -jar /usr/local/bin/cromwell.jar',
'java -jar /usr/local/bin/cromwell-35.jar']
def __init__(self, contents):
"""initialization parsed top output content and
creates processes which is a dictionary with timestamps as keys
and a list of Process class objects as a value.
It also creates empty attributes timestamps, commands, cpus and mems
which can be filled through method :func: digest.
"""
self.processes = dict()
self.timestamps = []
self.commands = []
self.cpus = dict()
self.mems = dict()
self.parse_contents(contents)
def parse_contents(self, contents):
is_in_table = False
for line in contents.splitlines():
if line.startswith('Timestamp:'):
timestamp = line.split()[1]
continue
if line.lstrip().startswith('PID'):
is_in_table = True
continue
if not line or line.isspace():
is_in_table = False
if is_in_table:
if timestamp not in self.processes:
self.processes[timestamp] = []
process = Process(line)
if not self.should_skip_process(process):
self.processes[timestamp].append(Process(line))
def digest(self, max_n_commands=16, sort_by='alphabetical'):
"""Fills in timestamps, commands, cpus and mems attributes
from processes attribute.
:param max_n_commands: When the number of unique commands exceeds
this value, they are collapsed into unique prefixes.
:param sort_by: alphabetical|cpu|mem. The commands are sorted
alphabetically by default, but can optionally be sorted by total cpu or total
mem in reverse order (e.g. the first command consumed the most cpu).
"""
# Reinitialize these so that you get the same results if you run it twice
self.timestamps = []
self.commands = []
self.cpus = dict()
self.mems = dict()
# First fill in commands from commands in processes (and collapse if needed.)
self.commands = self.get_collapsed_commands(max_n_commands)
# Fill in timestamps, cpus and mems from processes, matching collapsed commands.
self.nTimepoints = len(self.processes)
timestamp_ind = 0
for timestamp in sorted(self.processes):
# sorted timestamps (columns)
self.timestamps.append(timestamp)
# commands (rows)
for process in self.processes[timestamp]:
# find a matching collapsed command (i.e. command prefix) and use that as command.
command = Top.convert_command_to_collapsed_command(process.command, self.commands)
if command not in self.cpus:
self.cpus[command] = [0] * self.nTimepoints
self.mems[command] = [0] * self.nTimepoints
self.cpus[command][timestamp_ind] += process.cpu
self.mems[command][timestamp_ind] += process.mem
timestamp_ind += 1
# sort commands according to total cpu
self.sort_commands(by=sort_by)
def get_collapsed_commands(self, max_n_commands):
"""If the number of commands exceeds max_n_commands,
return a collapsed set of commands
that consists of prefixes of commands so that
the total number is within max_n_commands.
First decide the number of words from the beginning of the commands
to collapse commands that start with the same words, i.e.
find the maximum number of words that makes the number of unique commands to be
bounded by max_n_commands.
If using only the first word is not sufficient, go down to the characters of
the first word. If that's still not sufficient, collapse all of them into a single
command ('all_commands')
After the collapse, commands that are unique to a collapsed prefix are
extended back to the original command.
"""
all_commands = set()
for timestamp in self.processes:
all_commands.update(set([pr.command for pr in self.processes[timestamp]]))
if len(all_commands) <= max_n_commands:
# no need to collapse
return list(all_commands)
# decide the number of words from the beginning of the commands
# to collapse commands starting with the same words
all_cmd_lengths = [len(cmd.split()) for cmd in all_commands] # number of words per command
max_cmd_length = max(all_cmd_lengths)
min_cmd_length = min(all_cmd_lengths)
collapsed_len = max_cmd_length - 1
n_commands = len(all_commands)
while(n_commands > max_n_commands and collapsed_len > 1):
reduced_commands = set()
for cmd in all_commands:
reduced_commands.add(Top.first_words(cmd, collapsed_len))
n_commands = len(reduced_commands)
collapsed_len -= 1
# went down to the first words but still too many commands - start splitting characters then
if n_commands > max_n_commands:
all_cmd_lengths = [len(cmd.split()[0]) for cmd in all_commands] # number of characters of the first word
max_cmd_length = max(all_cmd_lengths)
min_cmd_length = min(all_cmd_lengths)
collapsed_len = max_cmd_length - 1
while(n_commands > max_n_commands and collapsed_len > 1):
reduced_commands = set()
for cmd in all_commands:
reduced_commands.add(Top.first_characters(cmd.split()[0], collapsed_len))
n_commands = len(reduced_commands)
collapsed_len -= 1
if n_commands > max_n_commands:
return ['all_commands']
else:
# extend reduced commands that don't need to be reduced
for r_cmd in list(reduced_commands): # wrap in list so that we can remove elements in the loop
uniq_cmds = [cmd for cmd in all_commands if cmd.startswith(r_cmd)]
if len(uniq_cmds) == 1:
reduced_commands.remove(r_cmd)
reduced_commands.add(uniq_cmds[0])
return reduced_commands
def write_to_csv(self, csv_file, metric='cpu', delimiter=',', colname_for_timestamps='timepoints',
timestamp_start=None, timestamp_end=None, base=0):
"""write metrics as csv file with commands as columns
:param metric: 'cpu' or 'mem'
:param delimiter: default ','
:param colname_for_timestamps: column name for the timepoint column (1st column). default 'timepoints'
:param timestamp_start: start time in the same timestamp format (e.g. 01:23:45),
time stamps will be converted to minutes since start time.
The report starts with minute 0.
Time points with no top records will be filled with 0.
If not specified, the first timestamp in the top commands will be used.
:param timestamp_end: end time in the same timestamp format (e.g. 01:23:45),
The reports will be generated only up to the end time.
Time points with no top records will be filled with 0.
If not specified, the last timestamp in the top commands will be used.
:param base: default 0. If 0, minutes start with 0, if 1, minutes are 1-based (shifted by 1).
"""
metric_array = getattr(self, metric + 's')
if self.timestamps:
if not timestamp_start:
timestamp_start = self.timestamps[0]
if not timestamp_end:
timestamp_end = self.timestamps[-1]
timestamps_as_minutes = self.timestamps_as_minutes(timestamp_start)
last_minute = self.as_minutes(timestamp_end, timestamp_start)
else: # default when timestamps is not available (empty object)
timestamps_as_minutes = range(0, 5)
last_minute = 5
with open(csv_file, 'w') as fo:
# header
# we have to escape any double quotes that are present in the cmd, before wrapping it in double quotes. Otherwise we
# will get incorrect column counts when creating the metrics report.
fo.write(delimiter.join([colname_for_timestamps] + [Top.wrap_in_double_quotes(cmd.replace('"', '""')) for cmd in self.commands]))
fo.write('\n')
# contents
# skip timepoints earlier than timestamp_start
for i in range(0, len(timestamps_as_minutes)):
if timestamps_as_minutes[i] >= 0:
break
for clock in range(0, last_minute + 1):
clock_shifted = clock + base
if i < len(timestamps_as_minutes) and timestamps_as_minutes[i] == clock:
fo.write(delimiter.join([str(clock_shifted)] + [str(metric_array[cmd][i]) for cmd in self.commands]))
i += 1
else:
fo.write(delimiter.join([str(clock_shifted)] + ['0' for cmd in self.commands])) # add 0 for timepoints not reported
fo.write('\n')
def should_skip_process(self, process):
"""A predicate function to check if the process should be skipped (excluded).
It returns True if the input process should be skipped.
e.g. the top command itself is excluded, as well as docker, awsf3, cwltool, etc.
the list to be excluded is in self.exclude_list.
It compares only the first word, or the first two or three words.
Kernel threads (single-word commands wrapped in brackets, e.g. [perl]) are also excluded.
"""
first_word = Top.first_words(process.command, 1)
first_two_words = Top.first_words(process.command, 2)
first_three_words = Top.first_words(process.command, 3)
if first_word in self.exclude_list:
return True
elif first_two_words in self.exclude_list:
return True
elif first_three_words in self.exclude_list:
return True
if first_word.startswith('[') and first_word.endswith(']'):
return True
return False
@staticmethod
def convert_command_to_collapsed_command(cmd, collapsed_commands):
if collapsed_commands == 'all_commands': # collapsed to one command
return 'all_commands'
elif cmd in collapsed_commands: # not collapsed
return cmd
else: # collapsed to prefix
all_prefixes = [_ for _ in collapsed_commands if cmd.startswith(_)]
longest_prefix = sorted(all_prefixes, key=lambda x: len(x), reverse=True)[0]
return longest_prefix
def total_cpu_per_command(self, command):
return sum([v for v in self.cpus[command]])
def total_mem_per_command(self, command):
return sum([v for v in self.mems[command]])
def sort_commands(self, by='cpu'):
"""sort self.commands by total cpu (default) or mem in reverse order,
or alphabetically (by='alphabetical')"""
if by == 'cpu':
self.commands = sorted(self.commands, key=lambda x: self.total_cpu_per_command(x), reverse=True)
elif by == 'mem':
self.commands = sorted(self.commands, key=lambda x: self.total_mem_per_command(x), reverse=True)
elif by == 'alphabetical':
self.commands = sorted(self.commands)
@classmethod
def as_minutes(cls, timestamp, timestamp_start):
"""timestamp as minutes since timestamp_start.
:param timestamp: given timestamp in the same format (e.g. 01:23:45)
:param timestamp_start: start timestamp in the same format (e.g. 01:20:45)
In the above example, 3 will be the return value.
"""
dt = cls.as_datetime(timestamp)
dt_start = cls.as_datetime(timestamp_start)
# negative numbers are not supported by timedelta, so do each case separately
if dt > dt_start:
return round((dt - dt_start).seconds / 60)
else:
return -round((dt_start - dt).seconds / 60)
def timestamps_as_minutes(self, timestamp_start):
"""convert self.timestamps to a list of minutes since timestamp_start
:param timestamp_start: timestamp in the same format (e.g. 01:23:45)
"""
return [self.as_minutes(t, timestamp_start) for t in self.timestamps]
@classmethod
def as_datetime(cls, timestamp):
return datetime.datetime.strptime(timestamp, cls.timestamp_format)
@staticmethod
def wrap_in_double_quotes(string):
"""wrap a given string with double quotes (e.g. haha -> "haha")
"""
return '\"' + string + '\"'
@staticmethod
def first_words(string, n_words):
"""returns first n words of a string
e.g. first_words('abc def ghi', 2) ==> 'abc def'
"""
words = string.split()
return ' '.join(words[0:min(n_words, len(words))])
@staticmethod
def first_characters(string, n_letters):
"""returns first n letters of a string
e.g. first_characters('abc def ghi', 2) ==> 'ab'
"""
letters = list(string)
return ''.join(letters[0:min(n_letters, len(letters))])
def as_dict(self):
return self.__dict__
class Process(object):
def __init__(self, top_line):
prinfo_as_list = top_line.lstrip().split()
self.pid = prinfo_as_list[0]
self.user = prinfo_as_list[1]
self.cpu = float(prinfo_as_list[8])
self.mem = float(prinfo_as_list[9])
self.command = ' '.join(prinfo_as_list[11:])
def as_dict(self):
return self.__dict__
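# Hedged usage sketch (illustrative only, not part of the original module): parse a
# single top output line, like the sample shown in the Top docstring, with Process.
if __name__ == "__main__":
    sample_line = ("712 root      20   0 36.464g 8.223g  19572 S 100.0  6.6 125:55.12 "
                   "java -Xmx32g -Xms32g -jar juicer_tools.jar addNorm -w 1000 -d -F out.hic")
    p = Process(sample_line)
    print(p.pid, p.user, p.cpu, p.mem, p.command)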
| 2.84375 | 3 |
venv/Lib/site-packages/zmq/tests/test_draft.py | ajayiagbebaku/NFL-Model | 603 | 4103 | # -*- coding: utf8 -*-
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import os
import platform
import time
import pytest
import zmq
from zmq.tests import BaseZMQTestCase, skip_pypy
class TestDraftSockets(BaseZMQTestCase):
def setUp(self):
if not zmq.DRAFT_API:
raise pytest.skip("draft api unavailable")
super(TestDraftSockets, self).setUp()
def test_client_server(self):
client, server = self.create_bound_pair(zmq.CLIENT, zmq.SERVER)
client.send(b'request')
msg = self.recv(server, copy=False)
assert msg.routing_id is not None
server.send(b'reply', routing_id=msg.routing_id)
reply = self.recv(client)
assert reply == b'reply'
def test_radio_dish(self):
dish, radio = self.create_bound_pair(zmq.DISH, zmq.RADIO)
dish.rcvtimeo = 250
group = 'mygroup'
dish.join(group)
received_count = 0
received = set()
sent = set()
for i in range(10):
msg = str(i).encode('ascii')
sent.add(msg)
radio.send(msg, group=group)
try:
recvd = dish.recv()
except zmq.Again:
time.sleep(0.1)
else:
received.add(recvd)
received_count += 1
# assert that we got *something*
assert len(received.intersection(sent)) >= 5
| 2.203125 | 2 |
home/push/mipush/APIError.py | he0119/smart-home | 0 | 4104 | class APIError(Exception):
"""
raise APIError if receiving json message indicating failure.
"""
def __init__(self, error_code, error, request):
self.error_code = error_code
self.error = error
self.request = request
Exception.__init__(self, error)
def __str__(self):
return "APIError: %s: %s, request: %s" % (
self.error_code,
self.error,
self.request,
)
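# Hedged usage sketch (illustrative; the error code, message and request below are
# made-up placeholders, not real MiPush API values): raising and handling APIError.
if __name__ == "__main__":
    try:
        raise APIError(10017, "invalid auth token", "POST /message")
    except APIError as e:
        print(e)  # -> APIError: 10017: invalid auth token, request: POST /message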
| 3.375 | 3 |
pythonlibs/mantis/templates/webapp/src/webapp/base.py | adoggie/Tibet.6 | 22 | 4105 | <filename>pythonlibs/mantis/templates/webapp/src/webapp/base.py
#coding:utf-8
class SystemDeviceType(object):
InnerBox = 1 # indoor main unit with a detached main screen
InnerScreen = 2 # indoor screen detached from the main unit
OuterBox = 3 # outdoor unit
PropCallApp = 4 # property management call/duty app
PropSentryApp = 5 # property guard-booth terminal
Others = 10
ValidatedList = (1,2,3,4,5)
class Constants(object):
SUPER_ACCESS_TOKEN = '<KEY>' | 1.648438 | 2 |
base/site-packages/django_qbe/urls.py | edisonlz/fastor | 285 | 4106 | <filename>base/site-packages/django_qbe/urls.py
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url
from django_qbe.exports import formats
urlpatterns = patterns('django_qbe.views',
url(r'^$', 'qbe_form', name="qbe_form"),
url(r'^js/$', 'qbe_js', name="qbe_js"),
url(r'^results/bookmark/$',
'qbe_bookmark', name="qbe_bookmark"),
url(r'^results/export/(?P<format>(%s))/$' % "|".join(formats.keys()),
'qbe_export', name="qbe_export"),
url(r'^results/proxy/$',
'qbe_proxy', name="qbe_proxy"),
url(r'^results/(?P<query_hash>(.*))/$',
'qbe_results', name="qbe_results"),
url(r'^auto/$', 'qbe_autocomplete', name="qbe_autocomplete"),
)
| 1.742188 | 2 |
augment.py | docongminh/Text-Image-Augmentation-python | 217 | 4107 | # -*- coding:utf-8 -*-
# Author: RubanSeven
# import cv2
import numpy as np
# from transform import get_perspective_transform, warp_perspective
from warp_mls import WarpMLS
def distort(src, segment):
img_h, img_w = src.shape[:2]
cut = img_w // segment
thresh = cut // 3
# thresh = img_h // segment // 3
# thresh = img_h // 5
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([np.random.randint(thresh), np.random.randint(thresh)])
dst_pts.append([img_w - np.random.randint(thresh), np.random.randint(thresh)])
dst_pts.append([img_w - np.random.randint(thresh), img_h - np.random.randint(thresh)])
dst_pts.append([np.random.randint(thresh), img_h - np.random.randint(thresh)])
half_thresh = thresh * 0.5
for cut_idx in np.arange(1, segment, 1):
src_pts.append([cut * cut_idx, 0])
src_pts.append([cut * cut_idx, img_h])
dst_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
np.random.randint(thresh) - half_thresh])
dst_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
img_h + np.random.randint(thresh) - half_thresh])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
def stretch(src, segment):
img_h, img_w = src.shape[:2]
cut = img_w // segment
thresh = cut * 4 // 5
# thresh = img_h // segment // 3
# thresh = img_h // 5
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([0, 0])
dst_pts.append([img_w, 0])
dst_pts.append([img_w, img_h])
dst_pts.append([0, img_h])
half_thresh = thresh * 0.5
for cut_idx in np.arange(1, segment, 1):
move = np.random.randint(thresh) - half_thresh
src_pts.append([cut * cut_idx, 0])
src_pts.append([cut * cut_idx, img_h])
dst_pts.append([cut * cut_idx + move, 0])
dst_pts.append([cut * cut_idx + move, img_h])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
def perspective(src):
img_h, img_w = src.shape[:2]
thresh = img_h // 2
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([0, np.random.randint(thresh)])
dst_pts.append([img_w, np.random.randint(thresh)])
dst_pts.append([img_w, img_h - np.random.randint(thresh)])
dst_pts.append([0, img_h - np.random.randint(thresh)])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
# def distort(src, segment):
# img_h, img_w = src.shape[:2]
# dst = np.zeros_like(src, dtype=np.uint8)
#
# cut = img_w // segment
# thresh = img_h // 8
#
# src_pts = list()
# # dst_pts = list()
#
# src_pts.append([-np.random.randint(thresh), -np.random.randint(thresh)])
# src_pts.append([-np.random.randint(thresh), img_h + np.random.randint(thresh)])
#
# # dst_pts.append([0, 0])
# # dst_pts.append([0, img_h])
# dst_box = np.array([[0, 0], [0, img_h], [cut, 0], [cut, img_h]], dtype=np.float32)
#
# half_thresh = thresh * 0.5
#
# for cut_idx in np.arange(1, segment, 1):
# src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
# np.random.randint(thresh) - half_thresh])
# src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
# img_h + np.random.randint(thresh) - half_thresh])
#
# # dst_pts.append([cut * i, 0])
# # dst_pts.append([cut * i, img_h])
#
# src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)
#
# # mat = cv2.getPerspectiveTransform(src_box, dst_box)
# # print(mat)
# # dst[:, cut * (cut_idx - 1):cut * cut_idx] = cv2.warpPerspective(src, mat, (cut, img_h))
#
# mat = get_perspective_transform(dst_box, src_box)
# dst[:, cut * (cut_idx - 1):cut * cut_idx] = warp_perspective(src, mat, (cut, img_h))
# # print(mat)
#
# src_pts.append([img_w + np.random.randint(thresh) - half_thresh,
# np.random.randint(thresh) - half_thresh])
# src_pts.append([img_w + np.random.randint(thresh) - half_thresh,
# img_h + np.random.randint(thresh) - half_thresh])
# src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)
#
# # mat = cv2.getPerspectiveTransform(src_box, dst_box)
# # dst[:, cut * (segment - 1):] = cv2.warpPerspective(src, mat, (img_w - cut * (segment - 1), img_h))
# mat = get_perspective_transform(dst_box, src_box)
# dst[:, cut * (segment - 1):] = warp_perspective(src, mat, (img_w - cut * (segment - 1), img_h))
#
# return dst
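# Hedged usage sketch (added for illustration, not part of the original file): apply the
# three augmentations above to a synthetic text-line image. Requires the sibling
# warp_mls module imported at the top, and assumes WarpMLS.generate() returns a
# NumPy image, as the functions above imply.
if __name__ == "__main__":
    demo_img = (np.random.rand(32, 100, 3) * 255).astype(np.uint8)
    print(distort(demo_img, segment=4).shape)
    print(stretch(demo_img, segment=4).shape)
    print(perspective(demo_img).shape)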
| 2.34375 | 2 |
Graph/print all paths from two vertices in a directed graph.py | ikaushikpal/DS-450-python | 3 | 4108 | from collections import defaultdict
class Graph:
def __init__(self):
self.graph = defaultdict(list)
def addEdge(self, starting_vertex, end_vertex):
self.graph[starting_vertex].append(end_vertex)
def printAllPaths(self, starting_vertex, target_vertex):
visitedVertices = defaultdict(bool)
self.resultPaths = []
self.dfsUtil(starting_vertex, visitedVertices, target_vertex, "")
return self.resultPaths
def dfsUtil(self, current_vertex, visitedVertices, target_vertex, output_string):
visitedVertices[current_vertex] = True
if output_string == "":
output_string = current_vertex
else:
output_string = output_string + "->" + current_vertex
if current_vertex == target_vertex:
self.resultPaths.append(output_string)
return
for vertex in self.graph[current_vertex]:
if visitedVertices[vertex] == False:
self.dfsUtil(vertex, visitedVertices, target_vertex, output_string)
visitedVertices[vertex] = False
if __name__ == "__main__":
g = Graph()
g.addEdge("A", "B")
g.addEdge("B", "D")
g.addEdge("A", "D")
g.addEdge("C", "A")
g.addEdge("C", "B")
g.addEdge("A", "C")
paths = g.printAllPaths("A", "B")
print(paths)
| 3.5625 | 4 |
tests/pipegeojson_test/test_pipegeojson.py | kamyarrasta/berrl | 1 | 4109 | # testing the output of pipegeojson against different input types
import berrl as bl
import itertools
# making line with csv file location
line1=bl.make_line('csvs/line_example.csv')
# making line with list
testlist=bl.read('csvs/line_example.csv')
line2=bl.make_line(testlist,list=True)
# testing each line geojson against each other
ind=0
for a,b in itertools.izip(line1,line2):
if not a==b:
ind=1
# carrying the passing of status down to the test for the rest
if ind==0:
passing=0
else:
passing=1
# making points with csv file location
points1=bl.make_line('csvs/points_example.csv')
# making points with list
testlist=bl.read('csvs/points_example.csv')
points2=bl.make_line(testlist,list=True)
# testing each points geojson against each other
ind=0
for a,b in itertools.izip(points1,points2):
if not a==b:
ind=1
# carrying the passing of status down to the test for the rest
if ind==0 and passing==0:
passing=0
else:
passing=1
# making blocks with csv file location
blocks1=bl.make_line('csvs/blocks_example.csv')
# making blocks with list
testlist=bl.read('csvs/blocks_example.csv')
blocks2=bl.make_line(testlist,list=True)
# testing each blocks geojson against each other
ind=0
for a,b in itertools.izip(blocks1,blocks2):
if not a==b:
ind=1
# carrying the passing of status down to the test for the rest
if ind==0 and passing==0:
passing=0
else:
passing=1
# making blocks with csv file location
polygon1=bl.make_line('csvs/polygon_example.csv')
# making blocks with list
testlist=bl.read('csvs/polygon_example.csv')
polygon2=bl.make_line(testlist,list=True)
# testing each blocks geojson against each other
ind=0
for a,b in itertools.izip(polygon1,polygon2):
if not a==b:
ind=1
# carrying the passing of status down to the test for the rest
if ind==0 and passing==0:
passing=0
else:
passing=1
# printing output result
if passing==0:
print 'pipegeojson build passed'
else:
print 'pipegeojson build failed'
| 3.171875 | 3 |
tierpsy/debugging/catch_infinite_loop.py | mgh17/tierpsy-tracker | 9 | 4110 | <filename>tierpsy/debugging/catch_infinite_loop.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 8 16:19:07 2017
@author: ajaver
"""
import os
import cv2
import sys
import glob
import threading
from functools import partial
main_dir = '/Volumes/behavgenom_archive$/Celine/raw/'
fnames = glob.glob(os.path.join(main_dir, '**', '*.avi'))
fnames = [x for x in fnames if not x.endswith('_seg.avi')]
fnames = sorted(fnames)
def get_and_release(video_file):
original = sys.stderr
f = open(os.devnull, 'w')
sys.stderr = f
print('here')
vid = cv2.VideoCapture(video_file)
vid.release()
sys.stderr = original
return vid
all_threads = []
for ii, video_file in enumerate(fnames):
print(ii, video_file)
vid = cv2.VideoCapture(video_file)
vid.release()
t = threading.Thread(target = partial(get_and_release, video_file))
t.start()
all_threads.append((video_file, t))
| 2.15625 | 2 |
devito/passes/iet/languages/C.py | guaacoelho/devito | 199 | 4111 | from devito.ir import Call
from devito.passes.iet.definitions import DataManager
from devito.passes.iet.langbase import LangBB
__all__ = ['CBB', 'CDataManager']
class CBB(LangBB):
mapper = {
'aligned': lambda i:
'__attribute__((aligned(%d)))' % i,
'host-alloc': lambda i, j, k:
Call('posix_memalign', (i, j, k)),
'host-free': lambda i:
Call('free', (i,)),
}
class CDataManager(DataManager):
lang = CBB
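# Hedged worked example (comment only, added for illustration): the mapper entries
# above are callables, e.g. CBB.mapper['aligned'](64) evaluates to the string
# '__attribute__((aligned(64)))', while 'host-alloc'/'host-free' build devito Call nodes.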
| 1.898438 | 2 |
tests/_test_image.py | Freakwill/ell | 0 | 4112 | #!/usr/bin/env python3
"""Test methods about image process
Make sure the existance of the images
"""
from ell import *
import numpy as np
_filter = Filter.from_name('db4')
def test_resize():
chennal=0
c = ImageRGB.open('src/lenna.jpg')
d=c.resize(minInd=(-100,-100), maxInd=(100,100))
d.to_image()
assert True
def test_quantize():
im = ImageRGB.open('src/lenna.jpg')
d = im.quantize(128)
d.to_image()
assert True
def test_convolve():
im = ImageRGB.open('src/lenna.jpg')
d = (im @ _filter.H).D
# print(f"{d:i}, {d.shape}")
assert True
def test_filter():
im = ImageRGB.open('src/lenna.jpg')
rec = (im @ _filter.H).D.U @ _filter
assert True
def test_rec():
im = ImageRGB.open('src/lenna.jpg')
def _f(im, h1, h2=None):
if h2 is None: h2 = h1
return (im.conv1d(h1.H, axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1, axis=0).conv1d(h2, axis=1)
rec = _f(im, _filter) + _f(im, _filter.H) + _f(im, _filter, _filter.H) + _f(im, _filter.H, _filter)
assert True
def test_rec2():
im = ImageRGB.open('../src/lenna.jpg')
def _f(im, h1, h2=None):
if h2 is None: h2 = h1
# return (im @ h1.tensor(h2).H).P @ h1.tensor(h2)
return (im.conv1d(h1.H, axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1, axis=0).conv1d(h2, axis=1)
im1 = _f(im, _filter)
rec1 = _f(im1, _filter) + _f(im1, _filter.H) + _f(im1, _filter, _filter.H) + _f(im1, _filter.H, _filter)
rec2 = rec1 + _f(im, _filter.H) + _f(im, _filter, _filter.H) + _f(im, _filter.H, _filter)
assert True
def test_rec3():
im = ImageRGB.open('src/lenna.jpg')
def _f(im, h1, h2=None):
if h2 is None: h2 = h1
f = h1.tensor(h2)
return im.reduce(f).expand(f)
im1 = im.reduce(_filter)
rec1 = _f(im1, _filter) + _f(im1, _filter.H) + _f(im1, _filter, _filter.H) + _f(im1, _filter.H, _filter)
rec2 = rec1.expand(_filter) + _f(im, _filter.H) + _f(im, _filter, _filter.H) + _f(im, _filter.H, _filter)
assert True
| 2.46875 | 2 |
donkeycar/parts/pytorch/torch_data.py | adricl/donkeycar | 1,100 | 4113 | # PyTorch
import torch
from torch.utils.data import IterableDataset, DataLoader
from donkeycar.utils import train_test_split
from donkeycar.parts.tub_v2 import Tub
from torchvision import transforms
from typing import List, Any
from donkeycar.pipeline.types import TubRecord, TubDataset
from donkeycar.pipeline.sequence import TubSequence
import pytorch_lightning as pl
def get_default_transform(for_video=False, for_inference=False, resize=True):
"""
Creates a default transform to work with torchvision models
Video transform:
All pre-trained models expect input images normalized in the same way,
i.e. mini-batches of 3-channel RGB videos of shape (3 x T x H x W),
where H and W are expected to be 112, and T is a number of video frames
in a clip. The images have to be loaded in to a range of [0, 1] and
then normalized using mean = [0.43216, 0.394666, 0.37645] and
std = [0.22803, 0.22145, 0.216989].
"""
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
input_size = (224, 224)
if for_video:
mean = [0.43216, 0.394666, 0.37645]
std = [0.22803, 0.22145, 0.216989]
input_size = (112, 112)
transform_items = [
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)
]
if resize:
transform_items.insert(0, transforms.Resize(input_size))
return transforms.Compose(transform_items)
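# Hedged usage note (added for illustration): get_default_transform() returns a
# torchvision Compose; applied to a PIL.Image it yields a normalized float tensor
# of shape [3, 224, 224] (or [3, 112, 112] when for_video=True and resize=True).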
class TorchTubDataset(IterableDataset):
'''
Loads the dataset, and creates a train/test split.
'''
def __init__(self, config, records: List[TubRecord], transform=None):
"""Create a PyTorch Tub Dataset
Args:
config (object): the configuration information
records (List[TubRecord]): a list of tub records
transform (function, optional): a transform to apply to the data
"""
self.config = config
# Handle the transforms
if transform:
self.transform = transform
else:
self.transform = get_default_transform()
self.sequence = TubSequence(records)
self.pipeline = self._create_pipeline()
self.len = len(records)
def _create_pipeline(self):
""" This can be overridden if more complicated pipelines are
required """
def y_transform(record: TubRecord):
angle: float = record.underlying['user/angle']
throttle: float = record.underlying['user/throttle']
predictions = torch.tensor([angle, throttle], dtype=torch.float)
# Normalize to be between [0, 1]
# angle and throttle are originally between [-1, 1]
predictions = (predictions + 1) / 2
return predictions
def x_transform(record: TubRecord):
# Loads the result of Image.open()
img_arr = record.image(cached=True, as_nparray=False)
return self.transform(img_arr)
# Build pipeline using the transformations
pipeline = self.sequence.build_pipeline(x_transform=x_transform,
y_transform=y_transform)
return pipeline
def __len__(self):
return len(self.sequence)
def __iter__(self):
return iter(self.pipeline)
class TorchTubDataModule(pl.LightningDataModule):
def __init__(self, config: Any, tub_paths: List[str], transform=None):
"""Create a PyTorch Lightning Data Module to contain all data loading logic
Args:
config (object): the configuration information
tub_paths (List[str]): a list of paths to the tubs to use (minimum size of 1).
Each tub path corresponds to another training run.
transform (function, optional): a transform to apply to the data
"""
super().__init__()
self.config = config
self.tub_paths = tub_paths
# Handle the transforms
if transform:
self.transform = transform
else:
self.transform = get_default_transform()
self.tubs: List[Tub] = [Tub(tub_path, read_only=True)
for tub_path in self.tub_paths]
self.records: List[TubRecord] = []
def setup(self, stage=None):
"""Load all the tub data and set up the datasets.
Args:
stage ([string], optional): setup expects a string arg stage.
It is used to separate setup logic for trainer.fit
and trainer.test. Defaults to None.
"""
# Loop through all the different tubs and load all the records for each of them
for tub in self.tubs:
for underlying in tub:
record = TubRecord(self.config, tub.base_path,
underlying=underlying)
self.records.append(record)
train_records, val_records = train_test_split(
self.records, test_size=(1. - self.config.TRAIN_TEST_SPLIT))
assert len(val_records) > 0, "Not enough validation data. Add more data"
self.train_dataset = TorchTubDataset(
self.config, train_records, transform=self.transform)
self.val_dataset = TorchTubDataset(
self.config, val_records, transform=self.transform)
def train_dataloader(self):
# The number of workers are set to 0 to avoid errors on Macs and Windows
# See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534
return DataLoader(self.train_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0)
def val_dataloader(self):
# The number of workers are set to 0 to avoid errors on Macs and Windows
# See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534
return DataLoader(self.val_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0)
| 2.765625 | 3 |
lite/__init__.py | CleverInsight/sparx-lite | 0 | 4114 | import os
from tornado.template import Template
__SNIPPET__ = os.path.join(os.path.dirname(os.path.abspath(__file__)), '_snippet')
def T(name, **kw):
t = Template(open(os.path.join(__SNIPPET__, name + '.html'), 'rb').read())
return t.generate(**dict([('template_file', name)] + globals().items() + kw.items()))
| 2.5 | 2 |
homeassistant/components/sensor/hddtemp.py | mdonoughe/home-assistant | 2 | 4115 | """
Support for getting the disk temperature of a host.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.hddtemp/
"""
import logging
from datetime import timedelta
from telnetlib import Telnet
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, CONF_HOST, CONF_PORT, TEMP_CELSIUS, TEMP_FAHRENHEIT, CONF_DISKS)
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTR_DEVICE = 'device'
ATTR_MODEL = 'model'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 7634
DEFAULT_NAME = 'HD Temperature'
DEFAULT_TIMEOUT = 5
SCAN_INTERVAL = timedelta(minutes=1)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_DISKS, default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the HDDTemp sensor."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
disks = config.get(CONF_DISKS)
hddtemp = HddTempData(host, port)
hddtemp.update()
if hddtemp.data is None:
return False
if not disks:
disks = [next(iter(hddtemp.data)).split('|')[0]]
dev = []
for disk in disks:
if disk in hddtemp.data:
dev.append(HddTempSensor(name, disk, hddtemp))
add_devices(dev, True)
class HddTempSensor(Entity):
"""Representation of a HDDTemp sensor."""
def __init__(self, name, disk, hddtemp):
"""Initialize a HDDTemp sensor."""
self.hddtemp = hddtemp
self.disk = disk
self._name = '{} {}'.format(name, disk)
self._state = None
self._details = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
if self._details[3] == 'C':
return TEMP_CELSIUS
return TEMP_FAHRENHEIT
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {
ATTR_DEVICE: self._details[0],
ATTR_MODEL: self._details[1],
}
def update(self):
"""Get the latest data from HDDTemp daemon and updates the state."""
self.hddtemp.update()
if self.hddtemp.data and self.disk in self.hddtemp.data:
self._details = self.hddtemp.data[self.disk].split('|')
self._state = self._details[2]
else:
self._state = None
class HddTempData(object):
"""Get the latest data from HDDTemp and update the states."""
def __init__(self, host, port):
"""Initialize the data object."""
self.host = host
self.port = port
self.data = None
def update(self):
"""Get the latest data from HDDTemp running as daemon."""
try:
connection = Telnet(
host=self.host, port=self.port, timeout=DEFAULT_TIMEOUT)
data = connection.read_all().decode(
'ascii').lstrip('|').rstrip('|').split('||')
self.data = {data[i].split('|')[0]: data[i]
for i in range(0, len(data), 1)}
except ConnectionRefusedError:
_LOGGER.error(
"HDDTemp is not available at %s:%s", self.host, self.port)
self.data = None
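# Hedged worked example (comment only, payload is made up): given a raw hddtemp
# response such as '|/dev/sda|ST1000|35|C||/dev/sdb|WD20|38|C|', update() strips the
# outer '|', splits on '||' and yields
# {'/dev/sda': '/dev/sda|ST1000|35|C', '/dev/sdb': '/dev/sdb|WD20|38|C'};
# HddTempSensor then splits each value on '|' into (device, model, temp, unit).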
| 2.71875 | 3 |
boomer.py | JohnnySn0w/BabbleBot | 1 | 4116 | <reponame>JohnnySn0w/BabbleBot
import random
prefix = [
'Look at you! ',
'Bless ',
'Bless! ',
'I heard about that! ',
'Amen!',
'You and the kids doing alright?',
'Miss ya\'ll!'
]
suffix = [
'. Amen!',
'. God bless america',
'. God bless!',
' haha',
'. love ya!',
'. love ya\'ll!',
]
def add_pre_suf(sentence):
if random.randint(1,10) <= 6:
if random.randint(1,10) <= 5:
sentence = prefix[random.randint(0, len(prefix) - 1)] + sentence
else:
sentence += suffix[random.randint(0, len(suffix) - 1)]
return sentence
def add_elipses(sentence):
words = sentence.split()
for i in range(4, len(words), 5):
if random.randint(1,10) <= 7:
words[i] += "..."
return " ".join(words)
def boomer_caps(sentence):
seed = random.randint(1, 10)
sent_array = sentence.split()
if seed in (1, 2, 3):
return sentence
elif seed in (4, 5):
temp_sent = []
for x in sent_array:
if random.random() < 0.25:
x = x.upper()
temp_sent.append(x)
return " ".join(temp_sent)
elif seed in (6, 7):
temp_sent = []
for x in sent_array:
if random.random() < 0.5:
x = x.upper()
temp_sent.append(x)
return " ".join(temp_sent)
elif seed in (8, 9):
return sentence.title()
elif seed == 10:
return sentence.upper()
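# Hedged usage sketch (illustrative only, not part of the original bot): chain the
# three transforms above to "boomerify" a sample message.
if __name__ == "__main__":
    message = "hope everyone is doing well this fine morning"
    print(add_pre_suf(add_elipses(boomer_caps(message))))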
| 3.171875 | 3 |
bot/views.py | eyobofficial/COVID-19-Mutual-Aid | 0 | 4117 | import telegram
from django.conf import settings
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.views.generic import View
from django.views.decorators.csrf import csrf_exempt
from braces.views import CsrfExemptMixin
from rest_framework.authentication import BasicAuthentication
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.permissions import AllowAny
from .bots import TelegramBot
from .models import TelegramUser as User
@method_decorator(csrf_exempt, name='dispatch')
class TelegramBotView(APIView):
permission_classes = (AllowAny, )
def post(self, request, *args, **kwargs):
context = request.data
bot = TelegramBot(context)
user, _ = User.objects.get_or_create(
id=bot.sender['id'],
defaults={
'first_name': bot.sender['first_name'],
'last_name': bot.sender.get('last_name', ''),
'username': bot.sender.get('username', ''),
'is_bot': bot.sender.get('is_bot', False)
}
)
user.access_count += 1
user.save()
bot.process(user)
return Response(status=status.HTTP_200_OK)
| 1.921875 | 2 |
code/counterfactual_generative_networks-main/imagenet/train_cgn.py | dummyxyz1/re_counterfactual_generative | 0 | 4118 | import os
from datetime import datetime
from os.path import join
import pathlib
from tqdm import tqdm
import argparse
import torch
from torch import nn, optim
from torch.autograd import Variable
import torchvision
from torchvision.transforms import Pad
from torchvision.utils import make_grid
import repackage
repackage.up()
from imagenet.models import CGN
from imagenet.config import get_cfg_defaults
from shared.losses import *
from utils import Optimizers
from inception_score import *
def save_sample_sheet(cgn, u_fixed, sample_path, ep_str):
cgn.eval()
dev = u_fixed.to(cgn.get_device())
ys = [15, 251, 330, 382, 385, 483, 559, 751, 938, 947, 999]
to_save = []
with torch.no_grad():
for y in ys:
# generate
y_vec = cgn.get_class_vec(y, sz=1)
inp = (u_fixed.to(dev), y_vec.to(dev), cgn.truncation)
x_gt, mask, premask, foreground, background, bg_mask = cgn(inp)
x_gen = mask * foreground + (1 - mask) * background
# build class grid
to_plot = [premask, foreground, background, x_gen, x_gt]
grid = make_grid(torch.cat(to_plot).detach().cpu(),
nrow=len(to_plot), padding=2, normalize=True)
# add unnormalized mask
mask = Pad(2)(mask[0].repeat(3, 1, 1)).detach().cpu()
grid = torch.cat([mask, grid], 2)
# save to disk
to_save.append(grid)
del to_plot, mask, premask, foreground, background, x_gen, x_gt
# save the image
path = join(sample_path, f'cls_sheet_' + ep_str + '.png')
torchvision.utils.save_image(torch.cat(to_save, 1), path)
cgn.train()
def save_sample_single(cgn, u_fixed, sample_path, ep_str):
cgn.eval()
dev = u_fixed.to(cgn.get_device())
ys = [15, 251, 330, 382, 385, 483, 559, 751, 938, 947, 999]
with torch.no_grad():
for y in ys:
# generate
y_vec = cgn.get_class_vec(y, sz=1)
inp = (u_fixed.to(dev), y_vec.to(dev), cgn.truncation)
_, mask, premask, foreground, background, _ = cgn(inp)
x_gen = mask * foreground + (1 - mask) * background
# save_images
path = join(sample_path, f'{y}_1_premask_' + ep_str + '.png')
torchvision.utils.save_image(premask, path, normalize=True)
path = join(sample_path, f'{y}_2_mask_' + ep_str + '.png')
torchvision.utils.save_image(mask, path, normalize=True)
path = join(sample_path, f'{y}_3_texture_' + ep_str + '.png')
torchvision.utils.save_image(foreground, path, normalize=True)
path = join(sample_path, f'{y}_4_bgs_' + ep_str + '.png')
torchvision.utils.save_image(background, path, normalize=True)
path = join(sample_path, f'{y}_5_gen_ims_' + ep_str + '.png')
torchvision.utils.save_image(x_gen, path, normalize=True)
cgn.train()
def fit(cfg, cgn, opts, losses):
inception_score_val = list()
# total number of episodes, accounted for batch accumulation
episodes = cfg.TRAIN.EPISODES
episodes *= cfg.TRAIN.BATCH_ACC
# directories for experiments
time_str = datetime.now().strftime("%Y_%m_%d_%H_%M")
if cfg.WEIGHTS_PATH:
weights_path = str(pathlib.Path(cfg.WEIGHTS_PATH).parent)
start_ep = int(pathlib.Path(cfg.WEIGHTS_PATH).stem[3:])
sample_path = weights_path.replace('weights', 'samples')
ep_range = (start_ep, start_ep + episodes)
else:
model_path = join('imagenet', 'experiments',
f'cgn_{time_str}_{cfg.MODEL_NAME}')
weights_path = join(model_path, 'weights')
sample_path = join(model_path, 'samples')
pathlib.Path(weights_path).mkdir(parents=True, exist_ok=True)
pathlib.Path(sample_path).mkdir(parents=True, exist_ok=True)
ep_range = (0, episodes)
# fixed noise sample
u_fixed_path = join('imagenet', 'experiments', 'u_fixed.pt')
if not os.path.isfile(u_fixed_path) or cfg.LOG.SAMPLED_FIXED_NOISE:
u_fixed = cgn.get_noise_vec()
torch.save(u_fixed, u_fixed_path)
else:
u_fixed = torch.load(u_fixed_path)
# Training Loop
cgn.train()
L_l1, L_perc, L_binary, L_mask, L_text, L_bg = losses
save_samples = save_sample_single if cfg.LOG.SAVE_SINGLES else save_sample_sheet
pbar = tqdm(range(*ep_range))
for i, ep in enumerate(pbar):
x_gt, mask, premask, foreground, background, background_mask = cgn()
x_gen = mask * foreground + (1 - mask) * background
# Losses
losses_g = {}
losses_g['l1'] = L_l1(x_gen, x_gt)
losses_g['perc'] = L_perc(x_gen, x_gt)
losses_g['binary'] = L_binary(mask)
losses_g['mask'] = L_mask(mask)
losses_g['perc_text'] = L_text(x_gt, mask, foreground)
losses_g['bg'] = L_bg(background_mask)
# backprop
losses_g = {k: v.mean() for k, v in losses_g.items()}
g_loss = sum(losses_g.values())
g_loss.backward()
if (i+1) % cfg.TRAIN.BATCH_ACC == 0:
opts.step(['shape', 'bg', 'texture'])
# Saving
if not i % cfg.LOG.SAVE_ITER:
ep_str = f'ep_{ep:07}'
save_samples(cgn, u_fixed, sample_path, ep_str)
torch.save(cgn.state_dict(), join(weights_path, ep_str + '.pth'))
# Logging
if cfg.LOG.LOSSES:
msg = ''.join([f"[{k}: {v:.3f}]" for k, v in losses_g.items()])
pbar.set_description(msg)
# Calculate Inception SCore
if cfg.LOG.INCEPTION_SCORE:
score, score_std = inception_score(x_gen)
inception_score_val.append(score)
def main(cfg):
# model init
cgn = CGN(
batch_sz=cfg.TRAIN.BATCH_SZ,
truncation=cfg.MODEL.TRUNCATION,
pretrained=True,
)
print("------CGN-------")
print(cgn)
if cfg.WEIGHTS_PATH:
weights = torch.load(cfg.WEIGHTS_PATH)
weights = {k.replace('module.', ''): v for k, v in weights.items()}
cgn.load_state_dict(weights)
# optimizers
opts = Optimizers()
opts.set('shape', cgn.f_shape, cfg.LR.SHAPE)
opts.set('texture', cgn.f_text, cfg.LR.TEXTURE)
opts.set('bg', cgn.f_bg, cfg.LR.BG)
# losses
L_l1 = ReconstructionLoss(mode='l1', loss_weight=cfg.LAMBDA.L1)
L_perc = PerceptualLoss(style_wgts=cfg.LAMBDA.PERC)
L_binary = BinaryLoss(loss_weight=cfg.LAMBDA.BINARY)
L_mask = MaskLoss(loss_weight=cfg.LAMBDA.MASK)
L_text = PercLossText(style_wgts=cfg.LAMBDA.TEXT)
L_bg = BackgroundLoss(loss_weight=cfg.LAMBDA.BG)
losses = (L_l1, L_perc, L_binary, L_mask, L_text, L_bg)
# push to device and train
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
cgn = cgn.to(device)
losses = (l.to(device) for l in losses)
fit(cfg, cgn, opts, losses)
def merge_args_and_cfg(args, cfg):
cfg.MODEL_NAME = args.model_name
cfg.WEIGHTS_PATH = args.weights_path
cfg.LOG.SAMPLED_FIXED_NOISE = args.sampled_fixed_noise
cfg.LOG.SAVE_SINGLES = args.save_singles
cfg.LOG.SAVE_ITER = args.save_iter
cfg.LOG.LOSSES = args.log_losses
cfg.LOG.INCEPTION_SCORE = True
cfg.TRAIN.EPISODES = args.episodes
cfg.TRAIN.BATCH_SZ = args.batch_sz
cfg.TRAIN.BATCH_ACC = args.batch_acc
cfg.MODEL.TRUNCATION = args.truncation
return cfg
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', default='tmp',
help='Weights and samples will be saved under experiments/model_name')
parser.add_argument('--weights_path', default='',
help='provide path to continue training')
parser.add_argument('--sampled_fixed_noise', default=False, action='store_true',
help='If you want a different noise vector than provided in the repo')
parser.add_argument('--save_singles', default=False, action='store_true',
help='Save single images instead of sheets')
parser.add_argument('--truncation', type=float, default=1.0,
help='Truncation value for noise sampling')
parser.add_argument('--episodes', type=int, default=300,
help="We don't do dataloading, hence, one episode = one gradient update.")
parser.add_argument('--batch_sz', type=int, default=1,
help='Batch size, use in conjunciton with batch_acc')
parser.add_argument('--batch_acc', type=int, default=4000,
help='pseudo_batch_size = batch_acc*batch size')
parser.add_argument('--save_iter', type=int, default=4000,
help='Save samples/weights every n iter')
parser.add_argument('--log_losses', default=False, action='store_true',
help='Print out losses')
args = parser.parse_args()
cfg = get_cfg_defaults()
cfg = merge_args_and_cfg(args, cfg)
print(cfg)
main(cfg)
| 1.726563 | 2 |
portfolio/urls.py | ramza007/Ramza.io | 3 | 4119 | <reponame>ramza007/Ramza.io<gh_stars>1-10
from django.conf.urls import url
from django.urls import path, include,re_path
from . import views
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
path('', views.index, name='index'),
path('about', views.about, name='about'),
path('projects', views.projects, name='projects'),
path('photos', views.photos, name='photos'),
re_path(r'^api/projects/$', views.ProjectList.as_view()),
re_path(r'^api-token-auth/', obtain_auth_token),
re_path(r'api/project/project-id/(?P<pk>[0-9]+)/$', views.ProjectDescription.as_view()),
]
| 1.984375 | 2 |
tests/resources/mlflow-test-plugin/mlflow_test_plugin/file_store.py | iPieter/kiwi | 0 | 4120 | <reponame>iPieter/kiwi
from six.moves import urllib
from kiwi.store.tracking.file_store import FileStore
class PluginFileStore(FileStore):
"""FileStore provided through entrypoints system"""
def __init__(self, store_uri=None, artifact_uri=None):
path = urllib.parse.urlparse(store_uri).path if store_uri else None
self.is_plugin = True
super(PluginFileStore, self).__init__(path, artifact_uri)
| 1.984375 | 2 |
tests/server/test_flask_api.py | YuhangCh/terracotta | 0 | 4121 | <filename>tests/server/test_flask_api.py
from io import BytesIO
import json
import urllib.parse
from collections import OrderedDict
from PIL import Image
import numpy as np
import pytest
@pytest.fixture(scope='module')
def flask_app():
from terracotta.server import create_app
return create_app()
@pytest.fixture(scope='module')
def client(flask_app):
with flask_app.test_client() as client:
yield client
def test_get_keys(client, use_testdb):
rv = client.get('/keys')
expected_response = [
{'key': 'key1'},
{'key': 'akey'},
{'key': 'key2', 'description': 'key2'}
]
assert rv.status_code == 200
assert expected_response == json.loads(rv.data)['keys']
def test_get_metadata(client, use_testdb):
rv = client.get('/metadata/val11/x/val12/')
assert rv.status_code == 200
assert ['extra_data'] == json.loads(rv.data)['metadata']
def test_get_metadata_nonexisting(client, use_testdb):
rv = client.get('/metadata/val11/x/NONEXISTING/')
assert rv.status_code == 404
def test_get_datasets(client, use_testdb):
rv = client.get('/datasets')
assert rv.status_code == 200
datasets = json.loads(rv.data, object_pairs_hook=OrderedDict)['datasets']
assert len(datasets) == 4
assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')]) in datasets
def test_get_datasets_pagination(client, use_testdb):
# no page (implicit 0)
rv = client.get('/datasets?limit=2')
assert rv.status_code == 200
response = json.loads(rv.data, object_pairs_hook=OrderedDict)
assert response['limit'] == 2
assert response['page'] == 0
first_datasets = response['datasets']
assert len(first_datasets) == 2
assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')]) in first_datasets
# second page
rv = client.get('/datasets?limit=2&page=1')
assert rv.status_code == 200
response = json.loads(rv.data, object_pairs_hook=OrderedDict)
assert response['limit'] == 2
assert response['page'] == 1
last_datasets = response['datasets']
assert len(last_datasets) == 2
assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')]) not in last_datasets
# page out of range
rv = client.get('/datasets?limit=2&page=1000')
assert rv.status_code == 200
assert not json.loads(rv.data)['datasets']
# invalid page
rv = client.get('/datasets?page=-1')
assert rv.status_code == 400
# invalid limit
rv = client.get('/datasets?limit=-1')
assert rv.status_code == 400
def test_get_datasets_selective(client, use_testdb):
rv = client.get('/datasets?key1=val21')
assert rv.status_code == 200
assert len(json.loads(rv.data)['datasets']) == 3
rv = client.get('/datasets?key1=val21&key2=val23')
assert rv.status_code == 200
assert len(json.loads(rv.data)['datasets']) == 1
def test_get_datasets_unknown_key(client, use_testdb):
rv = client.get('/datasets?UNKNOWN=val21')
assert rv.status_code == 400
def test_get_singleband_greyscale(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
def test_get_singleband_extra_args(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?foo=bar&baz=quz')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
def test_get_singleband_cmap(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
def test_get_singleband_preview(client, use_testdb):
import terracotta
settings = terracotta.get_settings()
rv = client.get(f'/singleband/val11/x/val12/preview.png?colormap=jet')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
def urlsafe_json(payload):
payload_json = json.dumps(payload)
return urllib.parse.quote_plus(payload_json, safe=r',.[]{}:"')
def test_get_singleband_explicit_cmap(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
explicit_cmap = {1: (0, 0, 0), 2.0: (255, 255, 255, 20), 3: '#ffffff', 4: 'abcabc'}
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit'
f'&explicit_color_map={urlsafe_json(explicit_cmap)}')
assert rv.status_code == 200, rv.data.decode('utf-8')
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
def test_get_singleband_explicit_cmap_invalid(client, use_testdb, raster_file_xyz):
x, y, z = raster_file_xyz
explicit_cmap = {1: (0, 0, 0), 2: (255, 255, 255), 3: '#ffffff', 4: 'abcabc'}
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?'
f'explicit_color_map={urlsafe_json(explicit_cmap)}')
assert rv.status_code == 400
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet'
f'&explicit_color_map={urlsafe_json(explicit_cmap)}')
assert rv.status_code == 400
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit')
assert rv.status_code == 400
explicit_cmap[3] = 'omgomg'
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit'
f'&explicit_color_map={urlsafe_json(explicit_cmap)}')
assert rv.status_code == 400
explicit_cmap = [(255, 255, 255)]
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit'
f'&explicit_color_map={urlsafe_json(explicit_cmap)}')
assert rv.status_code == 400
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit'
f'&explicit_color_map=foo')
assert rv.status_code == 400
def test_get_singleband_stretch(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
for stretch_range in ('[0,1]', '[0,null]', '[null, 1]', '[null,null]', 'null'):
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?stretch_range={stretch_range}')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
def test_get_singleband_out_of_bounds(client, use_testdb):
import terracotta
settings = terracotta.get_settings()
x, y, z = (0, 0, 10)
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
assert np.all(np.asarray(img) == 0)
def test_get_singleband_unknown_cmap(client, use_testdb, raster_file_xyz):
x, y, z = raster_file_xyz
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=UNKNOWN')
assert rv.status_code == 400
def test_get_rgb(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3)
def test_get_rgb_preview(client, use_testdb):
import terracotta
settings = terracotta.get_settings()
rv = client.get(f'/rgb/val21/x/preview.png?r=val22&g=val23&b=val24')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3)
def test_get_rgb_extra_args(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24&foo=bar&baz=quz')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3)
def test_get_rgb_stretch(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
for stretch_range in ('[0,10000]', '[0,null]', '[null, 10000]', '[null,null]', 'null'):
rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24&'
f'r_range={stretch_range}&b_range={stretch_range}&g_range={stretch_range}')
assert rv.status_code == 200, rv.data
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3)
def test_get_compute(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
# default tile size
x, y, z = raster_file_xyz
rv = client.get(
f'/compute/val21/x/{z}/{x}/{y}.png'
'?expression=v1*v2&v1=val22&v2=val23'
'&stretch_range=[0,10000]'
)
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
# custom tile size
rv = client.get(
f'/compute/val21/x/{z}/{x}/{y}.png'
'?expression=v1*v2&v1=val22&v2=val23'
'&stretch_range=[0,10000]'
'&tile_size=[128,128]'
)
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == (128, 128)
def test_get_compute_preview(client, use_testdb):
import terracotta
settings = terracotta.get_settings()
rv = client.get(
f'/compute/val21/x/preview.png'
'?expression=v1*v2&v1=val22&v2=val23'
'&stretch_range=[0,10000]'
)
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
def test_get_compute_invalid(client, use_testdb, raster_file_xyz):
x, y, z = raster_file_xyz
# too few keys
rv = client.get(
f'/compute/val21/{z}/{x}/{y}.png'
'?expression=v1*v2&v1=val22&v2=val23'
'&stretch_range=[0,10000]'
)
assert rv.status_code == 400
# invalid expression
rv = client.get(
f'/compute/val21/x/preview.png'
'?expression=__builtins__["dir"](v1)&v1=val22'
'&stretch_range=[0,10000]'
)
assert rv.status_code == 400
# no stretch range
rv = client.get(
f'/compute/val21/x/{z}/{x}/{y}.png'
'?expression=v1*v2&v1=val22&v2=val23'
)
assert rv.status_code == 400
# no expression
rv = client.get(
f'/compute/val21/x/{z}/{x}/{y}.png'
'?stretch_range=[0,10000)'
)
assert rv.status_code == 400
# missing operand
rv = client.get(
f'/compute/val21/x/{z}/{x}/{y}.png'
'?expression=v1*v2'
'&stretch_range=[0,10000)'
)
assert rv.status_code == 400
# invalid stretch range (syntax)
rv = client.get(
f'/compute/val21/x/{z}/{x}/{y}.png'
'?expression=v1*v2&v1=val22&v2=val23'
'&stretch_range=[0,10000)'
)
assert rv.status_code == 400
# invalid stretch range (value)
rv = client.get(
f'/compute/val21/x/{z}/{x}/{y}.png'
'?expression=v1*v2&v1=val22&v2=val23'
'&stretch_range=[10000,0]'
)
assert rv.status_code == 400
def test_get_colormap(client):
rv = client.get('/colormap?stretch_range=[0,1]&num_values=100')
assert rv.status_code == 200
assert len(json.loads(rv.data)['colormap']) == 100
def test_get_colormap_invalid(client):
rv = client.get('/colormap?stretch_range=[0,1')
assert rv.status_code == 400
def test_get_colormap_extra_args(client):
rv = client.get('/colormap?stretch_range=[0,1]&num_values=100&foo=bar&baz=quz')
assert rv.status_code == 200
assert len(json.loads(rv.data)['colormap']) == 100
def test_get_spec(client):
from terracotta import __version__
rv = client.get('/swagger.json')
assert rv.status_code == 200
assert json.loads(rv.data)
assert __version__ in rv.data.decode('utf-8')
rv = client.get('/apidoc')
assert rv.status_code == 200
assert b'Terracotta' in rv.data
| 2.140625 | 2 |
url.py | matthieucan/shorturl | 1 | 4122 | <filename>url.py
def base_conv(n, input_base=10, output_base=10):
"""
Converts a number n from base input_base to base output_base.
The following symbols are used to represent numbers:
0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ
n can be an int if input_base <= 10, and a string otherwise.
The result will be a string.
"""
numbers = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
## base 10 conversion
n = str(n)
size = len(n)
baseten = 0
for i in range(size):
baseten += numbers.index(n[i]) * input_base ** (size - 1 - i)
## base output_base conversion
# we search the biggest number m such that n^m < x
max_power = 0
while output_base ** (max_power + 1) <= baseten:
max_power += 1
result = ""
for i in range(max_power + 1):
        coeff = baseten // (output_base ** (max_power - i))
baseten -= coeff * (output_base ** (max_power - i))
result += numbers[coeff]
return result
if __name__ == "__main__":
assert(base_conv(10) == "10")
assert(base_conv(42) == "42")
assert(base_conv(5673576) == "5673576")
assert(base_conv(10, input_base=2) == "2")
assert(base_conv(101010, input_base=2) == "42")
assert(base_conv(43, input_base=10, output_base=2) == "101011")
assert(base_conv(256**3 - 1, input_base=10, output_base=16) == "ffffff")
assert(base_conv("d9bbb9d0ceabf", input_base=16, output_base=8) ==
"154673563503165277")
assert(base_conv("154673563503165277", input_base=8, output_base=10) ==
"3830404793297599")
assert(base_conv(0, input_base=3, output_base=50) == "0")
| 4.15625 | 4 |
src/qtt/qiskit/passes.py | codecrap/qtt | 0 | 4123 | import logging
from typing import Dict, List, Optional
import numpy as np
import qiskit
from qiskit.circuit import Barrier, Delay, Reset
from qiskit.circuit.library import (CRXGate, CRYGate, CRZGate, CZGate,
PhaseGate, RXGate, RYGate, RZGate, U1Gate,
U2Gate, U3Gate, UGate)
from qiskit.circuit.library.standard_gates import (CU1Gate, RZZGate, SdgGate,
SGate, TdgGate, TGate,
ZGate)
from qiskit.circuit.quantumcircuit import QuantumCircuit
from qiskit.converters.circuit_to_dag import circuit_to_dag
from qiskit.dagcircuit import DAGCircuit
from qiskit.transpiler.basepasses import TransformationPass
logger = logging.getLogger(__name__)
class RemoveSmallRotations(TransformationPass):
"""Return a circuit with small rotation gates removed."""
def __init__(self, epsilon: float = 0, modulo2pi=False):
"""Remove all small rotations from a circuit
Args:
epsilon: Threshold for rotation angle to be removed
modulo2pi: If True, then rotations multiples of 2pi are removed as well
"""
super().__init__()
self.epsilon = epsilon
self._empty_dag1 = qiskit.converters.circuit_to_dag(QuantumCircuit(1))
self._empty_dag2 = qiskit.converters.circuit_to_dag(QuantumCircuit(2))
self.mod2pi = modulo2pi
def run(self, dag: DAGCircuit) -> DAGCircuit:
"""Run the pass on `dag`.
Args:
dag: input dag.
Returns:
Output dag with small rotations removed
"""
def modulo_2pi(x):
x = float(x)
return np.mod(x + np.pi, 2 * np.pi) - np.pi
for node in dag.op_nodes():
if isinstance(node.op, (PhaseGate, RXGate, RYGate, RZGate)):
if node.op.is_parameterized():
# for parameterized gates we do not optimize
pass
else:
phi = float(node.op.params[0])
if self.mod2pi:
phi = modulo_2pi(phi)
if np.abs(phi) <= self.epsilon:
dag.substitute_node_with_dag(node, self._empty_dag1)
elif isinstance(node.op, (CRXGate, CRYGate, CRZGate)):
if node.op.is_parameterized():
# for parameterized gates we do not optimize
pass
else:
phi = float(node.op.params[0])
if self.mod2pi:
phi = modulo_2pi(phi)
if np.abs(phi) <= self.epsilon:
dag.substitute_node_with_dag(node, self._empty_dag2)
return dag
class RemoveDiagonalGatesAfterInput(TransformationPass):
"""Remove diagonal gates (including diagonal 2Q gates) at the start of a circuit.
Transpiler pass to remove diagonal gates (like RZ, T, Z, etc) at the start of a circuit.
Including diagonal 2Q gates. Nodes after a reset are also included.
"""
def run(self, dag):
"""Run the RemoveDiagonalGatesBeforeMeasure pass on `dag`.
Args:
dag (DAGCircuit): the DAG to be optimized.
Returns:
DAGCircuit: the optimized DAG.
"""
diagonal_1q_gates = (RZGate, ZGate, TGate, SGate, TdgGate, SdgGate, U1Gate)
diagonal_2q_gates = (CZGate, CRZGate, CU1Gate, RZZGate)
nodes_to_remove = set()
for input_node in (dag.input_map.values()):
try:
successor = next(dag.quantum_successors(input_node))
except StopIteration:
continue
if successor.type == "op" and isinstance(successor.op, diagonal_1q_gates):
nodes_to_remove.add(successor)
def valid_predecessor(s):
""" Return True of node is valid predecessor for removal """
if s.type == 'in':
return True
if s.type == "op" and isinstance(s.op, Reset):
return True
return False
if successor.type == "op" and isinstance(successor.op, diagonal_2q_gates):
predecessors = dag.quantum_predecessors(successor)
if all(valid_predecessor(s) for s in predecessors):
nodes_to_remove.add(successor)
for node_to_remove in nodes_to_remove:
dag.remove_op_node(node_to_remove)
return dag
class DecomposeU(TransformationPass):
""" Decompose U gates into elementary rotations Rx, Ry, Rz
The U gates are decomposed using McKay decomposition.
"""
    def __init__(self, verbose=0):
        """
        Args:
            verbose: verbosity level; nonzero values enable extra output
        """
super().__init__()
self._subdags = []
self.verbose = verbose
self.initial_layout = None
def ugate_replacement_circuit(self, ugate):
qc = QuantumCircuit(1)
if isinstance(ugate, (U3Gate, UGate)):
theta, phi, lam = ugate.params
if theta == np.pi/2:
# a u2 gate
qc.rz(lam - np.pi / 2, 0)
qc.rx(np.pi / 2, 0)
qc.rz(phi + np.pi / 2, 0)
else:
# from https://arxiv.org/pdf/1707.03429.pdf
qc.rz(lam, 0)
qc.rx(np.pi / 2, 0)
qc.rz(theta + np.pi, 0)
qc.rx(np.pi / 2, 0)
qc.rz(phi + np.pi, 0)
elif isinstance(ugate, U2Gate):
phi, lam = ugate.params
qc.rz(lam - np.pi / 2, 0)
qc.rx(np.pi / 2, 0)
qc.rz(phi + np.pi / 2, 0)
elif isinstance(ugate, (U1Gate, PhaseGate)):
lam, = ugate.params
qc.rz(lam, 0)
else:
raise Exception(f'unknown gate type {ugate}')
return qc
def run(self, dag: DAGCircuit) -> DAGCircuit:
"""Run the Decompose pass on `dag`.
Args:
dag: input DAG.
Returns:
Output DAG where ``U`` gates have been decomposed.
"""
# Walk through the DAG and expand each node if required
for node in dag.op_nodes():
if isinstance(node.op, (PhaseGate, U1Gate, U2Gate, U3Gate, UGate)):
subdag = circuit_to_dag(self.ugate_replacement_circuit(node.op))
dag.substitute_node_with_dag(node, subdag)
return dag
class DecomposeCX(TransformationPass):
""" Decompose CX into CZ and single qubit rotations
"""
    def __init__(self, mode: str = 'ry'):
        """
        Args:
            mode: decomposition style; 'ry' surrounds the CZ with RY rotations, any other value uses Hadamards
        """
super().__init__()
self._subdags: List = []
self.initial_layout = None
self.gate = qiskit.circuit.library.CXGate
self.decomposition = QuantumCircuit(2)
if mode == 'ry':
self.decomposition.ry(-np.pi / 2, 1)
self.decomposition.cz(0, 1)
self.decomposition.ry(np.pi / 2, 1)
else:
self.decomposition.h(1)
self.decomposition.cz(0, 1)
self.decomposition.h(1)
self._dag = circuit_to_dag(self.decomposition)
def run(self, dag: DAGCircuit) -> DAGCircuit:
"""Run the Decompose pass on `dag`.
Args:
dag: input dag.
Returns:
output dag where ``CX`` was expanded.
"""
# Walk through the DAG and expand each non-basis node
for node in dag.op_nodes(self.gate):
dag.substitute_node_with_dag(node, self._dag)
return dag
class SequentialPass(TransformationPass):
"""Adds barriers between gates to make the circuit sequential."""
def run(self, dag):
new_dag = DAGCircuit()
for qreg in dag.qregs.values():
new_dag.add_qreg(qreg)
for creg in dag.cregs.values():
new_dag.add_creg(creg)
for node in dag.op_nodes():
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
            logger.info(f'SequentialPass: adding node {node.name}')
if node.name in ['barrier', 'measure']:
continue
new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])
return new_dag
class LinearTopologyParallelPass(TransformationPass):
"""Adds barriers to enforce a linear topology
The barrier are placed between gates such that no two qubit gates are executed
at the same time and only single qubit gates on non-neighboring qubits can
be executed in parallel. It assumes a linear topology."""
def run(self, dag):
new_dag = DAGCircuit()
for qreg in dag.qregs.values():
new_dag.add_qreg(qreg)
for creg in dag.cregs.values():
new_dag.add_creg(creg)
for ii, layer in enumerate(dag.layers()):
gates_1q = []
gates_2q = []
other_gates = []
for node in layer['graph'].op_nodes():
if len(node.qargs) == 2:
gates_2q.append(node)
elif len(node.qargs) == 1:
gates_1q.append(node)
else:
                    logger.info(f'layer {ii}: other type of node {node}')
other_gates.append(node)
even = []
odd = []
for node in gates_1q:
if node.qargs[0].index % 2 == 0:
even.append(node)
else:
odd.append(node)
            logger.info(
                f'layer {ii}: 2q gates {len(gates_2q)}, even {len(even)} odd {len(odd)}, other {len(other_gates)}')
if len(even) > 0:
for node in even:
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
if not isinstance(node.op, Barrier):
new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])
if len(odd) > 0:
for node in odd:
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
if not isinstance(node.op, Barrier):
new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])
for node in gates_2q:
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
if not isinstance(node.op, Barrier):
new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])
for node in other_gates:
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
if not isinstance(node.op, Barrier):
new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])
return new_dag
class DelayPass(TransformationPass):
"""Adds delay gates when the qubits are idle.
For every layer of the circuit it finds the gate that
lasts the longest and applies appropriate delays on the
other qubits.
"""
def __init__(self, gate_durations: Dict[str, float], delay_quantum: Optional[float] = None):
"""
Args:
gate_durations: Gate durations in the units of dt
"""
super().__init__()
self.gate_durations = gate_durations
self.delay_quantum = delay_quantum
def add_delay_to_dag(self, duration, dag, qargs, cargs):
if self.delay_quantum:
number_of_delays = int(duration/self.delay_quantum)
for ii in range(number_of_delays):
dag.apply_operation_back(Delay(self.delay_quantum), qargs, cargs)
else:
dag.apply_operation_back(Delay(duration), qargs, cargs)
@staticmethod
def _determine_delay_target_qubits(dag, layer):
""" Determine qubits in specified layer which require a delay gate """
partition = layer['partition']
lst = list(dag.qubits)
for el in partition:
for q in el:
if q in lst:
lst.remove(q)
return lst
def run(self, dag):
new_dag = DAGCircuit()
for qreg in dag.qregs.values():
new_dag.add_qreg(qreg)
for creg in dag.cregs.values():
new_dag.add_creg(creg)
for layer_idx, layer in enumerate(dag.layers()):
max_duration = 0
durations = {}
for node in layer['graph'].op_nodes():
if node.name in self.gate_durations:
max_duration = max(max_duration, self.gate_durations[node.name])
for q in node.qargs:
durations[q] = self.gate_durations[node.name]
else:
                    logger.info(f'layer {layer_idx}, could not find duration for node {node.name}')
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
partition = layer['partition']
if len(partition) == 0:
continue
lst = DelayPass._determine_delay_target_qubits(dag, layer)
logger.info(f'layer: {layer_idx}: lst {lst}, durations {durations}')
for el in lst:
logger.info(f'apply_operation_back: {[el]}')
self.add_delay_to_dag(max_duration, new_dag, [el], [])
for q in durations:
if max_duration - durations[q] > 0:
self.add_delay_to_dag(max_duration - durations[q], new_dag, [q], [])
return new_dag
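# Example (sketch): chaining these passes with Qiskit's PassManager. The demo
# circuit, epsilon and the pass selection below are illustrative assumptions only;
# DelayPass and the barrier passes can be appended to the list in the same way.
if __name__ == '__main__':
    from qiskit.transpiler import PassManager
    demo_circuit = QuantumCircuit(2)
    demo_circuit.rz(1e-12, 0)  # negligible rotation, removed by RemoveSmallRotations
    demo_circuit.u(np.pi / 2, 0.1, 0.2, 0)
    demo_circuit.cx(0, 1)
    pass_manager = PassManager([RemoveSmallRotations(epsilon=1e-9), DecomposeU(), DecomposeCX(mode='ry')])
    print(pass_manager.run(demo_circuit))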
| 2.234375 | 2 |
IAFNNESTA.py | JonathanAlis/IAFNNESTA | 3 | 4124 | def help():
return '''
Isotropic-Anisotropic Filtering Norm Nesterov Algorithm
Solves the filtering norm minimization + quadratic term problem
Nesterov algorithm, with continuation:
argmin_x || iaFN(x) ||_1/2 subjected to ||b - Ax||_2^2 < delta
If no filter is provided, solves the L1.
Continuation is performed by sequentially applying Nesterov's algorithm
with a decreasing sequence of values of mu0 >= mu >= muf
The observation matrix A must be a projector (non projector not implemented yet)
Inputs:
IAFNNESTA(b, #Observed data, a m x 1 array
A=identity,At=identity, # measurement matrix and adjoint (either a matrix, function handles)
muf=0.0001, #final mu value, smaller leads to higher accuracy
delta, #l2 error bound. This enforces how close the variable
#must fit the observations b, i.e. || y - Ax ||_2 <= delta
#If delta = 0, enforces y = Ax
#delta = sqrt(m + 2*sqrt(2*m))*sigma, where sigma=std(noise).
L1w=1,L2w=0, #weights of L1 (anisotropic) and L2(isotropic) norms
verbose=0, #whether to print internal steps
maxit=1000, #maximum iterations at the inner loop
x0=[], #initial solution, if not provided, will be At(b)
U=identity,Ut=identity, #Analysis/Synthesis operators
stopTest=1, #stopTest == 1 : stop when the relative change in the objective
function is less than TolVar
stopTest == 2 : stop with the l_infinity norm of difference in
the xk variable is less than TolVar
TolVar = 1e-5, #tolerance for the stopping criteria
AAtinv=[], #not implemented
normU=1, #if U is provided, this should be norm(U)
H=[],Ht=[]): #filter operations in sparse matrix form
#also accepts the string 'tv' as input,
#in that case, calculates the tv norm
Outputs:
return xk, #estimated x reconstructed signal
niter, #number of iterations
residuals #first column is the residual at every step,
#second column is the value of f_mu at every step
'''
import IAFNNesterov
import numpy as np
from scipy import sparse
import fil2mat
def identity(x):
return x
def IAFNNESTA(b,sig_size=0,A=identity,At=identity,muf=0.0001,delta=0,L1w=1,L2w=0,verbose=0,MaxIntIter=5,maxit=1000,x0=[],U=identity,Ut=identity,stopTest=1,TolVar = 1e-5,AAtinv=[],normU=1,H=[]):
if delta<0:
raise Exception('Delta must not be negative')
    if not callable(A): # if A is a matrix, wrap it and its adjoint as functions
        A_mat = A
        A = lambda x: np.matmul(A_mat, x)
        At = lambda x: np.matmul(np.transpose(A_mat), x)
b=b.reshape((-1,1))
Atb=At(b)
if sig_size==0:
sig_size=Atb.shape
if callable(AAtinv):
AtAAtb = At( AAtinv(b) )
else:
        if len(AAtinv)>0:
            AAtinv_mat = AAtinv
            AAtinv = lambda x: np.matmul(AAtinv_mat, x)
            AtAAtb = At( AAtinv(b) )
else: #default
AtAAtb = Atb
AAtinv=identity
if len(x0)==0:
x0 = AtAAtb
if len(H)==0:
Hf=identity
Hft=identity
else:
if not sparse.issparse(H):
if isinstance(H, str):
if H=='tv':
hs=[]
hs.append(np.array([[1,-1]]))
hs.append(np.array([[1],[-1]]))
H,_,_,_=fil2mat.fil2mat(hs,sig_size)
else:
print('H not recognized. Must be a sparse matrix, a list of filters or the string tv')
else:
#list of filters:
H,_,_,_=fil2mat.fil2mat(H,sig_size)
#print(H.shape)
#print(H)
#print(type(H))
Ht=H.transpose()
Hf=lambda x: H@x
Hft=lambda x: Ht@x
HU=lambda x: Hf(U(x))
UtHt=lambda x: Ut(Hft(x))
typemin=''
if L1w>0:
typemin+="iso"
if L2w>0:
typemin+="aniso"
typemin+='tropic '
if callable(H):
typemin+='filtering norm '
mu0=0
if L1w>0:
mu0+=L1w*0.9*np.max(np.linalg.norm(HU(x0),1))
if L2w>0:
mu0+=L2w*0.9*np.max(np.linalg.norm(HU(x0),2))
niter = 0
Gamma = np.power(muf/mu0,1/MaxIntIter)
mu = mu0
Gammat= np.power(TolVar/0.1,1/MaxIntIter)
TolVar = 0.1
for i in range(MaxIntIter):
mu = mu*Gamma
TolVar=TolVar*Gammat;
if verbose>0:
#if k%verbose==0:
print("\tBeginning %s Minimization; mu = %g\n" %(typemin,mu))
xk,niter_int,res = IAFNNesterov.IAFNNesterov(b,A=A,At=At,mu=mu,delta=delta,L1w=L1w,L2w=L2w,verbose=verbose,maxit=maxit,x0=x0,U=U,Ut=Ut,stopTest=stopTest,TolVar = TolVar,AAtinv=AAtinv,normU=normU,H=Hf,Ht=Hft)
xplug = xk
niter = niter_int + niter
if i==0:
residuals=res
else:
residuals = np.vstack((residuals, res))
return xk.reshape(sig_size)
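# Example (sketch): a denoising-style call using the default identity measurement
# operator and the L1 objective. The signal, noise level and delta below are
# illustrative assumptions; delta follows the sqrt(m + 2*sqrt(2*m))*sigma rule
# described in the help text above.
def _example_usage():
    x_true = np.zeros((256, 1))
    x_true[[10, 50, 200]] = 1.0  # a sparse test signal
    b = x_true + 0.01 * np.random.randn(256, 1)  # noisy observations
    delta = np.sqrt(b.size + 2 * np.sqrt(2 * b.size)) * 0.01
    return IAFNNESTA(b, sig_size=x_true.shape, delta=delta, L1w=1, L2w=0)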
if __name__ == "__main__":
print(help())
| 3.40625 | 3 |
hypatia/util/__init__.py | pfw/hypatia | 0 | 4125 | <reponame>pfw/hypatia<gh_stars>0
import itertools
import BTrees
from persistent import Persistent
from ZODB.broken import Broken
from zope.interface import implementer
_marker = object()
from .. import exc
from ..interfaces import (
IResultSet,
STABLE,
)
@implementer(IResultSet)
class ResultSet(object):
"""Implements :class:`hypatia.interfaces.IResultSet`"""
family = BTrees.family64
def __init__(self, ids, numids, resolver, sort_type=None):
self.ids = ids # only guaranteed to be iterable, not sliceable
self.numids = numids
self.resolver = resolver
self.sort_type = sort_type
def __len__(self):
return self.numids
def sort(
self, index, reverse=False, limit=None, sort_type=None, raise_unsortable=True
):
if sort_type is None:
sort_type = self.sort_type
ids = self.ids
if not hasattr(ids, "__len__"):
# indexes have no obligation to be able to sort generators
ids = list(ids)
self.ids = ids
ids = index.sort(
self.ids,
reverse=reverse,
limit=limit,
sort_type=sort_type,
raise_unsortable=raise_unsortable,
)
numids = self.numids
if limit:
numids = min(numids, limit)
return self.__class__(ids, numids, self.resolver, sort_type=STABLE)
def first(self, resolve=True):
# return the first object or None
resolver = self.resolver
if resolver is None or not resolve:
for id_ in self.ids:
# if self.ids is not a list or a tuple, allow this result set
# to be iterated after first() is called and allow first() to
# be idempotent
if not hasattr(self.ids, "__len__"):
self.ids = itertools.chain([id_], self.ids)
return id_
else:
for id_ in self.ids:
# if self.ids is not a list or a tuple, allow this result set
# to be iterated after first() is called and allow first() to
# be idempotent
if not hasattr(self.ids, "__len__"):
self.ids = itertools.chain([id_], self.ids)
return resolver(id_)
def one(self, resolve=True):
if self.numids == 1:
return self.first(resolve=resolve)
if self.numids > 1:
raise exc.MultipleResults(self)
else:
raise exc.NoResults(self)
def _resolve_all(self, resolver):
for id_ in self.ids:
yield resolver(id_)
def all(self, resolve=True):
resolver = self.resolver
if resolver is None or not resolve:
return self.ids
else:
return self._resolve_all(resolver)
def __iter__(self):
return iter(self.all())
def intersect(self, docids):
"""Intersect this resultset with a sequence of docids or
another resultset. Returns a new ResultSet."""
# NB: we can't use an intersection function here because
# self.ids may be a generator
if isinstance(docids, ResultSet):
docids = docids.ids
filtered_ids = [x for x in self.ids if x in docids]
return self.__class__(filtered_ids, len(filtered_ids), self.resolver)
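# Example (sketch): basic ResultSet usage with plain integer ids and no resolver;
# the ids below are arbitrary illustrative values.
def _example_result_set():
    rs = ResultSet([1, 2, 3], 3, resolver=None)
    first = rs.first()  # returns 1; the set can still be iterated afterwards
    narrowed = rs.intersect([2, 3])
    return first, list(narrowed)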
class BaseIndexMixin(object):
"""Mixin class for indexes that implements common behavior"""
family = BTrees.family64
def discriminate(self, obj, default):
"""See interface IIndexInjection"""
if callable(self.discriminator):
value = self.discriminator(obj, _marker)
else:
value = getattr(obj, self.discriminator, _marker)
if value is _marker:
return default
if isinstance(value, Persistent):
raise ValueError("Catalog cannot index persistent object %s" % value)
if isinstance(value, Broken):
raise ValueError("Catalog cannot index broken object %s" % value)
return value
def reindex_doc(self, docid, obj):
"""See interface IIndexInjection"""
self.unindex_doc(docid)
self.index_doc(docid, obj)
def indexed_count(self):
"""See IIndexedDocuments"""
return len(self.indexed())
def not_indexed_count(self):
"""See IIndexedDocuments"""
return len(self.not_indexed())
def docids(self):
"""See IIndexedDocuments"""
not_indexed = self.not_indexed()
indexed = self.indexed()
if len(not_indexed) == 0:
return self.family.IF.Set(indexed)
elif len(indexed) == 0:
return not_indexed
indexed = self.family.IF.Set(indexed)
return self.family.IF.union(not_indexed, indexed)
def docids_count(self):
"""See IIndexedDocuments"""
return len(self.docids())
def apply_intersect(self, query, docids):
"""Default apply_intersect implementation"""
result = self.apply(query)
if docids is None:
return result
return self.family.IF.weightedIntersection(result, docids)[1]
def _negate(self, apply_func, *args, **kw):
positive = apply_func(*args, **kw)
all = self.docids()
if len(positive) == 0:
return all
return self.family.IF.difference(all, positive)
def qname(self):
# used in query representations; __name__ should be set by
# catalog __setitem__ but if it's not, we fall back to a generic
# representation
return getattr(
self,
"__name__",
str(self),
)
def resultset_from_query(self, query, names=None, resolver=None):
# default resultset factory; meant to be overridden by systems that
# have a default resolver. NB: although the default implementation
# below does not access "self", so it would appear that this could be
# turned into a classmeth or staticmethod, subclasses that override may
# expect self, so this is a plain method.
docids = query._apply(names)
numdocs = len(docids)
return ResultSet(docids, numdocs, resolver)
def flush(self, *arg, **kw):
"""Hookable by upstream systems"""
pass
class RichComparisonMixin(object):
# Stolen from http://www.voidspace.org.uk/python/recipebook.shtml#comparison
def __eq__(self, other):
raise NotImplementedError("Equality not implemented")
def __lt__(self, other):
raise NotImplementedError("Less than not implemented")
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
| 2.234375 | 2 |
backend/listings/migrations/0001_initial.py | relaxxpls/Music-Control | 0 | 4126 | <filename>backend/listings/migrations/0001_initial.py
# Generated by Django 3.2.3 on 2021-05-30 04:28
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('realtors', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Listing',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.CharField(max_length=200, unique=True)),
('title', models.CharField(max_length=150)),
('address', models.CharField(default='', max_length=150)),
('city', models.CharField(max_length=100)),
('state', models.CharField(max_length=100)),
('zipcode', models.CharField(max_length=15)),
('description', models.TextField(blank=True)),
('sale_type', models.CharField(choices=[('For Sale', 'For Sale'), ('For Rent', 'For Rent')], default='For Sale', max_length=50)),
('price', models.IntegerField()),
('bedrooms', models.IntegerField()),
('bathrooms', models.DecimalField(decimal_places=1, max_digits=2)),
('home_type', models.CharField(choices=[('House', 'House'), ('Condo', 'Condo'), ('Townhouse', 'Townhouse')], default='House', max_length=50)),
('sqft', models.IntegerField()),
('open_house', models.BooleanField(default=False)),
('photo_main', models.ImageField(upload_to='photos/%Y/%m/%d')),
('photo_1', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d')),
('photo_2', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d')),
('is_published', models.BooleanField(default=True)),
('list_date', models.DateTimeField(blank=True, default=django.utils.timezone.now)),
('realtor', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='realtors.realtor')),
],
),
]
| 1.828125 | 2 |
examples/example_without_CommandSet/my_listeners.py | LeConstellationniste/DiscordFramework | 1 | 4127 | import asyncio
import discord
# Just with a function to add to the bot.
async def on_message(message):
if not message.author.bot:
await message.channel.send(f"{message.author.mention} a envoyé un message!")
# A Listener already created with the function
from discordEasy.objects import Listener
async def on_message(message):
if not message.author.bot:
await message.channel.send(f"{message.author.mention} a envoyé un message!")
listener_on_message = Listener(on_message) | 2.796875 | 3 |
pianonet/serving/app.py | robgon-art/pianonet | 14 | 4128 | import os
import random
from flask import Flask, request, send_from_directory
from werkzeug.utils import secure_filename
from pianonet.core.pianoroll import Pianoroll
from pianonet.model_inspection.performance_from_pianoroll import get_performance_from_pianoroll
app = Flask(__name__)
base_path = "/app/"
# base_path = "/Users/angsten/PycharmProjects/pianonet"
performances_path = os.path.join(base_path, 'data', 'performances')
def get_random_midi_file_name():
"""
Get a random midi file name that will not ever collide.
"""
return str(random.randint(0, 10000000000000000000)) + ".midi"
def get_performance_path(midi_file_name):
"""
Returns full path to performaqnce midi file given a file name.
"""
return os.path.join(performances_path, midi_file_name)
@app.route('/')
def alive():
return 'OK'
@app.route('/performances/', methods=['GET'])
def get_performance():
"""
Returns the requested performance as midi file.
Expected query string is 'midi_file_name', such as 1234.midi
"""
    performance_midi_file_name = request.args.get('midi_file_name')
    if performance_midi_file_name is None:
        return {"http_code": 400, "code": "BadRequest", "message": "midi_file_name not found in request."}
    performance_midi_file_name = secure_filename(performance_midi_file_name)
    print(performance_midi_file_name)
midi_file_path = get_performance_path(performance_midi_file_name)
if not os.path.exists(midi_file_path):
return {
"http_code": 404,
"code": "Not Found",
"message": "midi_file " + performance_midi_file_name + " not found."
}
with open(midi_file_path, 'rb') as midi_file:
return send_from_directory(performances_path, performance_midi_file_name)
@app.route('/create-performance', methods=['POST'])
def performance():
"""
Expects post form data as follows:
seed_midi_file_data: Midi file that forms the seed for a performance as string encoding like "8,2,3,4,5..."
seconds_to_generate: Number of seconds of new notes to generate
model_complexity: Quality of model to use, one of ['low', 'medium', 'high', 'highest']
"""
seed_midi_file_data = request.form.get('seed_midi_file_data')
if seed_midi_file_data == None:
return {"http_code": 400, "code": "BadRequest", "message": "seed_midi_file_data not found in request."}
else:
seed_midi_file_int_array = [int(x) for x in seed_midi_file_data.split(',')]
frame = bytearray()
for i in seed_midi_file_int_array:
frame.append(i)
saved_seed_midi_file_path = os.path.join(base_path, 'data', 'seeds', get_random_midi_file_name())
with open(saved_seed_midi_file_path, 'wb') as midi_file:
midi_file.write(frame)
seconds_to_generate = request.form.get('seconds_to_generate')
if seconds_to_generate == None:
return {"http_code": 400, "code": "BadRequest", "message": "seconds_to_generate not found in request."}
else:
seconds_to_generate = float(seconds_to_generate)
model_complexity = request.form.get('model_complexity', 'low')
if model_complexity == 'low':
model_name = "micro_1"
else:
model_name = "r9p0_3500kparams_approx_9_blocks_model"
model_path = os.path.join(base_path, 'models', model_name)
input_pianoroll = Pianoroll(saved_seed_midi_file_path, use_custom_multitrack=True)
input_pianoroll.trim_silence_off_ends()
final_pianoroll = get_performance_from_pianoroll(
pianoroll_seed=input_pianoroll,
num_time_steps=int(48 * seconds_to_generate),
model_path=model_path,
)
midi_file_name = get_random_midi_file_name()
midi_file_path = get_performance_path(midi_file_name)
final_pianoroll.save_to_midi_file(midi_file_path)
return {"http_code": 200, "code": "Success", "message": "", "midi_file_name": midi_file_name}
if __name__ == '__main__':
app.run(host='0.0.0.0')
| 2.59375 | 3 |
app.py | rafalbigaj/epidemic-model-visualization | 0 | 4129 | import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import logging
import json
import os
import pandas as pd
from datetime import datetime
from datetime import timedelta
from urllib import parse
import requests
logger = logging.getLogger(__name__)
external_stylesheets = [dbc.themes.DARKLY]
is_cf_instance = os.environ.get('CF_INSTANCE_GUID', '') != ''
port = int(os.environ.get('PORT', 8050))
host = os.environ.get('CF_INSTANCE_INTERNAL_IP', '127.0.0.1')
wml_api_key = os.environ['WML_API_KEY']
wml_scoring_url = os.environ['WML_SCORING_URL']
url = parse.urlparse(wml_scoring_url)
wml_base_url = url._replace(path='').geturl()
wml_instance_id = url.path.split('/')[3]
logger.setLevel(logging.INFO if is_cf_instance else logging.DEBUG)
logger.info('Starting %s server: %s:%d', 'CF' if is_cf_instance else 'local', host, port)
logger.info('WML URL: %s', wml_base_url)
logger.info('WML instance ID: %s', wml_instance_id)
wml_credentials = {
"apikey": wml_api_key,
"instance_id": wml_instance_id,
"url": wml_base_url,
}
iam_token_endpoint = 'https://iam.cloud.ibm.com/identity/token'
def _get_token():
data = {
'grant_type': 'urn:ibm:params:oauth:grant-type:apikey',
'apikey': wml_credentials['apikey']
}
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
response = requests.post(iam_token_endpoint, data=data, headers=headers)
return response.json()['access_token']
def score(token, algorithm, start_date, country, predict_range, s, i, r):
headers = {'Authorization': 'Bearer ' + token}
payload = {
"fields": ["algorithm", "start_date", "country", "predict_range", "S0", "I0", "R0"],
"values": [[algorithm, start_date.strftime('%-m/%-d/%y'), country, predict_range, s, i, r]]
}
logger.info('Scoring with payload: %s', json.dumps(payload))
response = requests.post(wml_scoring_url, json=payload, headers=headers)
if response.status_code == 200:
result = response.json()
else:
raise Exception('Scoring error [{}]: {}'.format(response.status_code, response.text))
n_days = len(result['values'])
index = [(start_date + timedelta(days=i)).strftime('%d/%m/%y') for i in range(n_days)]
return pd.DataFrame(result['values'], columns=result['fields'], index=index)
def serve_layout():
token = _get_token()
# predict_range = 14
# sir_result = score(token, 'SIR', datetime(2020, 3, 3), 'Poland', predict_range, 10_000, 20, 10)
# logistic_result = score(token, 'LOGISTIC', datetime(2020, 3, 3), 'Poland', predict_range, 10_000, 20, 10)
calibration_result = score(token, 'CALIBRATION', datetime(2020, 1, 22), 'Poland', 40, 10_000, 20, 10)
# days = list(sir_result.index)
days = list(calibration_result.index)
calibration_result['ActualChange'] = calibration_result['Actual'] - calibration_result['Actual'].shift(1, fill_value=0)
calibration_result['PredictedChange'] = calibration_result['Predicted'] - calibration_result['Predicted'].shift(1, fill_value=0)
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_trace(
go.Bar(x=days, y=calibration_result['PredictedChange'], name='Predicted Change', opacity=0.5),
secondary_y=True,
)
fig.add_trace(
go.Bar(x=days, y=calibration_result['ActualChange'], name='Actual Change', opacity=0.5),
secondary_y=True,
)
fig.add_trace(
go.Scatter(x=days, y=calibration_result['Predicted'], name='Calibration'),
secondary_y=False,
)
fig.add_trace(
go.Scatter(x=days, y=calibration_result['Actual'], name='Actual', mode="markers", marker=dict(size=8)),
secondary_y=False,
)
fig.update_layout(
title="Prediction of confirmed cases for Poland",
template="plotly_dark",
height=900
)
fig.update_xaxes(title_text="Date")
fig.update_yaxes(title_text="Total confirmed cases", secondary_y=False, range=[0, 6000])
fig.update_yaxes(title_text="New cases per day", secondary_y=True, range=[0, 1000])
# fig = go.Figure(
# data=[
# go.Scatter(x=days, y=sir_result['I'], name='SIR'),
# go.Scatter(x=days, y=logistic_result['I'], name='Logistic'),
# ],
# layout=go.Layout(
# title="COVID19 infected prediction in Poland",
# template="plotly_dark",
# height=600
# )
# )
return html.Div(children=[
html.H1(children='COVID-19 Predictions with Watson Machine Learning'),
dcc.Graph(
id='example-graph',
figure=fig
)
])
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = serve_layout
if __name__ == '__main__':
app.run_server(debug=(not is_cf_instance), port=port, host=host)
| 2.09375 | 2 |
src/sweetrpg_library_api/application/blueprints/systems/manager.py | paulyhedral/sweetrpg-library-api | 0 | 4130 | # -*- coding: utf-8 -*-
__author__ = "<NAME> <<EMAIL>>"
"""
"""
from flask_rest_jsonapi import ResourceList, ResourceDetail, ResourceRelationship
from sweetrpg_library_objects.api.system.schema import SystemAPISchema
from sweetrpg_api_core.data import APIData
from sweetrpg_library_objects.model.system import System
from sweetrpg_library_api.application.db import db
from sweetrpg_library_api.application.blueprints.setup import model_info
class SystemList(ResourceList):
schema = SystemAPISchema
data_layer = {"class": APIData, "type": "system", "model": System, "db": db, "model_info": model_info}
class SystemDetail(ResourceDetail):
schema = SystemAPISchema
data_layer = {
"class": APIData,
"type": "system",
"model": System,
"db": db,
"model_info": model_info
}
# class SystemAuthorRelationship(ResourceRelationship):
# schema = SystemAPISchema
# data_layer = {
# "class": APIData,
# "type": "system",
# "model": System,
# "db": db,
# "model_info": model_info
# }
| 2.171875 | 2 |
tests/test_ordering.py | deepio-oc/pabot | 379 | 4131 | from robot import __version__ as ROBOT_VERSION
import sys
import tempfile
import textwrap
import unittest
import shutil
import subprocess
class PabotOrderingGroupTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def _run_tests_with(self, testfile, orderfile):
robot_file = open("{}/test.robot".format(self.tmpdir), "w")
robot_file.write(textwrap.dedent(testfile))
robot_file.close()
with open("{}/order.dat".format(self.tmpdir), "w") as f:
f.write(textwrap.dedent(orderfile))
process = subprocess.Popen(
[
sys.executable,
"-m" "pabot.pabot",
"--testlevelsplit",
"--ordering",
"{}/order.dat".format(self.tmpdir),
"{}/test.robot".format(self.tmpdir),
],
cwd=self.tmpdir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return process.communicate()
def test_orders(self):
stdout, stderr = self._run_tests_with(
"""
*** Variables ***
${SCALAR} Hello, globe!
*** Test Cases ***
First Test
Set Suite Variable ${SCALAR} Hello, world!
Second Test
Should Be Equal ${SCALAR} Hello, world!
Third Test
Should Be Equal ${SCALAR} Hello, globe!
""",
"""
{
--test Test.First Test
--test Test.Second Test
}
--test Test.Third Test
""",
)
if sys.version_info < (3, 0):
self.assertIn("PASSED", stdout, stderr)
self.assertNotIn("FAILED", stdout, stderr)
self.assertEqual(stdout.count("PASSED"), 2)
else:
self.assertIn(b"PASSED", stdout, stderr)
self.assertNotIn(b"FAILED", stdout, stderr)
self.assertEqual(stdout.count(b"PASSED"), 2)
def test_two_orders(self):
stdout, stderr = self._run_tests_with(
"""
*** Variables ***
${SCALAR} Hello, globe!
*** Test Cases ***
First Test
Set Suite Variable ${SCALAR} Hello, world!
Second Test
Should Be Equal ${SCALAR} Hello, world!
Second And Quarter
Should Be Equal ${SCALAR} Hello, globe!
Second And Half
Should Be Equal ${SCALAR} Hello, globe!
Third Test
Should Be Equal ${SCALAR} Hello, globe!
""",
"""
{
--test Test.First Test
--test Test.Second Test
}
{
--test Test.Second And Quarter
--test Test.Second And Half
}
--test Test.Third Test
""",
)
if sys.version_info < (3, 0):
self.assertIn("PASSED", stdout, stderr)
self.assertNotIn("FAILED", stdout, stderr)
if ROBOT_VERSION < "4.0":
expected_write = "5 critical tests, 5 passed, 0 failed"
else:
expected_write = "5 tests, 5 passed, 0 failed, 0 skipped."
self.assertIn(expected_write, stdout, stderr)
self.assertEqual(stdout.count("PASSED"), 3)
else:
self.assertIn(b"PASSED", stdout, stderr)
self.assertNotIn(b"FAILED", stdout, stderr)
if ROBOT_VERSION < "4.0":
expected_write = b"5 critical tests, 5 passed, 0 failed"
else:
expected_write = b"5 tests, 5 passed, 0 failed, 0 skipped."
self.assertIn(expected_write, stdout, stderr)
self.assertEqual(stdout.count(b"PASSED"), 3)
def test_too_big_testname(self):
stdout, stderr = self._run_tests_with(
"""
*** Test Cases ***
Test Lorem ipsum dolor sit amet, consectetur adipiscing elit. Mauris eu velit nunc. Duis eget purus eget orci porta blandit sed ut tortor. Nunc vel nulla bibendum, auctor sem ac, molestie risus. Sed eu metus volutpat, hendrerit nibh in, auctor urna. Nunc a sodales.
Log Test
""",
"""
--test Invalid
""",
)
if sys.version_info < (3, 0):
self.assertIn("PASSED", stdout, stderr)
self.assertNotIn("FAILED", stdout, stderr)
self.assertEqual(stdout.count("PASSED"), 1)
else:
self.assertIn(b"PASSED", stdout, stderr)
self.assertNotIn(b"FAILED", stdout, stderr)
self.assertEqual(stdout.count(b"PASSED"), 1)
def test_longnames_in_tests(self):
stdout, stderr = self._run_tests_with(
"""
*** Settings ***
Test Template Test1
*** Test Cases ***
The Somewhat Long Name Of The Test S1Test 01 1
The Somewhat Long Name Of The Test S1Test 02 1
The Somewhat Long Name Of The Test S1Test 03 1
The Somewhat Long Name Of The Test S1Test 04 1
The Somewhat Long Name Of The Test S1Test 05 1
The Somewhat Long Name Of The Test S1Test 06 1
The Somewhat Long Name Of The Test S1Test 07 1
The Somewhat Long Name Of The Test S1Test 08 1
The Somewhat Long Name Of The Test S1Test 09 1
The Somewhat Long Name Of The Test S1Test 10 1
The Somewhat Long Name Of The Test S1Test 11 1
The Somewhat Long Name Of The Test S1Test 12 1
*** Keywords ***
Test1
[Arguments] ${arg}
Log Test
""",
"""
{
--test Test.The Somewhat Long Name Of The Test S1Test 01
--test Test.The Somewhat Long Name Of The Test S1Test 02
--test Test.The Somewhat Long Name Of The Test S1Test 03
--test Test.The Somewhat Long Name Of The Test S1Test 04
--test Test.The Somewhat Long Name Of The Test S1Test 05
--test Test.The Somewhat Long Name Of The Test S1Test 06
}
{
--test Test.The Somewhat Long Name Of The Test S1Test 07
--test Test.The Somewhat Long Name Of The Test S1Test 08
--test Test.The Somewhat Long Name Of The Test S1Test 09
--test Test.The Somewhat Long Name Of The Test S1Test 10
--test Test.The Somewhat Long Name Of The Test S1Test 11
--test Test.The Somewhat Long Name Of The Test S1Test 12
}
""",
)
if sys.version_info < (3, 0):
self.assertIn("PASSED", stdout, stderr)
self.assertNotIn("FAILED", stdout, stderr)
self.assertEqual(stdout.count("PASSED"), 2)
else:
self.assertIn(b"PASSED", stdout, stderr)
self.assertNotIn(b"FAILED", stdout, stderr)
self.assertEqual(stdout.count(b"PASSED"), 2)
| 2.4375 | 2 |
setup.py | Commonists/pageview-api | 21 | 4132 | <reponame>Commonists/pageview-api<filename>setup.py
#!/usr/bin/python
# -*- coding: latin-1 -*-
"""Setup script."""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
try:
import pageviewapi
version = pageviewapi.__version__
except ImportError:
version = 'Undefined'
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
]
packages = ['pageviewapi']
requires = ['requests', 'attrdict']
setup(
name='pageviewapi',
version=version,
author='Commonists',
author_email='<EMAIL>',
url='http://github.com/Commonists/pageview-api',
description='Wikimedia Pageview API client',
long_description=open('README.md').read(),
license='MIT',
packages=packages,
install_requires=requires,
classifiers=classifiers
)
| 1.453125 | 1 |
task1b.py | juby-gif/assignment1 | 0 | 4133 | <reponame>juby-gif/assignment1<gh_stars>0
#a2_t1b.py
#This program converts Celsius to Kelvin and Fahrenheit to Celsius
def c_to_k(c):
k = c + 273.15 #Formula to convert Celsius to Kelvin
return k
def f_to_c(f):
    fa = (f-32) * 5/9 #Formula to convert Fahrenheit to Celsius
return fa
c = 25.0
f = 100.0
k = c_to_k(c)
fa = f_to_c(f)
print("Celsius of " + str(c) + " is " + str(k) + " in Kelvin")
print("Farenheit of " + str(f) + " is " + str(fa) + " in Celsius")
| 3.59375 | 4 |
TWLight/emails/views.py | jajodiaraghav/TWLight | 1 | 4134 | <filename>TWLight/emails/views.py
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse, reverse_lazy
from django.core.mail import BadHeaderError, send_mail
from django.http import HttpResponse, HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.views.generic.edit import FormView
from TWLight.emails.forms import ContactUsForm
from TWLight.emails.signals import ContactUs
@method_decorator(login_required, name='post')
class ContactUsView(FormView):
template_name = 'emails/contact.html'
form_class = ContactUsForm
success_url = reverse_lazy('contact')
def get_initial(self):
initial = super(ContactUsView, self).get_initial()
# @TODO: This sort of gets repeated in ContactUsForm.
# We could probably be factored out to a common place for DRYness.
if self.request.user.is_authenticated():
if self.request.user.email:
initial.update({
'email': self.request.user.email,
})
if ('message' in self.request.GET):
initial.update({
'message': self.request.GET['message'],
})
initial.update({
'next': reverse_lazy('contact'),
})
return initial
def form_valid(self, form):
# Adding an extra check to ensure the user is a wikipedia editor.
try:
assert self.request.user.editor
email = form.cleaned_data['email']
message = form.cleaned_data['message']
carbon_copy = form.cleaned_data['cc']
ContactUs.new_email.send(
sender=self.__class__,
user_email=email,
cc=carbon_copy,
editor_wp_username=self.request.user.editor.wp_username,
body=message
)
messages.add_message(self.request, messages.SUCCESS,
# Translators: Shown to users when they successfully submit a new message using the contact us form.
_('Your message has been sent. We\'ll get back to you soon!'))
return HttpResponseRedirect(reverse('contact'))
except (AssertionError, AttributeError) as e:
messages.add_message (self.request, messages.WARNING,
# Translators: This message is shown to non-wikipedia editors who attempt to post data to the contact us form.
_('You must be a Wikipedia editor to do that.'))
raise PermissionDenied
return self.request.user.editor | 2.125 | 2 |
frontend/config.py | lcbm/cs-data-ingestion | 0 | 4135 | <filename>frontend/config.py
"""Flask App configuration file."""
import logging
import os
import dotenv
import frontend.constants as constants
dotenv.load_dotenv(os.path.join(constants.BASEDIR, "frontend.env"))
class Base:
"""Configuration class used as base for all environments."""
DEBUG = False
TESTING = False
LOGGING_FORMAT = "[%(asctime)s] %(levelname)s in %(message)s"
LOGGING_LOCATION = "frontend.log"
LOGGING_LEVEL = os.environ.get("LOGGING_LEVEL", logging.DEBUG)
class Development(Base):
"""Configuration class for development environment.
Parameters
----------
Base: base configuration object.
"""
DEBUG = True
TESTING = False
ENV = "dev"
class Staging(Base):
"""Configuration class for development staging environment.
Parameters
----------
Base: base configuration object.
"""
DEBUG = False
TESTING = True
ENV = "staging"
class Production(Base):
"""Configuration class for development production environment.
Parameters
----------
Base: base configuration object.
"""
DEBUG = False
TESTING = False
ENV = "prod"
config = {
"development": "frontend.config.Development",
"staging": "frontend.config.Staging",
"production": "frontend.config.Production",
"default": "frontend.config.Development",
}
def configure_app(app):
"""Configures the Flask app according to the FLASK_ENV
envar. In case FLASK_ENV is not defined, then use the
'default' configuration.
Parameters
----------
app: flask.Flask
Flask app Module.
"""
# Configure app
config_name = os.environ.get("FLASK_ENV", "default")
app.config.from_object(config[config_name])
# Configure logging
handler = logging.FileHandler(app.config["LOGGING_LOCATION"])
handler.setLevel(app.config["LOGGING_LEVEL"])
formatter = logging.Formatter(app.config["LOGGING_FORMAT"])
handler.setFormatter(formatter)
app.logger.addHandler(handler)
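# Example (sketch): an application factory applying this configuration. The Flask
# import is assumed to be available wherever the frontend app is created.
def _example_create_app():
    from flask import Flask
    app = Flask(__name__)
    configure_app(app)
    return app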
| 2.9375 | 3 |
tests/test_dsl.py | goodreferences/ElasticQuery | 0 | 4136 | <filename>tests/test_dsl.py<gh_stars>0
# ElasticQuery
# File: tests/test_dsl.py
# Desc: tests for ElasticQuery DSL objects (Filter, Query, Aggregate)
from os import path
from unittest import TestCase
from jsontest import JsonTest
from elasticquery import Query, Aggregate, Suggester
from elasticquery.exceptions import (
NoQueryError, NoAggregateError, NoSuggesterError,
MissingArgError
)
from .util import assert_equal
CLASS_NAMES = {
'_query': Query
}
def _test_query(self, query, test_name, test_data):
method = getattr(query, test_name)
def parse_arg(arg):
if isinstance(arg, list):
return [parse_arg(a) for a in arg]
else:
return (
CLASS_NAMES[arg](arg, {})
if (isinstance(arg, basestring) and arg.startswith('_'))
else arg
)
args = test_data.get('args', [])
args = parse_arg(args)
kwargs = test_data.get('kwargs', {})
kwargs = {
k: parse_arg(v) if isinstance(v, list) else parse_arg(v)
for k, v in kwargs.iteritems()
}
output = method(*args, **kwargs).dict()
assert_equal(self, output, test_data['output'])
class TestQueries(TestCase):
__metaclass__ = JsonTest
jsontest_files = path.join('tests', 'queries')
jsontest_function = lambda self, test_name, test_data: (
_test_query(self, Query, test_name, test_data)
)
class TestAggregates(TestCase):
__metaclass__ = JsonTest
jsontest_files = path.join('tests', 'aggregates')
jsontest_function = lambda self, test_name, test_data: (
_test_query(self, Aggregate, test_name, test_data)
)
class TestSuggesters(TestCase):
__metaclass__ = JsonTest
jsontest_files = path.join('tests', 'suggesters')
jsontest_function = lambda self, test_name, test_data: (
_test_query(self, Suggester, test_name, test_data)
)
class TestFails(TestCase):
def test_no_query(self):
with self.assertRaises(NoQueryError):
Query.doesnotexist()
def test_no_aggregate(self):
with self.assertRaises(NoAggregateError):
Aggregate.doesnotexist()
def test_no_suggester(self):
with self.assertRaises(NoSuggesterError):
Suggester.doesnotexist()
def test_missing_arg(self):
with self.assertRaises(MissingArgError):
Query.term(None)
def test_invalid_arg(self):
# Test passing not a list
with self.assertRaises(ValueError):
Query.bool(must=set())
# And now an invalid list
with self.assertRaises(ValueError):
Query.bool(must=[None])
# And now an invalid list
with self.assertRaises(ValueError):
Query.bool(must=[Aggregate.terms('test', 'test')])
# And now an invalid list
with self.assertRaises(ValueError):
Query.range('field', gte=['error'])
# Empty list should be OK/ignored
Query.bool(must=[])
| 2.5 | 2 |
models/SelectionGAN/person_transfer/tool/rm_insnorm_running_vars.py | xianjian-xie/pose-generation | 445 | 4137 | import torch
ckp_path = './checkpoints/fashion_PATN/latest_net_netG.pth'
save_path = './checkpoints/fashion_PATN_v1.0/latest_net_netG.pth'
states_dict = torch.load(ckp_path)
states_dict_new = states_dict.copy()
for key in states_dict.keys():
if "running_var" in key or "running_mean" in key:
del states_dict_new[key]
torch.save(states_dict_new, save_path) | 2.015625 | 2 |
script/dummy/arm_control.py | amazon-picking-challenge/team_pfn | 7 | 4138 | <filename>script/dummy/arm_control.py
#!/usr/bin/python
# Copyright 2016 Preferred Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
import rospy
import actionlib
from geometry_msgs.msg import Twist, Vector3
from apc2016.msg import *
class DummyArmControl(object):
def __init__(self):
self.srv_lowlevel_left = \
actionlib.SimpleActionServer('move_to_left',
RobotArmMoveAction,
execute_cb=self.cb_move_to_left,
auto_start=False)
self.srv_highlevel_left = \
actionlib.SimpleActionServer('move_to_bin_left',
BinToteMoveAction,
execute_cb=self.cb_move_to_bin_left,
auto_start=False)
self.srv_lowlevel_right = \
actionlib.SimpleActionServer('move_to_right',
RobotArmMoveAction,
execute_cb=self.cb_move_to_right,
auto_start=False)
self.srv_highlevel_right = \
actionlib.SimpleActionServer('move_to_bin_right',
BinToteMoveAction,
execute_cb=self.cb_move_to_bin_right,
auto_start=False)
self.srv_lowlevel_left.start()
self.srv_highlevel_left.start()
self.srv_lowlevel_right.start()
self.srv_highlevel_right.start()
def cb_move_to_left(self, goal):
print "moving away right arm, then moving left arm:"
print goal.target_position
result = RobotArmMoveResult(success=True,
position=goal.target_position)
self.srv_lowlevel_left.set_succeeded(result)
def cb_move_to_bin_left(self, goal):
if goal.position:
pos = goal.position
else:
pos = "photo"
print "looking up position for %s/%s" % (goal.bin, pos)
pos = numpy.asarray([550, -146, 752, 181, 0, 180])
p = Vector3(pos[0], pos[1], pos[2])
r = Vector3(pos[3], pos[4], pos[5])
print "moving away right arm, then moving left arm"
result = BinToteMoveResult(success=True, position=Twist(p, r))
self.srv_highlevel_left.set_succeeded(result)
def cb_move_to_right(self, goal):
print "moving away left arm, then moving right arm:"
print goal.target_position
result = RobotArmMoveResult(success=True,
position=goal.target_position)
self.srv_lowlevel_right.set_succeeded(result)
def cb_move_to_bin_right(self, goal):
if goal.position:
pos = goal.position
else:
pos = "photo"
print "looking up position for %s/%s" % (goal.bin, pos)
pos = numpy.asarray([550, -146, 752, 184, 0, 180])
p = Vector3(pos[0], pos[1], pos[2])
r = Vector3(pos[3], pos[4], pos[5])
print "moving away left arm, then moving right arm"
result = BinToteMoveResult(success=True, position=Twist(p, r))
self.srv_highlevel_right.set_succeeded(result)
if __name__ == '__main__':
rospy.init_node("arm_control_dummy", anonymous=True)
DummyArmControl()
rospy.spin()
| 2.265625 | 2 |
cogs/events.py | rompdodger/RompDodger | 0 | 4139 | <gh_stars>0
import json
import discord
from utils.time import format_time
from utils import utilities
from discord.ext import commands
from discord import Embed
class Events(commands.Cog):
"""Event Handler for RompDodger"""
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
if hasattr(ctx.command, 'on_error'):
return
if isinstance(error, (commands.CommandNotFound, commands.NoPrivateMessage)):
return
elif isinstance(error, commands.MissingRequiredArgument):
await ctx.send(embed=await utilities.generate_embed(f"Command {ctx.prefix} {ctx.command} requires **{error.param.name}** argument, but you missed giving that"))
elif isinstance(error, commands.BotMissingPermissions):
perms = "".join(error.missing_perms)
await ctx.send(embed=await utilities.generate_embed(f"To finish the command bot must have {perms} permission, give the bot appropriate permissions and re-try"))
self.bot.logger.critical(f"Ignoring Exception in {ctx.command}\nError: {error}")
@commands.Cog.listener()
async def on_guild_join(self, guild):
#TODO: implement blacklist sytem
self.bot.logger.info(f"Joined on {guild} > Total Guilds: {len(self.bot.guilds)}")
@commands.Cog.listener()
async def on_guild_remove(self, guild):
self.bot.logger.info(f"Removed on {guild} > Total Guilds: {len(self.bot.guilds)}")
@commands.Cog.listener()
async def on_member_join(self, member):
cursor = await self.bot.db.execute(f"SELECT channel FROM welcomer WHERE guild_id = {member.guild.id}")
chrow = await cursor.fetchone()
if chrow is None:
return
else:
msgrow = await self.bot.db.execute(f"SELECT message FROM welcomer WHERE guild_id = {member.guild.id}")
msg = await msgrow.fetchone()
name = member.name
mention = member.mention
members = member.guild.member_count
server = member.guild
embed = discord.Embed(color=discord.Color.dark_green(), description=msg[0].format(name=name, mention=mention, members=members, server=server))
embed.set_thumbnail(url=f"{member.avatar_url_as(format='png', size=2048)}")
created = format_time(member.created_at)
embed.set_footer(text=f"{member.name} Created on {created}")
ch = self.bot.get_channel(int(chrow[0]))
await ch.send(embed=embed)
await cursor.close()
@commands.Cog.listener()
async def on_member_remove(self, member):
cursor = await self.bot.db.execute(f"SELECT channel FROM leaver WHERE guild_id = {ctx.guild.id}")
chrow = await cursor.fetchone()
if chrow is None:
return
else:
            msgrow = await self.bot.db.execute(f"SELECT msg FROM leaver WHERE guild_id = {member.guild.id}")
            msg = await msgrow.fetchone()
            name = member.name
            mention = member.mention
            server = member.guild
            members = member.guild.member_count
            embed = discord.Embed(color=discord.Color.dark_red(), description=msg[0].format(name=name, mention=mention, members=members, server=server))
            embed.set_thumbnail(url=f"{member.avatar_url_as(format='png', size=2048)}")
            joined = format_time(member.joined_at)
            embed.set_footer(text=f"{member.name} joined on {joined}")
ch = self.bot.get_channel(int(chrow[0]))
await ch.send(embed=embed)
await cursor.close()
def setup(bot):
bot.add_cog(Events(bot)) | 2.625 | 3 |
Task/Parallel-calculations/Python/parallel-calculations-2.py | LaudateCorpus1/RosettaCodeData | 1 | 4140 | import multiprocessing
# ========== #Python3 - concurrent
from math import floor, sqrt
numbers = [
112272537195293,
112582718962171,
112272537095293,
115280098190773,
115797840077099,
1099726829285419]
# numbers = [33, 44, 55, 275]
def lowest_factor(n, _start=3):
if n % 2 == 0:
return 2
search_max = int(floor(sqrt(n))) + 1
for i in range(_start, search_max, 2):
if n % i == 0:
return i
return n
def prime_factors(n, lowest):
pf = []
while n > 1:
pf.append(lowest)
n //= lowest
lowest = lowest_factor(n, max(lowest, 3))
return pf
# ========== #Python3 - concurrent
def prime_factors_of_number_with_lowest_prime_factor(numbers):
pool = multiprocessing.Pool(processes=5)
factors = pool.map(lowest_factor,numbers)
low_factor,number = max((l,f) for l,f in zip(factors,numbers))
all_factors = prime_factors(number,low_factor)
return number,all_factors
if __name__ == '__main__':
print('For these numbers:')
print('\n '.join(str(p) for p in numbers))
number, all_factors = prime_factors_of_number_with_lowest_prime_factor(numbers)
print(' The one with the largest minimum prime factor is {}:'.format(number))
print(' All its prime factors in order are: {}'.format(all_factors))
| 3.421875 | 3 |
setup.py | clin366/airpollutionnowcast | 0 | 4141 | from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='Project: Nowcasting the air pollution using online search log',
author='<NAME>(IR Lab)',
license='MIT',
)
| 1.078125 | 1 |
problems/p0048/s48.py | ahrarmonsur/euler | 1 | 4142 | """
Project Euler Problem 48
Self powers
Solved by <NAME>
The series, 1^1 + 2^2 + 3^3 + ... + 10^10 = 10405071317.
Find the last ten digits of the series, 1^1 + 2^2 + 3^3 + ... + 1000^1000.
"""
def main():
max_digits = 1000
    total = 0
    for i in range(1, max_digits+1):
        total += i**i
    print str(total)[-10:]
main() | 3.5625 | 4 |
BigData/sparkTask/test.py | Rainstyd/rainsty | 1 | 4143 | <filename>BigData/sparkTask/test.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@author: rainsty
@file: test.py
@time: 2020-01-04 18:36:57
@description:
"""
import os
from pyspark.sql import SparkSession
os.environ['JAVA_HOME'] = '/root/jdk'
os.environ['SPARK_HOME'] = '/root/spark'
os.environ['PYTHON_HOME'] = "/root/python"
os.environ['PYSPARK_PYTHON'] = "/usr/bin/python"
os.environ['SPARK_MASTER_IP'] = 'rainsty'
def create_spark_context():
sc = SparkSession.builder \
.appName("TestSparkSession") \
.master("spark://rainsty:7077") \
.config('spark.executor.num', '1')\
.config('spark.executor.memory', '512m')\
.config("spark.executor.cores", '1')\
.config('spark.cores.max', '1')\
.config('spark.driver.memory', '512m') \
.getOrCreate()
return sc
logFile = "/root/spark/README.md"
spark = create_spark_context()
logData = spark.read.text(logFile).cache()
numAs = logData.filter(logData.value.contains('a')).count()
numBs = logData.filter(logData.value.contains('b')).count()
print("Lines with a: %i, lines with b: %i" % (numAs, numBs))
spark.stop()
| 2.546875 | 3 |
esercizi/areaSottesaCompareNumPy.py | gdv/python-alfabetizzazione | 0 | 4144 | <reponame>gdv/python-alfabetizzazione<filename>esercizi/areaSottesaCompareNumPy.py
import numpy as np
import timeit
def effe(x):
y = -x * (x - 1.0)
return y
numIntervalli = input('enter the number of intervals in [0.0, 1.0] ')
deltaIntervallo = 1.0 / float(numIntervalli)
print "larghezza intervallo", deltaIntervallo
start = timeit.default_timer()
xIntervalli = []
yIntervalli = []
i = 0
while i < numIntervalli:
xIntervallo = i*deltaIntervallo
xIntervalli.append(xIntervallo)
yIntervalli.append(effe(xIntervallo))
i += 1
areaSottesa = 0.0
for altezza in yIntervalli:
areaSottesa += altezza * deltaIntervallo
endOld = timeit.default_timer()
print "l'area sottesa dalla curva vale ", areaSottesa
xNPIntervalli = np.linspace(0.0, 1.0, numIntervalli, endpoint=False)
yNPIntervalli = -xNPIntervalli * (xNPIntervalli - 1.0)
npArea = np.sum(yNPIntervalli*deltaIntervallo)
endNP = timeit.default_timer()
# print xNPIntervalli
# print xIntervalli
# print yNPIntervalli
# print yIntervalli
print "area numpy = ", npArea
print "old timing = ", endOld - start, "numPy timing = ", endNP - endOld
| 2.609375 | 3 |
json_codegen/generators/python3_marshmallow/object_generator.py | expobrain/json-schema-codegen | 21 | 4145 | import ast
from json_codegen.generators.python3_marshmallow.utils import Annotations, class_name
class ObjectGenerator(object):
@staticmethod
def _get_property_name(node_assign):
name = node_assign.targets[0]
return name.id
@staticmethod
def _nesting_class(node_assign):
for node in ast.walk(node_assign):
if isinstance(node, ast.Call):
if node.func.attr == "Nested":
return class_name(node.args[0].id)
@staticmethod
def _non_primitive_nested_list(node_assign):
if node_assign.value.func.attr == "List":
return (
len(node_assign.value.args) > 0 and node_assign.value.args[0].func.attr == "Nested"
)
else:
return False
@staticmethod
def _init_non_primitive_nested_class(node_assign, object_, prop):
"""
If the nested list is non-primitive, initialise sub-classes in a list comp
If the nest is primitive, we can simply get it
Marshmallow will do the type marshalling
"""
return ast.ListComp(
elt=ast.Call(
func=ast.Name(id=ObjectGenerator._nesting_class(node_assign)),
args=[ast.Name(id="el")],
keywords=[],
),
generators=[
ast.comprehension(
target=ast.Name(id="el"),
iter=ast.Call(
func=ast.Attribute(value=ast.Name(id=object_), attr="get"),
args=[ast.Str(s=prop), ast.Dict(keys=[], values=[])],
keywords=[],
),
ifs=[],
is_async=0,
)
],
)
@staticmethod
def _get_key_from_object(object_, prop):
return ast.Call(
func=ast.Attribute(value=ast.Name(id=object_), attr="get"),
args=[ast.Str(s=prop)],
keywords=[],
)
@staticmethod
def _hint_required_property(node_assign, value, object_, prop):
for node in ast.walk(node_assign):
if isinstance(node, ast.keyword):
if "required" in node.arg:
value = ast.Subscript(
value=ast.Name(id=object_), slice=ast.Index(value=ast.Str(s=prop))
)
return value
@staticmethod
def _get_default_for_property(node_assign, value, object_, prop):
for node in ast.walk(node_assign):
if isinstance(node, ast.keyword) and node.arg == "required":
return value
for node in ast.walk(node_assign):
if isinstance(node, ast.keyword) and node.arg == "default":
default_value = [
keyword.value
for keyword in node_assign.value.keywords
if keyword.arg == "default"
][0]
value.args.append(default_value)
return value
else:
return value
@staticmethod
def assign_property(node_assign, object_):
"""
Required property -> self.prop = parent_dict["prop"]
Optional property -> self.prop = parent_dict.get("prop")
        Primitive nested list -> self.prop = parent_dict.get("prop")
        Non-primitive nested list -> self.props = [PropertyClass(el) for el in parent_dict.get('props', {})]
"""
prop = ObjectGenerator._get_property_name(node_assign)
if ObjectGenerator._non_primitive_nested_list(node_assign):
value = ObjectGenerator._init_non_primitive_nested_class(node_assign, object_, prop)
else:
# Assign the property as self.prop = table.get("prop")
value = ObjectGenerator._get_key_from_object(object_, prop)
# If the property is required, assign as self.prop = table["prop"]
value = ObjectGenerator._hint_required_property(node_assign, value, object_, prop)
value = ObjectGenerator._get_default_for_property(node_assign, value, object_, prop)
return ast.AnnAssign(
target=ast.Attribute(value=ast.Name(id="self"), attr=prop),
value=value,
simple=0,
annotation=Annotations(node_assign).type,
)
@staticmethod
def construct_class(schema):
name = class_name(schema.name)
name_lower = name.lower()
# Bundle function arguments and keywords
fn_arguments = ast.arguments(
args=[
ast.arg(arg="self", annotation=None),
ast.arg(arg=name_lower, annotation=ast.Name(id="dict")),
],
vararg=None,
kwarg=None,
kwonlyargs=[],
kw_defaults=[],
defaults=[],
)
fn_body = [
ObjectGenerator.assign_property(node, name_lower)
for node in schema.body
if isinstance(node, ast.Assign)
]
# pass if no Assign nodes
if len(fn_body) == 0:
fn_body = [ast.Pass()]
# Generate class constructor
class_body = [
ast.FunctionDef(
name="__init__", args=fn_arguments, body=fn_body, decorator_list=[], returns=None
),
ObjectGenerator._construct_to_("json")(schema),
ObjectGenerator._construct_to_("dict")(schema),
ObjectGenerator.construct_from_json(schema),
]
return ast.ClassDef(name=name, bases=[], body=class_body, decorator_list=[], keywords=[])
@staticmethod
def _construct_to_(output):
if output == "json":
method = "dumps"
elif output == "dict":
method = "dump"
else:
raise NotImplementedError("Only deserialisation to json or dict supported")
def _construct_to_helper(schema):
fn_args = ast.arguments(
args=[ast.arg(arg="self", annotation=None)],
vararg=None,
kwonlyargs=[],
kw_defaults=[],
kwarg=None,
defaults=[],
)
fn_body = [
ast.Return(
value=ast.Attribute(
value=ast.Call(
func=ast.Attribute(
value=ast.Call(
func=ast.Name(id=schema.name),
args=[],
keywords=[
ast.keyword(
arg="strict", value=ast.NameConstant(value=True)
)
],
),
attr=method,
),
args=[ast.Name(id="self")],
keywords=[],
),
attr="data",
)
)
]
return ast.FunctionDef(
name=f"to_{output}", args=fn_args, body=fn_body, decorator_list=[], returns=None
)
return _construct_to_helper
@staticmethod
def construct_from_json(schema):
fn_args = ast.arguments(
args=[
ast.arg(arg="json", annotation=ast.Name(id="str")),
ast.arg(arg="only", annotation=None),
],
vararg=None,
kwonlyargs=[],
kw_defaults=[],
kwarg=None,
defaults=[ast.NameConstant(value=None)],
)
fn_body = [
ast.Return(
ast.Attribute(
value=ast.Call(
func=ast.Attribute(
value=ast.Call(
func=ast.Name(id=schema.name),
args=[],
keywords=[
ast.keyword(arg="strict", value=ast.NameConstant(value=True)),
ast.keyword(arg="only", value=ast.Name(id="only")),
],
),
attr="loads",
),
args=[ast.Name(id="json")],
keywords=[],
),
attr="data",
)
)
]
return ast.FunctionDef(
name="from_json",
args=fn_args,
body=fn_body,
decorator_list=[ast.Name(id="staticmethod")],
returns=None,
)
| 2.625 | 3 |
testing/regrid/testEsmfGridToMeshRegridCsrv.py | xylar/cdat | 62 | 4146 | <filename>testing/regrid/testEsmfGridToMeshRegridCsrv.py
#!/usr/bin/env python
#
# $Id: ESMP_GridToMeshRegridCsrv.py,v 1.5 2012/04/23 23:00:14 rokuingh Exp $
#===============================================================================
# ESMP/examples/ESMP_GridToMeshRegrid.py
#===============================================================================
"""
ESMP_GridToMeshRegridCsrv.py
Two ESMP_Field objects are created, one on a Grid and the other on a Mesh. The
source Field is set to an analytic function, and a conservative regridding
operation is performed from the source to the destination Field. After
the regridding is completed, the destination Field is compared to the
exact solution over that domain.
"""
import cdms2
import ESMP
import numpy as _NP
import unittest
def grid_create():
'''
PRECONDITIONS: ESMP has been initialized.
POSTCONDITIONS: A ESMP_Grid has been created.
'''
ub_x = float(4)
ub_y = float(4)
lb_x = float(0)
lb_y = float(0)
max_x = float(4)
max_y = float(4)
min_x = float(0)
min_y = float(0)
cellwidth_x = (max_x-min_x)/(ub_x-lb_x)
cellwidth_y = (max_y-min_y)/(ub_y-lb_y)
cellcenter_x = cellwidth_x/2
cellcenter_y = cellwidth_y/2
maxIndex = _NP.array([ub_x,ub_y], dtype=_NP.int32)
grid = ESMP.ESMP_GridCreateNoPeriDim(maxIndex,
coordSys=ESMP.ESMP_COORDSYS_CART)
## CORNERS
ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CORNER)
exLB_corner, exUB_corner = ESMP.ESMP_GridGetCoord(grid, \
ESMP.ESMP_STAGGERLOC_CORNER)
# get the coordinate pointers and set the coordinates
[x,y] = [0, 1]
gridXCorner = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CORNER)
gridYCorner = ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CORNER)
#print 'lower corner bounds = [{0},{1}]'.format(exLB_corner[0],exLB_corner[1])
#print 'upper corner bounds = [{0},{1}]'.format(exUB_corner[0],exUB_corner[1])
p = 0
for i1 in range(exLB_corner[1], exUB_corner[1]):
for i0 in range(exLB_corner[0], exUB_corner[0]):
gridXCorner[p] = float(i0)*cellwidth_x
gridYCorner[p] = float(i1)*cellwidth_y
p = p + 1
#print 'Grid corner coordinates:'
p = 0
for i1 in range(exLB_corner[1], exUB_corner[1]):
for i0 in range(exLB_corner[0], exUB_corner[0]):
#print '[{0},{1}]'.format(gridXCorner[p], gridYCorner[p])
p = p + 1
#print '\n'
## CENTERS
ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CENTER)
exLB_center, exUB_center = ESMP.ESMP_GridGetCoord(grid, \
ESMP.ESMP_STAGGERLOC_CENTER)
# get the coordinate pointers and set the coordinates
[x,y] = [0, 1]
gridXCenter = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CENTER)
gridYCenter = ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CENTER)
#print 'lower corner bounds = [{0},{1}]'.format(exLB_center[0],exLB_center[1])
#print 'upper corner bounds = [{0},{1}]'.format(exUB_center[0],exUB_center[1])
p = 0
for i1 in range(exLB_center[1], exUB_center[1]):
for i0 in range(exLB_center[0], exUB_center[0]):
gridXCenter[p] = float(i0)*cellwidth_x + cellwidth_x/2.0
gridYCenter[p] = float(i1)*cellwidth_y + cellwidth_y/2.0
p = p + 1
#print 'Grid center coordinates:'
p = 0
for i1 in range(exLB_center[1], exUB_center[1]):
for i0 in range(exLB_center[0], exUB_center[0]):
#print '[{0},{1}]'.format(gridXCenter[p], gridYCenter[p])
p = p + 1
#print '\n'
return grid
def mesh_create_3x3(mesh):
'''
PRECONDITIONS: An ESMP_Mesh has been declared.
POSTCONDITIONS: A 3x3 ESMP_Mesh has been created.
3x3 Mesh
3.0 2.0 13 -------14 --------15--------16
| | | |
| 7 | 8 | 9 |
| | | |
2.5 1.5 9 ------- 10 --------11--------12
| | | |
| 4 | 5 | 6 |
| | | |
1.5 0.5 5 ------- 6 -------- 7-------- 8
| | | |
| 1 | 2 | 3 |
| | | |
1.0 0.0 1 ------- 2 -------- 3-------- 4
0.0 0.5 1.5 2.0
1.0 1.5 2.5 3.0
Node Ids at corners
Element Ids in centers
(Everything owned by PET 0)
'''
# set up a simple mesh
num_node = 16
num_elem = 9
nodeId = _NP.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16])
'''
# this is for grid to mesh
nodeCoord = _NP.array([1.0,1.0, 1.5,1.0, 2.5,1.0, 3.0,1.0,
1.0,1.5, 1.5,1.5, 2.5,1.5, 3.0,1.5,
1.0,2.5, 1.5,2.5, 2.5,2.5, 3.0,2.5,
1.0,3.0, 1.5,3.0, 2.5,3.0, 3.0,3.0])
'''
# this is for mesh to grid
nodeCoord = _NP.array([0.0,0.0, 1.5,0.0, 2.5,0.0, 4.0,0.0,
0.0,1.5, 1.5,1.5, 2.5,1.5, 4.0,1.5,
0.0,2.5, 1.5,2.5, 2.5,2.5, 4.0,2.5,
0.0,4.0, 1.5,4.0, 2.5,4.0, 4.0,4.0])
nodeOwner = _NP.zeros(num_node, dtype=_NP.int32)
elemId = _NP.array([1,2,3,4,5,6,7,8,9], dtype=_NP.int32)
elemType = _NP.ones(num_elem, dtype=_NP.int32)
elemType*=ESMP.ESMP_MESHELEMTYPE_QUAD
elemConn = _NP.array([0,1,5,4,
1,2,6,5,
2,3,7,6,
4,5,9,8,
5,6,10,9,
6,7,11,10,
8,9,13,12,
9,10,14,13,
10,11,15,14], dtype=_NP.int32)
ESMP.ESMP_MeshAddNodes(mesh,num_node,nodeId,nodeCoord,nodeOwner)
ESMP.ESMP_MeshAddElements(mesh,num_elem,elemId,elemType,elemConn)
#print 'Mesh coordinates:'
for i in range(num_node):
x = nodeCoord[2*i]
y = nodeCoord[2*i+1]
#print '[{0},{1}]'.format(x, y)
#print '\n'
return mesh, nodeCoord, elemType, elemConn
def create_ESMPmesh_3x3():
'''
PRECONDITIONS: ESMP is initialized.
POSTCONDITIONS: An ESMP_Mesh (3x3) has been created and returned as 'mesh'.
'''
# Two parametric dimensions, and three spatial dimensions
mesh = ESMP.ESMP_MeshCreate(2,2)
mesh, nodeCoord, elemType, elemConn = mesh_create_3x3(mesh)
return mesh, nodeCoord, elemType, elemConn
def create_ESMPfieldgrid(grid, name):
'''
PRECONDITIONS: An ESMP_Grid has been created, and 'name' is a string that
will be used to initialize the name of a new ESMP_Field.
POSTCONDITIONS: An ESMP_Field has been created.
'''
# defaults to center staggerloc
field = ESMP.ESMP_FieldCreateGrid(grid, name)
return field
def build_analyticfieldgrid(field, grid):
'''
PRECONDITIONS: An ESMP_Field has been created.
POSTCONDITIONS: The 'field' has been initialized to an analytic field.
'''
# get the field pointer first
fieldPtr = ESMP.ESMP_FieldGetPtr(field)
# get the grid bounds and coordinate pointers
exLB, exUB = ESMP.ESMP_GridGetCoord(grid, ESMP.ESMP_STAGGERLOC_CENTER)
# get the coordinate pointers and set the coordinates
[x,y] = [0, 1]
gridXCoord = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CENTER)
gridYCoord = ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CENTER)
#print "Grid center coordinates"
p = 0
for i1 in range(exLB[1], exUB[1]):
for i0 in range(exLB[0], exUB[0]):
xc = gridXCoord[p]
yc = gridYCoord[p]
fieldPtr[p] = 20.0+xc+yc
#fieldPtr[p] = 20.0+xc*yc+yc**2
#print '[{0},{1}] = {2}'.format(xc,yc,fieldPtr[p])
p = p + 1
#print "\n"
return field
def create_ESMPfield(mesh, name):
'''
PRECONDITIONS: An ESMP_Mesh has been created, and 'name' is a string that
will be used to initialize the name of a new ESMP_Field.
POSTCONDITIONS: An ESMP_Field has been created.
'''
field = ESMP.ESMP_FieldCreate(mesh, name, meshloc=ESMP.ESMP_MESHLOC_ELEMENT)
return field
def build_analyticfield(field, nodeCoord, elemType, elemConn):
'''
PRECONDITIONS: An ESMP_Field has been created.
POSTCONDITIONS: The 'field' has been initialized to an analytic field.
'''
# get the field pointer first
fieldPtr = ESMP.ESMP_FieldGetPtr(field, 0)
# set the field to a vanilla initial field for now
#print "Mesh center coordinates"
offset = 0
for i in range(field.size): # this routine assumes this field is on elements
if (elemType[i] == ESMP.ESMP_MESHELEMTYPE_TRI):
raise NameError("Cannot compute a non-constant analytic field for a mesh\
with triangular elements!")
x1 = nodeCoord[(elemConn[offset])*2]
x2 = nodeCoord[(elemConn[offset+1])*2]
y1 = nodeCoord[(elemConn[offset+1])*2+1]
y2 = nodeCoord[(elemConn[offset+3])*2+1]
x = (x1+x2)/2.0
y = (y1+y2)/2.0
fieldPtr[i] = 20.0+x+y
#fieldPtr[i] = 20.0+x*y+y**2
#print '[{0},{1}] = {2}'.format(x,y,fieldPtr[i])
offset = offset + 4
#print "\n"
return field
def run_regridding(srcfield, dstfield):
'''
PRECONDITIONS: Two ESMP_Fields have been created and a regridding operation
is desired from 'srcfield' to 'dstfield'.
POSTCONDITIONS: An ESMP regridding operation has set the data on 'dstfield'.
'''
# call the regridding functions
routehandle = ESMP.ESMP_FieldRegridStore(srcfield, dstfield,
regridmethod=ESMP.ESMP_REGRIDMETHOD_CONSERVE,
unmappedaction=ESMP.ESMP_UNMAPPEDACTION_ERROR)
ESMP.ESMP_FieldRegrid(srcfield, dstfield, routehandle)
ESMP.ESMP_FieldRegridRelease(routehandle)
return dstfield
def compare_fields(field1, field2):
'''
    PRECONDITIONS: Two ESMP_Fields have been created and a comparison of
the values is desired between 'srcfield' and 'dstfield'.
POSTCONDITIONS: The values on 'srcfield' and 'dstfield' are compared.
                    returns True if the fields are comparable (success)
'''
# get the data pointers for the fields
field1ptr = ESMP.ESMP_FieldGetPtr(field1)
field2ptr = ESMP.ESMP_FieldGetPtr(field2)
# compare point values of field1 to field2
# first verify they are the same size
if (field1.size != field2.size):
raise NameError('compare_fields: Fields must be the same size!')
# initialize to True, and check for False point values
correct = True
totalErr = 0.0
for i in range(field1.size):
err = abs(field1ptr[i] - field2ptr[i])/abs(field2ptr[i])
if err > .06:
correct = False
print "ACCURACY ERROR - "+str(err)
print "field1 = {0} : field2 = {1}\n".format(field1ptr[i], field2ptr[i])
totalErr += err
if correct:
print " - PASS - Total Error = "+str(totalErr)
return True
else:
print " - FAIL - Total Error = "+str(totalErr)
return False
class TestESMP_GridToMeshRegridCsrv(unittest.TestCase):
def setUp(self):
pass
def test_test1(self):
# create two unique ESMP_Mesh objects
grid = grid_create()
mesh, nodeCoord, elemType, elemConn = create_ESMPmesh_3x3()
'''
# this is for grid to mesh
# create ESMP_Field objects on the Meshes
srcfield = create_ESMPfieldgrid(grid, 'srcfield')
dstfield = create_ESMPfield(mesh, 'dstfield')
dstfield2 = create_ESMPfield(mesh, 'dstfield_exact')
# initialize the Fields to an analytic function
srcfield = build_analyticfieldgrid(srcfield, grid)
dstfield2 = build_analyticfield(dstfield2, nodeCoord, elemType, elemConn)
'''
# this is for mesh to grid
# create ESMP_Field objects on the Meshes
srcfield = create_ESMPfield(mesh, 'srcfield')
dstfield = create_ESMPfieldgrid(grid, 'dstfield')
dstfield2 = create_ESMPfieldgrid(grid, 'dstfield_exact')
# initialize the Fields to an analytic function
srcfield = build_analyticfield(srcfield, nodeCoord, elemType, elemConn)
dstfield2 = build_analyticfieldgrid(dstfield2, grid)
# run the ESMF regridding
dstfield = run_regridding(srcfield, dstfield)
# compare results and output PASS or FAIL
ok = compare_fields(dstfield, dstfield2)
# clean up
ESMP.ESMP_FieldDestroy(srcfield)
ESMP.ESMP_FieldDestroy(dstfield)
ESMP.ESMP_FieldDestroy(dstfield2)
ESMP.ESMP_GridDestroy(grid)
ESMP.ESMP_MeshDestroy(mesh)
self.assertEqual(ok, True)
if __name__ == '__main__':
ESMP.ESMP_LogSet(True)
print "" # Spacer
suite = unittest.TestLoader().loadTestsFromTestCase(TestESMP_GridToMeshRegridCsrv)
unittest.TextTestRunner(verbosity = 1).run(suite)
| 2.34375 | 2 |
test/mock_module.py | ariffyasri/lale | 1 | 4147 | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn.neighbors
# class that follows scikit-learn conventions but lacks schemas,
# for the purpose of testing how to wrap an operator without schemas
class UnknownOp:
def __init__(self, n_neighbors=5, algorithm='auto'):
self._hyperparams = {
'n_neighbors': n_neighbors, 'algorithm': algorithm}
def get_params(self, deep:bool=False):
return self._hyperparams
def fit(self, X, y):
self._wrapped_model = sklearn.neighbors.KNeighborsClassifier(
**self._hyperparams)
def predict(self, X):
return self._wrapped_model.predict(X)
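

# Hedged usage sketch (illustrative addition; the toy data below is made up
# for the example). The wrapper is driven exactly like the scikit-learn
# estimator it delegates to.
if __name__ == '__main__':
    X = [[0.0], [1.0], [2.0], [3.0]]
    y = [0, 0, 1, 1]
    op = UnknownOp(n_neighbors=1)
    op.fit(X, y)                    # builds the wrapped KNeighborsClassifier
    print(op.predict([[2.5]]))      # nearest neighbour is 3.0 -> prints [1]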
| 2.15625 | 2 |
scripts/beautify.py | lukaschoebel/POTUSgen | 0 | 4148 | <gh_stars>0
import json
import re
import sys
def beautify(name):
''' Loading, filtering and saving the JSON tweet file to a newly generated .txt file
:type: name: String
:rtype: output: .txt
'''
filename = name + '.json'
output_name = name + "_filtered.txt"
with open(filename, "r", encoding="utf-8") as input:
with open(output_name, "w", encoding="utf-8") as output:
document = json.load(input)
# Filter only the messages that are not retweeted
# >> Version i): for tweets from archive "master_XXXX.json"
# document = [x['full_text'] for x in document if x['user']['screen_name'] == 'realDonaldTrump' and 'full_text' in x]
# >> Version ii): for self-scraped tweets via https://github.com/bpb27/twitter_scraping
# document = [x['text'] for x in document if x['user']['screen_name'] == 'realDonaldTrump' and 'text' in x]
# >> Version iii): Data set from https://github.com/MatthewWolff/MarkovTweets/
document = [x['text'] for x in document]
# Clean and only include not retweeted messages
document = [deep_clean(x) for x in document if deep_clean(x) is not None]
# Preventing unicode characters by ensuring false ascii encoding
for _, value in enumerate(document):
output.write(json.dumps(value, ensure_ascii=False) + "\n")
# json.dump(document, output, ensure_ascii=False, indent=4)
print(f">> Sucessfully cleaned {filename} and saved it to {output_name}")
def deep_clean(s):
''' Deep cleaning of filtered tweets. Replaces common symbols and kills quotation marks/apostrophes.
:type: s: String
:rtype: s: String
'''
# Return None if given tweet is a retweet
if s[:2] == 'RT':
return None
# Delete all URLs because they don't make for interesting tweets.
s = re.sub(r'http[\S]*', '', s)
# Replace some common unicode symbols with raw character variants
s = re.sub(r'\\u2026', '...', s)
s = re.sub(r'…', '', s)
s = re.sub(r'\\u2019', "'", s)
s = re.sub(r'\\u2018', "'", s)
s = re.sub(r"&", r"&", s)
s = re.sub(r'\\n', r"", s)
# Delete emoji modifying characters
s = re.sub(chr(127996), '', s)
s = re.sub(chr(65039), '', s)
# Kill apostrophes & punctuation because they confuse things.
s = re.sub(r"'", r"", s)
s = re.sub(r"“", r"", s)
s = re.sub(r"”", r"", s)
s = re.sub('[()]', r'', s)
s = re.sub(r'"', r"", s)
# Collapse multiples of certain chars
s = re.sub('([.-])+', r'\1', s)
# Pad sentence punctuation chars with whitespace
s = re.sub('([^0-9])([.,!?])([^0-9])', r'\1 \2 \3', s)
# Remove extra whitespace (incl. newlines)
s = ' '.join(s.split()).lower()
# Define emoji_pattern
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U0001F1F2-\U0001F1F4" # Macau flag
u"\U0001F1E6-\U0001F1FF" # flags
u"\U0001F600-\U0001F64F"
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
u"\U0001f926-\U0001f937"
u"\U0001F1F2"
u"\U0001F1F4"
u"\U0001F620"
u"\u200d"
u"\u2640-\u2642"
"]+", flags=re.UNICODE)
s = emoji_pattern.sub(r'', s)
# Care for a special case where the first char is a "."
# return s[1:] if s[0] == "." else s
if len(s):
return s[1:] if s[0] == "." else s
return None
if __name__ == "__main__":
if len(sys.argv) - 1: beautify(sys.argv[1]) | 3.421875 | 3 |
result2gaofentype/pkl2txt_ggm.py | G-Naughty/Fine-grained-OBB-Detection | 2 | 4149 | import BboxToolkit as bt
import pickle
import copy
import numpy as np
path1="/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/dets.pkl"
path2="/home/hnu1/GGM/OBBDetection/data/FaIR1M/test/annfiles/ori_annfile.pkl"#
with open(path2,'rb') as f: #/home/disk/FAIR1M_1000_split/val/annfiles/ori_annfile.pkl
data2 = pickle.load(f)
with open(path1,'rb') as f:
obbdets = pickle.load(f)
polydets=copy.deepcopy(obbdets)
for i in range(len(obbdets)):
for j in range(len(obbdets[0][1])):
        data = obbdets[i][1][j]
        if data.size != 0:
polys=[]
for k in range(len(data)):
poly = bt.obb2poly(data[k][0:5])
poly=np.append(poly,data[k][5])
polys.append(poly)
else:
polys=[]
polydets[i][1][j]=polys
savepath="/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/result_txt/"
for i in range(len(polydets)):
txtfile=savepath+polydets[i][0]+".txt"
f = open(txtfile, "w")
for j in range(len(polydets[0][1])):
if polydets[i][1][j]!=[]:
for k in range(len(polydets[i][1][j])):
f.write(str(polydets[i][1][j][k][0])+" "+
str(polydets[i][1][j][k][1])+" "+
str(polydets[i][1][j][k][2])+" "+
str(polydets[i][1][j][k][3])+" "+
str(polydets[i][1][j][k][4])+" "+
str(polydets[i][1][j][k][5])+" "+
str(polydets[i][1][j][k][6])+" "+
str(polydets[i][1][j][k][7])+" "+
str(data2["cls"][j])+" "+
str(polydets[i][1][j][k][8])+"\n")
f.close() | 2.078125 | 2 |
initializer_3d.py | HarperCallahan/taichi_ferrofluid | 0 | 4150 | import taichi as ti
import utils
from apic_extension import *
@ti.data_oriented
class Initializer3D: # tmp initializer
def __init__(self, res, x0, y0, z0, x1, y1, z1):
self.res = res
self.x0 = int(res * x0)
self.y0 = int(res * y0)
self.z0 = int(res * z0)
self.x1 = int(res * x1)
self.y1 = int(res * y1)
self.z1 = int(res * z1)
@ti.kernel
def init_kernel(self, cell_type : ti.template()):
for i, j, k in cell_type:
if i >= self.x0 and i <= self.x1 and \
j >= self.y0 and j <= self.y1 and \
k >= self.z0 and k <= self.z1:
cell_type[i, j, k] = utils.FLUID
def init_scene(self, simulator):
self.init_kernel(simulator.cell_type)
dx = simulator.dx
simulator.level_set.initialize_with_aabb((self.x0 * dx, self.y0 * dx, self.z0 * dx), (self.x1 * dx, self.y1 * dx, self.z1 * dx))
| 2.359375 | 2 |
copy_block_example.py | MilesCranmer/bifrost_paper | 0 | 4151 | <gh_stars>0
from copy import deepcopy
import bifrost as bf
from bifrost.pipeline import TransformBlock
from bifrost.ndarray import copy_array
class CopyBlock(TransformBlock):# $\tikzmark{block-start}$
"""Copy the input ring to output ring"""
def __init__(self, iring, space):
super(CopyBlock, self).__init__(iring)
self.orings = [self.create_ring(space=space)]
def on_sequence(self, iseq):
return deepcopy(iseq.header)
def on_data(self, ispan, ospan):
copy_array(ospan.data, ispan.data)#$\tikzmark{block-end}$
def copy_block(iring, space):
return CopyBlock(iring, space)
bc = bf.BlockChainer()
bc.blocks.read_wav(['hey_jude.wav'], gulp_nframe=4096)
bc.custom(copy_block)(space='cuda')# $\tikzmark{gpu-start}$
bc.views.split_axis('time', 256, label='fine_time')
bc.blocks.fft(axes='fine_time', axis_labels='freq')
bc.blocks.detect(mode='scalar')
bc.blocks.transpose(['time', 'pol', 'freq'])#$\tikzmark{gpu-end}$
bc.blocks.copy(space='system')
bc.blocks.quantize('i8')
bc.blocks.write_sigproc()
pipeline = bf.get_default_pipeline()# $\tikzmark{pipeline-start}$
pipeline.shutdown_on_signals()
pipeline.run()#$\tikzmark{pipeline-end}$
| 2.109375 | 2 |
monasca_persister/conf/influxdb.py | zhangjianweibj/monasca-persister | 0 | 4152 | # (C) Copyright 2016-2017 Hewlett Packard Enterprise Development LP
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
influxdb_opts = [
cfg.StrOpt('database_name',
help='database name where metrics are stored',
default='mon'),
cfg.HostAddressOpt('ip_address',
help='Valid IP address or hostname '
'to InfluxDB instance'),
cfg.PortOpt('port',
help='port to influxdb',
default=8086),
cfg.StrOpt('user',
help='influxdb user ',
default='mon_persister'),
cfg.StrOpt('password',
secret=True,
help='influxdb password')]
influxdb_group = cfg.OptGroup(name='influxdb',
title='influxdb')
def register_opts(conf):
conf.register_group(influxdb_group)
conf.register_opts(influxdb_opts, influxdb_group)
def list_opts():
return influxdb_group, influxdb_opts
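

# Hedged usage sketch (illustrative addition): how these options are typically
# pulled into an oslo.config namespace. `CONF` here is an assumed ConfigOpts
# instance, not something defined by this module.
#
#   CONF = cfg.ConfigOpts()
#   register_opts(CONF)
#   CONF(args=[], project='monasca')
#   print(CONF.influxdb.database_name)   # -> 'mon'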
| 1.945313 | 2 |
test_print_json.py | huangsen365/boto3-docker | 0 | 4153 | import json
your_json = '["foo", {"bar":["baz", null, 1.0, 2]}]'
parsed = json.loads(your_json)
print(type(your_json))
print(type(parsed))
#print(json.dumps(parsed, indent=4, sort_keys=True)) | 3.15625 | 3 |
src/solutions/common/integrations/cirklo/api.py | goubertbrent/oca-backend | 0 | 4154 | <filename>src/solutions/common/integrations/cirklo/api.py
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import cloudstorage
import logging
from babel.dates import format_datetime
from datetime import datetime
from google.appengine.ext import ndb, deferred, db
from typing import List
from xlwt import Worksheet, Workbook, XFStyle
from mcfw.cache import invalidate_cache
from mcfw.consts import REST_TYPE_TO
from mcfw.exceptions import HttpBadRequestException, HttpForbiddenException, HttpNotFoundException
from mcfw.restapi import rest
from mcfw.rpc import returns, arguments
from rogerthat.bizz.gcs import get_serving_url
from rogerthat.bizz.service import re_index_map_only
from rogerthat.consts import FAST_QUEUE
from rogerthat.models import ServiceIdentity
from rogerthat.models.settings import ServiceInfo
from rogerthat.rpc import users
from rogerthat.rpc.users import get_current_session
from rogerthat.utils import parse_date
from rogerthat.utils.service import create_service_identity_user
from shop.models import Customer
from solutions import translate
from solutions.common.bizz import SolutionModule, broadcast_updates_pending
from solutions.common.bizz.campaignmonitor import send_smart_email_without_check
from solutions.common.consts import OCA_FILES_BUCKET
from solutions.common.dal import get_solution_settings
from solutions.common.integrations.cirklo.cirklo import get_city_id_by_service_email, whitelist_merchant, \
list_whitelisted_merchants, list_cirklo_cities
from solutions.common.integrations.cirklo.models import CirkloCity, CirkloMerchant, SignupLanguageProperty, \
SignupMails, CirkloAppInfo
from solutions.common.integrations.cirklo.to import CirkloCityTO, CirkloVoucherListTO, CirkloVoucherServiceTO, \
WhitelistVoucherServiceTO
from solutions.common.restapi.services import _check_is_city
def _check_permission(city_sln_settings):
if SolutionModule.CIRKLO_VOUCHERS not in city_sln_settings.modules:
raise HttpForbiddenException()
if len(city_sln_settings.modules) != 1:
_check_is_city(city_sln_settings.service_user)
@rest('/common/vouchers/cities', 'get', silent_result=True)
@returns([dict])
@arguments(staging=bool)
def api_list_cirklo_cities(staging=False):
return list_cirklo_cities(staging)
@rest('/common/vouchers/services', 'get', silent_result=True)
@returns(CirkloVoucherListTO)
@arguments()
def get_cirklo_vouchers_services():
city_service_user = users.get_current_user()
city_sln_settings = get_solution_settings(city_service_user)
_check_permission(city_sln_settings)
to = CirkloVoucherListTO()
to.total = 0
to.results = []
to.cursor = None
to.more = False
cirklo_city = CirkloCity.get_by_service_email(city_service_user.email())
if not cirklo_city:
return to
cirklo_merchants = list_whitelisted_merchants(cirklo_city.city_id)
cirklo_dict = {}
cirklo_emails = []
for merchant in cirklo_merchants:
if merchant['email'] in cirklo_emails:
logging.error('Duplicate found %s', merchant['email'])
continue
cirklo_emails.append(merchant['email'])
cirklo_dict[merchant['email']] = merchant
qry = CirkloMerchant.list_by_city_id(cirklo_city.city_id) # type: List[CirkloMerchant]
osa_merchants = []
for merchant in qry:
if merchant.service_user_email:
osa_merchants.append(merchant)
else:
cirklo_merchant = cirklo_dict.get(merchant.data['company']['email'])
if cirklo_merchant:
if merchant.data['company']['email'] in cirklo_emails:
cirklo_emails.remove(merchant.data['company']['email'])
if not merchant.whitelisted:
merchant.whitelisted = True
merchant.put()
elif merchant.whitelisted:
merchant.whitelisted = False
merchant.put()
whitelist_date = cirklo_merchant['createdAt'] if cirklo_merchant else None
merchant_registered = 'shopInfo' in cirklo_merchant if cirklo_merchant else False
to.results.append(
CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered, u'Cirklo signup'))
if osa_merchants:
customer_to_get = [Customer.create_key(merchant.customer_id) for merchant in osa_merchants]
customers_dict = {customer.id: customer for customer in db.get(customer_to_get)}
info_keys = [ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT)
for merchant in osa_merchants]
models = ndb.get_multi(info_keys)
for service_info, merchant in zip(models, osa_merchants):
customer = customers_dict[merchant.customer_id]
if not customer.service_user:
merchant.key.delete()
continue
cirklo_merchant = cirklo_dict.get(customer.user_email)
should_save = False
if cirklo_merchant:
if customer.user_email in cirklo_emails:
cirklo_emails.remove(customer.user_email)
if not merchant.whitelisted:
merchant.whitelisted = True
should_save = True
elif merchant.whitelisted:
merchant.whitelisted = False
should_save = True
if should_save:
merchant.put()
service_identity_user = create_service_identity_user(customer.service_user)
deferred.defer(re_index_map_only, service_identity_user)
whitelist_date = cirklo_merchant['createdAt'] if cirklo_merchant else None
merchant_registered = 'shopInfo' in cirklo_merchant if cirklo_merchant else False
service_to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered, u'OSA signup')
service_to.populate_from_info(service_info, customer)
to.results.append(service_to)
for email in cirklo_emails:
cirklo_merchant = cirklo_dict[email]
to.results.append(CirkloVoucherServiceTO.from_cirklo_info(cirklo_merchant))
return to
@rest('/common/vouchers/services/whitelist', 'put', type=REST_TYPE_TO)
@returns(CirkloVoucherServiceTO)
@arguments(data=WhitelistVoucherServiceTO)
def whitelist_voucher_service(data):
city_service_user = users.get_current_user()
city_sln_settings = get_solution_settings(city_service_user)
_check_permission(city_sln_settings)
cirklo_city = CirkloCity.get_by_service_email(city_service_user.email()) # type: CirkloCity
if not cirklo_city:
raise HttpNotFoundException('No cirklo settings found.')
is_cirklo_only_merchant = '@' not in data.id
if is_cirklo_only_merchant:
merchant = CirkloMerchant.create_key(long(data.id)).get() # type: CirkloMerchant
language = merchant.get_language()
else:
merchant = CirkloMerchant.create_key(data.id).get()
language = get_solution_settings(users.User(merchant.service_user_email)).main_language
if data.accepted:
email_id = cirklo_city.get_signup_accepted_mail(language)
if not email_id:
            raise HttpBadRequestException('City settings aren\'t fully set up yet.')
whitelist_merchant(cirklo_city.city_id, data.email)
deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1,
_queue=FAST_QUEUE)
else:
email_id = cirklo_city.get_signup_accepted_mail(language)
if not email_id:
            raise HttpBadRequestException('City settings aren\'t fully set up yet.')
deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1,
_queue=FAST_QUEUE)
whitelist_date = datetime.now().isoformat() + 'Z' if data.accepted else None
if not is_cirklo_only_merchant:
if data.accepted:
merchant.whitelisted = True
else:
merchant.denied = True
merchant.put()
service_info = ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT).get()
customer = Customer.get_by_id(merchant.customer_id) # type: Customer
if data.accepted:
service_identity_user = create_service_identity_user(customer.service_user)
deferred.defer(re_index_map_only, service_identity_user)
to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'OSA signup')
to.populate_from_info(service_info, customer)
return to
else:
if data.accepted:
merchant.whitelisted = True
else:
merchant.denied = True
merchant.put()
return CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'Cirklo signup')
@rest('/common/vouchers/cirklo', 'get')
@returns(CirkloCityTO)
@arguments()
def api_vouchers_get_cirklo_settings():
service_user = users.get_current_user()
city = CirkloCity.get_by_service_email(service_user.email())
return CirkloCityTO.from_model(city)
@rest('/common/vouchers/cirklo', 'put')
@returns(CirkloCityTO)
@arguments(data=CirkloCityTO)
def api_vouchers_save_cirklo_settings(data):
service_user = users.get_current_user()
if not get_current_session().shop:
lang = get_solution_settings(service_user).main_language
raise HttpForbiddenException(translate(lang, 'no_permission'))
other_city = CirkloCity.get_by_service_email(service_user.email()) # type: CirkloCity
if not data.city_id:
if other_city:
other_city.key.delete()
return CirkloCityTO.from_model(None)
key = CirkloCity.create_key(data.city_id)
city = key.get()
if not city:
city = CirkloCity(key=key, service_user_email=service_user.email())
elif city.service_user_email != service_user.email():
raise HttpBadRequestException('City id %s is already in use by another service' % data.city_id)
if other_city and other_city.key != key:
other_city.key.delete()
invalidate_cache(get_city_id_by_service_email, service_user.email())
city.logo_url = data.logo_url
city.signup_enabled = data.signup_enabled
city.signup_logo_url = data.signup_logo_url
city.signup_names = None
city.signup_mail = SignupMails.from_to(data.signup_mail)
if data.signup_name_nl and data.signup_name_fr:
city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl,
fr=data.signup_name_fr)
elif data.signup_name_nl:
city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl,
fr=data.signup_name_nl)
elif data.signup_name_fr:
city.signup_names = SignupLanguageProperty(nl=data.signup_name_fr,
fr=data.signup_name_fr)
og_info = city.app_info and city.app_info.to_dict()
info = CirkloAppInfo(enabled=data.app_info.enabled,
title=data.app_info.title,
buttons=data.app_info.buttons)
sln_settings = get_solution_settings(service_user)
if info.to_dict() != og_info and not sln_settings.ciklo_vouchers_only():
city.app_info = info
sln_settings.updates_pending = True
sln_settings.put()
broadcast_updates_pending(sln_settings)
city.put()
return CirkloCityTO.from_model(city)
@rest('/common/vouchers/cirklo/export', 'post')
@returns(dict)
@arguments()
def api_export_cirklo_services():
service_user = users.get_current_user()
city_sln_settings = get_solution_settings(service_user)
_check_permission(city_sln_settings)
all_services = get_cirklo_vouchers_services()
if all_services.cursor:
raise NotImplementedError()
book = Workbook(encoding='utf-8')
sheet = book.add_sheet('Cirklo') # type: Worksheet
language = city_sln_settings.main_language
sheet.write(0, 0, translate(language, 'reservation-name'))
sheet.write(0, 1, translate(language, 'Email'))
sheet.write(0, 2, translate(language, 'address'))
sheet.write(0, 3, translate(language, 'Phone number'))
sheet.write(0, 4, translate(language, 'created'))
sheet.write(0, 5, translate(language, 'merchant_registered'))
date_format = XFStyle()
date_format.num_format_str = 'dd/mm/yyyy'
row = 0
for service in all_services.results:
row += 1
sheet.write(row, 0, service.name)
sheet.write(row, 1, service.email)
sheet.write(row, 2, service.address)
sheet.write(row, 3, service.phone_number)
sheet.write(row, 4, parse_date(service.creation_date), date_format)
sheet.write(row, 5, translate(language, 'Yes') if service.merchant_registered else translate(language, 'No'))
date = format_datetime(datetime.now(), format='medium', locale='en_GB')
gcs_path = '/%s/tmp/cirklo/export-cirklo-%s.xls' % (OCA_FILES_BUCKET, date.replace(' ', '-'))
content_type = 'application/vnd.ms-excel'
with cloudstorage.open(gcs_path, 'w', content_type=content_type) as gcs_file:
book.save(gcs_file)
deferred.defer(cloudstorage.delete, gcs_path, _countdown=86400)
return {
'url': get_serving_url(gcs_path),
}
| 1.382813 | 1 |
aplpy/tests/test_grid.py | nbrunett/aplpy | 0 | 4155 | import matplotlib
matplotlib.use('Agg')
import numpy as np
from astropy.tests.helper import pytest
from .. import FITSFigure
def test_grid_addremove():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.remove_grid()
f.add_grid()
f.close()
def test_grid_showhide():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.hide()
f.grid.show()
f.close()
def test_grid_spacing():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_xspacing(1.)
f.grid.set_xspacing('tick')
with pytest.raises(ValueError):
f.grid.set_xspacing('auto')
f.grid.set_yspacing(2.)
f.grid.set_yspacing('tick')
with pytest.raises(ValueError):
f.grid.set_yspacing('auto')
f.close()
def test_grid_color():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_color('black')
f.grid.set_color('#003344')
f.grid.set_color((1.0, 0.4, 0.3))
f.close()
def test_grid_alpha():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_alpha(0.0)
f.grid.set_alpha(0.3)
f.grid.set_alpha(1.0)
f.close()
def test_grid_linestyle():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_linestyle('solid')
f.grid.set_linestyle('dashed')
f.grid.set_linestyle('dotted')
f.close()
def test_grid_linewidth():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_linewidth(0)
f.grid.set_linewidth(2)
f.grid.set_linewidth(5)
f.close()
| 2 | 2 |
vz.py | ponyatov/vz | 0 | 4156 | import os, sys
class Object:
## @name constructor
def __init__(self, V):
self.value = V
self.nest = []
def box(self, that):
if isinstance(that, Object): return that
if isinstance(that, str): return S(that)
raise TypeError(['box', type(that), that])
## @name dump / string
def test(self): return self.dump(test=True)
def __repr__(self): return self.dump(test=False)
def dump(self, cycle=[], depth=0, prefix='', test=False):
# head
def pad(depth): return '\n' + '\t' * depth
ret = pad(depth) + self.head(prefix, test)
# subtree
return ret
def head(self, prefix='', test=False):
gid = '' if test else f' @{id(self):x}'
return f'{prefix}<{self.tag()}:{self.val()}>{gid}'
def __format__(self, spec=''):
if not spec: return self.val()
raise TypeError(['__format__', spec])
def tag(self): return self.__class__.__name__.lower()
def val(self): return f'{self.value}'
## @name operator
def __iter__(self):
return iter(self.nest)
def __floordiv__(self, that):
self.nest.append(self.box(that)); return self
class Primitive(Object):
pass
class S(Primitive):
def __init__(self, V=None, end=None, pfx=None, sfx=None):
super().__init__(V)
self.end = end; self.pfx = pfx; self.sfx = sfx
def gen(self, to, depth=0):
ret = ''
if self.pfx is not None:
ret += f'{to.tab*depth}{self.pfx}\n'
if self.value is not None:
ret += f'{to.tab*depth}{self.value}\n'
for i in self:
ret += i.gen(to, depth + 1)
if self.end is not None:
ret += f'{to.tab*depth}{self.end}\n'
if self.sfx is not None:
ret += f'{to.tab*depth}{self.sfx}\n'
return ret
class Sec(S):
def gen(self, to, depth=0):
ret = ''
if self.pfx is not None:
ret += f'{to.tab*depth}{self.pfx}\n' if self.pfx else '\n'
if self.nest and self.value is not None:
ret += f'{to.tab*depth}{to.comment} \\ {self}\n'
for i in self:
ret += i.gen(to, depth + 0)
if self.nest and self.value is not None:
ret += f'{to.tab*depth}{to.comment} / {self}\n'
if self.sfx is not None:
ret += f'{to.tab*depth}{self.sfx}\n' if self.pfx else '\n'
return ret
class IO(Object):
def __init__(self, V):
super().__init__(V)
self.path = V
class Dir(IO):
def __floordiv__(self, that):
assert isinstance(that, IO)
that.path = f'{self.path}/{that.path}'
return super().__floordiv__(that)
def sync(self):
try: os.mkdir(self.path)
except FileExistsError: pass
for i in self: i.sync()
class File(IO):
def __init__(self, V, ext='', tab=' ' * 4, comment='#'):
super().__init__(V + ext)
self.top = Sec(); self.bot = Sec()
self.tab = tab; self.comment = comment
def sync(self):
with open(self.path, 'w') as F:
F.write(self.top.gen(self))
for i in self: F.write(i.gen(self))
F.write(self.bot.gen(self))
class giti(File):
def __init__(self, V='.gitignore'):
super().__init__(V)
self.bot // f'!{self}'
class Makefile(File):
def __init__(self, V='Makefile'):
super().__init__(V, tab='\t')
class pyFile(File):
def __init__(self, V, ext='.py'):
super().__init__(V, ext)
class jsonFile(File):
def __init__(self, V, ext='.json', comment='//'):
super().__init__(V, ext, comment=comment)
class Meta(Object): pass
class Class(Meta):
def __init__(self, C, sup=[]):
assert callable(C)
super().__init__(C.__name__)
self.clazz = C; self.sup = sup
def gen(self, to, depth=0):
ret = S(f'class {self}:', pfx='') // 'pass'
return ret.gen(to, depth)
class Project(Meta):
def __init__(self, V=None, title='', about=''):
if not V: V = os.getcwd().split('/')[-1]
super().__init__(V)
#
self.TITLE = title if title else f'{self}'
self.ABOUT = about
self.AUTHOR = '<NAME>'
self.EMAIL = '<EMAIL>'
self.GITHUB = 'https://github.com/ponyatov'
self.YEAR = 2020
self.LICENSE = 'All rights reserved'
self.COPYRIGHT = f'(c) {self.AUTHOR} <{self.EMAIL}> {self.YEAR} {self.LICENSE}'
#
self.dirs()
self.mk()
self.src()
self.vscode()
self.apt()
def apt(self):
self.apt = File('apt', '.txt'); self.d // self.apt
self.apt \
// 'git make curl' // 'code meld' \
// 'python3 python3-venv' \
// 'build-essential g++'
def vscode(self):
self.vscode = Dir('.vscode'); self.d // self.vscode
self.settings()
self.tasks()
def settings(self):
self.settings = jsonFile('settings'); self.vscode // self.settings
#
def multi(key, cmd):
return (S('{', '},')
// f'"command": "multiCommand.{key}",'
// (S('"sequence": [', ']')
// '"workbench.action.files.saveAll",'
// (S('{"command": "workbench.action.terminal.sendSequence",')
// f'"args": {{"text": "\\u000D {cmd} \\u000D"}}}}'
)))
self.multi = \
(Sec('multi')
// (S('"multiCommand.commands": [', '],')
// multi('f11', 'make meta')
// multi('f12', 'make all')
))
#
self.files = (Sec()
// f'"{self}/**":true,'
)
self.exclude = \
(Sec()
// (S('"files.exclude": {', '},') // self.files))
self.watcher = \
(Sec()
// (S('"files.watcherExclude": {', '},') // self.files))
self.assoc = \
(Sec()
// (S('"files.associations": {', '},')))
self.files = (Sec('files', pfx='')
// self.exclude
// self.watcher
// self.assoc)
#
self.editor = (Sec('editor', pfx='')
// '"editor.tabSize": 4,'
// '"editor.rulers": [80],'
// '"workbench.tree.indent": 32,'
)
#
self.settings \
// (S('{', '}')
// self.multi
// self.files
// self.editor)
def tasks(self):
self.tasks = jsonFile('tasks'); self.vscode // self.tasks
def task(clazz, cmd):
return (S('{', '},')
// f'"label": "{clazz}: {cmd}",'
// f'"type": "shell",'
// f'"command": "make {cmd}",'
// f'"problemMatcher": []'
)
self.tasks \
// (S('{', '}')
// '"version": "2.0.0",'
// (S('"tasks": [', ']')
// task('project', 'install')
// task('project', 'update')
// task('git', 'dev')
// task('git', 'shadow')
))
def src(self):
self.py()
self.test()
self.config()
def config(self):
self.config = pyFile('config'); self.d // self.config
self.config \
// f"{'SECURE_KEY':<11} = {os.urandom(0x22)}" \
// f"{'HOST':<11} = '127..0.0.1'" \
// f"{'PORT':<11} = 12345"
def py(self):
self.py = pyFile(f'{self}'); self.d // self.py
self.py \
// 'import os, sys'
for i in [Object, S, Sec, IO, Dir, File, Meta, Class, Project]:
self.py // Class(i)
self.py // Class(Primitive, [Object])
self.py \
// S('Project().sync()', pfx='')
def test(self):
self.test = pyFile(f'test_{self}'); self.d // self.test
self.test \
// 'import pytest' \
// f'from {self} import *' \
// 'def test_any(): assert True'
def dirs(self):
self.d = Dir(f'{self}'); self.giti = giti(); self.d // self.giti
self.giti.top // '*~' // '*.swp' // '*.log'; self.giti.top.sfx = ''
self.giti // f'/{self}/' // '/__pycache__/'
self.giti.bot.pfx = ''
#
self.bin = Dir('bin'); self.d // self.bin
def mk(self):
self.mk = Makefile(); self.d // self.mk
#
self.mk.var = Sec('var', pfx=''); self.mk // self.mk.var
self.mk.var \
// f'{"MODULE":<11} = $(notdir $(CURDIR))' \
// f'{"OS":<11} = $(shell uname -s)' \
// f'{"CORES":<11} = $(shell grep processor /proc/cpuinfo | wc -l)'
#
self.mk.dir = Sec('dir', pfx=''); self.mk // self.mk.dir
self.mk.dir \
// f'{"CWD":<11} = $(CURDIR)' \
// f'{"BIN":<11} = $(CWD)/bin' \
// f'{"DOC":<11} = $(CWD)/doc' \
// f'{"LIB":<11} = $(CWD)/lib' \
// f'{"SRC":<11} = $(CWD)/src' \
// f'{"TMP":<11} = $(CWD)/tmp'
#
self.mk.tool = Sec('tool', pfx=''); self.mk // self.mk.tool
self.mk.tool \
// f'CURL = curl -L -o' \
// f'PY = $(shell which python3)' \
// f'PYT = $(shell which pytest)' \
// f'PEP = $(shell which autopep8)'
#
self.mk.package = Sec('package', pfx=''); self.mk // self.mk.package
self.mk.package \
// f'SYSLINUX_VER = 6.0.3'
#
self.mk.src = Sec('src', pfx=''); self.mk // self.mk.src
self.mk.src \
// f'Y += $(MODULE).py test_$(MODULE).py' \
// f'P += config.py' \
// f'S += $(Y)'
#
self.mk.cfg = Sec('cfg', pfx=''); self.mk // self.mk.cfg
self.mk.cfg \
// f'PEPS = E26,E302,E305,E401,E402,E701,E702'
#
self.mk.all = Sec('all', pfx=''); self.mk // self.mk.all
self.mk.all \
// (S('meta: $(Y)', pfx='.PHONY: meta')
// '$(MAKE) test'
// '$(PY) $(MODULE).py'
// '$(PEP) --ignore=$(PEPS) --in-place $?')
self.mk.all \
// (S('test: $(Y)', pfx='\n.PHONY: test')
// '$(PYT) test_$(MODULE).py')
#
self.mk.rule = Sec('rule', pfx=''); self.mk // self.mk.rule
#
self.mk.doc = Sec('doc', pfx=''); self.mk // self.mk.doc
self.mk.doc \
// S('doc: doc/pyMorphic.pdf', pfx='.PHONY: doc')
self.mk.doc \
// (S('doc/pyMorphic.pdf:')
// '$(CURL) $@ http://www.diva-portal.org/smash/get/diva2:22296/FULLTEXT01.pdf')
#
self.mk.install = Sec('install', pfx=''); self.mk // self.mk.install
self.mk.install // '.PHONY: install update'
self.mk.install \
// (S('install: $(OS)_install doc')
// '$(MAKE) test'
)
self.mk.install \
// (S('update: $(OS)_update doc')
// '$(MAKE) test'
)
self.mk.install \
// (S('Linux_install Linux_update:',
pfx='.PHONY: Linux_install Linux_update')
// 'sudo apt update'
// 'sudo apt install -u `cat apt.txt`')
#
self.mk.merge = Sec('merge', pfx=''); self.mk // self.mk.merge
self.mk.merge \
// 'SHADOW ?= ponymuck'
self.mk.merge \
// 'MERGE = Makefile .gitignore README.md apt.txt $(S)' \
// 'MERGE += .vscode bin doc lib src tmp'
self.mk.merge \
// (S('dev:', pfx='\n.PHONY: dev')
// 'git push -v'
// 'git checkout $@'
// 'git checkout $(SHADOW) -- $(MERGE)'
)
self.mk.merge \
// (S('shadow:', pfx='\n.PHONY: shadow')
// 'git push -v'
// 'git checkout $(SHADOW)'
)
self.mk.merge \
// (S('release:', pfx='\n.PHONY: release')
)
self.mk.merge \
// (S('zip:', pfx='\n.PHONY: zip')
)
def sync(self):
self.readme()
self.d.sync()
def readme(self):
self.readme = File('README', '.md'); self.d // self.readme
self.readme \
// f'#  `{self}`' // f'## {self.TITLE}'
self.readme \
// '' // self.COPYRIGHT // '' // f'github: {self.GITHUB}/{self}'
self.readme // self.ABOUT
Project(
title='ViZual language environment',
about='''
* object (hyper)graph interpreter
'''
).sync()
| 3.265625 | 3 |
src/server.py | FlakM/fastai_text_serving | 0 | 4157 | import asyncio
import logging
import aiohttp
import uvicorn
from fastai.vision import *
from starlette.applications import Starlette
from starlette.middleware.cors import CORSMiddleware
from starlette.responses import JSONResponse
# put your model URL here
model_file_url = 'https://www.dropbox.com/s/...?raw=1'
model_file_name = 'model'
path = Path(__file__).parent
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
app = Starlette()
app.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['X-Requested-With', 'Content-Type'])
def hashsum(path, hex=True, hash_type=hashlib.md5):
hashinst = hash_type()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(hashinst.block_size * 128), b''):
hashinst.update(chunk)
return hashinst.hexdigest() if hex else hashinst.digest()
async def download_file(url, dest):
if dest.exists(): return
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
data = await response.read()
with open(dest, 'wb') as f: f.write(data)
async def setup_learner():
model_file = path.parent / 'models' / f'{model_file_name}.pkl'
if not model_file.exists():
logging.info("Will download file %s from %s", model_file, model_file_url)
await download_file(model_file_url, model_file)
logging.info("Downloaded file md5sum: %s", hashsum(model_file))
else:
logging.info("File %s already exists will reuse md5sum: %s", model_file, hashsum(model_file))
# Loading the saved model using fastai's load_learner method
model = load_learner(model_file.parent, f'{model_file_name}.pkl')
classes = model.data.classes
return model, classes
loop = asyncio.get_event_loop()
tasks = [asyncio.ensure_future(setup_learner())]
model, classes = loop.run_until_complete(asyncio.gather(*tasks))[0]
loop.close()
def sortByProb(val):
return val["prob"]
@app.route('/predict', methods=['POST'])
async def analyze(request):
data = await request.form()
text = data['text']
predict_class, predict_idx, predict_values = model.predict(text)
results = []
for idx, val in enumerate(predict_values):
prob = val.item()
if prob > 0.01:
record = {"value": classes[idx], "prob": prob}
results.append(record)
results.sort(key=sortByProb, reverse=True)
return JSONResponse(results[:5])
if __name__ == '__main__':
    if 'serve' in sys.argv: uvicorn.run(app, host='0.0.0.0', port=4000)
| 2.21875 | 2 |
tcpserver.py | justforbalance/CSnet | 0 | 4158 | from socket import *
serverPort = 12001
serverSocket = socket(AF_INET, SOCK_STREAM)
serverSocket.bind(('', serverPort))
serverSocket.listen(1)
print("the server is ready to receive")
while True:
connectionSocket,addr = serverSocket.accept()
sentence = connectionSocket.recv(1024).decode()
sentence = sentence.upper()
connectionSocket.send(sentence.encode())
connectionSocket.close() | 3.046875 | 3 |
src/geneflow/extend/local_workflow.py | jhphan/geneflow2 | 7 | 4159 | """This module contains the GeneFlow LocalWorkflow class."""
class LocalWorkflow:
"""
A class that represents the Local Workflow objects.
"""
def __init__(
self,
job,
config,
parsed_job_work_uri
):
"""
Instantiate LocalWorkflow class.
"""
self._job = job
self._config = config
self._parsed_job_work_uri = parsed_job_work_uri
def initialize(self):
"""
Initialize the LocalWorkflow class.
This workflow class has no additional functionality.
Args:
None.
Returns:
True.
"""
return True
def init_data(self):
"""
Initialize any data specific to this context.
"""
return True
def get_context_options(self):
"""
Return dict of options specific for this context.
Args:
None.
Returns:
{} - no options specific for this context.
"""
return {}
| 2.6875 | 3 |
S12/tensornet/engine/ops/lr_scheduler.py | abishek-raju/EVA4B2 | 4 | 4160 | from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, OneCycleLR
def step_lr(optimizer, step_size, gamma=0.1, last_epoch=-1):
"""Create LR step scheduler.
Args:
optimizer (torch.optim): Model optimizer.
step_size (int): Frequency for changing learning rate.
gamma (float): Factor for changing learning rate. (default: 0.1)
last_epoch (int): The index of last epoch. (default: -1)
Returns:
StepLR: Learning rate scheduler.
"""
return StepLR(optimizer, step_size=step_size, gamma=gamma, last_epoch=last_epoch)
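# Minimal usage sketch (the optimizer and training helpers below are placeholders,
# not part of this module):
#   scheduler = step_lr(optimizer, step_size=10, gamma=0.5)
#   for epoch in range(num_epochs):
#       train_one_epoch(...)
#       scheduler.step()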
def reduce_lr_on_plateau(optimizer, factor=0.1, patience=10, verbose=False, min_lr=0):
"""Create LR plateau reduction scheduler.
Args:
optimizer (torch.optim): Model optimizer.
factor (float, optional): Factor by which the learning rate will be reduced.
(default: 0.1)
        patience (int, optional): Number of epochs with no improvement after which the
            learning rate will be reduced. (default: 10)
verbose (bool, optional): If True, prints a message to stdout for each update.
(default: False)
min_lr (float, optional): A scalar or a list of scalars. A lower bound on the
learning rate of all param groups or each group respectively. (default: 0)
Returns:
ReduceLROnPlateau instance.
"""
return ReduceLROnPlateau(
optimizer, factor=factor, patience=patience, verbose=verbose, min_lr=min_lr
)
def one_cycle_lr(
optimizer, max_lr, epochs, steps_per_epoch, pct_start=0.5, div_factor=10.0, final_div_factor=10000
):
"""Create One Cycle Policy for Learning Rate.
Args:
optimizer (torch.optim): Model optimizer.
max_lr (float): Upper learning rate boundary in the cycle.
epochs (int): The number of epochs to train for. This is used along with
steps_per_epoch in order to infer the total number of steps in the cycle.
steps_per_epoch (int): The number of steps per epoch to train for. This is
used along with epochs in order to infer the total number of steps in the cycle.
pct_start (float, optional): The percentage of the cycle (in number of steps)
spent increasing the learning rate. (default: 0.5)
div_factor (float, optional): Determines the initial learning rate via
initial_lr = max_lr / div_factor. (default: 10.0)
final_div_factor (float, optional): Determines the minimum learning rate via
min_lr = initial_lr / final_div_factor. (default: 1e4)
Returns:
OneCycleLR instance.
"""
return OneCycleLR(
optimizer, max_lr, epochs=epochs, steps_per_epoch=steps_per_epoch,
pct_start=pct_start, div_factor=div_factor, final_div_factor=final_div_factor
)
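# Minimal usage sketch for the one-cycle policy (model/optimizer/train_loader are
# placeholders, not part of this module); the scheduler is stepped once per batch:
#   optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
#   scheduler = one_cycle_lr(optimizer, max_lr=0.1, epochs=10,
#                            steps_per_epoch=len(train_loader))
#   ...optimizer.step(); scheduler.step()  # inside the batch loop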
| 2.625 | 3 |
armi/reactor/tests/test_zones.py | youngmit/armi | 0 | 4161 | # Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for Zones"""
import copy
import unittest
import armi
from armi import settings
from armi.reactor import assemblies
from armi.reactor import blueprints
from armi.reactor import geometry
from armi.reactor import grids
from armi.reactor import reactors
from armi.reactor import zones
from armi.reactor.flags import Flags
from armi.reactor.tests import test_reactors
from armi.utils import pathTools
from armi.settings.fwSettings import globalSettings
THIS_DIR = pathTools.armiAbsDirFromName(__name__)
class Zone_TestCase(unittest.TestCase):
def setUp(self):
bp = blueprints.Blueprints()
geom = geometry.SystemLayoutInput()
geom.symmetry = "third core periodic"
r = reactors.Reactor(settings.getMasterCs(), bp)
r.add(reactors.Core("Core", settings.getMasterCs(), geom))
r.core.spatialGrid = grids.hexGridFromPitch(1.0)
aList = []
for ring in range(10):
a = assemblies.HexAssembly("fuel")
a.spatialLocator = r.core.spatialGrid[ring, 1, 0]
a.parent = r.core
aList.append(a)
self.aList = aList
def test_addAssemblyLocations(self):
zone = zones.Zone("TestZone")
zone.addAssemblyLocations(self.aList)
for a in self.aList:
self.assertIn(a.getLocation(), zone)
self.assertRaises(RuntimeError, zone.addAssemblyLocations, self.aList)
def test_iteration(self):
locs = [a.getLocation() for a in self.aList]
zone = zones.Zone("TestZone")
zone.addAssemblyLocations(self.aList)
for aLoc in zone:
self.assertIn(aLoc, locs)
# loop twice to make sure it iterates nicely.
for aLoc in zone:
self.assertIn(aLoc, locs)
def test_addRing(self):
zone = zones.Zone("TestZone")
zone.addRing(5)
self.assertIn("A5003", zone)
self.assertNotIn("A6002", zone)
zone.addRing(6, 3, 9)
self.assertIn("A6003", zone)
self.assertIn("A6009", zone)
self.assertNotIn("A6002", zone)
self.assertNotIn("A6010", zone)
class Zones_InReactor(unittest.TestCase):
def setUp(self):
self.o, self.r = test_reactors.loadTestReactor()
def test_buildRingZones(self):
o, r = self.o, self.r
cs = o.cs
cs[globalSettings.CONF_ZONING_STRATEGY] = "byRingZone"
cs["ringZones"] = []
zonez = zones.buildZones(r.core, cs)
self.assertEqual(len(list(zonez)), 1)
self.assertEqual(9, r.core.numRings)
cs["ringZones"] = [5, 8]
zonez = zones.buildZones(r.core, cs)
self.assertEqual(len(list(zonez)), 2)
zone = zonez["ring-1"]
self.assertEqual(len(zone), (5 * (5 - 1) + 1))
zone = zonez["ring-2"]
# Note that the actual number of rings in the reactor model is 9. Even though we
        # asked for the last zone to go to 8, the zone engine should bump it out. Not
# sure if this is behavior that we want to preserve, but at least it's being
# tested properly now.
self.assertEqual(len(zone), (9 * (9 - 1) + 1) - (5 * (5 - 1) + 1))
cs["ringZones"] = [5, 7, 8]
zonez = zones.buildZones(r.core, cs)
self.assertEqual(len(list(zonez)), 3)
zone = zonez["ring-3"]
self.assertEqual(len(zone), 30) # rings 8 and 9. See above comment
def test_removeZone(self):
o, r = self.o, self.r
cs = o.cs
cs[globalSettings.CONF_ZONING_STRATEGY] = "byRingZone"
cs["ringZones"] = [5, 8]
# produce 2 zones, with the names ringzone0 and ringzone1
daZones = zones.buildZones(r.core, cs)
daZones.removeZone("ring-1")
        # The names list should now contain only the other remaining zone
self.assertEqual(["ring-2"], daZones.names)
# if indexed like a dict, the zones object should give a key error from the removed zone
with self.assertRaises(KeyError):
daZones["ring-1"]
# Ensure we can still iterate through our zones object
for name in daZones.names:
aZone = daZones[name]
def test_findZoneAssemblyIsIn(self):
cs = self.o.cs
cs["ringZones"] = [5, 7, 8]
daZones = zones.buildZones(self.r.core, cs)
for zone in daZones:
a = self.r.core.getAssemblyWithStringLocation(zone.locList[0])
aZone = daZones.findZoneAssemblyIsIn(a)
self.assertEqual(aZone, zone)
        # let's test that we get None (and a warning) if the assembly does not exist in a zone
a = self.r.core.getAssemblyWithStringLocation(
daZones[daZones.names[0]].locList[0]
) # get assem from first zone
daZones.removeZone(
daZones.names[0]
) # remove a zone to ensure that our assem does not have a zone anymore
self.assertEqual(daZones.findZoneAssemblyIsIn(a), None)
class Zones_InRZReactor(unittest.TestCase):
def test_splitZones(self):
# Test to make sure that we can split a zone containing control and fuel assemblies.
# Also test that we can separate out assemblies with differing numbers of blocks.
o, r = test_reactors.loadTestReactor(inputFileName="partisnTestReactor.yaml")
cs = o.cs
cs["splitZones"] = False
cs[globalSettings.CONF_ZONING_STRATEGY] = "byRingZone"
cs["ringZones"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
diverseZone = "ring-4"
r.core.buildZones(cs)
daZones = r.core.zones
        # let's make one of the assemblies have an extra block
zoneLocations = daZones.getZoneLocations(diverseZone)
originalAssemblies = r.core.getLocationContents(
zoneLocations, assemblyLevel=True
)
fuel = [a for a in originalAssemblies if a.hasFlags(Flags.FUEL)][0]
newBlock = copy.deepcopy(fuel[-1])
fuel.add(newBlock)
# should contain a zone for every ring zone
# we only want one ring zone for this test, containing assemblies of different types.
zoneTup = tuple(daZones.names)
for zoneName in zoneTup:
if zoneName != diverseZone:
daZones.removeZone(zoneName)
# this should split diverseZone into multiple zones by nodalization type.
cs["splitZones"] = True
zones.splitZones(r.core, cs, daZones)
# test to make sure that we split the ring zone correctly
self.assertEqual(len(daZones["ring-4-primary-control-5"]), 2)
self.assertEqual(len(daZones["ring-4-middle-fuel-5"]), 3)
self.assertEqual(len(daZones["ring-4-middle-fuel-6"]), 1)
def test_createHotZones(self):
# Test to make sure createHotZones identifies the highest p/f location in a zone
# Test to make sure createHotZones can remove the peak assembly from that zone and place it in a new zone
# Test that the power in the old zone and the new zone is conserved.
        # Test that a hot zone cannot be created from a single-assembly zone.
o, r = test_reactors.loadTestReactor(inputFileName="partisnTestReactor.yaml")
cs = o.cs
cs["splitZones"] = False
cs[globalSettings.CONF_ZONING_STRATEGY] = "byRingZone"
cs["ringZones"] = [9] # build one giant zone
r.core.buildZones(cs)
daZones = r.core.zones
originalassemblies = []
originalPower = 0.0
peakZonePFRatios = []
# Create a single assembly zone to verify that it will not create a hot zone
single = zones.Zone("single")
daZones.add(single)
aLoc = r.core.getFirstAssembly(Flags.FUEL).getLocation()
single.append(aLoc)
# Set power and flow.
# Also gather channel peak P/F ratios, assemblies and power.
for zone in daZones:
powerToFlow = []
zoneLocations = daZones.getZoneLocations(zone.name)
assems = r.core.getLocationContents(zoneLocations, assemblyLevel=True)
power = 300.0
flow = 300.0
for a in assems:
a.getFirstBlock().p.power = power
assemblyPower = a.calcTotalParam("power")
a[-1].p.THmassFlowRate = flow
powerToFlow.append(assemblyPower / a[-1].p.THmassFlowRate)
originalPower += assemblyPower
originalassemblies.append(a)
power += 1
flow -= 1
peakZonePFRatios.append(max(powerToFlow))
daZones = zones.createHotZones(r.core, daZones)
# Test that the hot zones have the peak P/F from the host channels
i = 0
for zone in daZones:
if zone.hotZone:
hotAssemLocation = daZones.getZoneLocations(zone.name)
hotAssem = r.core.getLocationContents(
hotAssemLocation, assemblyLevel=True
)[0]
self.assertEqual(
peakZonePFRatios[i],
hotAssem.calcTotalParam("power") / hotAssem[-1].p.THmassFlowRate,
)
i += 1
powerAfterHotZoning = 0.0
assembliesAfterHotZoning = []
# Check that power is conserved and that we did not lose any assemblies
for zone in daZones:
locs = daZones.getZoneLocations(zone.name)
assems = r.core.getLocationContents(locs, assemblyLevel=True)
for a in assems:
assembliesAfterHotZoning.append(a)
powerAfterHotZoning += a.calcTotalParam("power")
self.assertEqual(powerAfterHotZoning, originalPower)
self.assertEqual(len(assembliesAfterHotZoning), len(originalassemblies))
# check that the original zone with 1 channel has False for hotzone
self.assertEqual(single.hotZone, False)
# check that we have the correct number of hot and normal zones.
hotCount = 0
normalCount = 0
for zone in daZones:
if zone.hotZone:
hotCount += 1
else:
normalCount += 1
self.assertEqual(hotCount, 1)
self.assertEqual(normalCount, 2)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Zones_InReactor.test_buildRingZones']
unittest.main()
| 1.828125 | 2 |
islam_fitz/survey/migrations/0005_auto_20210712_2132.py | OmarEhab177/Islam_fitz | 0 | 4162 | <filename>islam_fitz/survey/migrations/0005_auto_20210712_2132.py<gh_stars>0
# Generated by Django 3.1.12 on 2021-07-12 19:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('survey', '0004_lastpage_whatsapp_button'),
]
operations = [
migrations.RemoveField(
model_name='lastpage',
name='whatsapp_button',
),
migrations.AddField(
model_name='lastpage',
name='whatsapp_number',
field=models.CharField(default=1, max_length=50),
preserve_default=False,
),
]
| 1.484375 | 1 |
Examples/WorkingWithOutlookMSGs/CreateAndSaveOutlookNote.py | Muzammil-khan/Aspose.Email-Python-Dotnet | 5 | 4163 | <filename>Examples/WorkingWithOutlookMSGs/CreateAndSaveOutlookNote.py<gh_stars>1-10
import aspose.email.mapi.msg as msg
from aspose.email.mapi import MapiNote, NoteSaveFormat, NoteColor
def run():
dataDir = "Data/"
#ExStart: CreateAndSaveOutlookNote
note3 = MapiNote()
note3.subject = "Blue color note"
note3.body = "This is a blue color note";
note3.color = NoteColor.YELLOW
note3.height = 500
note3.width = 500
note3.save(dataDir + "CreateAndSaveOutlookNote_out.msg", NoteSaveFormat.MSG)
#ExEnd: CreateAndSaveOutlookNote
if __name__ == '__main__':
run()
| 2.6875 | 3 |
nonebot/internal/adapter/template.py | mobyw/nonebot2 | 0 | 4164 | <gh_stars>0
import functools
from string import Formatter
from typing import (
TYPE_CHECKING,
Any,
Set,
Dict,
List,
Type,
Tuple,
Union,
Generic,
Mapping,
TypeVar,
Callable,
Optional,
Sequence,
cast,
overload,
)
if TYPE_CHECKING:
from .message import Message, MessageSegment
TM = TypeVar("TM", bound="Message")
TF = TypeVar("TF", str, "Message")
FormatSpecFunc = Callable[[Any], str]
FormatSpecFunc_T = TypeVar("FormatSpecFunc_T", bound=FormatSpecFunc)
class MessageTemplate(Formatter, Generic[TF]):
"""消息模板格式化实现类。
参数:
template: 模板
factory: 消息类型工厂,默认为 `str`
"""
@overload
def __init__(
self: "MessageTemplate[str]", template: str, factory: Type[str] = str
) -> None:
...
@overload
def __init__(
self: "MessageTemplate[TM]", template: Union[str, TM], factory: Type[TM]
) -> None:
...
def __init__(self, template, factory=str) -> None:
self.template: TF = template
self.factory: Type[TF] = factory
self.format_specs: Dict[str, FormatSpecFunc] = {}
def add_format_spec(
self, spec: FormatSpecFunc_T, name: Optional[str] = None
) -> FormatSpecFunc_T:
name = name or spec.__name__
if name in self.format_specs:
raise ValueError(f"Format spec {name} already exists!")
self.format_specs[name] = spec
return spec
def format(self, *args, **kwargs):
"""根据传入参数和模板生成消息对象"""
return self._format(args, kwargs)
def format_map(self, mapping: Mapping[str, Any]) -> TF:
"""根据传入字典和模板生成消息对象, 在传入字段名不是有效标识符时有用"""
return self._format([], mapping)
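    # Illustrative usage of the two entry points above (a sketch, not part of the
    # original class):
    #   tpl = MessageTemplate("hello {name}")   # str factory by default
    #   tpl.format(name="world")                # -> "hello world"
    #   tpl.format_map({"name": "world"})       # same result, via a mapping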
def _format(self, args: Sequence[Any], kwargs: Mapping[str, Any]) -> TF:
msg = self.factory()
if isinstance(self.template, str):
msg += self.vformat(self.template, args, kwargs)
elif isinstance(self.template, self.factory):
template = cast("Message[MessageSegment]", self.template)
for seg in template:
msg += self.vformat(str(seg), args, kwargs) if seg.is_text() else seg
else:
raise TypeError("template must be a string or instance of Message!")
return msg # type:ignore
def vformat(
self, format_string: str, args: Sequence[Any], kwargs: Mapping[str, Any]
) -> TF:
used_args = set()
result, _ = self._vformat(format_string, args, kwargs, used_args, 2)
self.check_unused_args(list(used_args), args, kwargs)
return result
def _vformat(
self,
format_string: str,
args: Sequence[Any],
kwargs: Mapping[str, Any],
used_args: Set[Union[int, str]],
recursion_depth: int,
auto_arg_index: int = 0,
) -> Tuple[TF, int]:
if recursion_depth < 0:
raise ValueError("Max string recursion exceeded")
results: List[Any] = [self.factory()]
for (literal_text, field_name, format_spec, conversion) in self.parse(
format_string
):
# output the literal text
if literal_text:
results.append(literal_text)
# if there's a field, output it
if field_name is not None:
# this is some markup, find the object and do
# the formatting
# handle arg indexing when empty field_names are given.
if field_name == "":
if auto_arg_index is False:
raise ValueError(
"cannot switch from manual field specification to "
"automatic field numbering"
)
field_name = str(auto_arg_index)
auto_arg_index += 1
elif field_name.isdigit():
if auto_arg_index:
raise ValueError(
"cannot switch from manual field specification to "
"automatic field numbering"
)
# disable auto arg incrementing, if it gets
# used later on, then an exception will be raised
auto_arg_index = False
# given the field_name, find the object it references
# and the argument it came from
obj, arg_used = self.get_field(field_name, args, kwargs)
used_args.add(arg_used)
assert format_spec is not None
# do any conversion on the resulting object
obj = self.convert_field(obj, conversion) if conversion else obj
# expand the format spec, if needed
format_control, auto_arg_index = self._vformat(
format_spec,
args,
kwargs,
used_args,
recursion_depth - 1,
auto_arg_index,
)
# format the object and append to the result
formatted_text = self.format_field(obj, str(format_control))
results.append(formatted_text)
return functools.reduce(self._add, results), auto_arg_index
def format_field(self, value: Any, format_spec: str) -> Any:
formatter: Optional[FormatSpecFunc] = self.format_specs.get(format_spec)
if formatter is None and not issubclass(self.factory, str):
segment_class: Type["MessageSegment"] = self.factory.get_segment_class()
method = getattr(segment_class, format_spec, None)
if callable(method) and not cast(str, method.__name__).startswith("_"):
formatter = getattr(segment_class, format_spec)
return (
super().format_field(value, format_spec)
if formatter is None
else formatter(value)
)
def _add(self, a: Any, b: Any) -> Any:
try:
return a + b
except TypeError:
return a + str(b)
| 2.546875 | 3 |
Others/code_festival/code-festival-2015-final-open/a.py | KATO-Hiro/AtCoder | 2 | 4165 | <gh_stars>1-10
# -*- coding: utf-8 -*-
def main():
s, t, u = map(str, input().split())
if len(s) == 5 and len(t) == 7 and len(u) == 5:
print('valid')
else:
print('invalid')
if __name__ == '__main__':
main()
| 3.25 | 3 |
python_Project/Day_16-20/test_2.py | Zzz-ww/Python-prac | 0 | 4166 | <filename>python_Project/Day_16-20/test_2.py
"""
The pitfall of nested lists
"""
names = ['关羽', '张飞', '赵云', '马超', '黄忠']
courses = ['语文', '数学', '英语']
# Enter the scores for three courses for each of the five students
scores = [[None] * len(courses) for _ in range(len(names))]
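# Note on the "nested list pitfall" this file refers to: building the grid as
# [[None] * len(courses)] * len(names) would make every row the same inner list
# object, so assigning one score would show up in every row; the comprehension
# above creates an independent inner list for each student.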
for row, name in enumerate(names):
for col, course in enumerate(courses):
scores[row][col] = float(input(f'请输入{name}的{course}的成绩:'))
print(scores) | 3.671875 | 4 |
asr/dataloaders/am_dataloader.py | Z-yq/audioSamples.github.io | 1 | 4167 | <gh_stars>1-10
import logging
import random
import numpy as np
import pypinyin
import tensorflow as tf
from augmentations.augments import Augmentation
from utils.speech_featurizers import SpeechFeaturizer
from utils.text_featurizers import TextFeaturizer
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
import time
class AM_DataLoader():
def __init__(self, config_dict, training=True):
self.speech_config = config_dict['speech_config']
self.phone_config = config_dict['inp_config']
self.text_config = config_dict['tar_config']
self.running_config=config_dict['running_config']
self.augment_config = config_dict['augments_config']
self.streaming = self.speech_config['streaming']
self.chunk = self.speech_config['sample_rate'] * self.speech_config['streaming_bucket']
self.batch = config_dict['running_config']['batch_size']
self.speech_featurizer = SpeechFeaturizer(self.speech_config)
self.phone_featurizer = TextFeaturizer(self.phone_config)
self.text_featurizer = TextFeaturizer(self.text_config)
self.make_file_list( training)
self.augment = Augmentation(self.augment_config)
self.init_text_to_vocab()
self.epochs = 1
self.steps = 0
def return_data_types(self):
return (tf.float32, tf.int32, tf.int32, tf.int32,tf.int32)
def return_data_shape(self):
return (
tf.TensorShape([self.batch, None, 1]),
tf.TensorShape([self.batch, ]),
tf.TensorShape([self.batch, None]),
tf.TensorShape([self.batch, ]),
tf.TensorShape([self.batch, None]),
)
def get_per_epoch_steps(self):
return len(self.train_list) // self.batch
def eval_per_epoch_steps(self):
return len(self.test_list) // self.batch
def init_text_to_vocab(self):
pypinyin.load_phrases_dict({'调大': [['tiáo'], ['dà']],
'调小': [['tiáo'], ['xiǎo']],
'调亮': [['tiáo'], ['liàng']],
'调暗': [['tiáo'], ['àn']],
'肖': [['xiāo']],
'英雄传': [['yīng'], ['xióng'], ['zhuàn']],
'新传': [['xīn'], ['zhuàn']],
'外传': [['wài'], ['zhuàn']],
'正传': [['zhèng'], ['zhuàn']], '水浒传': [['shuǐ'], ['hǔ'], ['zhuàn']]
})
def text_to_vocab_func(txt):
pins = pypinyin.pinyin(txt)
pins = [i[0] for i in pins]
phones = []
for pin in pins:
if pin in self.phone_featurizer.vocab_array:
phones += [pin]
else:
phones += list(pin)
# print(phones)
return phones
self.text_to_vocab = text_to_vocab_func
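        # Illustrative behaviour of the helper above (a sketch, not executed here):
        # text starting with the registered phrase '调大' yields ['tiáo', 'dà', ...];
        # any pinyin token missing from the phone vocabulary is split into its
        # individual characters instead.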
def make_file_list(self, training=True):
train_list=self.speech_config['train_list']
test_list=self.speech_config['eval_list']
if training:
with open(train_list, encoding='utf-8') as f:
train_list = f.readlines()
train_list = [i.strip() for i in train_list if i != '']
self.train_list = train_list
np.random.shuffle(self.train_list)
with open(test_list, encoding='utf-8') as f:
data = f.readlines()
data = [i.strip() for i in data if i != '']
self.test_list = data
self.train_offset = 0
self.test_offset = 0
logging.info('load train list {} test list {}'.format(len(self.train_list), len(self.test_list)))
else:
with open(test_list, encoding='utf-8') as f:
data = f.readlines()
data = [i.strip() for i in data if i != '']
self.test_list = data
self.test_offset = 0
def only_chinese(self, word):
txt = ''
for ch in word:
if '\u4e00' <= ch <= '\u9fff':
txt += ch
else:
continue
return txt
def eval_data_generator(self):
sample = []
speech_features = []
input_length = []
phones = []
phones_length = []
txts = []
max_input = 0
batch = self.batch
for i in range(batch * 10):
line = self.test_list[self.test_offset]
self.test_offset += 1
if self.test_offset > len(self.test_list) - 1:
self.test_offset = 0
wp, txt = line.strip().split('\t')
try:
data = self.speech_featurizer.load_wav(wp)
except:
logging.info('{} load data failed,skip'.format(wp))
continue
if len(data) < 400:
continue
elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']:
logging.info(
'{} duration out of wav_max_duration({}),skip'.format(wp, self.speech_config['wav_max_duration']))
continue
if self.speech_config['only_chinese']:
txt = self.only_chinese(txt)
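            # The branch below computes in_len, the expected number of encoder output
            # frames: in non-streaming mode the sample count is divided by
            # (reduction_factor * samples-per-ms * stride_ms); in streaming mode the
            # waveform is split into fixed-size chunks, so in_len is the chunk count
            # times the frames produced per chunk (both rounded up).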
if not self.streaming:
speech_feature = data / np.abs(data).max()
speech_feature = np.expand_dims(speech_feature, -1)
in_len = len(speech_feature) // (
self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) *
self.speech_config['stride_ms'])
else:
speech_feature = data
speech_feature = np.expand_dims(speech_feature, -1)
reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \
self.speech_config['stride_ms']
in_len = len(speech_feature) // self.chunk
if len(speech_feature) % self.chunk != 0:
in_len += 1
chunk_times = self.chunk // reduce
if self.chunk % reduce != 0:
chunk_times += 1
in_len *= chunk_times
py = self.text_to_vocab(txt)
if self.check_valid(py, self.phone_featurizer.vocab_array) is not True:
logging.info(' {} txt phone {} not all in tokens,continue'.format(txt, self.check_valid(py,
self.phone_featurizer.vocab_array)))
continue
if self.check_valid(txt, self.text_featurizer.vocab_array) is not True:
logging.info(' {} txt phone {} not all in tokens,continue'.format(txt, self.check_valid(py,
self.text_featurizer.vocab_array)))
continue
txt = list(txt)
phone_feature = self.phone_featurizer.extract(py)
text_feature = self.text_featurizer.extract(txt)+[self.text_featurizer.endid()]
if in_len < len(phone_feature):
logging.info('{} feature length < phone length,continue'.format(wp))
continue
max_input = max(max_input, len(speech_feature))
speech_features.append(speech_feature)
input_length.append(in_len)
phones.append(np.array(phone_feature))
txts.append(np.array(text_feature))
phones_length.append(len(phone_feature))
sample.append(line)
if len(sample) == batch:
break
if self.streaming:
max_input = max_input // self.chunk * self.chunk + self.chunk
speech_features = self.speech_featurizer.pad_signal(speech_features, max_input)
if self.streaming:
reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \
self.speech_config['stride_ms']
max_input = max_input // self.chunk * self.chunk + self.chunk
max_in_len = max_input // self.chunk
chunk_times = self.chunk // reduce
if self.chunk % reduce != 0:
chunk_times += 1
max_in_len *= chunk_times
input_length = np.clip(input_length, 0, max_in_len)
speech_features = self.speech_featurizer.pad_signal(speech_features, max_input)
phones = tf.keras.preprocessing.sequence.pad_sequences(phones, maxlen=max([len(i) for i in phones]),
padding='post', value=self.phone_featurizer.pad)
txts = tf.keras.preprocessing.sequence.pad_sequences(txts, maxlen=max([len(i) for i in txts]), padding='post',
value=self.text_featurizer.pad)
x = np.array(speech_features, 'float32')
phones = np.array(phones, 'int32')
txts = np.array(txts, 'int32')
input_length = np.array(input_length, 'int32')
phones_length = np.array(phones_length, 'int32')
return x, input_length, phones, phones_length, txts
def check_valid(self, txt, vocab_list):
if len(txt) == 0:
return False
for n in txt:
if n in vocab_list:
pass
else:
return n
return True
def generate(self, train=True):
sample = []
speech_features = []
input_length = []
phones = []
phones_length = []
txts = []
max_input = 0
if train:
batch = self.batch * 3 // 4 if self.augment.available() else self.batch
else:
batch = self.batch
for i in range(batch * 10):
if train:
line = self.train_list[self.train_offset]
self.train_offset += 1
if self.train_offset > len(self.train_list) - 1:
self.train_offset = 0
np.random.shuffle(self.train_list)
self.epochs += 1
else:
line = self.test_list[self.test_offset]
self.test_offset += 1
if self.test_offset > len(self.test_list) - 1:
self.test_offset = 0
wp, txt = line.strip().split('\t')
try:
data = self.speech_featurizer.load_wav(wp)
except:
logging.info('{} load data failed,skip'.format(wp))
continue
if len(data) < 400:
continue
elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']:
logging.info(
'{} duration out of wav_max_duration({}),skip'.format(wp, self.speech_config['wav_max_duration']))
continue
if self.speech_config['only_chinese']:
txt = self.only_chinese(txt)
if not self.streaming:
speech_feature = data / np.abs(data).max()
speech_feature = np.expand_dims(speech_feature, -1)
in_len = len(speech_feature) // (
self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) *
self.speech_config['stride_ms'])
else:
speech_feature = data
speech_feature = np.expand_dims(speech_feature, -1)
reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \
self.speech_config['stride_ms']
in_len = len(speech_feature) // self.chunk
if len(speech_feature) % self.chunk != 0:
in_len += 1
chunk_times = self.chunk // reduce
if self.chunk % reduce != 0:
chunk_times += 1
in_len *= chunk_times
py = self.text_to_vocab(txt)
if self.check_valid(py, self.phone_featurizer.vocab_array) is not True:
logging.info(' {} txt phone {} not all in tokens,continue'.format(txt, self.check_valid(py,
self.phone_featurizer.vocab_array)))
continue
if self.check_valid(txt, self.text_featurizer.vocab_array) is not True:
logging.info(' {} txt {} not all in tokens,continue'.format(txt, self.check_valid(txt,
self.text_featurizer.vocab_array)))
continue
txt = list(txt)
phone_feature = self.phone_featurizer.extract(py)
text_feature = self.text_featurizer.extract(txt)+[self.text_featurizer.endid()]
if in_len < len(phone_feature):
logging.info('{} feature length < phone length,continue'.format(wp))
continue
max_input = max(max_input, len(speech_feature))
speech_features.append(speech_feature)
input_length.append(in_len)
phones.append(np.array(phone_feature))
txts.append(np.array(text_feature))
phones_length.append(len(phone_feature))
sample.append(line)
if len(sample) == batch:
break
if train and self.augment.available():
sample = random.sample(sample, self.batch // 4)
for i in sample:
wp, txt = i.strip().split('\t')
try:
data = self.speech_featurizer.load_wav(wp)
except:
continue
if len(data) < 400:
logging.info('{} wav too short < 25ms,skip'.format(wp))
continue
elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']:
continue
data = self.augment.process(data)
if self.speech_config['only_chinese']:
txt = self.only_chinese(txt)
if not self.streaming:
speech_feature = data / np.abs(data).max()
speech_feature = np.expand_dims(speech_feature, -1)
in_len = len(speech_feature) // (
self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) *
self.speech_config['stride_ms'])
else:
speech_feature = data
speech_feature = np.expand_dims(speech_feature, -1)
reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \
self.speech_config['stride_ms']
in_len = len(speech_feature) // self.chunk
if len(speech_feature) % self.chunk != 0:
in_len += 1
chunk_times = self.chunk // reduce
if self.chunk % reduce != 0:
chunk_times += 1
in_len *= chunk_times
py = self.text_to_vocab(txt)
if self.check_valid(py, self.phone_featurizer.vocab_array) is not True:
logging.info(' {} txt phone {} not all in tokens,continue'.format(txt, self.check_valid(py,
self.phone_featurizer.vocab_array)))
continue
if self.check_valid(txt, self.text_featurizer.vocab_array) is not True:
logging.info(' {} txt {} not all in tokens,continue'.format(txt, self.check_valid(txt,
self.text_featurizer.vocab_array)))
continue
txt = list(txt)
phone_feature = self.phone_featurizer.extract(py)
text_feature = self.text_featurizer.extract(txt)+[self.text_featurizer.endid()]
if in_len < len(phone_feature):
logging.info('{} feature length < phone length,continue'.format(wp))
continue
max_input = max(max_input, len(speech_feature))
speech_features.append(speech_feature)
input_length.append(in_len)
phones.append(np.array(phone_feature))
txts.append(np.array(text_feature))
phones_length.append(len(phone_feature))
if self.streaming:
reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \
self.speech_config['stride_ms']
max_input = max_input // self.chunk * self.chunk + self.chunk
max_in_len = max_input // self.chunk
chunk_times = self.chunk // reduce
if self.chunk % reduce != 0:
chunk_times += 1
max_in_len *= chunk_times
input_length = np.clip(input_length, 0, max_in_len)
speech_features = self.speech_featurizer.pad_signal(speech_features, max_input)
phones=tf.keras.preprocessing.sequence.pad_sequences(phones,maxlen=max([len(i) for i in phones]),padding='post',value=self.phone_featurizer.pad)
txts=tf.keras.preprocessing.sequence.pad_sequences(txts,maxlen=max([len(i) for i in txts]),padding='post',value=self.text_featurizer.pad)
x = np.array(speech_features, 'float32')
phones = np.array(phones, 'int32')
txts = np.array(txts, 'int32')
input_length = np.array(input_length, 'int32')
phones_length = np.array(phones_length, 'int32')
return x, input_length, phones, phones_length,txts
def generator(self, train=True):
while 1:
s=time.time()
x, input_length, phones, phones_length,txts = self.generate(train)
e=time.time()
logging.info('load data cost time: {}'.format(e-s))
if x.shape[0] == 0:
logging.info('load data length zero,continue')
continue
yield x, input_length, phones, phones_length,txts
| 2.1875 | 2 |
migrations/versions/2018_04_20_data_src_refactor.py | AlexKouzy/ethnicity-facts-and-figures-publisher | 0 | 4168 | """empty message
Revision ID: 2018_04_20_data_src_refactor
Revises: 2018_04_11_add_sandbox_topic
Create Date: 2018-04-20 13:03:32.478880
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
from sqlalchemy.dialects.postgresql import ARRAY
revision = '2018_04_20_data_src_refactor'
down_revision = '2018_04_11_add_sandbox_topic'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
type_of_data_types = sa.Enum('ADMINISTRATIVE', 'SURVEY', name='type_of_data_types')
op.add_column('page', sa.Column('secondary_source_1_type_of_data', ARRAY(type_of_data_types), nullable=True))
op.add_column('page', sa.Column('suppression_and_disclosure', sa.TEXT(), nullable=True))
op.add_column('page', sa.Column('note_on_corrections_or_updates', sa.TEXT(), nullable=True))
op.add_column('page', sa.Column('secondary_source_1_note_on_corrections_or_updates', sa.TEXT(), nullable=True))
op.add_column('page', sa.Column('secondary_source_1_data_source_purpose', sa.TEXT(), nullable=True))
op.get_bind()
op.execute('''
UPDATE page SET suppression_and_disclosure = suppression_rules
WHERE disclosure_control is null;
''')
op.execute('''
UPDATE page SET suppression_and_disclosure = disclosure_control
WHERE suppression_rules is null;
''')
op.execute('''
UPDATE page SET suppression_and_disclosure = trim(suppression_rules || ' ' || disclosure_control)
WHERE suppression_rules is not null
AND disclosure_control is not null;
''')
op.drop_constraint('organisation_secondary_source_2_fkey', 'page', type_='foreignkey')
op.drop_constraint('frequency_secondary_source_2_fkey', 'page', type_='foreignkey')
op.drop_constraint('secondary_source_2_type_of_statistic_fkey', 'page', type_='foreignkey')
op.drop_column('page', 'secondary_source_1_date_next_update')
op.drop_column('page', 'secondary_source_1_date_updated')
op.drop_column('page', 'secondary_source_1_suppression_rules')
op.drop_column('page', 'secondary_source_1_disclosure_control')
op.drop_column('page', 'secondary_source_2_frequency')
op.drop_column('page', 'secondary_source_2_contact_2_name')
op.drop_column('page', 'secondary_source_2_contact_2_phone')
op.drop_column('page', 'secondary_source_2_url')
op.drop_column('page', 'secondary_source_2_date_next_update')
op.drop_column('page', 'secondary_source_2_contact_1_name')
op.drop_column('page', 'last_update_date')
op.drop_column('page', 'secondary_source_2_contact_1_phone')
op.drop_column('page', 'secondary_source_2_publisher_text')
op.drop_column('page', 'secondary_source_2_disclosure_control')
op.drop_column('page', 'secondary_source_2_type_of_statistic_id')
op.drop_column('page', 'secondary_source_2_suppression_rules')
op.drop_column('page', 'secondary_source_2_frequency_other')
op.drop_column('page', 'secondary_source_2_publisher_id')
op.drop_column('page', 'secondary_source_2_title')
op.drop_column('page', 'secondary_source_2_date')
op.drop_column('page', 'next_update_date')
op.drop_column('page', 'secondary_source_2_date_updated')
op.drop_column('page', 'secondary_source_2_statistic_type')
op.drop_column('page', 'secondary_source_2_frequency_id')
op.drop_column('page', 'secondary_source_2_contact_2_email')
op.drop_column('page', 'secondary_source_2_contact_1_email')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('page', sa.Column('secondary_source_2_contact_1_email', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_contact_2_email', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_frequency_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_statistic_type', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_date_updated', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_date', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_title', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_publisher_id', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_frequency_other', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_suppression_rules', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_type_of_statistic_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_disclosure_control', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_publisher_text', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_contact_1_phone', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_contact_1_name', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_date_next_update', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_url', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_contact_2_phone', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_contact_2_name', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_frequency', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('last_update_date', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('next_update_date', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_1_date_next_update', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_1_date_updated', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_1_disclosure_control', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_1_suppression_rules', sa.TEXT(), autoincrement=False, nullable=True))
op.create_foreign_key('secondary_source_2_type_of_statistic_fkey', 'page', 'type_of_statistic', ['secondary_source_2_type_of_statistic_id'], ['id'])
op.create_foreign_key('frequency_secondary_source_2_fkey', 'page', 'frequency_of_release', ['secondary_source_2_frequency_id'], ['id'])
op.create_foreign_key('organisation_secondary_source_2_fkey', 'page', 'organisation', ['secondary_source_2_publisher_id'], ['id'])
op.drop_column('page', 'secondary_source_1_type_of_data')
op.drop_column('page', 'suppression_and_disclosure')
op.drop_column('page', 'note_on_corrections_or_updates')
op.drop_column('page', 'secondary_source_1_note_on_corrections_or_updates')
op.drop_column('page', 'secondary_source_1_data_source_purpose')
# ### end Alembic commands ###
| 1.601563 | 2 |
lib/core/parse/cmdline.py | vikas-kundu/phonedict | 0 | 4169 | <reponame>vikas-kundu/phonedict<filename>lib/core/parse/cmdline.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# coded by <NAME> https://github.com/vikas-kundu
# -------------------------------------------
import sys
import getopt
import time
import config
from lib.core.parse import banner
from lib.core import util
from lib.core import installer
def options():
argv = sys.argv[1:]
try:
        opts, args = getopt.getopt(argv, 'm:t:c:o:n:whi', ['mode=','task=','country=','output=','number=','wizard','help','install'])
if((len(sys.argv)==9) or (len(sys.argv)==2)):
pass
else:
print("Error! Some parameter is missing please check!")
time.sleep(2)
banner.usage()
sys.exit()
except getopt.GetoptError as err:
print(err)
banner.usage()
sys.exit(2)
for (o, a) in opts:
if(o in('-i','--install')):
if(util.packages_check()==False):
installer.start_install()
else:
print("Packages already installed!")
sys.exit()
elif (o in ('-w', '--wizard')):
config.wizard=True
elif o in ('-h','--help'):
banner.usage()
sys.exit()
elif o in ('-m','--mode'):
config.str_mode=str(a)
elif o in ('-t','--task'):
config.str_task=str(a)
elif o in ('-c','--country'):
config.str_country=str(a.lower().strip('"\''))
elif o in ('-o','--output'):
config.str_output=str(a.strip('"\''))
elif o in ('-n','--number'):
config.str_number=str(a.strip('"\''))
else:
print("Something went wrong with argument parsing!")
time.sleep(2)
banner.usage()
sys.exit()
| 2.15625 | 2 |
mistral/tests/unit/utils/test_utils.py | shubhamdang/mistral | 205 | 4170 | # Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2015 - Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mistral import exceptions as exc
from mistral.tests.unit import base
from mistral.utils import ssh_utils
from mistral_lib import utils
class UtilsTest(base.BaseTest):
def test_itersubclasses(self):
class A(object):
pass
class B(A):
pass
class C(A):
pass
class D(C):
pass
self.assertEqual([B, C, D], list(utils.iter_subclasses(A)))
def test_paramiko_to_private_key(self):
self.assertRaises(
exc.DataAccessException,
ssh_utils._to_paramiko_private_key,
"../dir"
)
self.assertRaises(
exc.DataAccessException,
ssh_utils._to_paramiko_private_key,
"..\\dir"
)
self.assertIsNone(
ssh_utils._to_paramiko_private_key(private_key_filename=None,
password='<PASSWORD>')
)
| 1.9375 | 2 |
shoutcast_api/shoutcast_request.py | scls19fr/shoutcast_api | 6 | 4171 | import xmltodict
import json
from .models import Tunein
from .utils import _init_session
from .Exceptions import APIException
base_url = 'http://api.shoutcast.com'
tunein_url = 'http://yp.shoutcast.com/{base}?id={id}'
tuneins = [Tunein('/sbin/tunein-station.pls'), Tunein('/sbin/tunein-station.m3u'), Tunein('/sbin/tunein-station.xspf')]
def call_api_xml(endpoint, params=None, session=None):
session = _init_session(session)
request_url = "{}{}".format(base_url, endpoint)
response = session.get(request_url, params=params)
if response.status_code == 200:
response_as_dict = xmltodict.parse(response.content)
api_response = response_as_dict.get('response')
if api_response:
api_status_code = int(api_response.get('statusCode'))
message = "statusText:{}, statusDetailText:{}".format(
api_response.get('statusText'), api_response.get('statusDetailText')
)
raise APIException(message, code=api_status_code)
return response_as_dict
raise APIException(response.content, code=response.status_code)
def call_api_json(endpoint, params=None, session=None):
session = _init_session(session)
request_url = "{}{}".format(base_url, endpoint)
response = session.get(request_url, params=params)
if response.status_code == 200:
json_response = json.loads(response.content.decode('utf-8'))
api_response = json_response.get('response')
api_status_code = int(api_response.get('statusCode'))
if api_status_code != 200:
message = "statusText:{}, statusDetailText:{}".format(
api_response.get('statusText'), api_response.get('statusDetailText', '')
)
raise APIException(message, code=api_status_code)
return json_response.get('response')['data']
raise APIException(response.reason, code=response.status_code)
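# Illustrative call (a sketch -- the endpoint and parameters here are hypothetical,
# not taken from this module or the SHOUTcast API docs):
#   data = call_api_json('/some/endpoint', params={'k': 'DEV_KEY', 'limit': 10})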
def call_api_tunein(station_id: int, session=None):
session = _init_session(session)
url = tunein_url.format(base=tuneins[2], id=station_id)
response = session.get(url)
if response.status_code == 200:
api_response = xmltodict.parse(response.content.decode('utf-8'))
return api_response
raise APIException(response.reason, code=response.status_code)
def call_api_tunein_any(base: Tunein, station_id: int, session=None):
session = _init_session(session)
url = tunein_url.format(base=base, id=station_id)
response = session.get(url)
if response.status_code == 200:
return response.content.decode('utf-8')
raise APIException(response.reason, code=response.status_code)
| 2.21875 | 2 |
django_app_permissions/management/commands/resolve_app_groups.py | amp89/django-app-permissions | 2 | 4172 | from django.core.management.base import BaseCommand, no_translations
from django.contrib.auth.models import Group
from django.conf import settings
import sys
class Command(BaseCommand):
def handle(self, *args, **options):
sys.stdout.write("\nResolving app groups")
app_list = [app_name.lower() for app_name in settings.ACCESS_CONTROLLED_INSTALLED_APPS]
for app_name in app_list:
            group, created = Group.objects.get_or_create(name=app_name)
sys.stdout.write(f"\n{app_name}, new={created}")
sys.stdout.write("\n") | 2 | 2 |
swift/common/db.py | sunzz679/swift-2.4.0--source-read | 0 | 4173 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Database code for Swift """
from contextlib import contextmanager, closing
import hashlib
import logging
import os
from uuid import uuid4
import sys
import time
import errno
import six.moves.cPickle as pickle
from swift import gettext_ as _
from tempfile import mkstemp
from eventlet import sleep, Timeout
import sqlite3
from swift.common.constraints import MAX_META_COUNT, MAX_META_OVERALL_SIZE
from swift.common.utils import json, Timestamp, renamer, \
mkdirs, lock_parent_directory, fallocate
from swift.common.exceptions import LockTimeout
from swift.common.swob import HTTPBadRequest
#: Whether calls will be made to preallocate disk space for database files.
DB_PREALLOCATION = False
#: Timeout for trying to connect to a DB
BROKER_TIMEOUT = 25
#: Pickle protocol to use
PICKLE_PROTOCOL = 2
#: Max number of pending entries
PENDING_CAP = 131072
def utf8encode(*args):
return [(s.encode('utf8') if isinstance(s, unicode) else s) for s in args]
def utf8encodekeys(metadata):
uni_keys = [k for k in metadata if isinstance(k, unicode)]
for k in uni_keys:
sv = metadata[k]
del metadata[k]
metadata[k.encode('utf-8')] = sv
def _db_timeout(timeout, db_file, call):
with LockTimeout(timeout, db_file):
retry_wait = 0.001
while True:
try:
return call()
except sqlite3.OperationalError as e:
if 'locked' not in str(e):
raise
sleep(retry_wait)
retry_wait = min(retry_wait * 2, 0.05)
class DatabaseConnectionError(sqlite3.DatabaseError):
"""More friendly error messages for DB Errors."""
def __init__(self, path, msg, timeout=0):
self.path = path
self.timeout = timeout
self.msg = msg
def __str__(self):
return 'DB connection error (%s, %s):\n%s' % (
self.path, self.timeout, self.msg)
class DatabaseAlreadyExists(sqlite3.DatabaseError):
"""More friendly error messages for DB Errors."""
def __init__(self, path):
self.path = path
def __str__(self):
return 'DB %s already exists' % self.path
class GreenDBConnection(sqlite3.Connection):
"""SQLite DB Connection handler that plays well with eventlet."""
def __init__(self, database, timeout=None, *args, **kwargs):
if timeout is None:
timeout = BROKER_TIMEOUT
self.timeout = timeout
self.db_file = database
super(GreenDBConnection, self).__init__(database, 0, *args, **kwargs)
def cursor(self, cls=None):
if cls is None:
cls = GreenDBCursor
return sqlite3.Connection.cursor(self, cls)
def commit(self):
return _db_timeout(
self.timeout, self.db_file,
lambda: sqlite3.Connection.commit(self))
class GreenDBCursor(sqlite3.Cursor):
"""SQLite Cursor handler that plays well with eventlet."""
def __init__(self, *args, **kwargs):
self.timeout = args[0].timeout
self.db_file = args[0].db_file
super(GreenDBCursor, self).__init__(*args, **kwargs)
def execute(self, *args, **kwargs):
return _db_timeout(
self.timeout, self.db_file, lambda: sqlite3.Cursor.execute(
self, *args, **kwargs))
def dict_factory(crs, row):
"""
This should only be used when you need a real dict,
i.e. when you're going to serialize the results.
"""
return dict(
((col[0], row[idx]) for idx, col in enumerate(crs.description)))
def chexor(old, name, timestamp):
"""
Each entry in the account and container databases is XORed by the 128-bit
hash on insert or delete. This serves as a rolling, order-independent hash
of the contents. (check + XOR)
:param old: hex representation of the current DB hash
:param name: name of the object or container being inserted
:param timestamp: internalized timestamp of the new record
:returns: a hex representation of the new hash value
"""
if name is None:
raise Exception('name is None!')
new = hashlib.md5(('%s-%s' % (name, timestamp)).encode('utf8')).hexdigest()
return '%032x' % (int(old, 16) ^ int(new, 16))
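# Property worth noting (illustrative addition, not in the original source): since
# each record hash is XORed in, the rolling hash is order-independent:
#   h1 = chexor(chexor('0' * 32, 'a', t1), 'b', t2)
#   h2 = chexor(chexor('0' * 32, 'b', t2), 'a', t1)
#   assert h1 == h2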
def get_db_connection(path, timeout=30, okay_to_create=False):
"""
Returns a properly configured SQLite database connection.
:param path: path to DB
:param timeout: timeout for connection
:param okay_to_create: if True, create the DB if it doesn't exist
:returns: DB connection object
"""
try:
connect_time = time.time()
conn = sqlite3.connect(path, check_same_thread=False,
factory=GreenDBConnection, timeout=timeout)
if path != ':memory:' and not okay_to_create:
# attempt to detect and fail when connect creates the db file
stat = os.stat(path)
if stat.st_size == 0 and stat.st_ctime >= connect_time:
os.unlink(path)
raise DatabaseConnectionError(path,
'DB file created by connect?')
conn.row_factory = sqlite3.Row
conn.text_factory = str
with closing(conn.cursor()) as cur:
cur.execute('PRAGMA synchronous = NORMAL')
cur.execute('PRAGMA count_changes = OFF')
cur.execute('PRAGMA temp_store = MEMORY')
cur.execute('PRAGMA journal_mode = DELETE')
conn.create_function('chexor', 3, chexor)
except sqlite3.DatabaseError:
import traceback
raise DatabaseConnectionError(path, traceback.format_exc(),
timeout=timeout)
return conn
class DatabaseBroker(object):
"""Encapsulates working with a database."""
def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None,
account=None, container=None, pending_timeout=None,
stale_reads_ok=False):
"""Encapsulates working with a database."""
self.conn = None
self.db_file = db_file
self.pending_file = self.db_file + '.pending'
self.pending_timeout = pending_timeout or 10
self.stale_reads_ok = stale_reads_ok
self.db_dir = os.path.dirname(db_file)
self.timeout = timeout
self.logger = logger or logging.getLogger()
self.account = account
self.container = container
self._db_version = -1
def __str__(self):
"""
Returns a string identifying the entity under broker to a human.
The baseline implementation returns a full pathname to a database.
This is vital for useful diagnostics.
"""
return self.db_file
def initialize(self, put_timestamp=None, storage_policy_index=None):
"""
Create the DB
The storage_policy_index is passed through to the subclass's
``_initialize`` method. It is ignored by ``AccountBroker``.
:param put_timestamp: internalized timestamp of initial PUT request
:param storage_policy_index: only required for containers
"""
if self.db_file == ':memory:':
tmp_db_file = None
conn = get_db_connection(self.db_file, self.timeout)
else:
mkdirs(self.db_dir)
fd, tmp_db_file = mkstemp(suffix='.tmp', dir=self.db_dir)
os.close(fd)
conn = sqlite3.connect(tmp_db_file, check_same_thread=False,
factory=GreenDBConnection, timeout=0)
# creating dbs implicitly does a lot of transactions, so we
# pick fast, unsafe options here and do a big fsync at the end.
with closing(conn.cursor()) as cur:
cur.execute('PRAGMA synchronous = OFF')
cur.execute('PRAGMA temp_store = MEMORY')
cur.execute('PRAGMA journal_mode = MEMORY')
conn.create_function('chexor', 3, chexor)
conn.row_factory = sqlite3.Row
conn.text_factory = str
conn.executescript("""
CREATE TABLE outgoing_sync (
remote_id TEXT UNIQUE,
sync_point INTEGER,
updated_at TEXT DEFAULT 0
);
CREATE TABLE incoming_sync (
remote_id TEXT UNIQUE,
sync_point INTEGER,
updated_at TEXT DEFAULT 0
);
CREATE TRIGGER outgoing_sync_insert AFTER INSERT ON outgoing_sync
BEGIN
UPDATE outgoing_sync
SET updated_at = STRFTIME('%s', 'NOW')
WHERE ROWID = new.ROWID;
END;
CREATE TRIGGER outgoing_sync_update AFTER UPDATE ON outgoing_sync
BEGIN
UPDATE outgoing_sync
SET updated_at = STRFTIME('%s', 'NOW')
WHERE ROWID = new.ROWID;
END;
CREATE TRIGGER incoming_sync_insert AFTER INSERT ON incoming_sync
BEGIN
UPDATE incoming_sync
SET updated_at = STRFTIME('%s', 'NOW')
WHERE ROWID = new.ROWID;
END;
CREATE TRIGGER incoming_sync_update AFTER UPDATE ON incoming_sync
BEGIN
UPDATE incoming_sync
SET updated_at = STRFTIME('%s', 'NOW')
WHERE ROWID = new.ROWID;
END;
""")
if not put_timestamp:
put_timestamp = Timestamp(0).internal
self._initialize(conn, put_timestamp,
storage_policy_index=storage_policy_index)
conn.commit()
if tmp_db_file:
conn.close()
with open(tmp_db_file, 'r+b') as fp:
os.fsync(fp.fileno())
with lock_parent_directory(self.db_file, self.pending_timeout):
if os.path.exists(self.db_file):
# It's as if there was a "condition" where different parts
# of the system were "racing" each other.
raise DatabaseAlreadyExists(self.db_file)
renamer(tmp_db_file, self.db_file)
self.conn = get_db_connection(self.db_file, self.timeout)
else:
self.conn = conn
def delete_db(self, timestamp):
"""
Mark the DB as deleted
:param timestamp: internalized delete timestamp
"""
# first, clear the metadata
cleared_meta = {}
for k in self.metadata:
cleared_meta[k] = ('', timestamp)
self.update_metadata(cleared_meta)
# then mark the db as deleted
with self.get() as conn:
self._delete_db(conn, timestamp)
conn.commit()
def possibly_quarantine(self, exc_type, exc_value, exc_traceback):
"""
Checks the exception info to see if it indicates a quarantine situation
(malformed or corrupted database). If not, the original exception will
be reraised. If so, the database will be quarantined and a new
sqlite3.DatabaseError will be raised indicating the action taken.
"""
if 'database disk image is malformed' in str(exc_value):
exc_hint = 'malformed'
elif 'file is encrypted or is not a database' in str(exc_value):
exc_hint = 'corrupted'
elif 'disk I/O error' in str(exc_value):
exc_hint = 'disk error while accessing'
else:
raise exc_type, exc_value, exc_traceback
prefix_path = os.path.dirname(self.db_dir)
partition_path = os.path.dirname(prefix_path)
dbs_path = os.path.dirname(partition_path)
device_path = os.path.dirname(dbs_path)
quar_path = os.path.join(device_path, 'quarantined',
self.db_type + 's',
os.path.basename(self.db_dir))
try:
renamer(self.db_dir, quar_path, fsync=False)
except OSError as e:
if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
raise
quar_path = "%s-%s" % (quar_path, uuid4().hex)
renamer(self.db_dir, quar_path, fsync=False)
detail = _('Quarantined %s to %s due to %s database') % \
(self.db_dir, quar_path, exc_hint)
self.logger.error(detail)
raise sqlite3.DatabaseError(detail)
@contextmanager
def get(self):
"""Use with the "with" statement; returns a database connection."""
if not self.conn:
if self.db_file != ':memory:' and os.path.exists(self.db_file):
try:
self.conn = get_db_connection(self.db_file, self.timeout)
except (sqlite3.DatabaseError, DatabaseConnectionError):
self.possibly_quarantine(*sys.exc_info())
else:
raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
conn = self.conn
self.conn = None
try:
yield conn
conn.rollback()
self.conn = conn
except sqlite3.DatabaseError:
try:
conn.close()
except Exception:
pass
self.possibly_quarantine(*sys.exc_info())
except (Exception, Timeout):
conn.close()
raise
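    # Typical call pattern for the context manager above (illustrative sketch):
    #   with broker.get() as conn:
    #       conn.execute('SELECT ...')
    #   # on a clean exit the transaction is rolled back and the connection is
    #   # handed back to the broker for reuse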
@contextmanager
def lock(self):
"""Use with the "with" statement; locks a database."""
if not self.conn:
if self.db_file != ':memory:' and os.path.exists(self.db_file):
self.conn = get_db_connection(self.db_file, self.timeout)
else:
raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
conn = self.conn
self.conn = None
orig_isolation_level = conn.isolation_level
conn.isolation_level = None
conn.execute('BEGIN IMMEDIATE')
try:
yield True
except (Exception, Timeout):
pass
try:
conn.execute('ROLLBACK')
conn.isolation_level = orig_isolation_level
self.conn = conn
except (Exception, Timeout):
logging.exception(
_('Broker error trying to rollback locked connection'))
conn.close()
def newid(self, remote_id):
"""
Re-id the database. This should be called after an rsync.
:param remote_id: the ID of the remote database being rsynced in
"""
with self.get() as conn:
row = conn.execute('''
UPDATE %s_stat SET id=?
''' % self.db_type, (str(uuid4()),))
row = conn.execute('''
SELECT ROWID FROM %s ORDER BY ROWID DESC LIMIT 1
''' % self.db_contains_type).fetchone()
sync_point = row['ROWID'] if row else -1
conn.execute('''
INSERT OR REPLACE INTO incoming_sync (sync_point, remote_id)
VALUES (?, ?)
''', (sync_point, remote_id))
self._newid(conn)
conn.commit()
def _newid(self, conn):
# Override for additional work when receiving an rsynced db.
pass
def _is_deleted(self, conn):
"""
Check if the database is considered deleted
:param conn: database conn
:returns: True if the DB is considered to be deleted, False otherwise
"""
raise NotImplementedError()
def is_deleted(self):
"""
Check if the DB is considered to be deleted.
:returns: True if the DB is considered to be deleted, False otherwise
"""
if self.db_file != ':memory:' and not os.path.exists(self.db_file):
return True
self._commit_puts_stale_ok()
with self.get() as conn:
return self._is_deleted(conn)
def merge_timestamps(self, created_at, put_timestamp, delete_timestamp):
"""
Used in replication to handle updating timestamps.
:param created_at: create timestamp
:param put_timestamp: put timestamp
:param delete_timestamp: delete timestamp
"""
with self.get() as conn:
old_status = self._is_deleted(conn)
conn.execute('''
UPDATE %s_stat SET created_at=MIN(?, created_at),
put_timestamp=MAX(?, put_timestamp),
delete_timestamp=MAX(?, delete_timestamp)
''' % self.db_type, (created_at, put_timestamp, delete_timestamp))
if old_status != self._is_deleted(conn):
timestamp = Timestamp(time.time())
self._update_status_changed_at(conn, timestamp.internal)
conn.commit()
def get_items_since(self, start, count):
"""
Get a list of up to `count` objects from the database whose ROWID is
greater than `start`.
:param start: start ROWID
:param count: maximum number of objects to return
:returns: list of objects
"""
self._commit_puts_stale_ok()
with self.get() as conn:
curs = conn.execute('''
SELECT * FROM %s WHERE ROWID > ? ORDER BY ROWID ASC LIMIT ?
''' % self.db_contains_type, (start, count))
curs.row_factory = dict_factory
return [r for r in curs]
def get_sync(self, id, incoming=True):
"""
Gets the most recent sync point for a server from the sync table.
:param id: remote ID to get the sync_point for
:param incoming: if True, get the last incoming sync, otherwise get
the last outgoing sync
:returns: the sync point, or -1 if the id doesn't exist.
"""
with self.get() as conn:
row = conn.execute(
"SELECT sync_point FROM %s_sync WHERE remote_id=?"
% ('incoming' if incoming else 'outgoing'), (id,)).fetchone()
if not row:
return -1
return row['sync_point']
def get_syncs(self, incoming=True):
"""
Get a serialized copy of the sync table.
:param incoming: if True, get the last incoming sync, otherwise get
the last outgoing sync
:returns: list of {'remote_id', 'sync_point'}
"""
with self.get() as conn:
curs = conn.execute('''
SELECT remote_id, sync_point FROM %s_sync
''' % ('incoming' if incoming else 'outgoing'))
result = []
for row in curs:
result.append({'remote_id': row[0], 'sync_point': row[1]})
return result
def get_max_row(self):
query = '''
SELECT SQLITE_SEQUENCE.seq
FROM SQLITE_SEQUENCE
WHERE SQLITE_SEQUENCE.name == '%s'
LIMIT 1
''' % (self.db_contains_type)
with self.get() as conn:
row = conn.execute(query).fetchone()
return row[0] if row else -1
def get_replication_info(self):
"""
Get information about the DB required for replication.
:returns: dict containing keys from get_info plus max_row and metadata
Note:: get_info's <db_contains_type>_count is translated to just
"count" and metadata is the raw string.
"""
info = self.get_info()
info['count'] = info.pop('%s_count' % self.db_contains_type)
info['metadata'] = self.get_raw_metadata()
info['max_row'] = self.get_max_row()
return info
def get_info(self):
self._commit_puts_stale_ok()
with self.get() as conn:
curs = conn.execute('SELECT * from %s_stat' % self.db_type)
curs.row_factory = dict_factory
return curs.fetchone()
# Add a single record to the database
def put_record(self, record):
if self.db_file == ':memory:':
self.merge_items([record])
return
if not os.path.exists(self.db_file):
raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
# Lock the parent directory of the database
with lock_parent_directory(self.pending_file, self.pending_timeout):
pending_size = 0
try:
pending_size = os.path.getsize(self.pending_file)
except OSError as err:
if err.errno != errno.ENOENT:
raise
if pending_size > PENDING_CAP:
self._commit_puts([record])
else:
# Append the record to the pending file
with open(self.pending_file, 'a+b') as fp:
# Colons aren't used in base64 encoding; so they are our
# delimiter
fp.write(':')
fp.write(pickle.dumps(
self.make_tuple_for_pickle(record),
protocol=PICKLE_PROTOCOL).encode('base64'))
fp.flush()
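# Pending-file layout sketch (illustrative): each queued record is appended
# as ':' + base64(pickle(make_tuple_for_pickle(record))), so the file reads
# ":gAJ9cQ...:gAJ9cQ..." and _commit_puts() below recovers the records by
# splitting on ':'.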
def _commit_puts(self, item_list=None):
"""
Scan for .pending files and commit the found records by feeding them
to merge_items(). Assume that lock_parent_directory has already been
called.
:param item_list: A list of items to commit in addition to .pending
"""
if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
return
if item_list is None:
item_list = []
self._preallocate()
if not os.path.getsize(self.pending_file):
if item_list:
self.merge_items(item_list)
return
with open(self.pending_file, 'r+b') as fp:
for entry in fp.read().split(':'):
if entry:
try:
self._commit_puts_load(item_list, entry)
except Exception:
self.logger.exception(
_('Invalid pending entry %(file)s: %(entry)s'),
{'file': self.pending_file, 'entry': entry})
if item_list:
self.merge_items(item_list)
try:
os.ftruncate(fp.fileno(), 0)
except OSError as err:
if err.errno != errno.ENOENT:
raise
def _commit_puts_stale_ok(self):
"""
Catch failures of _commit_puts() if broker is intended for
reading of stats, and thus does not care for pending updates.
"""
if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
return
try:
with lock_parent_directory(self.pending_file,
self.pending_timeout):
self._commit_puts()
except LockTimeout:
if not self.stale_reads_ok:
raise
def _commit_puts_load(self, item_list, entry):
"""
Unmarshall the :param:entry and append it to :param:item_list.
This is implemented by a particular broker to be compatible
with its :func:`merge_items`.
"""
raise NotImplementedError
def make_tuple_for_pickle(self, record):
"""
Turn this db record dict into the format this service uses for
pending pickles.
"""
raise NotImplementedError
def merge_syncs(self, sync_points, incoming=True):
"""
Merge a list of sync points with the incoming sync table.
:param sync_points: list of sync points where a sync point is a dict of
{'sync_point', 'remote_id'}
:param incoming: if True, get the last incoming sync, otherwise get
the last outgoing sync
"""
with self.get() as conn:
for rec in sync_points:
try:
conn.execute('''
INSERT INTO %s_sync (sync_point, remote_id)
VALUES (?, ?)
''' % ('incoming' if incoming else 'outgoing'),
(rec['sync_point'], rec['remote_id']))
except sqlite3.IntegrityError:
conn.execute('''
UPDATE %s_sync SET sync_point=max(?, sync_point)
WHERE remote_id=?
''' % ('incoming' if incoming else 'outgoing'),
(rec['sync_point'], rec['remote_id']))
conn.commit()
def _preallocate(self):
"""
The idea is to allocate space in front of an expanding db. If it gets
within 512k of a boundary, it allocates to the next boundary.
Boundaries are 2m, 5m, 10m, 25m, 50m, then every 50m after.
"""
if not DB_PREALLOCATION or self.db_file == ':memory:':
return
MB = (1024 * 1024)
def prealloc_points():
for pm in (1, 2, 5, 10, 25, 50):
yield pm * MB
while True:
pm += 50
yield pm * MB
stat = os.stat(self.db_file)
file_size = stat.st_size
allocated_size = stat.st_blocks * 512
for point in prealloc_points():
if file_size <= point - MB / 2:
prealloc_size = point
break
if allocated_size < prealloc_size:
with open(self.db_file, 'rb+') as fp:
fallocate(fp.fileno(), int(prealloc_size))
def get_raw_metadata(self):
with self.get() as conn:
try:
metadata = conn.execute('SELECT metadata FROM %s_stat' %
self.db_type).fetchone()[0]
except sqlite3.OperationalError as err:
if 'no such column: metadata' not in str(err):
raise
metadata = ''
return metadata
@property
def metadata(self):
"""
Returns the metadata dict for the database. The metadata dict values
are tuples of (value, timestamp) where the timestamp indicates when
that key was set to that value.
"""
metadata = self.get_raw_metadata()
if metadata:
metadata = json.loads(metadata)
utf8encodekeys(metadata)
else:
metadata = {}
return metadata
@staticmethod
def validate_metadata(metadata):
"""
Validates that metadata falls within acceptable limits.
:param metadata: to be validated
:raises: HTTPBadRequest if MAX_META_COUNT or MAX_META_OVERALL_SIZE
is exceeded
"""
meta_count = 0
meta_size = 0
for key, (value, timestamp) in metadata.items():
key = key.lower()
if value != '' and (key.startswith('x-account-meta') or
key.startswith('x-container-meta')):
prefix = 'x-account-meta-'
if key.startswith('x-container-meta-'):
prefix = 'x-container-meta-'
key = key[len(prefix):]
meta_count = meta_count + 1
meta_size = meta_size + len(key) + len(value)
if meta_count > MAX_META_COUNT:
raise HTTPBadRequest('Too many metadata items; max %d'
% MAX_META_COUNT)
if meta_size > MAX_META_OVERALL_SIZE:
raise HTTPBadRequest('Total metadata too large; max %d'
% MAX_META_OVERALL_SIZE)
def update_metadata(self, metadata_updates, validate_metadata=False):
"""
Updates the metadata dict for the database. The metadata dict values
are tuples of (value, timestamp) where the timestamp indicates when
that key was set to that value. Key/values will only be overwritten if
the timestamp is newer. To delete a key, set its value to ('',
timestamp). These empty keys will eventually be removed by
:func:`reclaim`
"""
# Read the existing metadata from the database as a dict and keep it in old_metadata
old_metadata = self.metadata
# If the keys being updated are all already present in the existing metadata
if set(metadata_updates).issubset(set(old_metadata)):
# Compare timestamps; updates can arrive out of order, so only apply values newer than what is already stored
for key, (value, timestamp) in metadata_updates.items():
if timestamp > old_metadata[key][1]:
break
else:
# Every update is stale, so there is nothing to do
return
# At this point at least one metadata item needs updating
with self.get() as conn:
try:
md = conn.execute('SELECT metadata FROM %s_stat' %
self.db_type).fetchone()[0]
md = json.loads(md) if md else {}
utf8encodekeys(md)
except sqlite3.OperationalError as err:
if 'no such column: metadata' not in str(err):
raise
conn.execute("""
ALTER TABLE %s_stat
ADD COLUMN metadata TEXT DEFAULT '' """ % self.db_type)
md = {}
# Walk through the pending updates, keeping only keys that are new or carry a newer timestamp
for key, value_timestamp in metadata_updates.items():
value, timestamp = value_timestamp
if key not in md or timestamp > md[key][1]:
md[key] = value_timestamp
if validate_metadata:
DatabaseBroker.validate_metadata(md)
conn.execute('UPDATE %s_stat SET metadata = ?' % self.db_type,
(json.dumps(md),))
conn.commit()
def reclaim(self, age_timestamp, sync_timestamp):
"""
Delete rows from the db_contains_type table that are marked deleted
and whose created_at timestamp is < age_timestamp. Also deletes rows
from incoming_sync and outgoing_sync where the updated_at timestamp is
< sync_timestamp.
In addition, this calls the DatabaseBroker's :func:`_reclaim` method.
:param age_timestamp: max created_at timestamp of object rows to delete
:param sync_timestamp: max updated_at timestamp of sync rows to delete
"""
if self.db_file != ':memory:' and os.path.exists(self.pending_file):
with lock_parent_directory(self.pending_file,
self.pending_timeout):
self._commit_puts()
with self.get() as conn:
conn.execute('''
DELETE FROM %s WHERE deleted = 1 AND %s < ?
''' % (self.db_contains_type, self.db_reclaim_timestamp),
(age_timestamp,))
try:
conn.execute('''
DELETE FROM outgoing_sync WHERE updated_at < ?
''', (sync_timestamp,))
conn.execute('''
DELETE FROM incoming_sync WHERE updated_at < ?
''', (sync_timestamp,))
except sqlite3.OperationalError as err:
# Old dbs didn't have updated_at in the _sync tables.
if 'no such column: updated_at' not in str(err):
raise
DatabaseBroker._reclaim(self, conn, age_timestamp)
conn.commit()
def _reclaim(self, conn, timestamp):
"""
Removes any empty metadata values older than the timestamp using the
given database connection. This function will not call commit on the
conn, but will instead return True if the database needs committing.
This function was created as a worker to limit transactions and commits
from other related functions.
:param conn: Database connection to reclaim metadata within.
:param timestamp: Empty metadata items last updated before this
timestamp will be removed.
:returns: True if conn.commit() should be called
"""
try:
md = conn.execute('SELECT metadata FROM %s_stat' %
self.db_type).fetchone()[0]
if md:
md = json.loads(md)
keys_to_delete = []
for key, (value, value_timestamp) in md.items():
if value == '' and value_timestamp < timestamp:
keys_to_delete.append(key)
if keys_to_delete:
for key in keys_to_delete:
del md[key]
conn.execute('UPDATE %s_stat SET metadata = ?' %
self.db_type, (json.dumps(md),))
return True
except sqlite3.OperationalError as err:
if 'no such column: metadata' not in str(err):
raise
return False
def update_put_timestamp(self, timestamp):
"""
Update the put_timestamp. Only modifies it if it is greater than
the current timestamp.
:param timestamp: internalized put timestamp
"""
with self.get() as conn:
conn.execute(
'UPDATE %s_stat SET put_timestamp = ?'
' WHERE put_timestamp < ?' % self.db_type,
(timestamp, timestamp))
conn.commit()
def update_status_changed_at(self, timestamp):
"""
Update the status_changed_at field in the stat table. Only
modifies status_changed_at if the timestamp is greater than the
current status_changed_at timestamp.
:param timestamp: internalized timestamp
"""
with self.get() as conn:
self._update_status_changed_at(conn, timestamp)
conn.commit()
def _update_status_changed_at(self, conn, timestamp):
conn.execute(
'UPDATE %s_stat SET status_changed_at = ?'
' WHERE status_changed_at < ?' % self.db_type,
(timestamp, timestamp))
| 1.648438 | 2 |
xdl/utils/prop_limits.py | mcrav/xdl | 0 | 4174 | <reponame>mcrav/xdl<filename>xdl/utils/prop_limits.py
"""Prop limits are used to validate the input given to xdl elements. For
example, a volume property should be a positive number, optionally followed by
volume units. The prop limit is used to check that input supplied is valid for
that property.
"""
import re
from typing import List, Optional
class PropLimit(object):
"""Convenience class for storing prop limit. A prop limit is essentially a
regex for validating the input to a given prop. For example, checking
appropriate units are used or a value is within a certain range.
Either ``regex`` or ``enum`` must be given when instantiating. If ``enum``
is given it will override whatever is given for ``regex`` and ``hint``.
``hint`` and ``default`` are both optional, but recommended, at least when
using ``regex`` not ``enum``.
Arguments:
regex (str): Regex pattern that should match with valid values and not
match with invalid values.
hint (str): Useful hint for what valid value should look like, e.g.
"Volume should be a number followed by volume units, e.g. '5 mL'."
default (str): Default valid value. Should use standard units of the
quantity involved, e.g. for volume, '0 mL'.
enum (List[str]): List of values that the prop can take. This is used
to automatically generate a regex from the list of allowed values.
"""
def __init__(
self,
regex: Optional[str] = None,
hint: Optional[str] = '',
default: Optional[str] = '',
enum: Optional[List[str]] = [],
):
if not regex and not enum:
raise ValueError(
'Either `regex` or `enum` argument must be given.')
self.default = default
# If enum given generate regex from this
self.enum = enum
if enum:
if not regex:
self.regex = self.generate_enum_regex()
else:
self.regex = regex
if not hint:
self.hint = self.generate_enum_hint()
else:
self.hint = hint
# Otherwise just set regex as attribute
else:
self.regex = regex
self.hint = hint
def validate(self, value: str) -> bool:
"""Validate given value against prop limit regex.
Args:
value (str): Value to validate against prop limit.
Returns:
bool: True if the value matches the prop limit, otherwise False.
"""
return re.match(self.regex, value) is not None
def generate_enum_regex(self) -> str:
"""Generate regex from :py:attr:`enum`. Regex will match any of the
items in :py:attr:`enum`.
Returns:
str: Regex that will match any of the strings in the :py:attr:`enum`
list.
"""
regex = r'('
for item in self.enum:
regex += item + r'|'
regex = regex[:-1] + r')'
return regex
def generate_enum_hint(self) -> str:
"""Generate hint from :py:attr:`enum`. Hint will list all items in
:py:attr:`enum`.
Returns:
str: Hint listing all items in :py:attr:`enum`.
"""
s = 'Expecting one of '
for item in self.enum[:-1]:
s += f'"{item}", '
s = s[:-2] + f' or "{self.enum[-1]}".'
return s
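# Minimal usage sketch (illustrative only, not part of the library API):
#
#     >>> limit = PropLimit(enum=['top', 'bottom'])
#     >>> limit.validate('top')
#     True
#     >>> limit.validate('middle')
#     False
#     >>> limit.hint
#     'Expecting one of "top" or "bottom".'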
##################
# Regex patterns #
##################
#: Pattern to match a positive or negative float,
#: e.g. '0', '-1', '1', '-10.3', '10.3', '0.0' would all be matched by this
#: pattern.
FLOAT_PATTERN: str = r'([-]?[0-9]+(?:[.][0-9]+)?)'
#: Pattern to match a positive float,
#: e.g. '0', '1', '10.3', '0.0' would all be matched by this pattern, but not
#: '-10.3' or '-1'.
POSITIVE_FLOAT_PATTERN: str = r'([0-9]+(?:[.][0-9]+)?)'
#: Pattern to match boolean strings, specifically matching 'true' and 'false'
#: case insensitvely.
BOOL_PATTERN: str = r'(false|False|true|True)'
#: Pattern to match all accepted volumes units case insensitvely, or empty string.
VOLUME_UNITS_PATTERN: str = r'(l|L|litre|litres|liter|liters|ml|mL|cm3|cc|milliltre|millilitres|milliliter|milliliters|cl|cL|centiltre|centilitres|centiliter|centiliters|dl|dL|deciltre|decilitres|deciliter|deciliters|ul|uL|μl|μL|microlitre|microlitres|microliter|microliters)?'
#: Pattern to match all accepted mass units, or empty string.
MASS_UNITS_PATTERN: str = r'(g|gram|grams|kg|kilogram|kilograms|mg|milligram|milligrams|ug|μg|microgram|micrograms)?'
#: Pattern to match all accepted temperature units, or empty string.
TEMP_UNITS_PATTERN: str = r'(°C|K|F)?'
#: Pattern to match all accepted time units, or empty string.
TIME_UNITS_PATTERN = r'(days|day|h|hr|hrs|hour|hours|m|min|mins|minute|minutes|s|sec|secs|second|seconds)?'
#: Pattern to match all accepted pressure units, or empty string.
PRESSURE_UNITS_PATTERN = r'(mbar|bar|torr|Torr|mmhg|mmHg|atm|Pa|pa)?'
#: Pattern to match all accepted rotation speed units, or empty string.
ROTATION_SPEED_UNITS_PATTERN = r'(rpm|RPM)?'
#: Pattern to match all accepted length units, or empty string.
DISTANCE_UNITS_PATTERN = r'(nm|µm|mm|cm|m|km)?'
#: Pattern to match all accepted mol units, or empty string.
MOL_UNITS_PATTERN = r'(mmol|mol)?'
###############
# Prop limits #
###############
def generate_quantity_units_pattern(
quantity_pattern: str,
units_pattern: str,
hint: Optional[str] = '',
default: Optional[str] = ''
) -> PropLimit:
"""
Convenience function to generate PropLimit object for different quantity
types, i.e. for variations on the number followed by unit pattern.
Args:
quantity_pattern (str): Pattern to match the number expected. This will
typically be ``POSITIVE_FLOAT_PATTERN`` or ``FLOAT_PATTERN``.
units_pattern (str): Pattern to match the units expected or empty
string. Empty string is matched as not including units is allowed
as in this case standard units are used.
hint (str): Hint for the prop limit to tell the user what correct input
should look like in the case of an error.
default (str): Default value for the prop limit, should use standard
units for the prop involved.
"""
return PropLimit(
regex=r'^((' + quantity_pattern + r'[ ]?'\
+ units_pattern + r'$)|(^' + quantity_pattern + r'))$',
hint=hint,
default=default
)
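# For example (illustrative), generate_quantity_units_pattern(
# POSITIVE_FLOAT_PATTERN, MASS_UNITS_PATTERN) yields a prop limit that
# accepts "500 mg", "0.25 kg" or a bare number such as "3.2", but rejects
# "heavy".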
# NOTE: It is important here that defaults use the standard unit for that
# quantity type as XDL app uses this to add in default units.
#: Prop limit for volume props.
VOLUME_PROP_LIMIT: PropLimit = PropLimit(
regex=r'^(all|(' + POSITIVE_FLOAT_PATTERN + r'[ ]?'\
+ VOLUME_UNITS_PATTERN + r')|(' + POSITIVE_FLOAT_PATTERN + r'))$',
hint='Expecting number followed by standard volume units, e.g. "5.5 mL"',
default='0 mL',
)
#: Prop limit for mass props.
MASS_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(
POSITIVE_FLOAT_PATTERN,
MASS_UNITS_PATTERN,
hint='Expecting number followed by standard mass units, e.g. "2.3 g"',
default='0 g'
)
#: Prop limit for mol props.
MOL_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(
POSITIVE_FLOAT_PATTERN,
MOL_UNITS_PATTERN,
hint='Expecting number followed by mol or mmol, e.g. "2.3 mol".',
default='0 mol',
)
#: Prop limit for temp props.
TEMP_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(
FLOAT_PATTERN,
TEMP_UNITS_PATTERN,
hint='Expecting number in degrees celsius or number followed by standard temperature units, e.g. "25", "25°C", "298 K".',
default='25°C',
)
#: Prop limit for time props.
TIME_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(
POSITIVE_FLOAT_PATTERN,
TIME_UNITS_PATTERN,
hint='Expecting number followed by standard time units, e.g. "15 mins", "3 hrs".',
default='0 secs'
)
#: Prop limit for pressure props.
PRESSURE_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(
POSITIVE_FLOAT_PATTERN,
PRESSURE_UNITS_PATTERN,
hint='Expecting number followed by standard pressure units, e.g. "50 mbar", "1 atm".',
default='1013.25 mbar'
)
#: Prop limit for rotation speed props.
ROTATION_SPEED_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(
POSITIVE_FLOAT_PATTERN,
ROTATION_SPEED_UNITS_PATTERN,
hint='Expecting RPM value, e.g. "400 RPM".',
default='400 RPM',
)
#: Prop limit for wavelength props.
WAVELENGTH_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(
POSITIVE_FLOAT_PATTERN,
DISTANCE_UNITS_PATTERN,
hint='Expecting wavelength, e.g. "400 nm".',
default='400 nm'
)
#: Prop limit for any props requiring a positive integer such as ``repeats``.
#: Used if no explicit property is given and prop type is ``int``.
POSITIVE_INT_PROP_LIMIT: PropLimit = PropLimit(
r'[0-9]+',
hint='Expecting positive integer value, e.g. "3"',
default='1',
)
#: Prop limit for any props requiring a positive float. Used if no explicit
#: prop type is given and prop type is ``float``.
POSITIVE_FLOAT_PROP_LIMIT: PropLimit = PropLimit(
regex=POSITIVE_FLOAT_PATTERN,
hint='Expecting positive float value, e.g. "3", "3.5"',
default='0',
)
#: Prop limit for any props requiring a boolean value. Used if no explicit prop
#: type is given and prop type is ``bool``.
BOOL_PROP_LIMIT: PropLimit = PropLimit(
BOOL_PATTERN,
hint='Expecting one of "false" or "true".',
default='false',
)
#: Prop limit for ``WashSolid`` ``stir`` prop. This is a special case as the
#: value can be ``True``, ``False`` or ``'solvent'``.
WASH_SOLID_STIR_PROP_LIMIT: PropLimit = PropLimit(
r'(' + BOOL_PATTERN + r'|solvent)',
enum=['true', 'solvent', 'false'],
hint='Expecting one of "true", "false" or "solvent".',
default='True'
)
#: Prop limit for ``Separate`` ``purpose`` prop. One of 'extract' or 'wash'.
SEPARATION_PURPOSE_PROP_LIMIT: PropLimit = PropLimit(enum=['extract', 'wash'])
#: Prop limit for ``Separate`` ``product_phase`` prop. One of 'top' or 'bottom'.
SEPARATION_PRODUCT_PHASE_PROP_LIMIT: PropLimit = PropLimit(enum=['top', 'bottom'])
#: Prop limit for ``Add`` ``purpose`` prop. One of 'neutralize', 'precipitate',
#: 'dissolve', 'basify', 'acidify' or 'dilute'.
ADD_PURPOSE_PROP_LIMIT = PropLimit(
enum=[
'neutralize',
'precipitate',
'dissolve',
'basify',
'acidify',
'dilute',
]
)
#: Prop limit for ``HeatChill`` ``purpose`` prop. One of 'control-exotherm',
#: 'reaction' or 'unstable-reagent'.
HEATCHILL_PURPOSE_PROP_LIMIT = PropLimit(
enum=['control-exotherm', 'reaction', 'unstable-reagent']
)
#: Prop limit for ``Stir`` ``purpose`` prop. 'dissolve' is only option.
STIR_PURPOSE_PROP_LIMIT = PropLimit(
enum=['dissolve']
)
#: Prop limit for ``Reagent`` ``role`` prop. One of 'solvent', 'reagent',
#: 'catalyst', 'substrate', 'acid', 'base' or 'activating-agent'.
REAGENT_ROLE_PROP_LIMIT = PropLimit(
enum=[
'solvent',
'reagent',
'catalyst',
'substrate',
'acid',
'base',
'activating-agent'
]
)
#: Prop limit for ``Component`` ``component_type`` prop. One of 'reactor',
#: 'filter', 'separator', 'rotavap' or 'flask'.
COMPONENT_TYPE_PROP_LIMIT: PropLimit = PropLimit(
enum=['reactor', 'filter', 'separator', 'rotavap', 'flask']
)
#: Pattern matching a float of value 100, e.g. '100', '100.0', '100.000' would
#: all be matched.
_hundred_float: str = r'(100(?:[.][0]+)?)'
#: Pattern matching any float between 10.000 and 99.999.
_ten_to_ninety_nine_float: str = r'([0-9][0-9](?:[.][0-9]+)?)'
#: Pattern matching any float between 0 and 9.999.
_zero_to_ten_float: str = r'([0-9](?:[.][0-9]+)?)'
#: Pattern matching float between 0 and 100. Used for percentages.
PERCENT_RANGE_PROP_LIMIT: PropLimit = PropLimit(
r'^(' + _hundred_float + '|'\
+ _ten_to_ninety_nine_float + '|' + _zero_to_ten_float + ')$',
hint='Expecting number from 0-100 representing a percentage, e.g. "50", "8.5".',
default='0',
)
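# Minimal self-check sketch (illustrative; run this module directly to
# exercise a few of the limits defined above).
if __name__ == '__main__':
    assert VOLUME_PROP_LIMIT.validate('5.5 mL')
    assert not VOLUME_PROP_LIMIT.validate('five mL')
    assert TIME_PROP_LIMIT.validate('30 mins')
    assert SEPARATION_PURPOSE_PROP_LIMIT.validate('extract')
    assert VOLUME_PROP_LIMIT.default == '0 mL'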
| 3.171875 | 3 |
dit/utils/bindargs.py | leoalfonso/dit | 1 | 4175 | """
Provides usable args and kwargs from inspect.getcallargs.
For Python 3.3 and above, this module is unnecessary and can be achieved using
features from PEP 362:
http://www.python.org/dev/peps/pep-0362/
For example, to override a parameter of some function:
>>> import inspect
>>> def func(a, b=1, c=2, d=3):
... return a, b, c, d
...
>>> def override_c(*args, **kwargs):
...     sig = inspect.signature(func)
...     ba = sig.bind(*args, **kwargs)
...     ba.arguments['c'] = 10
...     return func(*ba.args, **ba.kwargs)
...
>>> override_c(0, c=3)
(0, 1, 10, 3)
Also useful:
http://www.python.org/dev/peps/pep-3102/
"""
import sys
import inspect
from inspect import getcallargs
try:
from inspect import getfullargspec
except ImportError:
# Python 2.X
from collections import namedtuple
from inspect import getargspec
FullArgSpec = namedtuple('FullArgSpec',
'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')
def getfullargspec(f):
args, varargs, varkw, defaults = getargspec(f)
kwonlyargs = []
kwonlydefaults = None
annotations = getattr(f, '__annotations__', {})
return FullArgSpec(args, varargs, varkw, defaults,
kwonlyargs, kwonlydefaults, annotations)
def bindcallargs_leq32(_fUnCtIoN_, *args, **kwargs):
"""Binds arguments and keyword arguments to a function or method.
Returns a tuple (bargs, bkwargs) suitable for manipulation and passing
to the specified function.
`bargs` consists of the bound args, varargs, and kwonlyargs from
getfullargspec. `bkwargs` consists of the bound varkw from getfullargspec.
Both can be used in a call to the specified function. Any default
parameter values are included in the output.
Examples
--------
>>> def func(a, b=3, *args, **kwargs):
... pass
>>> bindcallargs(func, 5)
((5, 3), {})
>>> bindcallargs(func, 5, 4, 3, 2, 1, hello='there')
((5, 4, 3, 2, 1), {'hello': 'there'})
>>> args, kwargs = bindcallargs(func, 5)
>>> kwargs['b'] = 5 # overwrite default value for b
>>> func(*args, **kwargs)
"""
# It is necessary to choose an unlikely variable name for the function.
# The reason is that any kwarg by the same name will cause a TypeError
# due to multiple values being passed for that argument name.
func = _fUnCtIoN_
callargs = getcallargs(func, *args, **kwargs)
spec = getfullargspec(func)
# Construct all args and varargs and use them in bargs
bargs = [callargs[arg] for arg in spec.args]
if spec.varargs is not None:
bargs.extend(callargs[spec.varargs])
bargs = tuple(bargs)
# Start with kwonlyargs.
bkwargs = {kwonlyarg: callargs[kwonlyarg] for kwonlyarg in spec.kwonlyargs}
# Add in kwonlydefaults for unspecified kwonlyargs only.
# Since keyword-only arguments aren't allowed in Python 2, and we
# don't support python 3.0, 3.1, 3.2, this should never be executed:
if spec.kwonlydefaults is not None: # pragma: no cover
bkwargs.update({k: v for k, v in spec.kwonlydefaults.items()
if k not in bkwargs})
# Add in varkw.
if spec.varkw is not None:
bkwargs.update(callargs[spec.varkw])
return bargs, bkwargs
def bindcallargs_geq33(_fUnCtIoN_, *args, **kwargs):
# Should match the functionality of bindcallargs_leq32 for Python >= 3.3.
sig = inspect.signature(_fUnCtIoN_)
ba = sig.bind(*args, **kwargs)
# Add in all default values
for param in sig.parameters.values():
if param.name not in ba.arguments:
ba.arguments[param.name] = param.default
return ba.args, ba.kwargs
if sys.version_info[0:2] < (3,3):
bindcallargs = bindcallargs_leq32
else:
bindcallargs = bindcallargs_geq33
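# Minimal self-check sketch (illustrative; not part of the public API): bind
# arguments for a simple function, fill in the remaining defaults, and call
# it with the bound values.
if __name__ == '__main__':
    def _demo(a, b=1, c=2):
        return a, b, c
    _bargs, _bkwargs = bindcallargs(_demo, 0, c=10)
    assert _demo(*_bargs, **_bkwargs) == (0, 1, 10)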
| 3.40625 | 3 |
tests/python/gaia-ui-tests/gaiatest/gaia_test.py | AmyYLee/gaia | 1 | 4176 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os
import sys
import time
from marionette import MarionetteTestCase
from marionette.by import By
from marionette.errors import NoSuchElementException
from marionette.errors import ElementNotVisibleException
from marionette.errors import TimeoutException
from marionette.errors import StaleElementException
from marionette.errors import InvalidResponseException
import mozdevice
class LockScreen(object):
def __init__(self, marionette):
self.marionette = marionette
js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_lock_screen.js"))
self.marionette.import_script(js)
@property
def is_locked(self):
self.marionette.switch_to_frame()
return self.marionette.execute_script('window.wrappedJSObject.LockScreen.locked')
def lock(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script('GaiaLockScreen.lock()')
assert result, 'Unable to lock screen'
def unlock(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script('GaiaLockScreen.unlock()')
assert result, 'Unable to unlock screen'
class GaiaApp(object):
def __init__(self, origin=None, name=None, frame=None, src=None):
self.frame = frame
self.frame_id = frame
self.src = src
self.name = name
self.origin = origin
def __eq__(self, other):
return self.__dict__ == other.__dict__
class GaiaApps(object):
def __init__(self, marionette):
self.marionette = marionette
js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_apps.js"))
self.marionette.import_script(js)
def get_permission(self, app_name, permission_name):
return self.marionette.execute_async_script("return GaiaApps.getPermission('%s', '%s')" % (app_name, permission_name))
def set_permission(self, app_name, permission_name, value):
return self.marionette.execute_async_script("return GaiaApps.setPermission('%s', '%s', '%s')" %
(app_name, permission_name, value))
def launch(self, name, switch_to_frame=True, url=None, launch_timeout=None):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("GaiaApps.launchWithName('%s')" % name, script_timeout=launch_timeout)
assert result, "Failed to launch app with name '%s'" % name
app = GaiaApp(frame=result.get('frame'),
src=result.get('src'),
name=result.get('name'),
origin=result.get('origin'))
if app.frame_id is None:
raise Exception("App failed to launch; there is no app frame")
if switch_to_frame:
self.switch_to_frame(app.frame_id, url)
return app
@property
def displayed_app(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script('return GaiaApps.displayedApp();')
return GaiaApp(frame=result.get('frame'),
src=result.get('src'),
name=result.get('name'),
origin=result.get('origin'))
def switch_to_displayed_app(self):
self.marionette.switch_to_default_content()
self.marionette.switch_to_frame(self.displayed_app.frame)
def is_app_installed(self, app_name):
self.marionette.switch_to_frame()
return self.marionette.execute_async_script("GaiaApps.locateWithName('%s')" % app_name)
def uninstall(self, name):
self.marionette.switch_to_frame()
self.marionette.execute_async_script("GaiaApps.uninstallWithName('%s')" % name)
def kill(self, app):
self.marionette.switch_to_frame()
js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_apps.js"))
self.marionette.import_script(js)
result = self.marionette.execute_async_script("GaiaApps.kill('%s');" % app.origin)
assert result, "Failed to kill app with name '%s'" % app.name
def kill_all(self):
self.marionette.switch_to_frame()
js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_apps.js"))
self.marionette.import_script(js)
self.marionette.execute_async_script("GaiaApps.killAll()")
def runningApps(self):
return self.marionette.execute_script("return GaiaApps.getRunningApps()")
def switch_to_frame(self, app_frame, url=None, timeout=30):
self.marionette.switch_to_frame(app_frame)
start = time.time()
if not url:
def check(now):
return "about:blank" not in now
else:
def check(now):
return url in now
while (time.time() - start < timeout):
if check(self.marionette.get_url()):
return
time.sleep(2)
raise TimeoutException('Could not switch to app frame %s in time' % app_frame)
class GaiaData(object):
def __init__(self, marionette, testvars=None):
self.marionette = marionette
self.testvars = testvars or {}
js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_data_layer.js"))
self.marionette.import_script(js)
self.marionette.set_search_timeout(10000)
def set_time(self, date_number):
self.marionette.set_context(self.marionette.CONTEXT_CHROME)
self.marionette.execute_script("window.navigator.mozTime.set(%s);" % date_number)
self.marionette.set_context(self.marionette.CONTEXT_CONTENT)
@property
def all_contacts(self):
self.marionette.switch_to_frame()
return self.marionette.execute_async_script('return GaiaDataLayer.getAllContacts();', special_powers=True)
@property
def sim_contacts(self):
self.marionette.switch_to_frame()
return self.marionette.execute_async_script('return GaiaDataLayer.getSIMContacts();', special_powers=True)
def insert_contact(self, contact):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script('return GaiaDataLayer.insertContact(%s);' % json.dumps(contact), special_powers=True)
assert result, 'Unable to insert contact %s' % contact
def remove_all_contacts(self, default_script_timeout=60000):
self.marionette.switch_to_frame()
self.marionette.set_script_timeout(max(default_script_timeout, 1000 * len(self.all_contacts)))
result = self.marionette.execute_async_script('return GaiaDataLayer.removeAllContacts();', special_powers=True)
assert result, 'Unable to remove all contacts'
self.marionette.set_script_timeout(default_script_timeout)
def get_setting(self, name):
return self.marionette.execute_async_script('return GaiaDataLayer.getSetting("%s")' % name, special_powers=True)
@property
def all_settings(self):
return self.get_setting('*')
def set_setting(self, name, value):
import json
value = json.dumps(value)
result = self.marionette.execute_async_script('return GaiaDataLayer.setSetting("%s", %s)' % (name, value), special_powers=True)
assert result, "Unable to change setting with name '%s' to '%s'" % (name, value)
def _get_pref(self, datatype, name):
return self.marionette.execute_script("return SpecialPowers.get%sPref('%s');" % (datatype, name), special_powers=True)
def _set_pref(self, datatype, name, value):
value = json.dumps(value)
self.marionette.execute_script("SpecialPowers.set%sPref('%s', %s);" % (datatype, name, value), special_powers=True)
def get_bool_pref(self, name):
"""Returns the value of a Gecko boolean pref, which is different from a Gaia setting."""
return self._get_pref('Bool', name)
def set_bool_pref(self, name, value):
"""Sets the value of a Gecko boolean pref, which is different from a Gaia setting."""
return self._set_pref('Bool', name, value)
def get_int_pref(self, name):
"""Returns the value of a Gecko integer pref, which is different from a Gaia setting."""
return self._get_pref('Int', name)
def set_int_pref(self, name, value):
"""Sets the value of a Gecko integer pref, which is different from a Gaia setting."""
return self._set_pref('Int', name, value)
def get_char_pref(self, name):
"""Returns the value of a Gecko string pref, which is different from a Gaia setting."""
return self._get_pref('Char', name)
def set_char_pref(self, name, value):
"""Sets the value of a Gecko string pref, which is different from a Gaia setting."""
return self._set_pref('Char', name, value)
def set_volume(self, value):
channels = ['alarm', 'content', 'notification']
for channel in channels:
self.set_setting('audio.volume.%s' % channel, value)
def bluetooth_enable(self):
self.marionette.switch_to_frame()
return self.marionette.execute_async_script("return GaiaDataLayer.enableBluetooth()")
def bluetooth_disable(self):
self.marionette.switch_to_frame()
return self.marionette.execute_async_script("return GaiaDataLayer.disableBluetooth()")
def bluetooth_pair_device(self, device_name):
return self.marionette.execute_async_script('return GaiaDataLayer.pairBluetoothDevice("%s")' % device_name)
def bluetooth_unpair_all_devices(self):
self.marionette.switch_to_frame()
self.marionette.execute_async_script('return GaiaDataLayer.unpairAllBluetoothDevices()')
def bluetooth_set_device_name(self, device_name):
result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceName(%s);' % device_name)
assert result, "Unable to set device's bluetooth name to %s" % device_name
def bluetooth_set_device_discoverable_mode(self, discoverable):
if (discoverable):
result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(true);')
else:
result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(false);')
assert result, 'Able to set the device bluetooth discoverable mode'
@property
def bluetooth_is_enabled(self):
return self.marionette.execute_script("return window.navigator.mozBluetooth.enabled")
@property
def is_cell_data_enabled(self):
return self.get_setting('ril.data.enabled')
def connect_to_cell_data(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("return GaiaDataLayer.connectToCellData()", special_powers=True)
assert result, 'Unable to connect to cell data'
def disable_cell_data(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("return GaiaDataLayer.disableCellData()", special_powers=True)
assert result, 'Unable to disable cell data'
@property
def is_cell_data_connected(self):
# XXX: check bug-926169
# this is used to keep all tests passing while introducing multi-sim APIs
return self.marionette.execute_script('var mobileConnection = window.navigator.mozMobileConnection || ' +
'window.navigator.mozMobileConnections && ' +
'window.navigator.mozMobileConnections[0]; ' +
'return mobileConnection.data.connected;')
def enable_cell_roaming(self):
self.set_setting('ril.data.roaming_enabled', True)
def disable_cell_roaming(self):
self.set_setting('ril.data.roaming_enabled', False)
@property
def is_wifi_enabled(self):
return self.marionette.execute_script("return window.navigator.mozWifiManager.enabled;")
def enable_wifi(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("return GaiaDataLayer.enableWiFi()", special_powers=True)
assert result, 'Unable to enable WiFi'
def disable_wifi(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("return GaiaDataLayer.disableWiFi()", special_powers=True)
assert result, 'Unable to disable WiFi'
def connect_to_wifi(self, network=None):
network = network or self.testvars.get('wifi')
assert network, 'No WiFi network provided'
self.enable_wifi()
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("return GaiaDataLayer.connectToWiFi(%s)" % json.dumps(network))
assert result, 'Unable to connect to WiFi network'
def forget_all_networks(self):
self.marionette.switch_to_frame()
self.marionette.execute_async_script('return GaiaDataLayer.forgetAllNetworks()')
def is_wifi_connected(self, network=None):
network = network or self.testvars.get('wifi')
assert network, 'No WiFi network provided'
self.marionette.switch_to_frame()
return self.marionette.execute_script("return GaiaDataLayer.isWiFiConnected(%s)" % json.dumps(network))
@property
def known_networks(self):
return self.marionette.execute_async_script('return GaiaDataLayer.getKnownNetworks()')
@property
def active_telephony_state(self):
# Returns the state of only the currently active call or None if no active call
return self.marionette.execute_script("return GaiaDataLayer.getMozTelephonyState()")
@property
def is_antenna_available(self):
return self.marionette.execute_script('return window.navigator.mozFMRadio.antennaAvailable')
@property
def is_fm_radio_enabled(self):
return self.marionette.execute_script('return window.navigator.mozFMRadio.enabled')
@property
def fm_radio_frequency(self):
return self.marionette.execute_script('return window.navigator.mozFMRadio.frequency')
@property
def media_files(self):
result = []
result.extend(self.music_files)
result.extend(self.picture_files)
result.extend(self.video_files)
return result
def delete_all_sms(self):
self.marionette.switch_to_frame()
return self.marionette.execute_async_script("return GaiaDataLayer.deleteAllSms();", special_powers=True)
def delete_all_call_log_entries(self):
"""The call log needs to be open and focused in order for this to work."""
self.marionette.execute_script('window.wrappedJSObject.RecentsDBManager.deleteAll();')
def kill_active_call(self):
self.marionette.execute_script("var telephony = window.navigator.mozTelephony; " +
"if(telephony.active) telephony.active.hangUp();")
@property
def music_files(self):
return self.marionette.execute_async_script(
'return GaiaDataLayer.getAllMusic();')
@property
def picture_files(self):
return self.marionette.execute_async_script(
'return GaiaDataLayer.getAllPictures();')
@property
def video_files(self):
return self.marionette.execute_async_script(
'return GaiaDataLayer.getAllVideos();')
def sdcard_files(self, extension=''):
files = self.marionette.execute_async_script(
'return GaiaDataLayer.getAllSDCardFiles();')
if len(extension):
return [filename for filename in files if filename.endswith(extension)]
return files
def send_sms(self, number, message):
import json
number = json.dumps(number)
message = json.dumps(message)
result = self.marionette.execute_async_script('return GaiaDataLayer.sendSMS(%s, %s)' % (number, message), special_powers=True)
assert result, 'Unable to send SMS to recipient %s with text %s' % (number, message)
class GaiaDevice(object):
def __init__(self, marionette, testvars=None):
self.marionette = marionette
self.testvars = testvars or {}
@property
def manager(self):
if hasattr(self, '_manager') and self._manager:
return self._manager
if not self.is_android_build:
raise Exception('Device manager is only available for devices.')
dm_type = os.environ.get('DM_TRANS', 'adb')
if dm_type == 'adb':
self._manager = mozdevice.DeviceManagerADB()
elif dm_type == 'sut':
host = os.environ.get('TEST_DEVICE')
if not host:
raise Exception('Must specify host with SUT!')
self._manager = mozdevice.DeviceManagerSUT(host=host)
else:
raise Exception('Unknown device manager type: %s' % dm_type)
return self._manager
@property
def is_android_build(self):
if self.testvars.get('is_android_build') is None:
self.testvars['is_android_build'] = 'Android' in self.marionette.session_capabilities['platform']
return self.testvars['is_android_build']
@property
def is_online(self):
# Returns true if the device has a network connection established (cell data, wifi, etc)
return self.marionette.execute_script('return window.navigator.onLine;')
@property
def has_mobile_connection(self):
# XXX: check bug-926169
# this is used to keep all tests passing while introducing multi-sim APIs
return self.marionette.execute_script('var mobileConnection = window.navigator.mozMobileConnection || ' +
'window.navigator.mozMobileConnections && ' +
'window.navigator.mozMobileConnections[0]; ' +
'return mobileConnection !== undefined')
@property
def has_wifi(self):
if not hasattr(self, '_has_wifi'):
self._has_wifi = self.marionette.execute_script('return window.navigator.mozWifiManager !== undefined')
return self._has_wifi
def push_file(self, source, count=1, destination='', progress=None):
if not destination.count('.') > 0:
destination = '/'.join([destination, source.rpartition(os.path.sep)[-1]])
self.manager.mkDirs(destination)
self.manager.pushFile(source, destination)
if count > 1:
for i in range(1, count + 1):
remote_copy = '_%s.'.join(iter(destination.split('.'))) % i
self.manager._checkCmd(['shell', 'dd', 'if=%s' % destination, 'of=%s' % remote_copy])
if progress:
progress.update(i)
self.manager.removeFile(destination)
def restart_b2g(self):
self.stop_b2g()
time.sleep(2)
self.start_b2g()
def start_b2g(self):
if self.marionette.instance:
# launch the gecko instance attached to marionette
self.marionette.instance.start()
elif self.is_android_build:
self.manager.shellCheckOutput(['start', 'b2g'])
else:
raise Exception('Unable to start B2G')
self.marionette.wait_for_port()
self.marionette.start_session()
if self.is_android_build:
self.marionette.execute_async_script("""
window.addEventListener('mozbrowserloadend', function loaded(aEvent) {
if (aEvent.target.src.indexOf('ftu') != -1 || aEvent.target.src.indexOf('homescreen') != -1) {
window.removeEventListener('mozbrowserloadend', loaded);
marionetteScriptFinished();
}
});""", script_timeout=60000)
# TODO: Remove this sleep when Bug 924912 is addressed
time.sleep(5)
def stop_b2g(self):
if self.marionette.instance:
# close the gecko instance attached to marionette
self.marionette.instance.close()
elif self.is_android_build:
self.manager.shellCheckOutput(['stop', 'b2g'])
else:
raise Exception('Unable to stop B2G')
self.marionette.client.close()
self.marionette.session = None
self.marionette.window = None
class GaiaTestCase(MarionetteTestCase):
_script_timeout = 60000
_search_timeout = 10000
# default timeout in seconds for the wait_for methods
_default_timeout = 30
def __init__(self, *args, **kwargs):
self.restart = kwargs.pop('restart', False)
kwargs.pop('iterations', None)
kwargs.pop('checkpoint_interval', None)
MarionetteTestCase.__init__(self, *args, **kwargs)
def setUp(self):
try:
MarionetteTestCase.setUp(self)
except InvalidResponseException:
if self.restart:
pass
self.device = GaiaDevice(self.marionette, self.testvars)
if self.restart and (self.device.is_android_build or self.marionette.instance):
self.device.stop_b2g()
if self.device.is_android_build:
# revert device to a clean state
self.device.manager.removeDir('/data/local/storage/persistent')
self.device.manager.removeDir('/data/b2g/mozilla')
self.device.start_b2g()
# the emulator can be really slow!
self.marionette.set_script_timeout(self._script_timeout)
self.marionette.set_search_timeout(self._search_timeout)
self.lockscreen = LockScreen(self.marionette)
self.apps = GaiaApps(self.marionette)
self.data_layer = GaiaData(self.marionette, self.testvars)
from gaiatest.apps.keyboard.app import Keyboard
self.keyboard = Keyboard(self.marionette)
self.cleanUp()
def cleanUp(self):
# remove media
if self.device.is_android_build:
for filename in self.data_layer.media_files:
# filename is a fully qualified path
self.device.manager.removeFile(filename)
# Switch off keyboard FTU screen
self.data_layer.set_setting("keyboard.ftu.enabled", False)
# restore settings from testvars
[self.data_layer.set_setting(name, value) for name, value in self.testvars.get('settings', {}).items()]
# unlock
self.lockscreen.unlock()
# If we are restarting all of these values are reset to default earlier in the setUp
if not self.restart:
# disable passcode before restore settings from testvars
self.data_layer.set_setting('lockscreen.passcode-lock.code', '1111')
self.data_layer.set_setting('lockscreen.passcode-lock.enabled', False)
# Change language back to English
self.data_layer.set_setting("language.current", "en-US")
# Switch off spanish keyboard before test
self.data_layer.set_setting("keyboard.layouts.spanish", False)
# Set do not track pref back to the default
self.data_layer.set_setting('privacy.donottrackheader.value', '-1')
if self.data_layer.get_setting('ril.radio.disabled'):
# enable the device radio, disable Airplane mode
self.data_layer.set_setting('ril.radio.disabled', False)
# Re-set edge gestures pref to False
self.data_layer.set_setting('edgesgesture.enabled', False)
# disable carrier data connection
if self.device.has_mobile_connection:
self.data_layer.disable_cell_data()
self.data_layer.disable_cell_roaming()
if self.device.has_wifi:
self.data_layer.enable_wifi()
self.data_layer.forget_all_networks()
self.data_layer.disable_wifi()
# remove data
self.data_layer.remove_all_contacts(self._script_timeout)
# reset to home screen
self.marionette.execute_script("window.wrappedJSObject.dispatchEvent(new Event('home'));")
# kill any open apps
self.apps.kill_all()
# disable sound completely
self.data_layer.set_volume(0)
def install_marketplace(self):
_yes_button_locator = (By.ID, 'app-install-install-button')
mk = {"name": "Marketplace Dev",
"manifest": "https://marketplace-dev.allizom.org/manifest.webapp ",
}
if not self.apps.is_app_installed(mk['name']):
# install the marketplace dev app
self.marionette.execute_script('navigator.mozApps.install("%s")' % mk['manifest'])
# TODO add this to the system app object when we have one
self.wait_for_element_displayed(*_yes_button_locator)
self.marionette.find_element(*_yes_button_locator).tap()
self.wait_for_element_not_displayed(*_yes_button_locator)
def connect_to_network(self):
if not self.device.is_online:
try:
self.connect_to_local_area_network()
except:
if self.device.has_mobile_connection:
self.data_layer.connect_to_cell_data()
else:
raise Exception('Unable to connect to network')
assert self.device.is_online
def connect_to_local_area_network(self):
if not self.device.is_online:
if self.testvars.get('wifi') and self.device.has_wifi:
self.data_layer.connect_to_wifi()
assert self.device.is_online
else:
raise Exception('Unable to connect to local area network')
def push_resource(self, filename, count=1, destination=''):
self.device.push_file(self.resource(filename), count, '/'.join(['sdcard', destination]))
def resource(self, filename):
return os.path.abspath(os.path.join(os.path.dirname(__file__), 'resources', filename))
def change_orientation(self, orientation):
""" There are 4 orientation states which the phone can be passed in:
portrait-primary(which is the default orientation), landscape-primary, portrait-secondary and landscape-secondary
"""
self.marionette.execute_async_script("""
if (arguments[0] === arguments[1]) {
marionetteScriptFinished();
}
else {
var expected = arguments[1];
window.screen.onmozorientationchange = function(e) {
console.log("Received 'onmozorientationchange' event.");
waitFor(
function() {
window.screen.onmozorientationchange = null;
marionetteScriptFinished();
},
function() {
return window.screen.mozOrientation === expected;
}
);
};
console.log("Changing orientation to '" + arguments[1] + "'.");
window.screen.mozLockOrientation(arguments[1]);
};""", script_args=[self.screen_orientation, orientation])
@property
def screen_width(self):
return self.marionette.execute_script('return window.screen.width')
@property
def screen_orientation(self):
return self.marionette.execute_script('return window.screen.mozOrientation')
def wait_for_element_present(self, by, locator, timeout=_default_timeout):
timeout = float(timeout) + time.time()
while time.time() < timeout:
time.sleep(0.5)
try:
return self.marionette.find_element(by, locator)
except NoSuchElementException:
pass
else:
raise TimeoutException(
'Element %s not present before timeout' % locator)
def wait_for_element_not_present(self, by, locator, timeout=_default_timeout):
timeout = float(timeout) + time.time()
while time.time() < timeout:
time.sleep(0.5)
try:
self.marionette.find_element(by, locator)
except NoSuchElementException:
break
else:
raise TimeoutException(
'Element %s still present after timeout' % locator)
def wait_for_element_displayed(self, by, locator, timeout=_default_timeout):
timeout = float(timeout) + time.time()
e = None
while time.time() < timeout:
time.sleep(0.5)
try:
if self.marionette.find_element(by, locator).is_displayed():
break
except (NoSuchElementException, StaleElementException) as e:
pass
else:
# This is an effortless way to give extra debugging information
if isinstance(e, NoSuchElementException):
raise TimeoutException('Element %s not present before timeout' % locator)
else:
raise TimeoutException('Element %s present but not displayed before timeout' % locator)
def wait_for_element_not_displayed(self, by, locator, timeout=_default_timeout):
timeout = float(timeout) + time.time()
while time.time() < timeout:
time.sleep(0.5)
try:
if not self.marionette.find_element(by, locator).is_displayed():
break
except StaleElementException:
pass
except NoSuchElementException:
break
else:
raise TimeoutException(
'Element %s still visible after timeout' % locator)
def wait_for_condition(self, method, timeout=_default_timeout,
message="Condition timed out"):
"""Calls the method provided with the driver as an argument until the \
return value is not False."""
end_time = time.time() + timeout
while time.time() < end_time:
try:
value = method(self.marionette)
if value:
return value
except (NoSuchElementException, StaleElementException):
pass
time.sleep(0.5)
else:
raise TimeoutException(message)
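# Typical call (illustrative): block until an element reports itself
# displayed, e.g.
#   self.wait_for_condition(
#       lambda m: m.find_element(*locator).is_displayed())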
def is_element_present(self, by, locator):
try:
self.marionette.find_element(by, locator)
return True
except:
return False
def is_element_displayed(self, by, locator):
try:
return self.marionette.find_element(by, locator).is_displayed()
except (NoSuchElementException, ElementNotVisibleException):
return False
def tearDown(self):
self.lockscreen = None
self.apps = None
self.data_layer = None
MarionetteTestCase.tearDown(self)
class GaiaEnduranceTestCase(GaiaTestCase):
def __init__(self, *args, **kwargs):
self.iterations = kwargs.pop('iterations') or 1
self.checkpoint_interval = kwargs.pop('checkpoint_interval') or self.iterations
GaiaTestCase.__init__(self, *args, **kwargs)
def drive(self, test, app):
self.test_method = test
self.app_under_test = app
# Now drive the actual test case iterations
for count in range(1, self.iterations + 1):
self.iteration = count
self.marionette.log("%s iteration %d of %d" % (self.test_method.__name__, count, self.iterations))
# Print to console so can see what iteration we're on while test is running
if self.iteration == 1:
print "\n"
print "Iteration %d of %d..." % (count, self.iterations)
sys.stdout.flush()
self.test_method()
# Checkpoint time?
if ((count % self.checkpoint_interval) == 0) or count == self.iterations:
self.checkpoint()
# Finished, now process checkpoint data into .json output
self.process_checkpoint_data()
def checkpoint(self):
# Console output so know what's happening if watching console
print "Checkpoint..."
sys.stdout.flush()
# Sleep to give device idle time (for gc)
idle_time = 30
self.marionette.log("sleeping %d seconds to give the device some idle time" % idle_time)
time.sleep(idle_time)
# Dump out some memory status info
self.marionette.log("checkpoint")
self.cur_time = time.strftime("%Y%m%d%H%M%S", time.localtime())
# If first checkpoint, create the file if it doesn't exist already
if self.iteration in (0, self.checkpoint_interval):
self.checkpoint_path = "checkpoints"
if not os.path.exists(self.checkpoint_path):
os.makedirs(self.checkpoint_path, 0755)
self.log_name = "%s/checkpoint_%s_%s.log" % (self.checkpoint_path, self.test_method.__name__, self.cur_time)
with open(self.log_name, 'a') as log_file:
log_file.write('%s Gaia Endurance Test: %s\n' % (self.cur_time, self.test_method.__name__))
output_str = self.device.manager.shellCheckOutput(["b2g-ps"])
with open(self.log_name, 'a') as log_file:
log_file.write('%s Checkpoint after iteration %d of %d:\n' % (self.cur_time, self.iteration, self.iterations))
log_file.write('%s\n' % output_str)
def close_app(self):
# Close the current app (self.app) by using the home button
self.marionette.switch_to_frame()
self.marionette.execute_script("window.wrappedJSObject.dispatchEvent(new Event('home'));")
# Bring up the cards view
_cards_view_locator = ('id', 'cards-view')
self.marionette.execute_script("window.wrappedJSObject.dispatchEvent(new Event('holdhome'));")
self.wait_for_element_displayed(*_cards_view_locator)
# Sleep a bit
time.sleep(5)
# Tap the close icon for the current app
locator_part_two = '#cards-view li.card[data-origin*="%s"] .close-card' % self.app_under_test.lower()
_close_button_locator = ('css selector', locator_part_two)
close_card_app_button = self.marionette.find_element(*_close_button_locator)
close_card_app_button.tap()
def process_checkpoint_data(self):
# Process checkpoint data into .json
self.marionette.log("processing checkpoint data from %s" % self.log_name)
# Open the checkpoint file
checkpoint_file = open(self.log_name, 'r')
# Grab every b2g rss reading for each checkpoint
b2g_rss_list = []
for next_line in checkpoint_file:
if next_line.startswith("b2g"):
b2g_rss_list.append(next_line.split()[5])
# Close the checkpoint file
checkpoint_file.close()
# Calculate the average b2g_rss
total = 0
for b2g_mem_value in b2g_rss_list:
total += int(b2g_mem_value)
avg_rss = total / len(b2g_rss_list)
# Create a summary text file
summary_name = self.log_name.replace('.log', '_summary.log')
summary_file = open(summary_name, 'w')
# Write the summarized checkpoint data
summary_file.write('test_name: %s\n' % self.test_method.__name__)
summary_file.write('completed: %s\n' % self.cur_time)
summary_file.write('app_under_test: %s\n' % self.app_under_test.lower())
summary_file.write('total_iterations: %d\n' % self.iterations)
summary_file.write('checkpoint_interval: %d\n' % self.checkpoint_interval)
summary_file.write('b2g_rss: ')
summary_file.write(', '.join(b2g_rss_list))
summary_file.write('\navg_rss: %d\n\n' % avg_rss)
# Close the summary file
summary_file.close()
# Write to suite summary file
suite_summary_file_name = '%s/avg_b2g_rss_suite_summary.log' % self.checkpoint_path
suite_summary_file = open(suite_summary_file_name, 'a')
suite_summary_file.write('%s: %s\n' % (self.test_method.__name__, avg_rss))
suite_summary_file.close()
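# Added usage sketch (illustrative; the class, method and app names below are
# placeholders, not part of gaiatest): an endurance test subclasses
# GaiaEnduranceTestCase, defines one method that performs a single iteration,
# and hands it to drive() together with the app name that close_app() expects:
#
#   class TestBrowserEndurance(GaiaEnduranceTestCase):
#       def test_endurance_browser(self):
#           self.drive(self.browse_and_close, 'browser')
#
#       def browse_and_close(self):
#           # ... one iteration of the scenario ...
#           self.close_app()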
| 1.992188 | 2 |
library/__mozilla__/pyjamas/DOM.py | certik/pyjamas | 0 | 4177 | <gh_stars>0
def buttonClick(button):
JS("""
var doc = button.ownerDocument;
if (doc != null) {
var evt = doc.createEvent('MouseEvents');
evt.initMouseEvent('click', true, true, null, 0, 0,
0, 0, 0, false, false, false, false, 0, null);
button.dispatchEvent(evt);
}
""")
def compare(elem1, elem2):
JS("""
if (!elem1 && !elem2) {
return true;
} else if (!elem1 || !elem2) {
return false;
}
if (!elem1.isSameNode) {
return (elem1 == elem2);
}
return (elem1.isSameNode(elem2));
""")
def eventGetButton(evt):
JS("""
var button = evt.which;
if(button == 2) {
return 4;
} else if (button == 3) {
return 2;
} else {
return button || 0;
}
""")
# This is what is in GWT 1.5 for getAbsoluteLeft. err...
#"""
# // We cannot use DOMImpl here because offsetLeft/Top return erroneous
# // values when overflow is not visible. We have to difference screenX
# // here due to a change in getBoxObjectFor which causes inconsistencies
# // on whether the calculations are inside or outside of the element's
# // border.
# try {
# return $doc.getBoxObjectFor(elem).screenX
# - $doc.getBoxObjectFor($doc.documentElement).screenX;
# } catch (e) {
# // This works around a bug in the FF3 betas. The bug
# // should be fixed before they release, so this can
# // be removed at a later date.
# // https://bugzilla.mozilla.org/show_bug.cgi?id=409111
# // DOMException.WRONG_DOCUMENT_ERR == 4
# if (e.code == 4) {
# return 0;
# }
# throw e;
# }
#"""
def getAbsoluteLeft(elem):
JS("""
// Firefox 3 expects getBoundingClientRect
// getBoundingClientRect can be float: 73.1 instead of 74, see
// gwt's workaround at user/src/com/google/gwt/dom/client/DOMImplMozilla.java:47
// Please note, their implementation has 1px offset.
if ( typeof elem.getBoundingClientRect == 'function' ) {
var left = Math.ceil(elem.getBoundingClientRect().left);
return left + $doc.body.scrollLeft + $doc.documentElement.scrollLeft;
}
// Older Firefox can use getBoxObjectFor
else {
var left = $doc.getBoxObjectFor(elem).x;
var parent = elem.parentNode;
while (parent) {
if (parent.scrollLeft > 0) {
left = left - parent.scrollLeft;
}
parent = parent.parentNode;
}
return left + $doc.body.scrollLeft + $doc.documentElement.scrollLeft;
}
""")
# This is what is in GWT 1.5 for getAbsoluteTop. err...
#"""
# // We cannot use DOMImpl here because offsetLeft/Top return erroneous
# // values when overflow is not visible. We have to difference screenY
# // here due to a change in getBoxObjectFor which causes inconsistencies
# // on whether the calculations are inside or outside of the element's
# // border.
# try {
# return $doc.getBoxObjectFor(elem).screenY
# - $doc.getBoxObjectFor($doc.documentElement).screenY;
# } catch (e) {
# // This works around a bug in the FF3 betas. The bug
# // should be fixed before they release, so this can
# // be removed at a later date.
# // https://bugzilla.mozilla.org/show_bug.cgi?id=409111
# // DOMException.WRONG_DOCUMENT_ERR == 4
# if (e.code == 4) {
# return 0;
# }
# throw e;
# }
#"""
def getAbsoluteTop(elem):
JS("""
// Firefox 3 expects getBoundingClientRect
if ( typeof elem.getBoundingClientRect == 'function' ) {
var top = Math.ceil(elem.getBoundingClientRect().top);
return top + $doc.body.scrollTop + $doc.documentElement.scrollTop;
}
// Older Firefox can use getBoxObjectFor
else {
var top = $doc.getBoxObjectFor(elem).y;
var parent = elem.parentNode;
while (parent) {
if (parent.scrollTop > 0) {
top -= parent.scrollTop;
}
parent = parent.parentNode;
}
return top + $doc.body.scrollTop + $doc.documentElement.scrollTop;
}
""")
def getChildIndex(parent, child):
JS("""
var count = 0, current = parent.firstChild;
while (current) {
if (! current.isSameNode) {
if (current == child) {
return count;
}
}
else if (current.isSameNode(child)) {
return count;
}
if (current.nodeType == 1) {
++count;
}
current = current.nextSibling;
}
return -1;
""")
def isOrHasChild(parent, child):
JS("""
while (child) {
if ((!parent.isSameNode)) {
if (parent == child) {
return true;
}
}
else if (parent.isSameNode(child)) {
return true;
}
try {
child = child.parentNode;
} catch(e) {
// Give up on 'Permission denied to get property
// HTMLDivElement.parentNode'
// See https://bugzilla.mozilla.org/show_bug.cgi?id=208427
return false;
}
if (child && (child.nodeType != 1)) {
child = null;
}
}
return false;
""")
def releaseCapture(elem):
JS("""
if ((DOM.sCaptureElem != null) && DOM.compare(elem, DOM.sCaptureElem))
DOM.sCaptureElem = null;
if (!elem.isSameNode) {
if (elem == $wnd.__captureElem) {
$wnd.__captureElem = null;
}
}
else if (elem.isSameNode($wnd.__captureElem)) {
$wnd.__captureElem = null;
}
""")
| 2.015625 | 2 |
apps/vendors/migrations/0090_auto_20160610_2125.py | ExpoAshique/ProveBanking__s | 0 | 4178 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-10 21:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vendors', '0089_auto_20160602_2123'),
]
operations = [
migrations.AlterField(
model_name='vendor',
name='email',
field=models.EmailField(blank=True, max_length=254, verbose_name='Email'),
),
]
| 1.359375 | 1 |
graph/articulation_points.py | fujihiraryo/library | 0 | 4179 | <gh_stars>0
from depth_first_search import DFS
def articulation_points(graph):
n = len(graph)
dfs = DFS(graph)
order = [None] * n
for i, x in enumerate(dfs.preorder):
order[x] = i
lower = order[:]
for x in dfs.preorder[::-1]:
for y in graph[x]:
if y == dfs.parent[x]:
continue
lower[x] = min(lower[x], lower[y])
if len(dfs.children[0]) > 1:
yield 0
for x in range(1, n):
if any(order[x] <= lower[y] for y in dfs.children[x]):
yield x
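# Added illustrative usage (not part of the original module): assumes the DFS
# helper from depth_first_search exposes preorder, parent and children as used
# above, and that the graph is an adjacency list. For the graph below
# (edges 0-1, 0-2, 1-2, 2-3) vertex 2 is the only articulation point, since
# removing it disconnects vertex 3 from the triangle 0-1-2.
if __name__ == "__main__":
    example_graph = [[1, 2], [0, 2], [0, 1, 3], [2]]
    print(list(articulation_points(example_graph)))  # expected: [2]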
| 2.953125 | 3 |
database.py | AndreAngelucci/popcorn_time_bot | 0 | 4180 | import pymongo
from conf import Configuracoes
class Mongo_Database:
""" Singleton com a conexao com o MongoDB """
_instancia = None
def __new__(cls, *args, **kwargs):
if not(cls._instancia):
cls._instancia = super(Mongo_Database, cls).__new__(cls, *args, **kwargs)
return cls._instancia
def __init__(self,):
        # fetch the connection string from the configuration file
string_conexao = Configuracoes().get_config("database", "string_connection")
assert (string_conexao != ""), "String de conexao indefinida"
try:
self.mongo_client = pymongo.MongoClient(string_conexao)
self.collection_filmes = self.mongo_client["popcorn_time"]["filmes"]
self.collection_tweets = self.mongo_client["twitter_log"]["tweets"]
except:
raise Exception("Nao foi possivel se conectar ao B.D.")
print("Conectado a", string_conexao)
def grava_filmes(self, lista_filmes):
        # check whether the movie already exists;
        # if not, store it and add it to the list of new movies
novos = []
try:
for filme in lista_filmes:
if (self.collection_filmes.count_documents({"_id": filme["_id"]}) == 0):
self.collection_filmes.insert_one(filme)
novos.append(filme)
finally:
return novos
def grava_tweet(self, tweet_info):
        # store the result of the posted tweets
self.collection_tweets.insert_one(tweet_info)
| 2.875 | 3 |
sensor_core/sleep.py | JorisHerbots/niip_iot_zombie_apocalypse | 0 | 4181 | <gh_stars>0
import machine
import pycom
import utime
from exceptions import Exceptions
class Sleep:
@property
def wakeReason(self):
return machine.wake_reason()[0]
@property
def wakePins(self):
return machine.wake_reason()[1]
@property
def powerOnWake(self):
return self.wakeReason == machine.PWRON_WAKE
@property
def pinWake(self):
return self.wakeReason == machine.PIN_WAKE
@property
def RTCWake(self):
return self.wakeReason == machine.RTC_WAKE
@property
def ULPWake(self):
return self.wakeReason == machine.ULP_WAKE
@property
def isSleepWake(self):
return self.pinWake or self.RTCWake or self.ULPWake
@property
def activeTime(self):
return self.__activeTime + utime.ticks_diff(utime.ticks_ms(), self.__activityStart)
@property
def inactiveTime(self):
return self.__inactiveTime
ACTIVE_TIME_KEY = 'activeTime'
INACTIVE_TIME_KEY = 'inactiveTime'
SLEEP_TIME_KEY = 'sleepTime'
def __init__(self):
self.__activityStart = utime.ticks_ms()
self.__initPersistentVariable(Sleep.ACTIVE_TIME_KEY)
self.__initPersistentVariable(Sleep.INACTIVE_TIME_KEY)
if not self.powerOnWake:
sleptTime = pycom.nvs_get(Sleep.SLEEP_TIME_KEY) - machine.remaining_sleep_time()
pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) + sleptTime)
self.__activeTime = pycom.nvs_get(Sleep.ACTIVE_TIME_KEY)
self.__inactiveTime = pycom.nvs_get(Sleep.INACTIVE_TIME_KEY)
self.__wakeUpPins = []
def __initPersistentVariable(self, key, value=0):
if (pycom.nvs_get(key) == None):
pycom.nvs_set(key, value)
def addWakeUpPin(self, pin):
# P2, P3, P4, P6, P8 to P10 and P13 to P23
if isinstance(pin, list):
self.__wakeUpPins.extend(pin)
else:
self.__wakeUpPins.append(pin)
try:
machine.pin_sleep_wakeup(self.__wakeUpPins, mode=machine.WAKEUP_ANY_HIGH, enable_pull=True)
except Exception as e:
Exceptions.error(Exception('Sleep not available: ' + str(e)))
def resetTimers(self):
pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, 0)
pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, 0)
def sleep(self, milliseconds=0):
if milliseconds == 0:
milliseconds = 604800000 # 1 week
pycom.nvs_set(Sleep.SLEEP_TIME_KEY, milliseconds)
pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, self.activeTime + utime.ticks_diff(utime.ticks_ms(), self.__activityStart))
try:
machine.deepsleep(milliseconds)
except Exception as e:
Exceptions.error(Exception('Deepsleep not available: ' + str(e)))
def delay(self, milliseconds):
utime.sleep_ms(milliseconds)
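# Added usage sketch (illustrative; pin names and timings are placeholders):
#   sleep = Sleep()
#   sleep.addWakeUpPin(['P13', 'P14'])  # wake on a rising edge of either pin
#   if sleep.pinWake:
#       pass                            # react to the pin that woke the device
#   sleep.sleep(60 * 1000)              # deep-sleep for one minute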
| 2.328125 | 2 |
pytorch_gleam/search/rerank_format.py | Supermaxman/pytorch-gleam | 0 | 4182 | <reponame>Supermaxman/pytorch-gleam<gh_stars>0
import torch
import argparse
from collections import defaultdict
import os
import json
def load_predictions(input_path):
pred_list = []
for file_name in os.listdir(input_path):
if file_name.endswith('.pt'):
preds = torch.load(os.path.join(input_path, file_name))
pred_list.extend(preds)
question_scores = defaultdict(lambda: defaultdict(dict))
p_count = 0
u_count = 0
for prediction in pred_list:
doc_pass_id = prediction['id']
q_p_id = prediction['question_id']
# score = prediction['pos_score']
score = prediction['pos_score'] - prediction['neg_score']
if doc_pass_id not in question_scores or q_p_id not in question_scores[doc_pass_id]:
p_count += 1
u_count += 1
question_scores[doc_pass_id][q_p_id] = score
print(f'{p_count} unique predictions')
print(f'{u_count} total predictions')
return question_scores
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_path', required=True)
parser.add_argument('-o', '--output_path', required=True)
args = parser.parse_args()
input_path = args.input_path
output_path = args.output_path
question_scores = load_predictions(input_path)
with open(output_path, 'w') as f:
json.dump(question_scores, f)
if __name__ == '__main__':
main()
| 2.28125 | 2 |
des036.py | LeonardoPereirajr/Curso_em_video_Python | 0 | 4183 | casa = int(input('Qual o valor da casa? '))
sal = int(input('Qual seu salario? '))
prazo = int(input('Quantos meses deseja pagar ? '))
parcela = casa/prazo
margem = sal* (30/100)
if parcela > margem:
print('Este negocio não foi aprovado, aumente o prazo .')
else:
print("Negocio aprovado pois a parcela é de R$ {} e voce pode pagar R$ {} mensais".format(parcela,margem))
| 3.84375 | 4 |
HackBitApp/migrations/0003_roadmap.py | SukhadaM/HackBit-Interview-Preparation-Portal | 0 | 4184 | # Generated by Django 3.1.7 on 2021-03-27 18:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('HackBitApp', '0002_company_photo'),
]
operations = [
migrations.CreateModel(
name='Roadmap',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('company_name', models.CharField(db_index=True, max_length=200, unique=True)),
('photo1', models.ImageField(upload_to='photos/company/roadmap')),
('photo2', models.ImageField(blank=True, upload_to='photos/company/roadmap')),
('photo3', models.ImageField(blank=True, upload_to='photos/company/roadmap')),
],
options={
'verbose_name': 'roadmap',
'verbose_name_plural': 'roadmaps',
'ordering': ('company_name',),
},
),
]
| 1.734375 | 2 |
Other_Python/Kernel_Methods/matrix_operations.py | Romit-Maulik/Tutorials-Demos-Practice | 0 | 4185 | <reponame>Romit-Maulik/Tutorials-Demos-Practice<filename>Other_Python/Kernel_Methods/matrix_operations.py
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 22 14:36:48 2020
@author: matth
"""
import autograd.numpy as np
#%% Kernel operations
# Returns the matrix of pairwise squared Euclidean distances:
# ||a_i - b_j||^2 = ||a_i||^2 + ||b_j||^2 - 2 a_i . b_j
def norm_matrix(matrix_1, matrix_2):
norm_square_1 = np.sum(np.square(matrix_1), axis = 1)
norm_square_1 = np.reshape(norm_square_1, (-1,1))
norm_square_2 = np.sum(np.square(matrix_2), axis = 1)
norm_square_2 = np.reshape(norm_square_2, (-1,1))
d1=matrix_1.shape
d2=matrix_2.shape
if d1[1]!=d2[1]:
matrix_1=np.transpose(matrix_1)
inner_matrix = np.matmul(matrix_1, np.transpose(matrix_2))
norm_diff = -2 * inner_matrix + norm_square_1 + np.transpose(norm_square_2)
return norm_diff
# Returns the pairwise inner product
def inner_matrix(matrix_1, matrix_2):
d1=matrix_1.shape
d2=matrix_2.shape
if d1[1]!=d2[1]:
matrix_1=np.transpose(matrix_1)
return np.matmul(matrix_1, np.transpose(matrix_2))
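# Added illustrative sketch (not part of the original file): a tiny self-check
# of the two helpers on 2x2 inputs; norm_matrix gives pairwise squared
# Euclidean distances and inner_matrix the pairwise dot products.
def _example_usage():
    a = np.array([[0.0, 0.0], [1.0, 1.0]])
    b = np.array([[1.0, 0.0], [2.0, 2.0]])
    print(norm_matrix(a, b))   # [[1. 8.] [1. 2.]]
    print(inner_matrix(a, b))  # [[0. 0.] [1. 4.]]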
if __name__ == '__main__':
print('This is the matrix operations file') | 3.546875 | 4 |
cors/resources/cors-makeheader.py | meyerweb/wpt | 14,668 | 4186 | import json
from wptserve.utils import isomorphic_decode
def main(request, response):
origin = request.GET.first(b"origin", request.headers.get(b'origin') or b'none')
if b"check" in request.GET:
token = request.GET.first(b"token")
value = request.server.stash.take(token)
if value is not None:
if request.GET.first(b"check", None) == b"keep":
request.server.stash.put(token, value)
body = u"1"
else:
body = u"0"
return [(b"Content-Type", b"text/plain")], body
if origin != b'none':
response.headers.set(b"Access-Control-Allow-Origin", origin)
if b'origin2' in request.GET:
response.headers.append(b"Access-Control-Allow-Origin", request.GET.first(b'origin2'))
#Preflight
if b'headers' in request.GET:
response.headers.set(b"Access-Control-Allow-Headers", request.GET.first(b'headers'))
if b'credentials' in request.GET:
response.headers.set(b"Access-Control-Allow-Credentials", request.GET.first(b'credentials'))
if b'methods' in request.GET:
response.headers.set(b"Access-Control-Allow-Methods", request.GET.first(b'methods'))
code_raw = request.GET.first(b'code', None)
if code_raw:
code = int(code_raw)
else:
code = None
if request.method == u'OPTIONS':
#Override the response code if we're in a preflight and it's asked
if b'preflight' in request.GET:
code = int(request.GET.first(b'preflight'))
#Log that the preflight actually happened if we have an ident
if b'token' in request.GET:
request.server.stash.put(request.GET[b'token'], True)
if b'location' in request.GET:
if code is None:
code = 302
if code >= 300 and code < 400:
response.headers.set(b"Location", request.GET.first(b'location'))
headers = {}
for name, values in request.headers.items():
if len(values) == 1:
headers[isomorphic_decode(name)] = isomorphic_decode(values[0])
else:
#I have no idea, really
headers[name] = values
headers[u'get_value'] = isomorphic_decode(request.GET.first(b'get_value', b''))
body = json.dumps(headers)
if code:
return (code, b"StatusText"), [], body
else:
return body
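# Added illustrative sketch (the query below is only an example request):
#   GET /cors-makeheader.py?origin=https://example.com&credentials=true&headers=x-test
# is answered with "Access-Control-Allow-Origin: https://example.com",
# "Access-Control-Allow-Credentials: true" and
# "Access-Control-Allow-Headers: x-test", and the JSON body echoes the request
# headers plus the optional get_value parameter.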
| 2.265625 | 2 |
device_osc_grid.py | wlfyit/PiLightsLib | 0 | 4187 | <reponame>wlfyit/PiLightsLib
#!/usr/bin/env python3
from pythonosc import osc_bundle_builder
from pythonosc import osc_message_builder
from pythonosc import udp_client
from .device import DeviceObj
# OSC Grid Object
class OSCGrid(DeviceObj):
def __init__(self, name, width, height, ip, port, bri=1):
DeviceObj.__init__(self, name, "osc_grid", width, height)
self.buffer = []
self.brightness = bri
self.osc = udp_client.SimpleUDPClient(ip, port)
def set(self, r, g, b, x=0, y=0):
DeviceObj.set(self, r, g, b, x, y)
# Set Pixel
builder = osc_message_builder.OscMessageBuilder(address="/light/{0}/{1}/color".format(x, y))
builder.add_arg(r)
builder.add_arg(g)
builder.add_arg(b)
self.buffer.append(builder.build())
def show(self):
DeviceObj.show(self)
# Update Display
bundle = osc_bundle_builder.OscBundleBuilder(0)
for m in self.buffer:
bundle.add_content(m)
self.osc.send(bundle.build())
self.buffer.clear()
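# Added usage sketch (illustrative; the name, size, address and port are
# placeholders): set() only queues pixel messages, show() flushes them as a
# single OSC bundle.
#   grid = OSCGrid("wall", 8, 8, "192.168.1.50", 9000)
#   grid.set(255, 0, 0, x=3, y=4)
#   grid.show()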
| 2.328125 | 2 |
main/models.py | StevenSume/EasyCMDB | 2 | 4188 | from .app import db
class Project(db.Model):
__tablename__ = 'projects'
id = db.Column(db.Integer,primary_key=True,autoincrement=True)
project_name = db.Column(db.String(64),unique=True,index=True)
def to_dict(self):
mydict = {
'id': self.id,
'project_name': self.project_name
}
return mydict
def __repr__(self):
        return '<Project %r>' % self.project_name
class Item(db.Model):
__tablename__ = 'Items'
id = db.Column(db.Integer, primary_key=True,autoincrement=True)
project_id = db.Column(db.Integer)
key = db.Column(db.String(64),nullable=False)
value = db.Column(db.String(64),nullable=False)
def to_dict(self):
mydict = {
'id': self.id,
'project_id': self.project_id,
'key': self.key,
'value': self.value
}
return mydict
def __repr__(self):
        return '<Item %r>' % self.key
| 2.828125 | 3 |
test.py | iron-io/iron_cache_python | 3 | 4189 | <reponame>iron-io/iron_cache_python<gh_stars>1-10
from iron_cache import *
import unittest
import requests
class TestIronCache(unittest.TestCase):
def setUp(self):
self.cache = IronCache("test_cache")
def test_get(self):
self.cache.put("test_item", "testing")
item = self.cache.get("test_item")
self.assertEqual(item.value, "testing")
def test_delete(self):
self.cache.put("test_item", "will be deleted")
self.cache.delete("test_item")
self.assertRaises(requests.exceptions.HTTPError,
self.cache.get, "test_item")
def test_increment(self):
self.cache.put("test_item", 2)
self.cache.increment("test_item")
item = self.cache.get("test_item")
self.assertEqual(item.value, 3)
self.cache.increment("test_item", amount=42)
item = self.cache.get("test_item")
self.assertEqual(item.value, 45)
def test_decrement(self):
self.cache.put("test_item", 100)
self.cache.decrement("test_item")
item = self.cache.get("test_item")
self.assertEqual(item.value, 99)
self.cache.decrement("test_item", amount=98)
item = self.cache.get("test_item")
self.assertEqual(item.value, 1)
if __name__ == '__main__':
unittest.main()
| 2.796875 | 3 |
lib_exec/StereoPipeline/libexec/asp_image_utils.py | sebasmurphy/iarpa | 20 | 4190 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
"""
Basic functions for working with images on disk.
"""
import sys, os, re, subprocess, string, time, errno
import asp_string_utils
def stripRgbImageAlphaChannel(inputPath, outputPath):
"""Makes an RGB copy of an RBGA image"""
cmd = 'gdal_translate ' + inputPath + ' ' + outputPath + ' -b 1 -b 2 -b 3 -co "COMPRESS=LZW" -co "TILED=YES" -co "BLOCKXSIZE=256" -co "BLOCKYSIZE=256"'
print cmd
os.system(cmd)
def getImageSize(imagePath):
"""Returns the size [samples, lines] in an image"""
# Make sure the input file exists
if not os.path.exists(imagePath):
raise Exception('Image file ' + imagePath + ' not found!')
# Use subprocess to suppress the command output
cmd = ['gdalinfo', imagePath]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
textOutput, err = p.communicate()
# Extract the size from the text
sizePos = textOutput.find('Size is')
endPos = textOutput.find('\n', sizePos+7)
sizeStr = textOutput[sizePos+7:endPos]
sizeStrs = sizeStr.strip().split(',')
numSamples = int(sizeStrs[0])
numLines = int(sizeStrs[1])
size = [numSamples, numLines]
return size
def isIsisFile(filePath):
"""Returns True if the file is an ISIS file, False otherwise."""
# Currently we treat all files with .cub extension as ISIS files
extension = os.path.splitext(filePath)[1]
return (extension == '.cub')
def getImageStats(imagePath):
"""Obtains some image statistics from gdalinfo"""
if not os.path.exists(imagePath):
raise Exception('Image file ' + imagePath + ' not found!')
# Call command line tool silently
cmd = ['gdalinfo', imagePath, '-stats']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
textOutput, err = p.communicate()
# Statistics are computed seperately for each band
bandStats = []
band = 0
while (True): # Loop until we run out of bands
# Look for the stats line for this band
bandString = 'Band ' + str(band+1) + ' Block='
bandLoc = textOutput.find(bandString)
if bandLoc < 0:
return bandStats # Quit if we did not find it
# Now parse out the statistics for this band
bandMaxStart = textOutput.find('STATISTICS_MAXIMUM=', bandLoc)
bandMeanStart = textOutput.find('STATISTICS_MEAN=', bandLoc)
bandMinStart = textOutput.find('STATISTICS_MINIMUM=', bandLoc)
bandStdStart = textOutput.find('STATISTICS_STDDEV=', bandLoc)
bandMax = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMaxStart)
bandMean = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMeanStart)
bandMin = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMinStart)
bandStd = asp_string_utils.getNumberAfterEqualSign(textOutput, bandStdStart)
# Add results to the output list
bandStats.append( (bandMin, bandMax, bandMean, bandStd) )
band = band + 1 # Move to the next band
| 2.59375 | 3 |
src/sv-pipeline/04_variant_resolution/scripts/merge_RdTest_genotypes.py | leipzig/gatk-sv | 76 | 4191 | #!/usr/bin/env python
import argparse
DELIMITER = "\t"
def merge(genotypes_filename, gq_filename, merged_filename):
with open(genotypes_filename, "r") as genotypes, open(gq_filename, "r") as gq, open(merged_filename, "w") as merged:
# Integrity check: do the files have same columns?
genotypes_header = genotypes.readline().rstrip().split(DELIMITER)
gq_header = gq.readline().rstrip().split(DELIMITER)
if not genotypes_header == gq_header:
raise ValueError("The files do not have same number/order of columns")
n_cols = len(gq_header)
for genotypes_line, gq_line in zip(genotypes, gq):
x = genotypes_line.rstrip().split(DELIMITER)
y = gq_line.rstrip().split(DELIMITER)
# Check if lines in the files are in the correct order.
if not x[0:4] == y[0:4]:
raise ValueError(f"The lines in the files are not in the same order; "
f"expected the following lines to match.\n{x[0:4]}\n{y[0:4]}")
h = DELIMITER.join(x[0:4])
for i in range(4, n_cols):
merged.write(DELIMITER.join([h, gq_header[i], x[i], y[i]]) + "\n")
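# Added format sketch (illustrative): both inputs are tab-delimited with an
# identical header whose first four columns identify the variant (for typical
# RdTest output something like chrom, start, end, variant ID), followed by one
# column per sample. For every variant the merged output gets one row per
# sample:
#   <4 ID columns>  <sample name>  <genotype from file 1>  <GQ from file 2>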
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('genotypes')
parser.add_argument('GQ')
parser.add_argument('fout')
args = parser.parse_args()
merge(args.genotypes, args.GQ, args.fout)
| 3.109375 | 3 |
esperanto_analyzer/web/__init__.py | fidelisrafael/esperanto-analyzer | 18 | 4192 | <gh_stars>10-100
from .api.server import run_app
| 1.085938 | 1 |
crawling/sns/main.py | CSID-DGU/2021-2-OSSP2-TwoRolless-2 | 0 | 4193 | import tweepy
import traceback
import time
import pymongo
from tweepy import OAuthHandler
from pymongo import MongoClient
from pymongo.cursor import CursorType
twitter_consumer_key = ""
twitter_consumer_secret = ""
twitter_access_token = ""
twitter_access_secret = ""
auth = OAuthHandler(twitter_consumer_key, twitter_consumer_secret)
auth.set_access_token(twitter_access_token, twitter_access_secret)
api = tweepy.API(auth)
def crawllTwit(snsname, findtag):
account = snsname
tweets = api.user_timeline(screen_name=account, count=100, include_rts=False, exclude_replies=True, tweet_mode='extended')
snsList = []
snsTime = []
url = []
pic = []
i = 0
for tweet in tweets:
flag = tweet.full_text.find(findtag)
if flag >= 0:
ttp = tweet.full_text.split("https://")
gong = ""
count = 0
for slist in ttp:
if count == (len(ttp) - 1):
break
gong = gong + slist
count += 1
snsList.append(gong)
snsTime.append(tweet.created_at)
tmp = f"https://twitter.com/{tweet.user.screen_name}/status/{tweet.id}"
url.append(tmp)
i += 1
media = tweet.entities.get('media', [])
if (len(media) > 0):
pic.append(media[0]['media_url'])
else:
pic.append("")
j = 0
while j < len(snsList):
if j == 10:
break
        # decode HTML entities that the Twitter API escapes in full_text
        snsList[j] = snsList[j].replace('&lt;', '<')
        snsList[j] = snsList[j].replace('&gt;', '>')
snsList[j] = snsList[j].replace('▶️', ' ⇒ ')
j += 1
mydb = my_client['TwoRolless']
mycol = mydb['sns']
for k in range(0, len(snsList)):
if k == 15:
break
x = mycol.insert_one(
{
"tag": findtag,
"time": snsTime[k],
"text": snsList[k],
"img": pic[k],
"url": url[k]
}
)
conn_str = ""
my_client = pymongo.MongoClient(conn_str)
if __name__ == '__main__':
while True:
print("cycles start")
mydb = my_client['TwoRolless']
mycol = mydb['sns']
mycol.remove({})
crawllTwit("@m_thelastman", "더라스트맨")
crawllTwit("@Musical_NarGold", "나르치스와_골드문트")
crawllTwit("@rndworks", "더데빌")
crawllTwit("@ninestory9", "엘리펀트송")
crawllTwit("@companyrang", "쿠로이저택엔누가살고있을까")
crawllTwit("@companyrang", "난쟁이들")
crawllTwit("@page1company", "곤투모로우")
crawllTwit("@HONGcompany", "더모먼트")
crawllTwit("@orchardmusical", "칠칠")
crawllTwit("@livecorp2011", "팬레터")
crawllTwit("@shownote", "젠틀맨스가이드")
crawllTwit("@od_musical", "지킬앤하이드")
crawllTwit("@kontentz", "엔딩노트")
crawllTwit("@i_seensee", "빌리")
crawllTwit("@doublek_ent", "은하철도의")
crawllTwit("@Insight_Since96", "뱀파이어아더")
print("cycle end")
print("sleep 30 seconds")
time.sleep(30)
print("sleep end")
| 2.765625 | 3 |
demos/interactive-classifier/config.py | jepabe/Demo_earth2 | 1,909 | 4194 | #!/usr/bin/env python
"""Handles Earth Engine service account configuration."""
import ee
# The service account email address authorized by your Google contact.
# Set up a service account as described in the README.
EE_ACCOUNT = '<EMAIL>'
# The private key associated with your service account in Privacy Enhanced
# Email format (.pem suffix). To convert a private key from the RSA format
# (.p12 suffix) to .pem, run the openssl command like this:
# openssl pkcs12 -in downloaded-privatekey.p12 -nodes -nocerts > privatekey.pem
EE_PRIVATE_KEY_FILE = 'privatekey.pem'
EE_CREDENTIALS = ee.ServiceAccountCredentials(EE_ACCOUNT, EE_PRIVATE_KEY_FILE)
| 1.929688 | 2 |
PythonScripting/NumbersInPython.py | Neo-sunny/pythonProgs | 0 | 4195 | """
Demonstration of numbers in Python
"""
# Python has an integer type called int
print("int")
print("---")
print(0)
print(1)
print(-3)
print(70383028364830)
print("")
# Python has a real number type called float
print("float")
print("-----")
print(0.0)
print(7.35)
print(-43.2)
print("")
# Limited precision
print("Precision")
print("---------")
print(4.56372883832331773)
print(1.23456789012345678)
print("")
# Scientific/exponential notation
print("Scientific notation")
print("-------------------")
print(5e32)
print(999999999999999999999999999999999999999.9)
print("")
# Infinity
print("Infinity")
print("--------")
print(1e500)
print(-1e500)
print("")
# Conversions
print("Conversions between numeric types")
print("---------------------------------")
print(float(3))
print(float(99999999999999999999999999999999999999))
print(int(3.0))
print(int(3.7))
print(int(-3.7))
"""
Demonstration of simple arithmetic expressions in Python
"""
# Unary + and -
print("Unary operators")
print(+3)
print(-5)
print(+7.86)
print(-3348.63)
print("")
# Simple arithmetic
print("Addition and Subtraction")
print(1 + 2)
print(48 - 89)
print(3.45 + 2.7)
print(87.3384 - 12.35)
print(3 + 6.7)
print(9.8 - 4)
print("")
print("Multiplication")
print(3 * 2)
print(7.8 * 27.54)
print(7 * 8.2)
print("")
print("Division")
print(8 / 2)
print(3 / 2)
print(7.538 / 14.3)
print(8 // 2)
print(3 // 2)
print(7.538 // 14.3)
print("")
print("Exponentiation")
print(3 ** 2)
print(5 ** 4)
print(32.6 ** 7)
print(9 ** 0.5)
"""
Demonstration of compound arithmetic expressions in Python
"""
# Expressions can include multiple operations
print("Compound expressions")
print(3 + 5 + 7 + 27)
# Operators with the same precedence are evaluated from left to right
print(18 - 6 + 4)
print("")
# Operator precedence defines how expressions are evaluated
print("Operator precedence")
print(7 + 3 * 5)
print(5.5 * 6 // 2 + 8)
print(-3 ** 2)
print("")
# Use parentheses to change evaluation order
print("Grouping with parentheses")
print((7 + 3) * 5)
print(5.5 * ((6 // 2) + 8))
print((-3) ** 2)
"""
Demonstration of the use of variables and how to assign values to
them.
"""
# The = operator can be used to assign values to variables
bakers_dozen = 12 + 1
temperature = 93
# Variables can be used as values and in expressions
print(temperature, bakers_dozen)
print("celsius:", (temperature - 32) * 5 / 9)
print("fahrenheit:", float(temperature))
# You can assign a different value to an existing variable
temperature = 26
print("new value:", temperature)
# Multiple variables can be used in arbitrary expressions
offset = 32
multiplier = 5.0 / 9.0
celsius = (temperature - offset) * multiplier
print("celsius value:", celsius)
| 4.21875 | 4 |
3DBeam/source/solving_strategies/strategies/linear_solver.py | JoZimmer/Beam-Models | 0 | 4196 | from source.solving_strategies.strategies.solver import Solver
class LinearSolver(Solver):
def __init__(self,
array_time, time_integration_scheme, dt,
comp_model,
initial_conditions,
force,
structure_model):
super().__init__(array_time, time_integration_scheme, dt,
comp_model, initial_conditions, force, structure_model)
def _print_solver_info(self):
print("Linear Solver")
def solve(self):
# time loop
for i in range(0, len(self.array_time)):
self.step = i
current_time = self.array_time[i]
#print("time: {0:.2f}".format(current_time))
self.scheme.solve_single_step(self.force[:, i])
# appending results to the list
self.displacement[:, i] = self.scheme.get_displacement()
self.velocity[:, i] = self.scheme.get_velocity()
self.acceleration[:, i] = self.scheme.get_acceleration()
# TODO: only calculate reaction when user wants it
# if self.structure_model is not None:
# self.dynamic_reaction[:, i] = self._compute_reaction()
# reaction computed in dynamic analysis
# TODO: only calculate reaction when user wants it
# moved reaction computation to dynamic analysis level
# AK . this doesnt considers the support reaction check
#if self.structure_model is not None:
# self.dynamic_reaction[:, i] = self._compute_reaction()
# update results
self.scheme.update()
| 3.21875 | 3 |
payment/migrations/0002_auto_20171125_0022.py | Littledelma/mofadog | 0 | 4197 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-24 16:22
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('payment', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='history_order',
name='dead_date',
field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1, 719840, tzinfo=utc), verbose_name='daed_date'),
),
migrations.AlterField(
model_name='history_order',
name='order_date',
field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1, 719662, tzinfo=utc), verbose_name='order date'),
),
migrations.AlterField(
model_name='history_order',
name='valid_date',
field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1, 719758, tzinfo=utc), verbose_name='valid_date'),
),
]
| 1.765625 | 2 |
src/sqlfluff/rules/L024.py | NathanHowell/sqlfluff | 3,024 | 4198 | <gh_stars>1000+
"""Implementation of Rule L024."""
from sqlfluff.core.rules.doc_decorators import document_fix_compatible
from sqlfluff.rules.L023 import Rule_L023
@document_fix_compatible
class Rule_L024(Rule_L023):
"""Single whitespace expected after USING in JOIN clause.
| **Anti-pattern**
.. code-block:: sql
SELECT b
FROM foo
LEFT JOIN zoo USING(a)
| **Best practice**
| The • character represents a space.
| Add a space after USING, to avoid confusing it
| for a function.
.. code-block:: sql
:force:
SELECT b
FROM foo
LEFT JOIN zoo USING•(a)
"""
expected_mother_segment_type = "join_clause"
pre_segment_identifier = ("name", "using")
post_segment_identifier = ("type", "bracketed")
expand_children = None
allow_newline = True
| 1.757813 | 2 |
projects/scocen/cmd_components_simple.py | mikeireland/chronostar | 4 | 4199 | <reponame>mikeireland/chronostar
"""
Plot CMDs for each component.
"""
import numpy as np
from astropy.table import Table
import matplotlib.pyplot as plt
import matplotlib.cm as cm
plt.ion()
# Pretty plots
from fig_settings import *
############################################
# Some things are the same for all the plotting scripts and we put
# this into a single library to avoid confusion.
import scocenlib as lib
data_filename = lib.data_filename
comps_filename = lib.comps_filename
compnames = lib.compnames
colors = lib.colors
############################################
# Minimal probability required for membership
pmin_membership = 0.5
############################################
# how to split subplots
grid = [5, 5]
# CMD limits
xlim = [-1, 5]
ylim = [17, -3]
############################################
# Read data
try:
tab = tab0
comps = comps0
except:
tab0 = Table.read(data_filename)
    Gmag = tab0['phot_g_mean_mag'] - 5 * np.log10(1.0 / (tab0['parallax'] * 1e-3) / 10) # tab['parallax'] in milliarcsec (mas)
tab0['Gmag'] = Gmag
comps0 = Table.read(comps_filename)
tab = tab0
comps = comps0
# Main sequence parametrization
# fitpar for pmag, rpmag
fitpar = [0.17954163, -2.48748376, 12.9279348, -31.35434182, 38.31330583, -12.25864507]
poly = np.poly1d(fitpar)
x = np.linspace(1, 4, 100)
y = poly(x)
m = y > 4
yms = y[m]
xms = x[m]
def plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim):
ax.plot(xms, yms, c='brown', label='Median main sequence', linewidth=1)
ax.plot(xms, yms - 1, c='brown', label='1 mag above the median', linewidth=1, linestyle='--')
ax.plot(xms, yms - 1.5, c='brown', label='1.5 mag above the median', linewidth=1, linestyle='--')
ax.axvline(x=0.369, linewidth=0.5, color='k') # F
ax.axvline(x=0.767, linewidth=0.5, color='k') # G
ax.axvline(x=0.979, linewidth=0.5, color='k') # K
ax.axvline(x=1.848, linewidth=0.5, color='k') # M
ax.set_xlim(xlim[0], xlim[1])
ax.set_ylim(ylim[0], ylim[1])
return ax
print('Plotting %d components.'%len(comps))
fig=plt.figure()
for i, c in enumerate(comps):
ax = fig.add_subplot(grid[0], grid[1], i+1) # TODO: adjust this if needed
comp_ID = c['comp_ID']
col=tab['membership%s'%comp_ID]
mask = col > pmin_membership
t=tab[mask]
if len(t)>100:
alpha=0.5
else:
alpha=1
t.sort('membership%s'%comp_ID)
#~ t.reverse()
#~ ax.scatter(t['bp_rp'], t['Gmag'], s=1, c='k', alpha=alpha)
ax.scatter(t['bp_rp'], t['Gmag'], s=1, c=t['membership%s'%comp_ID], alpha=1, vmin=0.5, vmax=1, cmap=cm.jet)
ax=plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim)
age=c['Age']
ax.set_title('%s (%.2f$\pm$%.2f Myr %s) %d'%(comp_ID, age, c['Crossing_time'], c['Age_reliable'], len(t)))
#~ plt.tight_layout()
plt.show()
| 2.296875 | 2 |