code stringlengths 3 to 1.05M | repo_name stringlengths 5 to 104 | path stringlengths 4 to 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 to 1.05M |
---|---|---|---|---|---|
import unittest
import time
from friendloc.base.splitproc import SplitProcess
class AdderSplitProc(SplitProcess):
def __init__(self, delay, **kwargs):
SplitProcess.__init__(self, **kwargs)
self.delay = delay
def produce(self):
for x in xrange(10):
time.sleep(self.delay)
yield x
def map(self,items):
for item in items:
yield item+1
def consume(self,items):
return sum(items)
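# A quick sketch of the pipeline exercised by the tests below: produce() yields
# 0..9, map() shifts each item to 1..10, and consume() sums them, so every run
# variant is expected to return 55.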
class TestSplitProc(unittest.TestCase):
def test_single(self):
asp = AdderSplitProc(0)
result = asp.run_single()
self.assertEqual(55, result)
@unittest.skip("multi-process variant disabled")
def test_multi(self):
self._run_test(delay=0,slaves=2)
self._run_test(delay=0,slaves=15)
self._run_test(delay=.2,slaves=2)
def _run_test(self,**kwargs):
asp = AdderSplitProc(**kwargs)
result = asp.run()
self.assertEqual(55, result)
| JeffAMcGee/friendloc | friendloc/base/tests/splitproc_tests.py | Python | bsd-2-clause | 954 |
from django.contrib import admin
from django.contrib.auth.models import User
from .models import FCM
# Register your models here.
admin.site.register(FCM)
| gtsapelas/TRANSrisk_fcm_project | fcm_app/admin.py | Python | mit | 156 |
import numpy as np
import cv2
from matplotlib import pyplot as plt
'''
a = np.empty((1,10),np.uint8)
cv2.randn(a,(0),(10))
print(a)
'''
img = cv2.imread('nature.jpg',0)
noise = img.copy()
cv2.randn(noise,(32),(20)) # Fill with Gaussian noise: mean = 32, standard deviation = 20
noise_img = img + noise # Add the generated noise to the image
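# Note: img and noise are both uint8 arrays, so this addition wraps modulo 256;
# cv2.add(img, noise) would saturate at 255 instead.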
cv2.imshow('image',img)
cv2.imshow('noise',noise_img)
#pad = (int) (5 - 1) / 2
#pad = 5
#img = cv2.copyMakeBorder(img, pad, pad, pad, pad,cv2.BORDER_REPLICATE)
blur = cv2.GaussianBlur(img,(11,11),2) #Kernel Size - 11x11, Sigma = 2
cv2.imshow('blur',blur)
cv2.waitKey(0)
cv2.destroyAllWindows()
'''
plt.subplot(121),plt.imshow(img,cmap='gray'),plt.title('Original')
plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(cv2.cvtColor(blur, cv2.COLOR_BGR2RGB)),plt.title('Blurred')
plt.xticks([]), plt.yticks([])
plt.show()
''' | rohithredd94/Computer-Vision-using-OpenCV | cv2test2.py | Python | mit | 824 |
from gopigo import *
import sys
fwd()
enable_com_timeout(2000) | slowrunner/goRWpigo | tests/timeout_test.py | Python | lgpl-3.0 | 63 |
"""
Provide pre-made queries on top of the recorder component.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/history/
"""
import asyncio
from collections import defaultdict
from datetime import timedelta
from itertools import groupby
import logging
import time
import voluptuous as vol
from homeassistant.const import (
HTTP_BAD_REQUEST, CONF_DOMAINS, CONF_ENTITIES, CONF_EXCLUDE, CONF_INCLUDE)
import homeassistant.util.dt as dt_util
from homeassistant.components import recorder, script
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import ATTR_HIDDEN
from homeassistant.components.recorder.util import session_scope, execute
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'history'
DEPENDENCIES = ['recorder', 'http']
CONFIG_SCHEMA = vol.Schema({
DOMAIN: recorder.FILTER_SCHEMA,
}, extra=vol.ALLOW_EXTRA)
SIGNIFICANT_DOMAINS = ('thermostat', 'climate')
IGNORE_DOMAINS = ('zone', 'scene',)
def last_recorder_run(hass):
"""Retrieve the last closed recorder run from the database."""
from homeassistant.components.recorder.models import RecorderRuns
with session_scope(hass=hass) as session:
res = (session.query(RecorderRuns)
.filter(RecorderRuns.end.isnot(None))
.order_by(RecorderRuns.end.desc()).first())
if res is None:
return None
session.expunge(res)
return res
def get_significant_states(hass, start_time, end_time=None, entity_ids=None,
filters=None, include_start_time_state=True):
"""
Return states changes during UTC period start_time - end_time.
Significant states are all states where there is a state change,
as well as all states from certain domains (for instance
thermostat so that we get current temperature in our graphs).
"""
timer_start = time.perf_counter()
from homeassistant.components.recorder.models import States
with session_scope(hass=hass) as session:
query = session.query(States).filter(
(States.domain.in_(SIGNIFICANT_DOMAINS) |
(States.last_changed == States.last_updated)) &
(States.last_updated > start_time))
if filters:
query = filters.apply(query, entity_ids)
if end_time is not None:
query = query.filter(States.last_updated < end_time)
query = query.order_by(States.last_updated)
states = (
state for state in execute(query)
if (_is_significant(state) and
not state.attributes.get(ATTR_HIDDEN, False)))
if _LOGGER.isEnabledFor(logging.DEBUG):
elapsed = time.perf_counter() - timer_start
_LOGGER.debug(
'get_significant_states took %fs', elapsed)
return states_to_json(
hass, states, start_time, entity_ids, filters,
include_start_time_state)
def state_changes_during_period(hass, start_time, end_time=None,
entity_id=None):
"""Return states changes during UTC period start_time - end_time."""
from homeassistant.components.recorder.models import States
with session_scope(hass=hass) as session:
query = session.query(States).filter(
(States.last_changed == States.last_updated) &
(States.last_updated > start_time))
if end_time is not None:
query = query.filter(States.last_updated < end_time)
if entity_id is not None:
query = query.filter_by(entity_id=entity_id.lower())
entity_ids = [entity_id] if entity_id is not None else None
states = execute(
query.order_by(States.last_updated))
return states_to_json(hass, states, start_time, entity_ids)
def get_states(hass, utc_point_in_time, entity_ids=None, run=None,
filters=None):
"""Return the states at a specific point in time."""
from homeassistant.components.recorder.models import States
if run is None:
run = recorder.run_information(hass, utc_point_in_time)
# History did not run before utc_point_in_time
if run is None:
return []
from sqlalchemy import and_, func
with session_scope(hass=hass) as session:
if entity_ids and len(entity_ids) == 1:
# Use an entirely different (and extremely fast) query if we only
# have a single entity id
most_recent_state_ids = session.query(
States.state_id.label('max_state_id')
).filter(
(States.last_updated < utc_point_in_time) &
(States.entity_id.in_(entity_ids))
).order_by(
States.last_updated.desc())
most_recent_state_ids = most_recent_state_ids.limit(1)
else:
# We have more than one entity to look at (most commonly we want
# all entities,) so we need to do a search on all states since the
# last recorder run started.
most_recent_states_by_date = session.query(
States.entity_id.label('max_entity_id'),
func.max(States.last_updated).label('max_last_updated')
).filter(
(States.last_updated >= run.start) &
(States.last_updated < utc_point_in_time)
)
if entity_ids:
# Query.filter() returns a new query, so the result must be kept
most_recent_states_by_date = most_recent_states_by_date.filter(
States.entity_id.in_(entity_ids))
most_recent_states_by_date = most_recent_states_by_date.group_by(
States.entity_id)
most_recent_states_by_date = most_recent_states_by_date.subquery()
most_recent_state_ids = session.query(
func.max(States.state_id).label('max_state_id')
).join(most_recent_states_by_date, and_(
States.entity_id == most_recent_states_by_date.c.max_entity_id,
States.last_updated == most_recent_states_by_date.c.
max_last_updated))
most_recent_state_ids = most_recent_state_ids.group_by(
States.entity_id)
most_recent_state_ids = most_recent_state_ids.subquery()
query = session.query(States).join(
most_recent_state_ids,
States.state_id == most_recent_state_ids.c.max_state_id
).filter((~States.domain.in_(IGNORE_DOMAINS)))
if filters:
query = filters.apply(query, entity_ids)
return [state for state in execute(query)
if not state.attributes.get(ATTR_HIDDEN, False)]
def states_to_json(
hass,
states,
start_time,
entity_ids,
filters=None,
include_start_time_state=True):
"""Convert SQL results into JSON friendly data structure.
This takes our state list and turns it into a JSON friendly data
structure {'entity_id': [list of states], 'entity_id2': [list of states]}
We also need to go back and create a synthetic zero data point for
each list of states, otherwise our graphs won't start on the Y
axis correctly.
"""
result = defaultdict(list)
# Get the states at the start time
timer_start = time.perf_counter()
if include_start_time_state:
for state in get_states(hass, start_time, entity_ids, filters=filters):
state.last_changed = start_time
state.last_updated = start_time
result[state.entity_id].append(state)
if _LOGGER.isEnabledFor(logging.DEBUG):
elapsed = time.perf_counter() - timer_start
_LOGGER.debug(
'getting %d first datapoints took %fs', len(result), elapsed)
# Append all changes to it
for ent_id, group in groupby(states, lambda state: state.entity_id):
result[ent_id].extend(group)
return result
def get_state(hass, utc_point_in_time, entity_id, run=None):
"""Return a state at a specific point in time."""
states = list(get_states(hass, utc_point_in_time, (entity_id,), run))
return states[0] if states else None
@asyncio.coroutine
def async_setup(hass, config):
"""Set up the history hooks."""
filters = Filters()
exclude = config[DOMAIN].get(CONF_EXCLUDE)
if exclude:
filters.excluded_entities = exclude[CONF_ENTITIES]
filters.excluded_domains = exclude[CONF_DOMAINS]
include = config[DOMAIN].get(CONF_INCLUDE)
if include:
filters.included_entities = include[CONF_ENTITIES]
filters.included_domains = include[CONF_DOMAINS]
hass.http.register_view(HistoryPeriodView(filters))
yield from hass.components.frontend.async_register_built_in_panel(
'history', 'history', 'mdi:poll-box')
return True
class HistoryPeriodView(HomeAssistantView):
"""Handle history period requests."""
url = '/api/history/period'
name = 'api:history:view-period'
extra_urls = ['/api/history/period/{datetime}']
def __init__(self, filters):
"""Initialize the history period view."""
self.filters = filters
@asyncio.coroutine
def get(self, request, datetime=None):
"""Return history over a period of time."""
timer_start = time.perf_counter()
if datetime:
datetime = dt_util.parse_datetime(datetime)
if datetime is None:
return self.json_message('Invalid datetime', HTTP_BAD_REQUEST)
now = dt_util.utcnow()
one_day = timedelta(days=1)
if datetime:
start_time = dt_util.as_utc(datetime)
else:
start_time = now - one_day
if start_time > now:
return self.json([])
end_time = request.query.get('end_time')
if end_time:
end_time = dt_util.parse_datetime(end_time)
if end_time:
end_time = dt_util.as_utc(end_time)
else:
return self.json_message('Invalid end_time', HTTP_BAD_REQUEST)
else:
end_time = start_time + one_day
entity_ids = request.query.get('filter_entity_id')
if entity_ids:
entity_ids = entity_ids.lower().split(',')
include_start_time_state = 'skip_initial_state' not in request.query
result = yield from request.app['hass'].async_add_job(
get_significant_states, request.app['hass'], start_time, end_time,
entity_ids, self.filters, include_start_time_state)
result = result.values()
if _LOGGER.isEnabledFor(logging.DEBUG):
elapsed = time.perf_counter() - timer_start
_LOGGER.debug(
'Extracted %d states in %fs', sum(map(len, result)), elapsed)
return self.json(result)
class Filters(object):
"""Container for the configured include and exclude filters."""
def __init__(self):
"""Initialise the include and exclude filters."""
self.excluded_entities = []
self.excluded_domains = []
self.included_entities = []
self.included_domains = []
def apply(self, query, entity_ids=None):
"""Apply the include/exclude filter on domains and entities on query.
Following rules apply:
* only the include section is configured - just query the specified
entities or domains.
* only the exclude section is configured - filter the specified
entities and domains from all the entities in the system.
* if include and exclude is defined - select the entities specified in
the include and filter out the ones from the exclude list.
"""
from homeassistant.components.recorder.models import States
# specific entities requested - do not in/exclude anything
if entity_ids is not None:
return query.filter(States.entity_id.in_(entity_ids))
query = query.filter(~States.domain.in_(IGNORE_DOMAINS))
filter_query = None
# filter if only excluded domain is configured
if self.excluded_domains and not self.included_domains:
filter_query = ~States.domain.in_(self.excluded_domains)
if self.included_entities:
filter_query &= States.entity_id.in_(self.included_entities)
# filter if only included domain is configured
elif not self.excluded_domains and self.included_domains:
filter_query = States.domain.in_(self.included_domains)
if self.included_entities:
filter_query |= States.entity_id.in_(self.included_entities)
# filter if included and excluded domain is configured
elif self.excluded_domains and self.included_domains:
filter_query = ~States.domain.in_(self.excluded_domains)
if self.included_entities:
filter_query &= (States.domain.in_(self.included_domains) |
States.entity_id.in_(self.included_entities))
else:
filter_query &= (States.domain.in_(self.included_domains) & ~
States.domain.in_(self.excluded_domains))
# no domain filter just included entities
elif not self.excluded_domains and not self.included_domains and \
self.included_entities:
filter_query = States.entity_id.in_(self.included_entities)
if filter_query is not None:
query = query.filter(filter_query)
# finally apply excluded entities filter if configured
if self.excluded_entities:
query = query.filter(~States.entity_id.in_(self.excluded_entities))
return query
def _is_significant(state):
"""Test if state is significant for history charts.
Will only test for things that are not filtered out in SQL.
"""
# scripts that are not cancellable will never change state
return (state.domain != 'script' or
state.attributes.get(script.ATTR_CAN_CANCEL))
| ewandor/home-assistant | homeassistant/components/history.py | Python | apache-2.0 | 13,984 |
import string
import subprocess
import re
# yield n-sized chunks
# reference: https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
def chunks(data, byte_size):
length = len(data)
for i in range(0, length, byte_size):
yield data[i:i + byte_size]
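# For example, list(chunks("ABCDEFG", 3)) yields ["ABC", "DEF", "G"].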
def autoN_cmd(data, arrow, byte_size):
for chunk in chunks(data, int(byte_size)):
final_chunk = ""
for c in chunk:
# print escape characters for \t, \\, \n and \r
if c == '\t' or c == '\\' or c == '\n' or c == '\r':
c = c.encode('unicode_escape').decode("ASCII")
# print raw for bytes between 32 and 127
elif ord(c) >= 32 and ord(c) <= 127:
pass
else:
c = "\\" + hex(ord(c))
final_chunk += c
# print each bytesize chunk
print(arrow + final_chunk)
def replace_cmd(data, arguments, arrow):
# get replace target and new value
args = vars(arguments)
target = args['replace'][0]
new = args['replace'][1]
# apply replace functionality for each type of flag (raw, strip, hex)
if arguments.raw:
for line in data.splitlines():
print(arrow + " {}".format(line.replace(target, new)))
elif arguments.strip:
for line in data.splitlines():
filtered_line = ''.join(i if i in string.printable
else '.' for i in line)
print(arrow + " {}".format(filtered_line.replace(target, new)))
elif arguments.hex:
data = data.replace(target, new)
p = subprocess.run("hexdump -C", stdout=subprocess.PIPE,
shell=True, input=data.encode())
output = p.stdout.decode()
for line in output.splitlines():
print(arrow + " {}".format(line))
def raw_cmd(arguments, data, arrow):
for line in data.splitlines():
print(arrow + " {}".format(line))
def strip_cmd(arguments, data, arrow):
for line in data.splitlines():
filtered_line = ''.join(i if i in string.printable
else '.' for i in line)
print(arrow + " {}".format(filtered_line))
def hex_cmd(arguments, data, arrow):
import subprocess
p = subprocess.run("hexdump -C", stdout=subprocess.PIPE,
shell=True, input=data.encode())
output = p.stdout.decode()
for line in output.splitlines():
print(arrow + " {}".format(line))
def argument_handler(arguments, byte_size, data, incoming):
arrow = None
if incoming is True:
arrow = "<---"
else:
arrow = "--->"
if arguments.replace:
# Call replace command
replace_cmd(data, arguments, arrow)
return
if arguments.raw:
raw_cmd(arguments, data, arrow)
if byte_size is not None:
autoN_cmd(data, arrow, byte_size)
elif arguments.strip:
strip_cmd(arguments, data, arrow)
elif arguments.hex:
hex_cmd(arguments, data, arrow)
| henrytran28/CPSC-526 | assignment_3/commands.py | Python | mit | 3,046 |
#!/usr/bin/env python
import glob
import re
import os
list=glob.glob("*00")
for img in list:
tmp1 = re.sub("_00_","_01_",img)
tiltname = re.sub("en_00","en_01",tmp1)
f1 = "./%s/down4_%s.raw" %(img,img)
f2 = "./%s/down4_%s.raw" %(tiltname,tiltname)
if os.path.isfile(f1) and os.path.isfile(f2):
print "%s %s 1" %(f1,f2)
| leschzinerlab/XMIPP | create_tiltSelFiles.py | Python | mit | 332 |
#
# ABC iView XBMC Addon
# Copyright (C) 2012 Andy Botting
#
# This addon includes code from python-iview
# Copyright (C) 2009-2012 by Jeremy Visser <[email protected]>
#
# This addon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This addon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this addon. If not, see <http://www.gnu.org/licenses/>.
#
import comm
import config
import classes
import utils
import sys
import re
import datetime
import time
import json
import xml.etree.ElementTree as ET
from BeautifulSoup import BeautifulStoneSoup
# This is a throwaway variable to deal with a python bug with strptime:
# ImportError: Failed to import _strptime because the import lockis
# held by another thread.
throwaway = time.strptime('20140101', '%Y%m%d')
def parse_config(soup):
"""There are lots of goodies in the config we get back from the ABC.
In particular, it gives us the URLs of all the other XML data we
need.
"""
try:
soup = soup.replace('&amp;', '&') # undo XML entity escaping so extracted URL values are usable
xml = BeautifulStoneSoup(soup)
# should look like "rtmp://cp53909.edgefcs.net/ondemand"
# Looks like the ABC don't always include this field.
# If not included, that's okay -- ABC usually gives us the server in the auth result as well.
rtmp_url = xml.find('param', attrs={'name':'server_streaming'}).get('value')
rtmp_chunks = rtmp_url.split('/')
return {
'rtmp_url' : rtmp_url,
'rtmp_host' : rtmp_chunks[2],
'rtmp_app' : rtmp_chunks[3],
'api_url' : xml.find('param', attrs={'name':'api'}).get('value'),
'categories_url' : xml.find('param', attrs={'name':'categories'}).get('value'),
}
except:
raise Exception("Error fetching iView config. Service possibly unavailable")
def parse_categories(soup):
categories_list = []
"""
<category id="pre-school" genre="true">
<name>ABC 4 Kids</name>
</category>
"""
# This next line is the magic to make recursive=False work (wtf?)
BeautifulStoneSoup.NESTABLE_TAGS["category"] = []
xml = BeautifulStoneSoup(soup)
# Get all the top level categories, except the alphabetical ones
for cat in xml.find('categories').findAll('category', recursive=False):
id = cat.get('id')
if cat.get('index') or id == 'index':
continue
item = {}
item['keyword'] = id
item['name'] = cat.find('name').string
categories_list.append(item)
return categories_list
def parse_programme_from_feed(data):
xml = ET.fromstring(data)
show_list = []
for item in xml.getiterator('item'):
title = item.find('title').text
if title.startswith('Trailer'):
continue
show = None
for s in show_list:
if s.title == title:
show = s
break
if show:
show.increment_num_episodes()
else:
show = classes.Series()
show.title = title
show.description = item.find('description').text
show.thumbnail = item.find('{http://search.yahoo.com/mrss/}thumbnail').attrib['url']
show_list.append(show)
return show_list
def parse_programs_from_feed(data):
xml = ET.fromstring(data)
programs_list = []
for item in xml.getiterator('item'):
p = classes.Program()
title = item.find('title').text
p.title = title
subtitle = item.find('subtitle').text
title_match = None
title_parts = None
if subtitle:
# Series 2 Episode 25 Home Is Where The Hatch Is
title_match = re.search('^[Ss]eries\s?(?P<series>\w+)\s[Ee]p(isode)?\s?(?P<episode>\d+)\s(?P<episode_title>.*)$', subtitle)
if not title_match:
# Series 8 Episode 13
title_match = re.search('^[Ss]eries\s?(?P<series>\w+)\s[Ee]p(isode)?\s?(?P<episode>\d+)$', subtitle)
if not title_match:
# Episode 34 Shape Shifter
title_match = re.search('^[Ee]p(isode)?\s?(?P<episode>\d+)\s(?P<episode_title>.*)$', subtitle)
if not title_match:
# Series 10 Rylan Clark, Joanna Lumley, Ant And Dec, The Vaccines
title_match = re.search('^[Ss]eries\s?(?P<series>\d+)\s(?P<episode_title>.*)$', subtitle)
if not title_match:
# Episode 5
title_match = re.search('^[Ee]p(isode)?\s?(?P<episode>\d+)$', subtitle)
if not title_match:
p.episode_title = subtitle
if title_match:
title_parts = title_match.groupdict()
p.episode_title = title_parts.get('episode_title')
try:
# If we have actual series/episode fields given
p.series = item.find('series').text
p.episode = item.find('episode').text
except:
try:
# If we only get series/episode in the subtitle
p.series = title_parts.get('series')
p.episode = title_parts.get('episode')
except:
pass
p.description = item.find('description').text
p.url = item.find('{http://www.abc.net.au/tv/mrss}videoAsset').text
p.thumbnail = item.find('{http://search.yahoo.com/mrss/}thumbnail').attrib['url']
try:
p.rating = item.find('{http://www.abc.net.au/tv/mrss}rating').text
except:
# Rating not given for all programs
pass
try:
duration = item.find('{http://search.yahoo.com/mrss/}content').attrib['duration']
p.duration = int(duration)
except:
utils.log("Couldn't parse program duration: %s" % duration)
try:
p.link = item.find('link').text
except:
pass
p.date = utils.get_datetime(item.find('pubDate').text)
p.expire = utils.get_datetime(item.find('{http://www.abc.net.au/tv/mrss}expireDate').text)
programs_list.append(p)
return programs_list
def convert_timecode(start, end):
""" convert iview xml timecode attribute to subrip srt standard"""
return start[:8]+','+start[9:11]+'0'+' --> '+end[:8]+','+end[9:11]+'0'
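# Assuming iView timecodes look like "00:01:02.34", this turns
# start="00:01:02.34", end="00:01:05.67" into "00:01:02,340 --> 00:01:05,670".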
def convert_to_srt(data):
""" convert our iview xml subtitles to subrip SRT format"""
tree = ET.fromstring(data)
root = tree.find('reel')
result = ""
count = 1
for elem in root.findall('title'):
if elem.text == None:
continue
result += str(count) + '\n'
result += convert_timecode(elem.get('start'), elem.get('end'))+'\n'
st = elem.text.split('|')
for line in st:
result += line + '\n'
result += '\n'
count +=1
return result.encode('utf-8')
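# Each emitted SRT block therefore has the shape:
# 1
# 00:01:02,340 --> 00:01:05,670
# subtitle text (one line per '|'-separated segment)
# (blank line)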
| sdt/xbmc-addon-abc-iview | resources/lib/parse.py | Python | gpl-3.0 | 7,401 |
# -*- coding: utf-8 -*-
#
# AWL simulator - instructions
#
# Copyright 2012-2014 Michael Buesch <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import division, absolute_import, print_function, unicode_literals
from awlsim.common.compat import *
from awlsim.core.instructions.main import * #@nocy
from awlsim.core.operators import *
#from awlsim.core.instructions.main cimport * #@cy
class AwlInsn_RRD(AwlInsn): #+cdef
__slots__ = ()
def __init__(self, cpu, rawInsn):
AwlInsn.__init__(self, cpu, AwlInsn.TYPE_RRD, rawInsn)
self.assertOpCount((0, 1))
if self.ops:
self.ops[0].assertType(AwlOperator.IMM, 0, 255)
def run(self):
#@cy cdef S7StatusWord s
s = self.cpu.statusWord
accu1 = self.cpu.accu1.getDWord()
if self.ops:
count = self.ops[0].value
else:
count = self.cpu.accu2.getByte()
if count <= 0:
return
count = max(0, count % 32)
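# Rotate ACCU1 right by `count` bits within a 32-bit word; for example,
# rotating 0x00000001 right by 1 yields 0x80000000.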
accu1 &= 0xFFFFFFFF
accu1 = ((accu1 >> count) | (accu1 << (32 - count))) & 0xFFFFFFFF
self.cpu.accu1.set(accu1)
s.A0, s.A1, s.OV = 0, (accu1 >> 31) & 1, 0
| gion86/awlsim | awlsim/core/instructions/insn_rrd.py | Python | gpl-2.0 | 1,729 |
# -*- coding: utf-8 -*-
"""
Custom module logger
"""
import logging
module_name = 'moflow'
logger = logging.getLogger(module_name)
logger.addHandler(logging.NullHandler()) # best practice to not show anything
def use_basic_config(level=logging.INFO, format=logging.BASIC_FORMAT):
"""Add basic configuration and formatting to the logger
By default, the logger should not be configured in any way. However
users and developers may prefer to see the logger messages.
"""
logger.level = level
if module_name not in [_.name for _ in logger.handlers]:
formatter = logging.Formatter(format)
handler = logging.StreamHandler()
handler.name = module_name
handler.setFormatter(formatter)
logger.addHandler(handler)
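# Hypothetical usage from application code (assuming the module is importable
# as moflow._logger, per this file's path):
# from moflow._logger import use_basic_config
# use_basic_config(logging.DEBUG)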
| mwtoews/moflow | moflow/_logger.py | Python | bsd-2-clause | 773 |
from django import forms
from order.models import Pizza, Bread, Customer
class PizzaForm(forms.ModelForm):
class Meta:
model = Pizza
fields = ('size', 'toppings', 'crust')
widgets = {
'size': forms.RadioSelect(),
'crust': forms.RadioSelect(),
'toppings': forms.CheckboxSelectMultiple(),
}
def process(self, order):
data = self.cleaned_data
size = data['size']
crust = data['crust']
toppings = data['toppings']
pizza = Pizza.objects.create()
pizza.size = size
pizza.crust = crust
for topping in toppings:
pizza.toppings.add(topping)
pizza.save()
order.pizzas.add(pizza)
order.save()
class BreadForm(forms.ModelForm):
class Meta:
model = Bread
fields = ('flavor',)
widgets = {
'type': forms.RadioSelect(),
}
def process(self, order):
data = self.cleaned_data
flavor = data['flavor']
bread = Bread.objects.create(flavor=flavor)
order.breads.add(bread)
order.save()
class CustomerForm(forms.ModelForm):
class Meta:
model = Customer
def process(self, order):
data = self.cleaned_data
name = str(data['name'])
number = str(data['number'])
customer = Customer.objects.create(name=name, number=number)
order.customer = customer
order.save()
| ajpocus/pizzeria | order/forms.py | Python | bsd-3-clause | 1,411 |
# Kivy Imports
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.dropdown import DropDown
from kivy.uix.button import Button
from kivy.properties import ObjectProperty, StringProperty
from kivy.app import App
# Utility Imports
import yaml
# Game Imports
from scripts.events import Event
# File Loads
loc_file = file('data/location.yml')
loc_dat = yaml.load(loc_file)
# Bind Classes
event = Event()
class EventWindow(BoxLayout):
ct = StringProperty('')
button_status = StringProperty('')
def __init__(self, **kwargs):
super(EventWindow, self).__init__(**kwargs)
self.ct = event.current_text
self.button_status = event.command
event.bind(current_text=self.setter('ct'))
event.bind(command=self.setter('button_status'))
def slide_change(self, cmd):
poss_commands = ("[Next Slide]", "[Query]", "[Combat]", 'Previous')
if cmd == '[Terminate]':
event.controller(cmd)
self.parent.remove_widget(self)
MainWin.in_event = False
elif cmd in poss_commands:
event.controller(cmd)
else:
print "Event command not recognized. Please check the event file."
class MainWin(BoxLayout):
current_map = ObjectProperty('')
current_location = StringProperty()
containers = ObjectProperty(loc_dat['Havana']['container'], allownone=True)
in_event = False
def __init__(self, **kwargs):
super(MainWin, self).__init__(**kwargs)
self.current_map = 'maps/Havana.jpg'
self.current_location = 'Havana'
self.containers = loc_dat['Havana']['container']
self.locale = loc_dat['Havana']['connections']
self.first_run = True
# Drop Down code
self.dropdown = DropDown() # Binds Class
self.dd_updater() # Calls method to determine location containers
self.ids.mb.bind(on_release=self.dropdown.open) # Connects generated locations to
self.dropdown.bind(on_select=lambda instance, x: self.location(x)) # Binds button's location changing behavior
# Widgets
self.ew = EventWindow()
# Handles the placement of buttons on the map
self.place_locale()
def place_locale(self):
self.cleanup()
if self.locale is None:
pass
else:
for place in self.locale:
place_button = Button(text=place,
id='pb',
size_hint=(.05, .05),
pos_hint={'x': loc_dat[self.current_location]['connections'][place][0],
'y': loc_dat[self.current_location]['connections'][place][1]},
on_release=lambda destination: self.location(destination.text))
self.ids.mapspace.add_widget(place_button)
if self.first_run is False:
MainWin.in_event = True
self.ids.mapspace.add_widget(self.ew)
def cleanup(self):
for child in [child for child in self.ids.mapspace.children]:
self.ids.mapspace.remove_widget(child)
def dd_updater(self):
self.dropdown.clear_widgets()
if self.containers is None:
pass
else:
for note in self.containers: # Adds widgets to the existing dropdown
btn = Button(text=note,
color=(0, 0, 0, 1),
background_normal='images/Button.png',
background_down='images/Button.png',
border=(0, 60, 0, 120),
size_hint_y=None,
height=30)
btn.bind(on_release=lambda b: self.dropdown.select(b.text))
self.dropdown.add_widget(btn)
def location(self, new_loc):
if MainWin.in_event is False or self.first_run is True:
self.current_location = new_loc
self.current_map = loc_dat[new_loc]['image']
self.locale = loc_dat[new_loc]['connections']
self.containers = loc_dat[new_loc]['container']
event.event_name()
event.parse()
self.dd_updater()
self.place_locale()
else:
pass
self.first_run = False
class Test(App):
def build(self):
self.mainspace = MainWin()
self.eventwindow = EventWindow()
return self.mainspace
Test().run()
| DinkWerks/Kivy-Game | main.py | Python | mit | 4,529 |
#! /usr/bin/env python
#
#Copyright (c) 2012 Robert Rouhani <[email protected]>, Nick Richard <[email protected]>, Hayden Lee <[email protected]>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import pygame
import resources
class Player(pygame.sprite.Sprite):
def __init__(self, (x, y), speed, collide_lambda, fps=10):
pygame.sprite.Sprite.__init__(self)
self.image, self.rect = resources.load_image("../assets/images/player/front1.bmp", -1)
self.rect.move_ip(x, y)
self.speed = speed
self.walls = []
self.obstacles = []
self.collide_obstacle = collide_lambda
self._start = pygame.time.get_ticks()
self._delay = 1000 / fps
self._last_update = 0
self._frame = 0
self.anim_paused = True
self.forward_images = []
self.backward_images = []
self.left_images = []
self.right_images = []
self.forward_images.append(resources.load_image_no_rect("../assets/images/player/front1.bmp", -1))
self.forward_images.append(resources.load_image_no_rect("../assets/images/player/front2.bmp", -1))
self.forward_images.append(resources.load_image_no_rect("../assets/images/player/front3.bmp", -1))
self.backward_images.append(resources.load_image_no_rect("../assets/images/player/back1.bmp", -1))
self.backward_images.append(resources.load_image_no_rect("../assets/images/player/back2.bmp", -1))
self.backward_images.append(resources.load_image_no_rect("../assets/images/player/back3.bmp", -1))
self.left_images.append(resources.load_image_no_rect("../assets/images/player/left1.bmp", -1))
self.left_images.append(resources.load_image_no_rect("../assets/images/player/left2.bmp", -1))
self.right_images.append(resources.load_image_no_rect("../assets/images/player/right1.bmp", -1))
self.right_images.append(resources.load_image_no_rect("../assets/images/player/right2.bmp", -1))
self._images = self.forward_images
self.update_anim(pygame.time.get_ticks())
def set_walls(self, walls):
self.walls = walls
def set_obstacles(self, obstacles):
self.obstacles = obstacles
def update_anim(self, t):
if not self.anim_paused:
if t - self._last_update > self._delay:
self._frame += 1
if self._frame >= len(self._images): self._frame = 0
self.image = self._images[self._frame]
self._last_update = t
def pause(self):
self.anim_paused = True
def play(self):
self.anim_paused = False
def move_forward(self):
self.move(0, -self.speed)
self._images = self.backward_images
self.play()
def move_backwards(self):
self.move(0, self.speed)
self._images = self.forward_images
self.play()
def move_left(self):
self.move(-self.speed, 0)
self._images = self.left_images
self.play()
def move_right(self):
self.move(self.speed, 0)
self._images = self.right_images
self.play()
def move(self, x, y):
self.rect.move_ip(x, y)
for i in self.rect.collidelistall(self.walls):
r = self.walls[i]
if x > 0: # Moving right; Hit the left side of the wall
self.rect.right = r.left
if x < 0: # Moving left; Hit the right side of the wall
self.rect.left = r.right
if y > 0: # Moving down; Hit the top side of the wall
self.rect.bottom = r.top
if y < 0: # Moving up; Hit the bottom side of the wall
self.rect.top = r.bottom
obstacle_to_remove = None
for o in self.obstacles:
if self.rect.colliderect(o.get_rect()):
if x > 0: # Moving right; Hit the left side of the wall
self.rect.right = o.get_rect().left
if x < 0: # Moving left; Hit the right side of the wall
self.rect.left = o.get_rect().right
if y > 0: # Moving down; Hit the top side of the wall
self.rect.bottom = o.get_rect().top
if y < 0: # Moving up; Hit the bottom side of the wall
self.rect.top = o.get_rect().bottom
obstacle_to_remove = self.collide_obstacle(o)
if obstacle_to_remove is not None:
self.obstacles.remove(obstacle_to_remove)
def move_to(self, x, y):
self.rect.left = x
self.rect.top = y | Robmaister/RPI-Game-Jam-9-22-12 | src/player.py | Python | mit | 5,660 |
# -*- coding: utf-8 -*-
# +---------------------------------------------------------------------------+
# | pylstar : Implementation of the LSTAR Grammatical Inference Algorithm |
# +---------------------------------------------------------------------------+
# | Copyright (C) 2015 Georges Bossert |
# | This program is free software: you can redistribute it and/or modify |
# | it under the terms of the GNU General Public License as published by |
# | the Free Software Foundation, either version 3 of the License, or |
# | (at your option) any later version. |
# | |
# | This program is distributed in the hope that it will be useful, |
# | but WITHOUT ANY WARRANTY; without even the implied warranty of |
# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
# | GNU General Public License for more details. |
# | |
# | You should have received a copy of the GNU General Public License |
# | along with this program. If not, see <http://www.gnu.org/licenses/>. |
# +---------------------------------------------------------------------------+
# | @url : https://github.com/gbossert/pylstar |
# | @contact : [email protected] |
# +---------------------------------------------------------------------------+
# +----------------------------------------------------------------------------
# | Global Imports
# +----------------------------------------------------------------------------
import os
import json
# +----------------------------------------------------------------------------
# | Pylstar Imports
# +----------------------------------------------------------------------------
from pylstar.tools.Decorators import PylstarLogger
from pylstar.Word import Word
from pylstar.Letter import Letter
@PylstarLogger
class KnowledgeNode(object):
def __init__(self, input_letter, output_letter):
self.input_letter = input_letter
self.output_letter = output_letter
self.children = dict()
def __str__(self, level=0):
return json.dumps(self.serialize(), sort_keys=True, indent=4, separators=(',', ': '))
def serialize(self):
"""This method return a serialized representation of the node"""
node = {
"input_letter" : self.input_letter.serialize(),
"output_letter": self.output_letter.serialize(),
"children" : [c.serialize() for c in self.children.values()]
}
return node
@staticmethod
def deserialize(dict_data, possible_letters):
if dict_data is None:
raise Exception("dict_data cannot be None")
input_letter = Letter.deserialize(dict_data['input_letter'], possible_letters)
output_letter = Letter.deserialize(dict_data['output_letter'], possible_letters)
node = KnowledgeNode(input_letter, output_letter)
for child in dict_data["children"]:
child_node = KnowledgeNode.deserialize(child, possible_letters)
node.children[child_node.input_letter] = child_node
return node
def traverse(self, input_letters, output_letters = None):
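# Walks the tree along input_letters, returning the output letters stored on the
# path; when output_letters is supplied it also verifies the stored outputs and
# creates missing child nodes, effectively recording the new observation.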
if input_letters[0] != self.input_letter:
raise Exception("Node cannot be traversed with input letter '{}'".format(input_letters[0]))
if output_letters is not None and output_letters[0] != self.output_letter:
raise Exception("Node '{}' cannot be traversed with output letter '{}'".format(self, output_letters[0]))
if output_letters is not None and len(input_letters) != len(output_letters):
raise Exception("Specified input and output letters do not have the same length")
if len(input_letters) < 2:
return [self.output_letter]
current_input_letter = input_letters[1]
current_output_letter = None
if output_letters is not None:
current_output_letter = output_letters[1]
if current_input_letter in self.children:
child = self.children[current_input_letter]
if current_output_letter is not None and child.output_letter != current_output_letter:
raise Exception("Incompatible path found, expected '{}' found '{}'".format(child.output_letter.symbols, current_output_letter.symbols))
if output_letters is None:
new_output_letters = None
else:
new_output_letters = output_letters[1:]
new_input_letters = input_letters[1:]
return [self.output_letter] + child.traverse(new_input_letters, output_letters = new_output_letters)
elif output_letters is not None:
new_children = KnowledgeNode(input_letter = input_letters[1], output_letter = output_letters[1])
self.children[new_children.input_letter] = new_children
new_input_letters = input_letters[1:]
new_output_letters = output_letters[1:]
return [self.output_letter] + new_children.traverse(new_input_letters, output_letters = new_output_letters)
raise Exception("Cannot traverse node '{}' with subsequences '{}'".format(self, ', '.join([str(l) for l in input_letters])))
@property
def input_letter(self):
"""Input letter"""
return self.__input_letter
@input_letter.setter
def input_letter(self, input_letter):
if input_letter is None:
raise Exception("Input letter cannot be None")
self.__input_letter = input_letter
@property
def output_letter(self):
"""Output letter"""
return self.__output_letter
@output_letter.setter
def output_letter(self, output_letter):
if output_letter is None:
raise Exception("Output letter cannot be None")
self.__output_letter = output_letter
@PylstarLogger
class KnowledgeTree(object):
"""A pythonic implementation of a tree that hosts query results.
>>> from pylstar.KnowledgeTree import KnowledgeTree
>>> from pylstar.Word import Word
>>> from pylstar.Letter import Letter
>>> tree = KnowledgeTree()
>>> input_word = Word([Letter("a"), Letter("b")])
>>> output_word = Word([Letter(1), Letter(2)])
>>> tree.get_output_word(input_word)
Traceback (most recent call last):
...
Exception: No path found
>>> tree.add_word(input_word, output_word)
>>> print(tree.get_output_word(input_word))
[Letter(1), Letter(2)]
>>> eq_input_word = Word([Letter("a"), Letter("b")])
>>> print(tree.get_output_word(eq_input_word))
[Letter(1), Letter(2)]
"""
def __init__(self, cache_file_path = None):
self.__cache_file_path = cache_file_path
self.__nb_added_word = 0
self.roots = []
def __str__(self):
result = '\n'.join([root.__str__(level=1).rstrip() for root in self.roots])
return 'Tree (\n{}\n)'.format(result)
def get_output_word(self, input_word):
if input_word is None:
raise Exception("Input word cannot be None")
for root in self.roots:
try:
w = Word(root.traverse(input_word.letters))
self._logger.info("I = {} > O = {}".format(input_word, w))
return w
except Exception:
pass
raise Exception("No path found")
def add_word(self, input_word, output_word):
"""This method can be use to associate an input word to an output word
>>> from pylstar.KnowledgeTree import KnowledgeTree
>>> from pylstar.Word import Word
>>> from pylstar.Letter import Letter
>>> tree = KnowledgeTree()
>>> input_word = Word([Letter("a"), Letter("b")])
>>> output_word = Word([Letter(1), Letter(2)])
>>> tree.add_word(input_word, output_word)
The same association can be inserted twice iif both input and output are equivalent
to the previously inserted association
>>> from pylstar.KnowledgeTree import KnowledgeTree
>>> from pylstar.Word import Word
>>> from pylstar.Letter import Letter
>>> tree = KnowledgeTree()
>>> input_word = Word([Letter("a"), Letter("b")])
>>> output_word = Word([Letter(1), Letter(2)])
>>> tree.add_word(input_word, output_word)
>>> input_word2 = Word([Letter("a"), Letter("b")])
>>> output_word2 = Word([Letter(1), Letter(2)])
>>> tree.add_word(input_word2, output_word2)
>>> output_word3 = Word([Letter(1), Letter(1)])
>>> tree.add_word(input_word2, output_word3)
Traceback (most recent call last):
...
Exception: Incompatible path found, expected '{2}' found '{1}'
"""
if input_word is None:
raise Exception("Input word cannot be None")
if output_word is None:
raise Exception("Output word cannot be None")
if len(input_word) != len(output_word):
raise Exception("Input and output words do not have the same size")
self.__add_letters(input_word.letters, output_word.letters)
self.__nb_added_word += 1
if self.__cache_file_path is not None and self.__nb_added_word % 100 == 0:
self.write_cache()
def write_cache(self):
"""This method writes the content of the knowledge tree to the self.cache_file_path.
>>> cache_file = "/tmp/test_ktree_cache.dump"
>>> from pylstar.KnowledgeTree import KnowledgeTree
>>> from pylstar.Word import Word
>>> from pylstar.Letter import Letter
>>> l_a = Letter("a")
>>> l_b = Letter("b")
>>> l_c = Letter("c")
>>> l_1 = Letter(1)
>>> l_2 = Letter(2)
>>> l_3 = Letter(3)
>>> tree = KnowledgeTree(cache_file_path = cache_file)
>>> input_word = Word([l_a, l_b])
>>> output_word = Word([l_1, l_2])
>>> tree.add_word(input_word, output_word)
>>> tree.write_cache()
>>> input_word = Word([l_a, l_c])
>>> output_word = Word([l_1, l_3])
>>> tree.add_word(input_word, output_word)
>>> tree.write_cache()
>>> tree2 = KnowledgeTree(cache_file_path = cache_file)
>>> tree2.load_cache(possible_letters = [l_a, l_b, l_c, l_1, l_2, l_3])
>>> print(tree2.get_output_word(input_word))
[Letter(1), Letter(3)]
"""
if self.__cache_file_path is None:
raise Exception("Cache file path cannot be None")
self._logger.info("Writing the knowledge tree in cache '{}'".format(self.__cache_file_path))
if os.path.exists(self.__cache_file_path):
self._logger.info("Removing previous cache file '{}'".format(self.__cache_file_path))
os.remove(self.__cache_file_path)
nodes = [ root.serialize() for root in self.roots ]
with open(self.__cache_file_path, "w") as fd:
str_content = json.dumps(nodes, sort_keys=True, indent=4, separators=(',', ': '))
fd.write(str_content)
def load_cache(self, possible_letters):
"""This method loads the content of the cache in the knowledge tree
See doctest declared in method "write_cache"
"""
if self.__cache_file_path is None:
raise Exception("Cache file path cannot be None")
self._logger.info("Loading cache from '{}'".format(self.__cache_file_path))
json_content = None
with open(self.__cache_file_path, "r") as fd:
json_content = json.loads(fd.read())
for content in json_content:
root = KnowledgeNode.deserialize(content, possible_letters)
self.roots.append(root)
def __add_letters(self, input_letters, output_letters):
self._logger.debug("Adding letters '{}' / '{}'".format(', '.join([str(l) for l in input_letters]), ', '.join([str(l) for l in output_letters])))
retained_root = None
for root in self.roots:
if root.input_letter == input_letters[0]:
if root.output_letter != output_letters[0]:
raise Exception("Incompatible path found, expected '{}' found '{}'".format(root.output_letter.symbols, output_letters[0].symbols))
retained_root = root
break
if retained_root is None:
retained_root = KnowledgeNode(input_letters[0], output_letters[0])
self._logger.debug("Creating '{}' as a new root".format(retained_root))
self.roots.append(retained_root)
return retained_root.traverse(input_letters, output_letters)
| gbossert/pylstar | src/pylstar/KnowledgeTree.py | Python | gpl-3.0 | 12,988 |
import boto3
import json
import sure # noqa
from moto import mock_lambda, mock_cloudformation, mock_apigateway, mock_iam, mock_logs
from string import Template
template = """{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "The AWS CloudFormation template for this Serverless application",
"Resources": {
"ServerlessDeploymentBucket": {
"Type": "AWS::S3::Bucket"
},
"HelloLogGroup": {
"Type": "AWS::Logs::LogGroup",
"Properties": {
"LogGroupName": "/aws/lambda/timeseries-service-dev-hello"
}
},
"IamRoleLambdaExecution": {
"Type": "AWS::IAM::Role",
"Properties": {
"AssumeRolePolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": [
"lambda.amazonaws.com"
]
},
"Action": [
"sts:AssumeRole"
]
}
]
},
"Policies": [
{
"PolicyName": {
"Fn::Join": [
"-",
[
"dev",
"timeseries-service",
"lambda"
]
]
},
"PolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:CreateLogStream"
],
"Resource": [
{
"Fn::Sub": "arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/timeseries-service-dev-hello:*"
}
]
},
{
"Effect": "Allow",
"Action": [
"logs:PutLogEvents"
],
"Resource": [
{
"Fn::Sub": "arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/timeseries-service-dev-hello:*:*"
}
]
}
]
}
}
],
"Path": "/",
"RoleName": {
"Fn::Join": [
"-",
[
"timeseries-service",
"dev",
"us-east-1",
"lambdaRole"
]
]
}
}
},
"HelloLambdaFunction": {
"Type": "AWS::Lambda::Function",
"Properties": {
"Code": {
"S3Bucket": {
"Ref": "ServerlessDeploymentBucket"
},
"S3Key": "serverless/timeseries-service/dev/1542744572309-2018-11-20T20:09:32.309Z/timeseries-service.zip"
},
"FunctionName": "timeseries-service-dev-hello",
"Handler": "handler.hello",
"MemorySize": 1024,
"Role": {
"Fn::GetAtt": [
"IamRoleLambdaExecution",
"Arn"
]
},
"Runtime": "python2.7",
"Timeout": 6
},
"DependsOn": [
"HelloLogGroup",
"IamRoleLambdaExecution"
]
},
"HelloLambdaVersionU88Ag36tX5K6Yuze3R8jedH2g7q2TTGuafWQxEnUmo": {
"Type": "AWS::Lambda::Version",
"DeletionPolicy": "Retain",
"Properties": {
"FunctionName": {
"Ref": "HelloLambdaFunction"
},
"CodeSha256": "+pq+8RveA979z1DNF8UKnFGZfgE07blNyJGust5VJnU="
}
},
"ApiGatewayRestApi": {
"Type": "AWS::ApiGateway::RestApi",
"Properties": {
"Name": "dev-timeseries-service",
"EndpointConfiguration": {
"Types": [
"EDGE"
]
}
}
},
"ApiGatewayResourceHello": {
"Type": "AWS::ApiGateway::Resource",
"Properties": {
"ParentId": {
"Fn::GetAtt": [
"ApiGatewayRestApi",
"RootResourceId"
]
},
"PathPart": "hello",
"RestApiId": {
"Ref": "ApiGatewayRestApi"
}
}
},
"ApiGatewayMethodHelloGet": {
"Type": "AWS::ApiGateway::Method",
"Properties": {
"HttpMethod": "GET",
"RequestParameters": {},
"ResourceId": {
"Ref": "ApiGatewayResourceHello"
},
"RestApiId": {
"Ref": "ApiGatewayRestApi"
},
"ApiKeyRequired": false,
"AuthorizationType": "NONE",
"Integration": {
"IntegrationHttpMethod": "POST",
"Type": "AWS_PROXY",
"Uri": {
"Fn::Join": [
"",
[
"arn:",
{
"Ref": "AWS::Partition"
},
":apigateway:",
{
"Ref": "AWS::Region"
},
":lambda:path/2015-03-31/functions/",
{
"Fn::GetAtt": [
"HelloLambdaFunction",
"Arn"
]
},
"/invocations"
]
]
}
},
"MethodResponses": []
}
},
"ApiGatewayDeployment1542744572805": {
"Type": "AWS::ApiGateway::Deployment",
"Properties": {
"RestApiId": {
"Ref": "ApiGatewayRestApi"
},
"StageName": "dev"
},
"DependsOn": [
"ApiGatewayMethodHelloGet"
]
},
"HelloLambdaPermissionApiGateway": {
"Type": "AWS::Lambda::Permission",
"Properties": {
"FunctionName": {
"Fn::GetAtt": [
"HelloLambdaFunction",
"Arn"
]
},
"Action": "lambda:InvokeFunction",
"Principal": {
"Fn::Join": [
"",
[
"apigateway.",
{
"Ref": "AWS::URLSuffix"
}
]
]
},
"SourceArn": {
"Fn::Join": [
"",
[
"arn:",
{
"Ref": "AWS::Partition"
},
":execute-api:",
{
"Ref": "AWS::Region"
},
":",
{
"Ref": "AWS::AccountId"
},
":",
{
"Ref": "ApiGatewayRestApi"
},
"/*/*"
]
]
}
}
}
},
"Outputs": {
"ServerlessDeploymentBucketName": {
"Value": {
"Ref": "ServerlessDeploymentBucket"
}
},
"HelloLambdaFunctionQualifiedArn": {
"Description": "Current Lambda function version",
"Value": {
"Ref": "HelloLambdaVersionU88Ag36tX5K6Yuze3R8jedH2g7q2TTGuafWQxEnUmo"
}
},
"ServiceEndpoint": {
"Description": "URL of the service endpoint",
"Value": {
"Fn::Join": [
"",
[
"https://",
{
"Ref": "ApiGatewayRestApi"
},
".execute-api.us-east-1.",
{
"Ref": "AWS::URLSuffix"
},
"/dev"
]
]
}
}
}
}"""
@mock_cloudformation
@mock_lambda
@mock_iam
@mock_logs
@mock_apigateway
def test_simple_apigateway_with_lambda_proxy():
region = "us-east-1"
apigw = boto3.client("apigateway", region_name=region)
cf = boto3.client("cloudformation", region_name=region)
awslambda = boto3.client("lambda", region_name=region)
cf.create_stack(StackName="teststack", TemplateBody=template)
#
cf.describe_stacks(StackName="teststack")["Stacks"]
resources = cf.describe_stack_resources(StackName="teststack")["StackResources"]
api_id = [
r["PhysicalResourceId"]
for r in resources
if r["ResourceType"] == "AWS::ApiGateway::RestApi"
][0]
fn_name = [
r["PhysicalResourceId"]
for r in resources
if r["LogicalResourceId"] == "HelloLambdaFunction"
][0]
#
# Verify Rest API was created
api = apigw.get_rest_apis()["items"][0]
api["id"].should.equal(api_id)
api["name"].should.equal("dev-timeseries-service")
#
# Verify Gateway Resource was created
paths = apigw.get_resources(restApiId=api_id)["items"]
root_path = [p for p in paths if p["path"] == "/"][0]
hello_path = [p for p in paths if p["path"] == "/hello"][0]
hello_path["parentId"].should.equal(root_path["id"])
#
# Verify Gateway Method was created
m = apigw.get_method(
restApiId=api_id, resourceId=hello_path["id"], httpMethod="GET"
)
m["httpMethod"].should.equal("GET")
#
# Verify a Gateway Deployment was created
d = apigw.get_deployments(restApiId=api_id)["items"]
d.should.have.length_of(1)
#
# Verify Lambda function was created
awslambda.get_function(FunctionName=fn_name) # Will throw 404 if it doesn't exist
#
# Verify Lambda Permission was created
policy = json.loads(awslambda.get_policy(FunctionName=fn_name)["Policy"])
statement = policy["Statement"][0]
statement["FunctionName"].should.contain(fn_name)
statement["Condition"]["ArnLike"]["AWS:SourceArn"].should.equal(
"arn:aws:execute-api:us-east-1:123456789012:{}/*/*".format(api_id)
)
| william-richard/moto | tests/test_apigateway/test_apigateway_cloudformation.py | Python | apache-2.0 | 9,464 |
class ParameterError(Exception):
"""An error in the parameters passed to a function."""
pass | jlorieau/mollib | mollib/utils/exceptions.py | Python | gpl-3.0 | 100 |
# coding: utf-8
#
# Monk is an unobtrusive data modeling, manipulation and validation library.
# Copyright © 2011—2014 Andrey Mikhaylenko
#
# This file is part of Monk.
#
# Monk is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Monk is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Monk. If not, see <http://gnu.org/licenses/>.
"""
~~~~~~~~~~
Validators
~~~~~~~~~~
"""
__all__ = [
'BaseValidator',
# combinators
'BaseCombinator',
'All',
'Any',
# requirements
'BaseRequirement',
'Anything',
'Exists',
'IsA',
'HasAttr',
'Equals',
'Contains',
'InRange',
'Length',
'ListOf',
'ListOfAll',
'ListOfAny',
'DictOf',
# functions
'translate',
# special objects
'MISSING',
]
import copy
from . import compat
from .errors import (
CombinedValidationError, AtLeastOneFailed, AllFailed, ValidationError,
NoDefaultValue, InvalidKeys, MissingKeys, StructureSpecificationError,
DictValueError,
)
#: The value is valid if any of its items passes validation.
ITEM_STRATEGY_ANY = 'any'
#: The value is valid if all of its items pass validation.
ITEM_STRATEGY_ALL = 'all'
class MISSING:
"""
Stub for Exists validator to pass if the value is missing
(e.g. for dictionary keys).
"""
pass
def _reluctantly_translate(spec):
# `translate()` can do it itself but some validators have the `implies`
# attribute which can trigger instantiation of a BaseValidator subclass
# before the translation function is ready.
#
# We don't want to defer its usage as far as we can because it is best
# to fully build the validator in order to fail early.
#
# So this function is just a small barrier that prevents NameError
# in some cases.
if isinstance(spec, BaseValidator):
return spec
else:
return translate(spec)
class BaseValidator(object):
error_class = ValidationError
_default = NotImplemented
negated = False
def _combine(self, other, combinator):
# XXX should we flatten same-logic one-item combs?
if isinstance(other, type) and issubclass(other, BaseValidator):
# e.g. Exists instead of Exists()
raise TypeError('got {cls} class instead of its instance'
.format(cls=other.__name__))
return combinator([self, _reluctantly_translate(other)])
def _merge(self, value):
if value is not None:
raise NoDefaultValue('value is not None')
if self._default is NotImplemented:
raise NoDefaultValue('self._default is not implemented')
return self._default
def __and__(self, other):
return self._combine(other, All)
def __or__(self, other):
return self._combine(other, Any)
def __eq__(self, other):
return isinstance(other, type(self)) and self.__dict__ == other.__dict__
def __invert__(self):
clone = copy.deepcopy(self)
clone.negated = not self.negated
return clone
def __call__(self, value):
try:
self._check(value)
except ValidationError:
if self.negated:
return
else:
raise
else:
if self.negated:
self._raise_error(value)
def __hash__(self):
# TODO think this over and check Python docs
#return hash(((k,v) for k,v in self.__dict__.items()))
return hash('validator_'+str(self.__dict__))
def get_default_for(self, value, silent=True):
try:
return self._merge(value)
except NoDefaultValue:
if silent:
return value
else:
raise
def _check(self, value):
raise NotImplementedError
def _raise_error(self, value):
raise self.error_class(repr(self))
class BaseCombinator(BaseValidator):
error_class = CombinedValidationError
break_on_first_fail = False
_repr_tmpl = '{not_}({items})'
_repr_items_sep = '; '
def __init__(self, specs, default=None, first_is_default=False):
assert specs
self._specs = [_reluctantly_translate(s) for s in specs]
self._default = default
self._first_is_default = first_is_default
def _check(self, value):
errors = []
for spec in self._specs:
# TODO: group errors by exception type
# TODO: try recursive validators after all flat ones are OK
# (may be not a good idea because the order may matter)
#if spec.is_recursive and errors:
# # Don't collect nested errors if we already have one here.
# # Another optimized strategy would be to fail early instead of
# # trying to collect all exceptions for the node.
# continue
try:
spec(value)
except ValidationError as e:
if self.break_on_first_fail:
# don't even wrap the error
raise
errors.append(e)
if not self.can_tolerate(errors):
raise self.error_class(*errors)
def __repr__(self):
return self._repr_tmpl.format(
cls=self.__class__.__name__,
items=self._repr_items_sep.join(map(str, self._specs)),
not_='not ' if self.negated else '')
def can_tolerate(self, errors):
raise NotImplementedError
def _merge(self, value):
if self._default:
return self._default
defaults = []
for choice in self._specs:
try:
default = choice.get_default_for(value, silent=False)
except NoDefaultValue:
pass
else:
defaults.append(default)
if not defaults:
return value
if len(defaults) == 1:
return defaults[0]
else:
if self._first_is_default:
return defaults[0]
else:
return value
class All(BaseCombinator):
"""
Requires that the value passes all nested validators.
"""
error_class = AtLeastOneFailed
break_on_first_fail = True
_repr_items_sep = ' and '
def can_tolerate(self, errors):
# TODO: fail early, work as `or` does
# (this also enables basic if-then in the schema)
if not errors:
return True
class Any(BaseCombinator):
"""
Requires that the value passes at least one of nested validators.
"""
error_class = AllFailed
_repr_items_sep = ' or '
def can_tolerate(self, errors):
if len(errors) < len(self._specs):
return True
class BaseRequirement(BaseValidator):
# a hint for combinators, see their code
is_recursive = False
implies = NotImplemented
def __call__(self, value):
if self.implies is not NotImplemented:
self.implies(value)
super(BaseRequirement, self).__call__(value)
def _represent(self):
return self.__dict__
def __repr__(self):
return '{negated}{cls}({rep})'.format(
cls=self.__class__.__name__,
rep=self._represent(),
negated='~' if self.negated else '')
class Anything(BaseRequirement):
"""
Any values passes validation.
"""
def _check(self, value):
pass
def _represent(self):
return ''
class IsA(BaseRequirement):
"""
Requires that the value is an instance of given type.
"""
def __init__(self, expected_type, default=None):
self.expected_type = expected_type
self._default = default
def _check(self, value):
if not isinstance(value, self.expected_type):
self._raise_error(value)
def __repr__(self):
s = 'must be {pattern_}'.format(pattern_=self.expected_type.__name__)
if self.negated:
s = 'not ({s})'.format(s=s)
return s
class Equals(BaseRequirement):
"""
Requires that the value equals given expected value.
"""
def __init__(self, expected_value):
self._expected_value = expected_value
def _check(self, value):
if self._expected_value != value:
self._raise_error(value)
def __repr__(self):
s = 'must equal {pattern_!r}'.format(pattern_=self._expected_value)
if self.negated:
s = 'not ({s})'.format(s=s)
return s
@property
def _default(self):
return self._expected_value
class Contains(BaseRequirement):
"""
Requires that the value contains given expected value.
"""
def __init__(self, expected_value):
self._expected_value = expected_value
def _check(self, value):
if self._expected_value not in value:
self._raise_error(value)
def __repr__(self):
s = 'must contain {pattern_!r}'.format(pattern_=self._expected_value)
if self.negated:
s = 'not ({s})'.format(s=s)
return s
@property
def _default(self):
return self._expected_value
class Exists(BaseRequirement):
"""
Requires that the value exists. Obviously this only makes sense in
special cases like dictionary keys; otherwise there's simply nothing to
validate. Note that this is *not* a check against `None` or `False`.
"""
def __init__(self, default=None):
self._default = default
def _check(self, value):
if value is MISSING:
self._raise_error(value)
def __repr__(self):
if self.negated:
return 'must not exist'
else:
return 'must exist'
class BaseListOf(BaseRequirement):
"""
The base class for validating lists. Supports different error toleration
strategies which can be selected by subclasses. In many aspects this is
similar to :class:`BaseCombinator`.
"""
implies = IsA(list)
item_strategy = NotImplemented
error_class = CombinedValidationError
is_recursive = True
def __init__(self, validator, default=None):
self._nested_validator = translate(validator)
self._default = default
def _check(self, value):
if not value:
try:
self._nested_validator(MISSING)
except ValidationError as e:
raise ValidationError('lacks item: {error}'
.format(error=e))
errors = []
for i, nested_value in enumerate(value):
try:
self._nested_validator(nested_value)
except ValidationError as e:
annotated_error = ValidationError(
'item #{elem}: {error}'.format(elem=i, error=e))
if self.item_strategy == ITEM_STRATEGY_ALL:
raise annotated_error
errors.append(annotated_error)
if self.can_tolerate(errors, value):
return
raise self.error_class(*errors)
def can_tolerate(self, errors, value):
if self.item_strategy == ITEM_STRATEGY_ALL:
if errors:
return False
else:
return True
elif self.item_strategy == ITEM_STRATEGY_ANY:
if len(errors) < len(value):
return True
else:
return False
else:
raise ValueError('unknown strategy')
def _represent(self):
return repr(self._nested_validator)
def _merge(self, value):
""" Returns a list based on `value`:
* missing required value is converted to an empty list;
* missing required items are never created;
* nested items are merged recursively.
"""
if not value:
return []
if value is not None and not isinstance(value, list):
# bogus value; will not pass validation but should be preserved
return value
item_spec = self._nested_validator
return [x if x is None else item_spec.get_default_for(x) for x in value]
class ListOfAll(BaseListOf):
"""
Requires that the value is a `list` which items match given validator.
Usage::
>>> v = ListOfAll(IsA(int) | IsA(str))
>>> v([123, 'hello'])
>>> v([123, 'hello', 5.5])
Traceback (most recent call last):
...
ValidationError: item #2: must be int or must be str
"""
error_class = AtLeastOneFailed
item_strategy = ITEM_STRATEGY_ALL
class ListOfAny(BaseListOf):
"""
Same as :class:`ListOfAll` but tolerates invalid items as long as there
is at least one valid among them.
"""
error_class = AllFailed
item_strategy = ITEM_STRATEGY_ANY
ListOf = ListOfAll
#@requirement(implies=[IsA(dict)], is_recursive=True, vars=['key', 'req'])
#def dict_contains(ctx, value):
# nested_value = value[ctx['key']]
# ctx['req'](nested_value)
class DictOf(BaseRequirement):
"""
Requires that the value is a `dict` which items match given patterns.
Usage::
>>> v = DictOf([
... # key "name" must exist; its value must be a `str`
... (Equals('name'), IsA(str)),
... # key "age" may not exist; its value must be an `int`
... (Equals('age') | ~Exists(), IsA(int)),
... # there may be other `str` keys with `str` or `int` values
... (IsA(str), IsA(str) | IsA(int)),
... ])
>>> v({'name': 'John'})
>>> v({'name': 'John', 'age': 25})
>>> v({'name': 'John', 'age': 25.5})
Traceback (most recent call last):
...
DictValueError: 'age' value must be int
>>> v({'name': 'John', 'age': 25, 'note': 'custom field'})
>>> v({'name': 'John', 'age': 25, 'note': 5.5})
Traceback (most recent call last):
...
DictValueError: 'note' value must be str or must be int
Note that this validator supports :class:`Exists` to mark keys that can
be missing.
"""
implies = IsA(dict)
def __init__(self, pairs):
self._pairs = pairs
def _represent(self):
return repr(self._pairs)
def _check(self, value):
value = value or {}
validated_data_keys = []
missing_key_specs = []
for k_validator, v_validator in self._pairs:
# NOTE kspec.datatype can be None => any key of any datatype
# NOTE kspec.default can be None => any key of given datatype
# gather data keys that match given kspec;
# then validate them against vspec
matched = False
for k,v in value.items():
if k in validated_data_keys:
continue
# check if this key is described by current key validator;
# if it isn't, just skip it (and try another validator
# on it later on)
try:
k_validator(k)
except (TypeError, ValidationError):
continue
# this key *is* described by current value validator;
# validate the value (it *must* validate)
try:
v_validator(v)
except (ValidationError, TypeError) as e:
if isinstance(e, DictValueError):
msg = 'in {k!r} ({e})'
else:
msg = '{k!r} value {e}'
raise DictValueError(msg.format(k=k, e=e))
validated_data_keys.append(k)
matched = True
# if not matched and not k_validator.optional:
if not matched:
try:
k_validator(MISSING)
except ValidationError:
missing_key_specs.append(k_validator)
# TODO document that unknown keys are checked before missing ones
# check if there are data keys that did not match any key spec;
# if yes, raise InvalidKey for them
if len(validated_data_keys) < len(value):
invalid_keys = set(value) - set(validated_data_keys)
raise InvalidKeys(*invalid_keys)
if missing_key_specs:
# XXX this prints validators, not keys as strings;
# one exception is the Equals validator from which we get
# the expected value via internal API. And that's gross.
reprs = (spec._expected_value if isinstance(spec, Equals) else spec
for spec in missing_key_specs)
raise MissingKeys(*reprs)
def _merge(self, value):
"""
Returns a dictionary based on `value` with each value recursively
merged with `spec`.
"""
if value is not None and not isinstance(value, dict):
# bogus value; will not pass validation but should be preserved
return value
if not self._pairs:
return {}
collected = {}
# collected.update(value)
for k_validator, v_validator in self._pairs:
k_default = k_validator.get_default_for(None)
if k_default is None:
continue
# even None is ok
if value:
v_for_this_k = value.get(k_default)
else:
v_for_this_k = None
v_default = v_validator.get_default_for(v_for_this_k)
collected.update({k_default: v_default})
if value:
for k, v in value.items():
if k not in collected:
collected[k] = v
return collected
class InRange(BaseRequirement):
"""
Requires that the numeric value is in given boundaries.
"""
implies = IsA(int) | IsA(float)
def __init__(self, min=None, max=None, default=NotImplemented):
self._min = min
self._max = max
if default is not NotImplemented:
self._default = default
def _check(self, value):
if self._min is not None and self._min > value:
self._raise_error(value)
if self._max is not None and self._max < value:
self._raise_error(value)
def __repr__(self):
if self.negated:
must = 'must not'
else:
must = 'must'
def _fmt(x):
return '' if x is None else x
return '{must} belong to {min_}..{max_}'.format(
must=must, min_=_fmt(self._min), max_=_fmt(self._max))
class HasAttr(BaseRequirement):
"""
Requires that the value has given attribute.
"""
def __init__(self, attr_name):
self._attr_name = attr_name
def _check(self, value):
if not hasattr(value, self._attr_name):
self._raise_error(value)
def __repr__(self):
if self.negated:
must = 'must not'
else:
must = 'must'
return '{must} have attribute {name!r}'.format(
must=must, name=self._attr_name)
class Length(InRange):
"""
Requires that the value length is in given boundaries.
"""
implies = HasAttr('__len__')
def _check(self, value):
try:
super(Length, self)._check(len(value))
except ValidationError as e:
self._raise_error(value)
def __repr__(self):
if self.negated:
must = 'must not'
else:
must = 'must'
def _fmt(x):
return '' if x is None else x
return '{must} have length of {min_}..{max_}'.format(
must=must, min_=_fmt(self._min), max_=_fmt(self._max))
def translate(value):
"""
Translates given schema from "pythonic" syntax to a validator.
Usage::
>>> translate(str)
IsA(str)
>>> translate('hello')
IsA(str, default='hello')
"""
if isinstance(value, BaseValidator):
return value
if value is None:
return Anything()
if isinstance(value, type):
return IsA(value)
if type(value) in compat.func_types:
real_value = value()
return IsA(type(real_value), default=real_value)
if isinstance(value, list):
if value == []:
# no inner spec, just an empty list as the default value
return IsA(list)
elif len(value) == 1:
# the only item as spec for each item of the collection
return ListOf(translate(value[0]))
else:
raise StructureSpecificationError(
'Expected a list containing exactly 1 item; '
'got {cnt}: {spec}'.format(cnt=len(value), spec=value))
if isinstance(value, dict):
if not value:
return IsA(dict)
items = []
for k, v in value.items():
if isinstance(k, BaseValidator):
k_validator = k
else:
k_validator = translate(k)
default = k_validator.get_default_for(None)
if default is not None:
k_validator = Equals(default)
v_validator = translate(v)
items.append((k_validator, v_validator))
return DictOf(items)
return IsA(type(value), default=value)
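# Illustrative checks (added for clarity; not part of the original module)
# exercising the container shortcuts handled above. Guarded so that importing
# the module stays free of side effects.
if __name__ == '__main__':
    assert isinstance(translate([str]), ListOfAll)   # one inner item = per-element spec
    assert isinstance(translate({}), IsA)            # empty dict only requires "a dict"
    spec = translate({'name': str})                  # -> DictOf([(Equals('name'), IsA(str))])
    spec({'name': 'John'})                           # validates silently
    try:
        translate([str, int])                        # more than one inner item
    except StructureSpecificationError:
        pass                                         # expected: ambiguous list spec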
| andifined/monk | monk/validators.py | Python | lgpl-3.0 | 21,852 |
# for rgenetics - lped to fbat
# recode to numeric fbat version
# much slower so best to always
# use numeric alleles internally
import sys,os,time
prog = os.path.split(sys.argv[0])[-1]
myversion = 'Oct 10 2009'
galhtmlprefix = """<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="generator" content="Galaxy %s tool output - see http://getgalaxy.org" />
<title></title>
<link rel="stylesheet" href="/static/style/base.css" type="text/css" />
</head>
<body>
<div class="document">
"""
def timenow():
"""return current time as a string
"""
return time.strftime('%d/%m/%Y %H:%M:%S', time.localtime(time.time()))
def rgConv(inpedfilepath,outhtmlname,outfilepath):
"""convert linkage ped/map to fbat"""
recode={'A':'1','C':'2','G':'3','T':'4','N':'0','0':'0','1':'1','2':'2','3':'3','4':'4'}
basename = os.path.split(inpedfilepath)[-1] # get basename
inmap = '%s.map' % inpedfilepath
inped = '%s.ped' % inpedfilepath
outf = '%s.ped' % basename # note the fbat exe insists that this is the extension for the ped data
outfpath = os.path.join(outfilepath,outf) # where to write the fbat format file to
try:
mf = file(inmap,'r')
except:
sys.stderr.write('%s cannot open inmap file %s - do you have permission?\n' % (prog,inmap))
sys.exit(1)
try:
rsl = [x.split()[1] for x in mf]
except:
sys.stderr.write('## cannot parse %s' % inmap)
sys.exit(1)
try:
os.makedirs(outfilepath)
except:
pass # already exists
head = ' '.join(rsl) # list of rs numbers
# TODO add anno to rs but fbat will prolly barf?
pedf = file(inped,'r')
o = file(outfpath,'w',2**20)
o.write(head)
o.write('\n')
    dorecode = 0 # assume numeric alleles until the first row shows otherwise
    for i,row in enumerate(pedf):
if i == 0:
lrow = row.split()
try:
x = [int(x) for x in lrow[10:50]] # look for non numeric codes
except:
dorecode = 1
if dorecode:
lrow = row.strip().split()
p = lrow[:6]
g = lrow[6:]
gc = [recode.get(x,'0') for x in g]
lrow = p+gc
row = '%s\n' % ' '.join(lrow)
o.write(row)
o.close()
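# Worked example of the recoding above (illustrative, not part of the original
# tool): a linkage .ped line such as
#     fam1 ind1 0 0 1 1 A A G T
# keeps its six leading pedigree columns and has each allele letter mapped
# through `recode`, yielding
#     fam1 ind1 0 0 1 1 1 1 3 4
# Rows whose alleles are already numeric (the int() probe on the first row
# succeeds) are written through unchanged.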
def main():
"""call fbater
need to work with rgenetics composite datatypes
so in and out are html files with data in extrafiles path
<command interpreter="python">rg_convert_lped_fped.py '$input1/$input1.metadata.base_name'
'$output1' '$output1.extra_files_path'
</command>
"""
nparm = 3
    if len(sys.argv) <= nparm: # need the program name plus 3 arguments
sys.stderr.write('## %s called with %s - needs %d parameters \n' % (prog,sys.argv,nparm))
sys.exit(1)
inpedfilepath = sys.argv[1]
outhtmlname = sys.argv[2]
outfilepath = sys.argv[3]
try:
os.makedirs(outfilepath)
except:
pass
rgConv(inpedfilepath,outhtmlname,outfilepath)
f = file(outhtmlname,'w')
f.write(galhtmlprefix % prog)
flist = os.listdir(outfilepath)
print '## Rgenetics: http://rgenetics.org Galaxy Tools %s %s' % (prog,timenow()) # becomes info
f.write('<div>## Rgenetics: http://rgenetics.org Galaxy Tools %s %s\n<ol>' % (prog,timenow()))
for i, data in enumerate( flist ):
f.write('<li><a href="%s">%s</a></li>\n' % (os.path.split(data)[-1],os.path.split(data)[-1]))
f.write("</div></body></html>")
f.close()
if __name__ == "__main__":
main()
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/datatypes/converters/lped_to_fped_converter.py | Python | gpl-3.0 | 3,564 |
"""
Suppose a sorted array is rotated at some pivot unknown to you beforehand.
(i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
You are given a target value to search. If found in the array return its index, otherwise return -1.
You may assume no duplicate exists in the array.
"""
class Solution:
# @param A, a list of integers
# @param target, an integer to be searched
# @return an integer
def search(self, A, target):
return self.search_1(A, target)
def search_1(self, A, target):
start = 0
end = len(A) - 1
while start + 1 < end:
mid = (start + end) / 2
if target == A[mid]:
return mid
if A[start] < A[mid]: # First half sorted
if A[start] <= target < A[mid]: # In first half
end = mid
else: # In second half
start = mid
else: # Second half sorted
if A[mid] < target <= A[end]: # In second half
start = mid
else:
end = mid
if A[start] == target:
return start
if A[end] == target:
return end
return -1
# Switching to NC way, use start+1 < end instead
def search_rec(self, A, target):
return self.search_helper(A, target, 0, len(A) - 1)
def search_helper(self, A, target, start, end):
if start > end:
return -1
mid = (start + end) / 2
if A[mid] == target:
return mid
elif A[mid] > A[end]: # First half sorted
if A[start] <= target and target < A[mid]:
return self.search_helper(A, target, start, mid - 1)
else:
return self.search_helper(A, target, mid + 1, end)
else: # Second half sorted
if A[mid] < target and target <= A[end]:
return self.search_helper(A, target, mid + 1, end)
else:
return self.search_helper(A, target, start, mid - 1)
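# Illustrative usage (added; not part of the original solution). The file
# targets Python 2, where `/` on ints is floor division, so the asserts below
# assume a Python 2 interpreter.
if __name__ == "__main__":
    s = Solution()
    assert s.search([4, 5, 6, 7, 0, 1, 2], 0) == 4
    assert s.search([4, 5, 6, 7, 0, 1, 2], 3) == -1
    assert s.search_rec([4, 5, 6, 7, 0, 1, 2], 7) == 3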
| cyandterry/Python-Study | Ninja/Leetcode/33_Search_in_Rotated_Sorted_Array.py | Python | mit | 2,223 |
# Copyright 2012 Loris Corazza, Sakis Christakidis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from p2ner.abstract.interface import Interface
class Interface(Interface):
def initInterface(self, *args, **kwargs):
pass
def contactServers(self,servers):
for s in servers:
self.root.interface.contactServer(s)
#d.addCallback(self.parent.getStream)
#d.addErrback(self.failedXMLRPC)
def subscribeStream(self,id,ip,port,outputMethod):
self.root.interface.subscribeStream(id,ip,port,outputMethod)
#d.addCallback(self.parent.succesfulSubscription,id)
#d.addErrback(self.failedXMLRPC)
def stopProducing(self,id,repub):
self.root.interface.stopProducing(id,repub)
#d.addCallback(self.parent.stopProducing,id)
#d.addErrback(self.failedXMLRPC)
def unregisterStream(self,id):
self.root.interface.unregisterStream(id)
#d.addCallback(self.parent.unregisterStream,id)
#d.addErrback(self.failedXMLRPC)
def startProducing(self,id,type):
self.root.interface.startProducing(id)
#d.addCallback(self.parent.changeStreamStatus,id,type)
#d.addErrback(self.failedXMLRPC)
def startRemoteProducer(self,id,type):
self.root.interface.startRemoteProducer(id)
#d.addCallback(self.parent.changeStreamStatus,id,type)
#d.addErrback(self.failedXMLRPC)
#self.parent.changeStreamStatus(d,id,type)
def registerStream(self,settings,inputMethod,outputMethod):
self.root.interface.registerStream(settings,inputMethod,outputMethod)
#d.addCallback(self.parent.registerStream)
#d.addErrback(self.failedXMLRPC)
def getComponentsInterfaces(self,comp):
self.root.interface.getComponentsInterfaces(comp)
def startConverting(self,gui,dir,filename,videorate,subs,subsFile,subsEnc):
self.root.interface.startConverting(gui,dir,filename,videorate,subs,subsFile,subsEnc)
def getConverterStatus(self,gui,id):
self.root.interface.getConverterStatus(gui,id)
def abortConverter(self,id):
self.root.interface.abortConverter(id)
def send(self,rcom,arg,lcom):
rcom=eval('self.root.interface.'+rcom)
if arg:
lcom(rcom(arg))
else:
lcom(rcom())
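    # Illustrative call (an assumption, not taken from the original callers):
    # self.send('getStatistics', None, gui.updateStats) would resolve
    # self.root.interface.getStatistics via eval, call it without arguments and
    # pass the result to the local callback (here a hypothetical
    # gui.updateStats). getattr(self.root.interface, rcom) would be a safer
    # lookup, but the original eval-based behaviour is kept as written.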
def exiting(self):
self.root.exiting()
def getAvailableStatistics(self,func):
func(self.root.interface.getStatistics())
def getStatValues(self,func,stats):
func(self.root.interface.getStatValues(stats))
def startMeasurement(self,ip,gui):
self.root.startBWMeasurement(ip,gui)
def setBW(self,bw):
self.root.setBW(bw)
def checkNetwork(self):
self.root.interface.checkNetwork()
def sendChatMessage(self,id,message,peer):
self.root.interface.sendChatMessage(id,message,peer)
def joinChatRoom(self,id,username,server):
self.root.interface.joinChatRoom(id,username,server)
def leaveChatRoom(self,id,username,server):
self.root.interface.leaveChatRoom(id,username,server)
| schristakidis/p2ner | p2ner/components/ui/gtkgui/gtkgui/interface/localinterface.py | Python | apache-2.0 | 3,773 |
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCIConversationUser logic methods."""
from datetime import timedelta
from google.appengine.ext import db
from google.appengine.ext import ndb
from melange.logic import profile as profile_logic
from melange.models import profile as profile_model
from soc.tasks import mailer
from soc.modules.gci.logic import message as gcimessage_logic
from soc.modules.gci.logic.helper import notifications
from soc.modules.gci.models import conversation as gciconversation_model
from soc.modules.gci.models import message as gcimessage_model
from soc.models import conversation as conversation_model
def queryForProgramAndCreator(program, creator):
"""Creates a query for GCIConversation entities for the given program and
creator.
Args:
program: Key (ndb) of GCIProgram.
creator: Key (ndb) of User who created the conversation.
Returns:
An ndb query for GCIConversations for the program and creator.
"""
query = (gciconversation_model.GCIConversation.query()
.filter(gciconversation_model.GCIConversation.program == program)
.filter(gciconversation_model.GCIConversation.creator == creator))
return query
def queryForProgramAndUser(program, user):
"""Creates a query for GCIConversationUser entities for the given program and
user.
Args:
program: Key (ndb) of GCIProgram.
user: Key (ndb) of User.
Returns:
An ndb query for GCIConversationUsers for the program and user.
"""
query = (gciconversation_model.GCIConversationUser.query()
.filter(gciconversation_model.GCIConversationUser.program == program)
.filter(gciconversation_model.GCIConversationUser.user == user))
return query
def queryConversationsForProgram(program):
"""Creates a query for GCIConversation entities for the given program.
Args:
program: Key (ndb) of GCIProgram.
Returns:
An ndb query for GCIConversations for the program.
"""
return gciconversation_model.GCIConversation.query(
gciconversation_model.GCIConversation.program == program)
def queryConversationUserForConversation(conversation):
"""Creates a query for GCIConversationUser entities for a conversation.
Args:
conversation: Key (ndb) of GCIConversation.
Returns:
An ndb query for GCIConversationUsers for the conversation.
"""
return gciconversation_model.GCIConversationUser.query(
gciconversation_model.GCIConversationUser.conversation == conversation)
def queryConversationUserForConversationAndUser(conversation, user):
"""Creates a query for GCIConversationUser entities in a conversation for a
user.
Args:
conversation: Key (ndb) of GCIConversation.
user: Key (ndb) of User.
Returns:
An ndb query for GCIConversationUsers for a conversation and user.
"""
return queryConversationUserForConversation(conversation).filter(
gciconversation_model.GCIConversationUser.user == user)
def queryUnreadMessagesForConversationAndUser(conversation, user):
"""Creates a query for unread messages in a conversation for a user.
Args:
conversation: Key (ndb) of GCIConversation.
user: Key (ndb) of User.
Returns:
An ndb query for GCIMessages the user has not yet read in the conversation.
If the user is not part of the conversation, None is returned.
"""
conversation_user = queryConversationUserForConversationAndUser(
conversation, user).get()
if not conversation_user:
return None
date_last_seen = conversation_user.last_message_seen_on
# The > filter in the query below seemed to still include equivalent
  # datetimes, so incrementing it by a second fixes this.
date_last_seen += timedelta(seconds=1)
return (gcimessage_logic.queryForConversation(conversation)
.filter(gcimessage_model.GCIMessage.sent_on > date_last_seen))
def numUnreadMessagesForConversationAndUser(conversation, user):
"""Calculates the number of unread messages in a conversation for a user.
Args:
conversation: Key (ndb) of GCIConversation.
user: Key (ndb) of User.
Returns:
The number of messages the user has not read in the conversation.
If the user is not involved in the conversation, 0 is returned.
"""
query = queryUnreadMessagesForConversationAndUser(conversation, user)
return 0 if query is None else query.count()
#TODO(drewgottlieb) use mapreduce for this
def numUnreadMessagesForProgramAndUser(program, user):
"""Returns the number of unread messages for all conversations the user is in
for a program.
Args:
program: Key (ndb) of GCIProgram.
user: Key (ndb) of User.
"""
conv_users = queryForProgramAndUser(program, user).fetch(1000)
unread_count = 0
for conv_user in conv_users:
unread_count += numUnreadMessagesForConversationAndUser(
conv_user.conversation, user)
return unread_count
def markAllReadForConversationAndUser(conversation, user):
"""Marks all messages in a conversation as read for the user.
Sets the GCIConversationUser's last_message_seen_on to the last message's
sent_on.
Args:
conversation: Key (ndb) of GCIConversation.
user: Key (ndb) of User.
"""
conv_user_results = queryConversationUserForConversationAndUser(
conversation, user).fetch(1)
if not conv_user_results:
raise Exception('No GCIConversationUser could be found.')
conv_user = conv_user_results[0]
last_message = gcimessage_logic.getLastMessageForConversation(conversation)
conv_user.last_message_seen_on = last_message.sent_on
conv_user.put()
def reputConversationUsers(conversation):
"""Updates all computed properties in each GCIConversationUser entity for
a conversation.
Args:
conversation: Key (ndb) of GCIConversation.
"""
@ndb.tasklet
def reput(conv_user):
conv_user.put()
queryConversationUserForConversation(conversation).map(reput)
def createMessage(conversation, user=None, content=''):
"""Creates and returns a new GCIMessage, and updates conversation and
conversationusers' last_message_sent_on date.
Args:
conversation: Key (ndb) of GCIConversation.
user: Key (ndb) of user who sent the message. Can be None if conversation
is created by Melange itself.
content: Content of message. This function will not sanitize it for you.
Returns:
The created GCIMessage.
"""
if content is None: return None
@ndb.transactional
def create():
message = gcimessage_model.GCIMessage(
parent=conversation,
conversation=conversation,
content=content,
author=user)
message.put()
# Update last_message_sent_on in conversation
conversation_ent = conversation.get()
conversation_ent.last_message_on = message.sent_on
conversation_ent.put()
return message
message = create()
# Reput each conversationuser for the conversation to update computed
# properties such as last_message_sent_on
reputConversationUsers(conversation)
return message
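# Illustrative call order (an assumption about typical usage; conv_key and
# user_key are placeholder ndb keys, not names from this module):
#
#   addUserToConversation(conversation=conv_key, user=user_key)
#   msg = createMessage(conversation=conv_key, user=user_key, content='Hi all')
#
# createMessage re-puts every GCIConversationUser afterwards so that their
# computed properties reflect the new last message.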
def addUserToConversation(conversation, user):
"""Creates a GCIConversationUser adding the user to the conversation, if the
user is not already part of the conversation.
Args:
conversation: Key (ndb) of GCIConversation.
user: Key (ndb) of User.
Returns:
The created (or existing) GCIConversationUser entity representing the
user's involvement.
"""
@ndb.transactional
def txn():
query = gciconversation_model.GCIConversationUser.query(
gciconversation_model.GCIConversationUser.user == user,
gciconversation_model.GCIConversationUser.conversation == conversation,
ancestor=conversation)
conv_user = query.get()
if conv_user:
return conv_user
conv_user = gciconversation_model.GCIConversationUser(
parent=conversation, conversation=conversation, user=user)
conv_user.put()
return conv_user
return txn()
def removeUserFromConversation(conversation, user):
"""Removes the GCIConversationUser for a user and conversation, if it exists.
Will remove all matching instances, even though there should never be more
  than one.
Args:
conversation: Key (ndb) of GCIConversation.
user: Key (ndb) of User.
"""
keys = queryConversationUserForConversationAndUser(
conversation=conversation, user=user).fetch(100, keys_only=True)
ndb.delete_multi(keys)
def doesConversationUserBelong(
conversation_user, ignore_auto_update_users=True):
"""Decides whether the user in a conversation belongs in the conversation.
If ignore_auto_update_users is False, True will be returned if the
conversation's auto_update_users is False.
Args:
conversation_user: Key (ndb) of a GCIConversationUser representing the
user's involvement in the conversation.
ignore_auto_update_users: Whether this should ignore the conversation's
auto_update_users property.
Returns:
Whether the user belongs in the conversation. If the conversation's
recipients_type is 'User', True is always returned. Also returns true if
the user is the conversation's creator.
"""
conversation_user_ent = conversation_user.get()
return doesUserBelongInConversation(
user=conversation_user_ent.user,
conversation=conversation_user_ent.conversation,
ignore_auto_update_users=ignore_auto_update_users)
def doesUserBelongInConversation(
user, conversation, ignore_auto_update_users=True):
"""Decides whether the user in a conversation belongs in the conversation.
If ignore_auto_update_users is False, True will be returned if the
conversation's auto_update_users is False.
Args:
user: Key (ndb) of a User.
conversation: Key (ndb) of a GCIConversation.
ignore_auto_update_users: Whether this should ignore the conversation's
auto_update_users property.
Returns:
Whether the user belongs in the conversation. If the conversation's
recipients_type is 'User', True is always returned. Also returns true if
the user is the conversation's creator.
"""
conversation_ent = conversation.get()
if not conversation_ent.auto_update_users and not ignore_auto_update_users:
return True
if conversation_ent.creator == user:
return True
profile = profile_logic.getProfileForUsername(
user.id(), conversation_ent.program.to_old_key())
if not profile:
raise Exception('Could not find GCIProfile for user and program.')
if conversation_ent.recipients_type == conversation_model.PROGRAM:
if conversation_ent.include_admins and profile.is_admin:
return True
elif conversation_ent.include_mentors and profile.is_mentor:
return True
elif conversation_ent.include_students and profile.is_student:
return True
elif (profile.is_student and conversation_ent.include_winners
and profile.student_data.is_winner):
return True
else:
return False
elif conversation_ent.recipients_type == conversation_model.ORGANIZATION:
if (conversation_ent.include_admins and
conversation_ent.organization in profile.admin_for):
return True
elif (conversation_ent.include_mentors and
conversation_ent.organization in profile.mentor_for):
return True
elif (profile.is_student and conversation_ent.include_winners
and profile.student_data.is_winner and
conversation_ent.organization == profile.student_data.winner_for):
return True
else:
return False
# This might be reached if conversation recipients_type is 'User'
return True
def refreshConversationParticipants(conversation):
"""Creates/deletes GCIConversationUser entities depending on the converation's
criteria.
The conversation's owner is always included in the conversation.
If the conversation's recipients_type is 'User', this function will not do
anything because it is expected that the GCIConversationUser will be managed
elsewhere.
Args:
conversation: Key (ndb) of GCIConversation.
"""
conv = conversation.get()
def addProfile(profile):
addUserToConversation(
conversation=conversation,
user=profile.key.parent())
def deleteConvUserIfDoesntBelong(conv_user):
if not doesConversationUserBelong(conversation_user=conv_user.key):
conv_user.key.delete()
# Remove any users included who no longer fit the criteria
if conv.recipients_type != conversation_model.USER:
conv_user_query = queryConversationUserForConversation(conversation)
map(deleteConvUserIfDoesntBelong, conv_user_query)
# Make sure users who fit the criteria are included
if conv.recipients_type == conversation_model.PROGRAM:
if conv.include_admins:
query = profile_model.Profile.query(
profile_model.Profile.program == conv.program,
profile_model.Profile.is_admin == True)
map(addProfile, query)
if conv.include_mentors:
query = profile_logic.queryAllMentorsForProgram(conv.program.to_old_key())
map(addProfile, query)
if conv.include_students:
query = profile_model.Profile.query(
profile_model.Profile.program == conv.program,
profile_model.Profile.is_student == True)
map(addProfile, query)
if conv.include_winners:
query = profile_model.Profile.query(
profile_model.Profile.program == conv.program,
profile_model.Profile.student_data.is_winner == True)
map(addProfile, query)
elif conv.recipients_type == conversation_model.ORGANIZATION:
if conv.include_admins:
org_admins = profile_logic.getOrgAdmins(conv.organization)
map(addProfile, org_admins)
if conv.include_mentors:
query = profile_model.Profile.query(
profile_model.Profile.mentor_for == conv.organization,
profile_model.Profile.status == profile_model.Status.ACTIVE)
map(addProfile, query)
if conv.include_winners:
query = profile_model.Profile.query(
profile_model.Profile.student_data.winner_for == conv.organization,
profile_model.Profile.status == profile_model.Status.ACTIVE)
map(addProfile, query)
# Make sure conversation's creator is included
if conv.creator is not None:
addUserToConversation(conversation=conversation, user=conv.creator)
def refreshConversationsForUserAndProgram(user_key, program_key):
"""Adds/removes the user to/from conversations that they should be involved in
based on the conversation's criteria.
For example, if there is a conversation that should include all program
mentors, and this user is a program mentor who is involved with the program
  but isn't part of the conversation, this function will add the user to that
  conversation. Likewise, it will remove the user from conversations they have no
business being in, unless they're the creator of the conversation or the
conversation is for specific users.
  This will only look at conversations that have auto_update_users set to
  True, and whose recipients_type is not 'User'.
This function will not add a user to a conversation if the user does not fit
the conversation's criteria, even if the user is the creator. If the user is
_only_ the creator of the conversation, the user's GCIConversationUser entity
should have been created when the conversation was initially created.
Args:
user_key: Key (ndb) of the User.
program_key: Key (ndb) of the GCIProgram.
"""
profile = profile_logic.getProfileForUsername(
user_key.id(), program_key.to_old_key())
if not profile:
raise Exception('Could not find Profile for user and program.')
def deleteConvUserIfDoesntBelong(conv_user):
if not doesConversationUserBelong(
conversation_user=conv_user.key, ignore_auto_update_users=False):
conv_user.key.delete()
# Remove user from any conversations they're in that they don't belong in
conv_user_query = queryForProgramAndUser(user=user_key, program=program_key)
map(deleteConvUserIfDoesntBelong, conv_user_query)
def addToConversation(conversation):
addUserToConversation(conversation=conversation.key, user=user_key)
mentor_org_keys = profile.mentor_for
admin_org_keys = profile.admin_for
# Make sure user is added to program conversations they belong in as a
# student
if profile.is_student:
query = (queryConversationsForProgram(program_key)
.filter(gciconversation_model.GCIConversation.recipients_type ==
conversation_model.PROGRAM)
.filter(gciconversation_model.GCIConversation.auto_update_users == True)
.filter(gciconversation_model.GCIConversation.include_students == True))
map(addToConversation, query)
# Make sure user is added to program conversations they belong in as a
# mentor
if profile.is_mentor:
query = (queryConversationsForProgram(program_key)
.filter(gciconversation_model.GCIConversation.recipients_type ==
conversation_model.PROGRAM)
.filter(gciconversation_model.GCIConversation.auto_update_users == True)
.filter(gciconversation_model.GCIConversation.include_mentors == True))
map(addToConversation, query)
# Make sure user is added to program conversations they belong in as an
# admin
if profile.is_admin:
query = (queryConversationsForProgram(program_key)
.filter(gciconversation_model.GCIConversation.recipients_type ==
conversation_model.PROGRAM)
.filter(gciconversation_model.GCIConversation.auto_update_users == True)
.filter(gciconversation_model.GCIConversation.include_admins == True))
map(addToConversation, query)
# Make sure user is added to program conversations they belong in as a
# winner
if profile.student_data and profile.student_data.is_winner:
query = (queryConversationsForProgram(program_key)
.filter(gciconversation_model.GCIConversation.recipients_type ==
conversation_model.PROGRAM)
.filter(gciconversation_model.GCIConversation.auto_update_users == True)
.filter(gciconversation_model.GCIConversation.include_winners == True))
map(addToConversation, query)
# Make sure user is added to org conversations they belong in as an org
# mentor
if profile.is_mentor and mentor_org_keys:
query = (queryConversationsForProgram(program_key)
.filter(gciconversation_model.GCIConversation.recipients_type ==
conversation_model.ORGANIZATION)
.filter(gciconversation_model.GCIConversation.auto_update_users == True)
.filter(gciconversation_model.GCIConversation.include_mentors == True)
.filter(gciconversation_model.GCIConversation.organization.IN(
mentor_org_keys)))
map(addToConversation, query)
# Make sure user is added to org conversations they belong in as an org
# admin
if profile.is_admin and admin_org_keys:
query = (queryConversationsForProgram(program_key)
.filter(gciconversation_model.GCIConversation.recipients_type ==
conversation_model.ORGANIZATION)
.filter(gciconversation_model.GCIConversation.auto_update_users == True)
.filter(gciconversation_model.GCIConversation.include_admins == True)
.filter(gciconversation_model.GCIConversation.organization.IN(
admin_org_keys)))
map(addToConversation, query)
# Make sure user is added to org conversations they belong in as an org
# winner
if profile.is_student and profile.student_data.is_winner:
query = (queryConversationsForProgram(program_key)
.filter(gciconversation_model.GCIConversation.recipients_type ==
conversation_model.ORGANIZATION)
.filter(gciconversation_model.GCIConversation.auto_update_users == True)
.filter(gciconversation_model.GCIConversation.include_winners == True)
.filter(gciconversation_model.GCIConversation.organization ==
profile.student_data.winner_for))
map(addToConversation, query)
def getSubscribedEmails(conversation, exclude=None):
"""Gets the list of email addresses for all users subscribed to a
conversation.
Args:
conversation: Key (ndb) of GCIConversation.
exclude: Keys (ndb) of Users that, if given, will not be in the set of
emails.
Returns:
Set of email addresses.
"""
conversation_ent = conversation.get()
conv_users = queryConversationUserForConversation(conversation)
program_key = ndb.Key.to_old_key(conversation_ent.program)
addresses = set()
for conv_user in conv_users:
if conv_user.enable_notifications and (
not exclude or conv_user.user not in exclude):
profile = profile_logic.getProfileForUsername(
conv_user.user.id(), program_key)
if not profile:
raise Exception('Could not find GCIProfile for user %s and program. %s'
% (conv_user.name, program_key.name()))
addresses.add(profile.contact.email)
return addresses
def notifyParticipantsOfMessage(message, is_reply):
"""Notifies participants in a conversation's participants of a new message.
Args:
message: Key (ndb) of GCIMessage of which to notify participants.
is_reply: Whether this message is a reply to an existing conversation.
"""
message_ent = message.get()
conversation_ent = message_ent.conversation.get()
to_emails = getSubscribedEmails(
message_ent.conversation, exclude=[message_ent.author])
context = notifications.getTaskConversationMessageContext(
message, list(to_emails), is_reply)
txn = mailer.getSpawnMailTaskTxn(context, parent=None)
db.run_in_transaction(txn)
| rhyolight/nupic.son | app/soc/modules/gci/logic/conversation.py | Python | apache-2.0 | 22,136 |
# -*- coding: utf-8 -*-
from collections import Iterable
from helpers import add_attribute_self, yesman
@add_attribute_self('filter_constructor')
class ftuple(tuple):
"""
    Replacement class for tuple with a more convenient constructor API:
    a single iterable or several positional arguments are both accepted.
"""
root = tuple
def __new__(cls, *args):
""" Replacement constructor, admits a single iterable or more than one parameter
"""
if len(args) == 1 and isinstance(args[0], Iterable):
return tuple.__new__(cls, args[0])
else:
return tuple.__new__(cls, args)
@add_attribute_self('filter_constructor')
class flist(list):
"""
Replacement class for list, with compatible API.
In place methods are redefined to return 'self' instead of None.
"""
root = list
def __init__(self, *args):
""" Replacement constructor, admits a single iterable or more than one parameter
"""
if len(args) == 1 and isinstance(args[0], Iterable):
list.__init__(self, args[0])
else:
list.__init__(self, args)
# mutable methods (return self)
def clear(self):
""" clear returns self
"""
del self[:]
return self
def append(self, x):
""" append replacement that returns self
"""
list.append(self, x)
return self
def extend(self, iterable):
""" extend replacement that returns self
"""
list.extend(self, iterable)
return self
def remove(self, value):
""" remove replacement that returns self
"""
list.remove(self, value)
return self
def remove_all(self, iterable):
""" iterable version of remove
"""
for i in iterable:
list.remove(self, i)
return self
def remove_index(self, at):
del self[at]
return self
def remove_slice(self, start=0, end=None):
""" Does not raise IndexError
"""
if end is None:
del self[start:]
else:
del self[start:end]
return self
def discard(self, value):
""" Like remove except it does not raise ValueError exception
"""
try:
list.remove(self, value)
except ValueError:
pass
return self
def discard_all(self, iterable):
""" iterable version of discard
"""
for i in iterable:
self.discard(i)
return self
def discard_index(self, at):
try:
del self[at]
except IndexError:
pass
return self
discard_slice = remove_slice
def reverse(self):
""" reverse replacement that returns self
"""
        list.reverse(self)
return self
def sort(self, **p):
""" sort replacement that returns self
"""
list.sort(self, **p)
return self
__isub__ = discard_all
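# Illustrative chaining (added example; only names defined in this module are
# used). Guarded so importing the module has no side effects.
if __name__ == "__main__":
    row = flist(3, 1, 2).append(4).sort().discard(99)   # mutators return self
    assert row == [1, 2, 3, 4]
    row -= [2, 3]                                        # __isub__ -> discard_all
    assert row == [1, 4]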
| francois-vincent/containers | base_containers.py | Python | gpl-2.0 | 3,025 |
from .base import *
import warnings
import pandas as pd
def validate_set_ops(df, other):
"""
Helper function to ensure that DataFrames are valid for set operations.
Columns must be the same name in the same order, and indices must be of the
same dimension with the same names.
"""
if df.columns.values.tolist() != other.columns.values.tolist():
not_in_df = [col for col in other.columns if col not in df.columns]
not_in_other = [col for col in df.columns if col not in other.columns]
error_string = 'Error: not compatible.'
if len(not_in_df):
error_string += ' Cols in y but not x: ' + str(not_in_df) + '.'
if len(not_in_other):
error_string += ' Cols in x but not y: ' + str(not_in_other) + '.'
raise ValueError(error_string)
if len(df.index.names) != len(other.index.names):
raise ValueError('Index dimension mismatch')
if df.index.names != other.index.names:
raise ValueError('Index mismatch')
else:
return
# ------------------------------------------------------------------------------
# `union`
# ------------------------------------------------------------------------------
@pipe
def union(df, other, index=False, keep='first'):
"""
Returns rows that appear in either DataFrame.
Args:
df (pandas.DataFrame): data passed in through the pipe.
other (pandas.DataFrame): other DataFrame to use for set operation with
the first.
Kwargs:
index (bool): Boolean indicating whether to consider the pandas index
as part of the set operation (default `False`).
keep (str): Indicates which duplicate should be kept. Options are `'first'`
and `'last'`.
"""
validate_set_ops(df, other)
stacked = df.append(other)
if index:
stacked_reset_indexes = stacked.reset_index()
index_cols = [col for col in stacked_reset_indexes.columns if col not in df.columns]
index_name = df.index.names
return_df = stacked_reset_indexes.drop_duplicates(keep=keep).set_index(index_cols)
return_df.index.names = index_name
return return_df
else:
return stacked.drop_duplicates(keep=keep)
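# A minimal usage sketch (comment only; `a` and `b` are assumed toy frames, and
# `>>` is dfply's pipe operator):
#
#   a = pd.DataFrame({'x': [1, 2]})
#   b = pd.DataFrame({'x': [2, 3]})
#   a >> union(b)                 # rows appearing in either frame: x = 1, 2, 3
#
# With index=True the pandas index takes part in the duplicate check, so two
# identical rows carrying different index labels are both kept.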
# ------------------------------------------------------------------------------
# `intersect`
# ------------------------------------------------------------------------------
@pipe
def intersect(df, other, index=False, keep='first'):
"""
Returns rows that appear in both DataFrames.
Args:
df (pandas.DataFrame): data passed in through the pipe.
other (pandas.DataFrame): other DataFrame to use for set operation with
the first.
Kwargs:
index (bool): Boolean indicating whether to consider the pandas index
as part of the set operation (default `False`).
keep (str): Indicates which duplicate should be kept. Options are `'first'`
and `'last'`.
"""
validate_set_ops(df, other)
if index:
df_reset_index = df.reset_index()
other_reset_index = other.reset_index()
index_cols = [col for col in df_reset_index.columns if col not in df.columns]
df_index_names = df.index.names
return_df = (pd.merge(df_reset_index, other_reset_index,
how='inner',
left_on=df_reset_index.columns.values.tolist(),
right_on=df_reset_index.columns.values.tolist())
.set_index(index_cols))
return_df.index.names = df_index_names
return_df = return_df.drop_duplicates(keep=keep)
return return_df
else:
return_df = pd.merge(df, other,
how='inner',
left_on=df.columns.values.tolist(),
right_on=df.columns.values.tolist())
return_df = return_df.drop_duplicates(keep=keep)
return return_df
# ------------------------------------------------------------------------------
# `set_diff`
# ------------------------------------------------------------------------------
@pipe
def set_diff(df, other, index=False, keep='first'):
"""
Returns rows that appear in the first DataFrame but not the second.
Args:
df (pandas.DataFrame): data passed in through the pipe.
other (pandas.DataFrame): other DataFrame to use for set operation with
the first.
Kwargs:
index (bool): Boolean indicating whether to consider the pandas index
as part of the set operation (default `False`).
keep (str): Indicates which duplicate should be kept. Options are `'first'`
and `'last'`.
"""
validate_set_ops(df, other)
if index:
df_reset_index = df.reset_index()
other_reset_index = other.reset_index()
index_cols = [col for col in df_reset_index.columns if col not in df.columns]
df_index_names = df.index.names
return_df = (pd.merge(df_reset_index, other_reset_index,
how='left',
left_on=df_reset_index.columns.values.tolist(),
right_on=other_reset_index.columns.values.tolist(),
indicator=True)
.set_index(index_cols))
return_df = return_df[return_df._merge == 'left_only']
return_df.index.names = df_index_names
return_df = return_df.drop_duplicates(keep=keep)[df.columns]
return return_df
else:
return_df = pd.merge(df, other,
how='left',
left_on=df.columns.values.tolist(),
right_on=df.columns.values.tolist(),
indicator=True)
return_df = return_df[return_df._merge == 'left_only']
return_df = return_df.drop_duplicates(keep=keep)[df.columns]
return return_df
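# The three verbs side by side (illustrative comment, reusing the toy frames
# assumed after `union` above):
#
#   a >> union(b)       # x = 1, 2, 3
#   a >> intersect(b)   # x = 2
#   a >> set_diff(b)    # x = 1   (rows of a that do not appear in b)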
| kieferk/dfply | dfply/set_ops.py | Python | gpl-3.0 | 6,022 |
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.cloud.pubsub.message import Message
from notification_polling import summarize
MESSAGE_ID = 12345
def test_parse_json_message():
attributes = {
'eventType': 'OBJECT_FINALIZE',
'bucketId': 'mybucket',
'objectId': 'myobject',
'objectGeneration': 1234567,
'resource': 'projects/_/buckets/mybucket/objects/myobject#1234567',
'notificationConfig': ('projects/_/buckets/mybucket/'
'notificationConfigs/5'),
'payloadFormat': 'JSON_API_V1'}
data = ('{'
' "size": 12345,'
' "contentType": "text/html",'
' "metageneration": 1'
'}')
message = Message(data, MESSAGE_ID, attributes=attributes)
assert summarize(message) == (
'\tEvent type: OBJECT_FINALIZE\n'
'\tBucket ID: mybucket\n'
'\tObject ID: myobject\n'
'\tGeneration: 1234567\n'
'\tContent type: text/html\n'
'\tSize: 12345\n'
'\tMetageneration: 1\n')
| JavaRabbit/CS496_capstone | storage/cloud-client/notification_polling_test.py | Python | apache-2.0 | 1,621 |
response.title = "LEL"
response.subtitle = "List Extraction Learning"
response.menu = [
(T('Home'), False, URL('default','index')),
(T('Tutorial'), False, URL('default','tutorial')),
(T('Contribute'), False, URL('default','contribute')),
]
| satwantrana/lelui | models/menu.py | Python | mit | 240 |
# Copyright (c) 2015 Advanced Micro Devices, Inc.
# All rights reserved.
#
# For use for simulation and test purposes only
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Sooraj Puthoor
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from m5.util import addToPath
from Ruby import create_topology
from Ruby import send_evicts
addToPath('../')
from topologies.Cluster import Cluster
from topologies.Crossbar import Crossbar
class CntrlBase:
_seqs = 0
@classmethod
def seqCount(cls):
# Use SeqCount not class since we need global count
CntrlBase._seqs += 1
return CntrlBase._seqs - 1
_cntrls = 0
@classmethod
def cntrlCount(cls):
# Use CntlCount not class since we need global count
CntrlBase._cntrls += 1
return CntrlBase._cntrls - 1
_version = 0
@classmethod
def versionCount(cls):
cls._version += 1 # Use count for this particular type
return cls._version - 1
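# Clarifying note (added): seqCount() and cntrlCount() increment counters on
# CntrlBase itself, so sequencers and controllers are numbered globally across
# the whole system, whereas versionCount() increments cls._version and yields
# an independent 0, 1, 2, ... sequence per controller type. For example, two
# TCPCntrl instances get versions 0 and 1 even if CPCntrl objects are created
# in between.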
class L1Cache(RubyCache):
resourceStalls = False
dataArrayBanks = 2
tagArrayBanks = 2
dataAccessLatency = 1
tagAccessLatency = 1
def create(self, size, assoc, options):
self.size = MemorySize(size)
self.assoc = assoc
self.replacement_policy = PseudoLRUReplacementPolicy()
class L2Cache(RubyCache):
resourceStalls = False
assoc = 16
dataArrayBanks = 16
tagArrayBanks = 16
def create(self, size, assoc, options):
self.size = MemorySize(size)
self.assoc = assoc
self.replacement_policy = PseudoLRUReplacementPolicy()
class CPCntrl(CorePair_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1Icache = L1Cache()
self.L1Icache.create(options.l1i_size, options.l1i_assoc, options)
self.L1D0cache = L1Cache()
self.L1D0cache.create(options.l1d_size, options.l1d_assoc, options)
self.L1D1cache = L1Cache()
self.L1D1cache.create(options.l1d_size, options.l1d_assoc, options)
self.L2cache = L2Cache()
self.L2cache.create(options.l2_size, options.l2_assoc, options)
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1Icache
self.sequencer.dcache = self.L1D0cache
self.sequencer.ruby_system = ruby_system
self.sequencer.coreid = 0
self.sequencer.is_cpu_sequencer = True
self.sequencer1 = RubySequencer()
self.sequencer1.version = self.seqCount()
self.sequencer1.icache = self.L1Icache
self.sequencer1.dcache = self.L1D1cache
self.sequencer1.ruby_system = ruby_system
self.sequencer1.coreid = 1
self.sequencer1.is_cpu_sequencer = True
self.issue_latency = options.cpu_to_dir_latency
self.send_evictions = send_evicts(options)
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class TCPCache(RubyCache):
size = "16kB"
assoc = 16
dataArrayBanks = 16
tagArrayBanks = 16
dataAccessLatency = 4
tagAccessLatency = 1
def create(self, options):
self.size = MemorySize(options.tcp_size)
self.dataArrayBanks = 16
self.tagArrayBanks = 16
self.dataAccessLatency = 4
self.tagAccessLatency = 1
self.resourceStalls = options.no_tcc_resource_stalls
self.replacement_policy = PseudoLRUReplacementPolicy()
class TCPCntrl(TCP_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1cache = TCPCache()
self.L1cache.create(options)
self.issue_latency = 1
self.coalescer = VIPERCoalescer()
self.coalescer.version = self.seqCount()
self.coalescer.icache = self.L1cache
self.coalescer.dcache = self.L1cache
self.coalescer.ruby_system = ruby_system
self.coalescer.support_inst_reqs = False
self.coalescer.is_cpu_sequencer = False
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1cache
self.sequencer.dcache = self.L1cache
self.sequencer.ruby_system = ruby_system
self.sequencer.is_cpu_sequencer = True
self.use_seq_not_coal = False
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class SQCCache(RubyCache):
dataArrayBanks = 8
tagArrayBanks = 8
dataAccessLatency = 1
tagAccessLatency = 1
def create(self, options):
self.size = MemorySize(options.sqc_size)
self.assoc = options.sqc_assoc
self.replacement_policy = PseudoLRUReplacementPolicy()
class SQCCntrl(SQC_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1cache = SQCCache()
self.L1cache.create(options)
self.L1cache.resourceStalls = False
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1cache
self.sequencer.dcache = self.L1cache
self.sequencer.ruby_system = ruby_system
self.sequencer.support_data_reqs = False
self.sequencer.is_cpu_sequencer = False
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class TCC(RubyCache):
size = MemorySize("256kB")
assoc = 16
dataAccessLatency = 8
tagAccessLatency = 2
resourceStalls = True
def create(self, options):
self.assoc = options.tcc_assoc
if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
s = options.num_compute_units
tcc_size = s * 128
tcc_size = str(tcc_size)+'kB'
self.size = MemorySize(tcc_size)
self.dataArrayBanks = 64
self.tagArrayBanks = 64
else:
self.size = MemorySize(options.tcc_size)
self.dataArrayBanks = 256 / options.num_tccs #number of data banks
self.tagArrayBanks = 256 / options.num_tccs #number of tag banks
self.size.value = self.size.value / options.num_tccs
if ((self.size.value / long(self.assoc)) < 128):
self.size.value = long(128 * self.assoc)
self.start_index_bit = math.log(options.cacheline_size, 2) + \
math.log(options.num_tccs, 2)
self.replacement_policy = PseudoLRUReplacementPolicy()
class TCCCntrl(TCC_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L2cache = TCC()
self.L2cache.create(options)
self.ruby_system = ruby_system
self.L2cache.resourceStalls = options.no_tcc_resource_stalls
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class L3Cache(RubyCache):
dataArrayBanks = 16
tagArrayBanks = 16
def create(self, options, ruby_system, system):
self.size = MemorySize(options.l3_size)
self.size.value /= options.num_dirs
self.assoc = options.l3_assoc
        self.dataArrayBanks /= options.num_dirs
        self.tagArrayBanks /= options.num_dirs
self.dataAccessLatency = options.l3_data_latency
self.tagAccessLatency = options.l3_tag_latency
self.resourceStalls = False
self.replacement_policy = PseudoLRUReplacementPolicy()
class ProbeFilter(RubyCache):
size = "4MB"
assoc = 16
dataArrayBanks = 256
tagArrayBanks = 256
def create(self, options, ruby_system, system):
self.block_size = "%dB" % (64 * options.blocks_per_region)
self.size = options.region_dir_entries * \
self.block_size * options.num_compute_units
self.assoc = 8
self.tagArrayBanks = 8
self.tagAccessLatency = options.dir_tag_latency
self.dataAccessLatency = 1
self.resourceStalls = options.no_resource_stalls
self.start_index_bit = 6 + int(math.log(options.blocks_per_region, 2))
self.replacement_policy = PseudoLRUReplacementPolicy()
class L3Cntrl(L3Cache_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L3cache = L3Cache()
self.L3cache.create(options, ruby_system, system)
self.l3_response_latency = \
max(self.L3cache.dataAccessLatency, self.L3cache.tagAccessLatency)
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
req_to_l3, probe_to_l3, resp_to_l3):
self.reqToDir = req_to_dir
self.respToDir = resp_to_dir
self.l3UnblockToDir = l3_unblock_to_dir
self.reqToL3 = req_to_l3
self.probeToL3 = probe_to_l3
self.respToL3 = resp_to_l3
class DirMem(RubyDirectoryMemory, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
phys_mem_size = AddrRange(options.mem_size).size()
mem_module_size = phys_mem_size / options.num_dirs
dir_size = MemorySize('0B')
dir_size.value = mem_module_size
self.size = dir_size
class DirCntrl(Directory_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.response_latency = 30
self.directory = DirMem()
self.directory.create(options, ruby_system, system)
self.L3CacheMemory = L3Cache()
self.L3CacheMemory.create(options, ruby_system, system)
self.ProbeFilterMemory = ProbeFilter()
self.ProbeFilterMemory.create(options, ruby_system, system)
self.l3_hit_latency = \
max(self.L3CacheMemory.dataAccessLatency,
self.L3CacheMemory.tagAccessLatency)
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
req_to_l3, probe_to_l3, resp_to_l3):
self.reqToDir = req_to_dir
self.respToDir = resp_to_dir
self.l3UnblockToDir = l3_unblock_to_dir
self.reqToL3 = req_to_l3
self.probeToL3 = probe_to_l3
self.respToL3 = resp_to_l3
def define_options(parser):
parser.add_option("--num-subcaches", type = "int", default = 4)
parser.add_option("--l3-data-latency", type = "int", default = 20)
parser.add_option("--l3-tag-latency", type = "int", default = 15)
parser.add_option("--cpu-to-dir-latency", type = "int", default = 120)
parser.add_option("--gpu-to-dir-latency", type = "int", default = 120)
parser.add_option("--no-resource-stalls", action = "store_false",
default = True)
parser.add_option("--no-tcc-resource-stalls", action = "store_false",
default = True)
parser.add_option("--num-tbes", type = "int", default = 2560)
parser.add_option("--l2-latency", type = "int", default = 50) # load to use
parser.add_option("--num-tccs", type = "int", default = 1,
help = "number of TCC banks in the GPU")
parser.add_option("--sqc-size", type = 'string', default = '32kB',
help = "SQC cache size")
parser.add_option("--sqc-assoc", type = 'int', default = 8,
help = "SQC cache assoc")
parser.add_option("--region-dir-entries", type = "int", default = 8192)
parser.add_option("--dir-tag-latency", type = "int", default = 8)
parser.add_option("--dir-tag-banks", type = "int", default = 4)
parser.add_option("--blocks-per-region", type = "int", default = 1)
parser.add_option("--use-L3-on-WT", action = "store_true", default = False)
parser.add_option("--nonInclusiveDir", action = "store_true",
default = False)
parser.add_option("--WB_L1", action = "store_true",
default = False, help = "writeback L2")
parser.add_option("--WB_L2", action = "store_true",
default = False, help = "writeback L2")
parser.add_option("--TCP_latency", type = "int",
default = 4, help = "TCP latency")
parser.add_option("--TCC_latency", type = "int",
default = 16, help = "TCC latency")
parser.add_option("--tcc-size", type = 'string', default = '2MB',
help = "agregate tcc size")
parser.add_option("--tcc-assoc", type = 'int', default = 16,
help = "tcc assoc")
parser.add_option("--tcp-size", type = 'string', default = '16kB',
help = "tcp size")
parser.add_option("--sampler-sets", type = "int", default = 1024)
parser.add_option("--sampler-assoc", type = "int", default = 16)
parser.add_option("--sampler-counter", type = "int", default = 512)
parser.add_option("--noL1", action = "store_true", default = False,
help = "bypassL1")
parser.add_option("--noL2", action = "store_true", default = False,
help = "bypassL2")
def create_system(options, full_system, system, dma_devices, bootmem,
ruby_system):
if buildEnv['PROTOCOL'] != 'GPU_VIPER_Baseline':
panic("This script requires the" \
"GPU_VIPER_Baseline protocol to be built.")
cpu_sequencers = []
#
# The ruby network creation expects the list of nodes in the system to be
# consistent with the NetDest list. Therefore the l1 controller nodes
# must be listed before the directory nodes and directory nodes before
# dma nodes, etc.
#
cp_cntrl_nodes = []
tcp_cntrl_nodes = []
sqc_cntrl_nodes = []
tcc_cntrl_nodes = []
dir_cntrl_nodes = []
l3_cntrl_nodes = []
#
# Must create the individual controllers before the network to ensure the
# controller constructors are called before the network constructor
#
# For an odd number of CPUs, still create the right number of controllers
TCC_bits = int(math.log(options.num_tccs, 2))
# This is the base crossbar that connects the L3s, Dirs, and cpu/gpu
# Clusters
crossbar_bw = 16 * options.num_compute_units #Assuming a 2GHz clock
mainCluster = Cluster(intBW = crossbar_bw)
for i in xrange(options.num_dirs):
dir_cntrl = DirCntrl(noTCCdir=True,TCC_select_num_bits = TCC_bits)
dir_cntrl.create(options, ruby_system, system)
dir_cntrl.number_of_TBEs = options.num_tbes
dir_cntrl.useL3OnWT = options.use_L3_on_WT
dir_cntrl.inclusiveDir = not options.nonInclusiveDir
# Connect the Directory controller to the ruby network
dir_cntrl.requestFromCores = MessageBuffer(ordered = True)
dir_cntrl.requestFromCores.slave = ruby_system.network.master
dir_cntrl.responseFromCores = MessageBuffer()
dir_cntrl.responseFromCores.slave = ruby_system.network.master
dir_cntrl.unblockFromCores = MessageBuffer()
dir_cntrl.unblockFromCores.slave = ruby_system.network.master
dir_cntrl.probeToCore = MessageBuffer()
dir_cntrl.probeToCore.master = ruby_system.network.slave
dir_cntrl.responseToCore = MessageBuffer()
dir_cntrl.responseToCore.master = ruby_system.network.slave
dir_cntrl.triggerQueue = MessageBuffer(ordered = True)
dir_cntrl.L3triggerQueue = MessageBuffer(ordered = True)
dir_cntrl.responseFromMemory = MessageBuffer()
exec("system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
mainCluster.add(dir_cntrl)
cpuCluster = Cluster(extBW = crossbar_bw, intBW=crossbar_bw)
for i in xrange((options.num_cpus + 1) / 2):
cp_cntrl = CPCntrl()
cp_cntrl.create(options, ruby_system, system)
exec("system.cp_cntrl%d = cp_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.extend([cp_cntrl.sequencer, cp_cntrl.sequencer1])
# Connect the CP controllers and the network
cp_cntrl.requestFromCore = MessageBuffer()
cp_cntrl.requestFromCore.master = ruby_system.network.slave
cp_cntrl.responseFromCore = MessageBuffer()
cp_cntrl.responseFromCore.master = ruby_system.network.slave
cp_cntrl.unblockFromCore = MessageBuffer()
cp_cntrl.unblockFromCore.master = ruby_system.network.slave
cp_cntrl.probeToCore = MessageBuffer()
cp_cntrl.probeToCore.slave = ruby_system.network.master
cp_cntrl.responseToCore = MessageBuffer()
cp_cntrl.responseToCore.slave = ruby_system.network.master
cp_cntrl.mandatoryQueue = MessageBuffer()
cp_cntrl.triggerQueue = MessageBuffer(ordered = True)
cpuCluster.add(cp_cntrl)
gpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
for i in xrange(options.num_compute_units):
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
issue_latency = 1,
number_of_TBEs = 2560)
# TBEs set to max outstanding requests
tcp_cntrl.create(options, ruby_system, system)
tcp_cntrl.WB = options.WB_L1
tcp_cntrl.disableL1 = options.noL1
exec("system.tcp_cntrl%d = tcp_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(tcp_cntrl.coalescer)
tcp_cntrl_nodes.append(tcp_cntrl)
# Connect the CP (TCP) controllers to the ruby network
tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.requestFromTCP.master = ruby_system.network.slave
tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseFromTCP.master = ruby_system.network.slave
tcp_cntrl.unblockFromCore = MessageBuffer()
tcp_cntrl.unblockFromCore.master = ruby_system.network.slave
tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
tcp_cntrl.probeToTCP.slave = ruby_system.network.master
tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseToTCP.slave = ruby_system.network.master
tcp_cntrl.mandatoryQueue = MessageBuffer()
gpuCluster.add(tcp_cntrl)
for i in xrange(options.num_sqc):
sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
sqc_cntrl.create(options, ruby_system, system)
exec("system.sqc_cntrl%d = sqc_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(sqc_cntrl.sequencer)
# Connect the SQC controller to the ruby network
sqc_cntrl.requestFromSQC = MessageBuffer(ordered = True)
sqc_cntrl.requestFromSQC.master = ruby_system.network.slave
sqc_cntrl.probeToSQC = MessageBuffer(ordered = True)
sqc_cntrl.probeToSQC.slave = ruby_system.network.master
sqc_cntrl.responseToSQC = MessageBuffer(ordered = True)
sqc_cntrl.responseToSQC.slave = ruby_system.network.master
sqc_cntrl.mandatoryQueue = MessageBuffer()
# SQC also in GPU cluster
gpuCluster.add(sqc_cntrl)
# Because of wire buffers, num_tccs must equal num_tccdirs
numa_bit = 6
for i in xrange(options.num_tccs):
tcc_cntrl = TCCCntrl()
tcc_cntrl.create(options, ruby_system, system)
tcc_cntrl.l2_request_latency = options.gpu_to_dir_latency
tcc_cntrl.l2_response_latency = options.TCC_latency
tcc_cntrl_nodes.append(tcc_cntrl)
tcc_cntrl.WB = options.WB_L2
tcc_cntrl.number_of_TBEs = 2560 * options.num_compute_units
# Connect the TCC controllers to the ruby network
tcc_cntrl.requestFromTCP = MessageBuffer(ordered = True)
tcc_cntrl.requestFromTCP.slave = ruby_system.network.master
tcc_cntrl.responseToCore = MessageBuffer(ordered = True)
tcc_cntrl.responseToCore.master = ruby_system.network.slave
tcc_cntrl.probeFromNB = MessageBuffer()
tcc_cntrl.probeFromNB.slave = ruby_system.network.master
tcc_cntrl.responseFromNB = MessageBuffer()
tcc_cntrl.responseFromNB.slave = ruby_system.network.master
tcc_cntrl.requestToNB = MessageBuffer(ordered = True)
tcc_cntrl.requestToNB.master = ruby_system.network.slave
tcc_cntrl.responseToNB = MessageBuffer()
tcc_cntrl.responseToNB.master = ruby_system.network.slave
tcc_cntrl.unblockToNB = MessageBuffer()
tcc_cntrl.unblockToNB.master = ruby_system.network.slave
tcc_cntrl.triggerQueue = MessageBuffer(ordered = True)
exec("system.tcc_cntrl%d = tcc_cntrl" % i)
# connect all of the wire buffers between L3 and dirs up
# TCC cntrls added to the GPU cluster
gpuCluster.add(tcc_cntrl)
# Assuming no DMA devices
assert(len(dma_devices) == 0)
# Add cpu/gpu clusters to main cluster
mainCluster.add(cpuCluster)
mainCluster.add(gpuCluster)
ruby_system.network.number_of_virtual_networks = 10
return (cpu_sequencers, dir_cntrl_nodes, mainCluster)
| vineodd/PIMSim | GEM5Simulation/gem5/configs/ruby/GPU_VIPER_Baseline.py | Python | gpl-3.0 | 23,064 |
#! /usr/bin/env python
import os
import sys
import select
import time
import math
import getopt
import RPi.GPIO as GPIO
# Debug flag
debug = False
def Usage():
print('Usage: stepper.py -d -h -v --debug --help --version degrees timing-in-milliseconds')
# Stepper motor operation -- 28BYJ48 5V DC motor
# 32 teeth (poles) per 360 degrees = 11.25 degrees per full step
# Using half-steps then 5.625 degrees per half-step
# Gear ratio = 3.5555 (32/9), 2.000 (22/11), 2.888 (26/9), 3.100 (31/10)
# Final gear ratio = 3.555 * 2.000 * 2.888 * 3.100 = 63.683950617
# One rotation of main shaft = 63.683950617 turns of rotor shaft
# But 63.683950617 turns of rotor shaft requires 63.683950617 * 64 steps = 4075.7728395 steps
# 0.0883268 degrees per step
# Stepper Motor Parameters
pi = 3.14159265358979323846
halfStep = 5.625
stepsRotor = 360.0/5.625
gearRatio = 63.683950617
stepsMain = gearRatio * stepsRotor
stepDegree = 360.0/stepsMain
lastStep = 0
degreesRotated = 0.0
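# Worked example of the constants above (illustrative; the code below uses the
# computed values, not these rounded figures):
#   stepsRotor = 360.0 / 5.625     -> 64 half-steps per rotor revolution
#   stepsMain  = 63.683950617 * 64 -> ~4075.77 half-steps per output-shaft turn
#   stepDegree = 360.0 / stepsMain -> ~0.0883 degrees per half-step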
# Default timing for each step
timing = 10.0
# Define GPIO signals to use
# Physical pins 31, 33, 35, 37
# GPIO06, GPIO13, GPIO19, GPIO26
gpioPins = [6, 13, 19, 26]
# Define half-step sequence
# as shown in the manufacturer's datasheet
seq = [[1,0,0,0],
[1,1,0,0],
[0,1,0,0],
[0,1,1,0],
[0,0,1,0],
[0,0,1,1],
[0,0,0,1],
[1,0,0,1]]
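# Each row of seq is one half-step: the four columns correspond to the four
# GPIO pins in gpioPins order, and adjacent rows share exactly one energized
# coil, which is what gives the 5.625-degree half-step noted above.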
# Step the motor. Degrees (positive or negative), time in milliseconds for each, energized state
def stepper(degrees, stepTime, stepLock) :
global lastStep
global stepDegree
global degreesRotated
# Use BCM GPIO references
# instead of physical pin numbers
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Set all pins as output
for pin in gpioPins:
if debug:
print('GPIO setup for pin %d' % pin)
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, False)
if degrees > 0.0 :
direction = 1
else:
direction = -1
# The amount of rotation may be slightly less than requested. Steps are integral
stepsNeeded = abs(int(degrees/stepDegree))
stepCount = len(seq)
# Start main loop
# Variable for counting amount rotated in each step
rotation = 0.0
while stepsNeeded > 0 :
lastStep = lastStep + direction
if lastStep > len(seq) - 1 :
lastStep = 0
if lastStep < 0 :
lastStep = len(seq) - 1
for i in range(4):
pin = gpioPins[i]
if seq[lastStep][i]!=0 :
if debug:
print('Step %d: Enable GPIO %i' % (lastStep, pin))
GPIO.output(pin, True)
else:
if debug:
print ('Step %d: Disable GPIO %i' % (lastStep, pin))
GPIO.output(pin, False)
rotation = rotation + float(direction) * stepDegree
if debug:
print('Degrees rotated %4.3f' % rotation)
time.sleep(stepTime/1000.0)
stepsNeeded = stepsNeeded - 1
    # De-energize all output pins unless stepLock is set; keeping the coils energized locks the rotor at its final step.
if not stepLock :
for pin in gpioPins:
GPIO.output(pin, False)
return rotation
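# Example call (hypothetical values): advance the output shaft ~90 degrees in
# the positive direction, 10 ms per half-step, de-energizing the coils when
# done (stepLock=False):
#   rotated = stepper(90.0, 10.0, False)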
# Start of main program
def main(degrees, timing):
global degreesRotated
rotation = stepper(degrees, timing, False)
degreesRotated = degreesRotated + rotation
print('Degrees rotated: %4.3f' % degreesRotated)
sys.exit(0)
if __name__ == "__main__":
# This code corrects getopt's handling of negative numbers as arguments
# Look for the first negative number (if any)
for i,arg in enumerate(sys.argv[1:]):
# stop if a non-argument is detected
if arg[0] != "-":
break
# if a valid number is found insert a "--" string before it which
# explicitly flags to getopt the end of options
try:
f = float(arg)
sys.argv.insert(i+1,"--")
break;
except ValueError:
pass
try:
options, args = getopt.getopt(sys.argv[1:], 'dhv', ['debug', 'help', 'version'])
except getopt.GetoptError:
Usage()
sys.exit(2)
for o, a in options:
if o in ("-d", "--debug"):
debug = True
if o in ("-h", "--help"):
Usage()
sys.exit()
if o in ("-v", "--version"):
print('stepper.py Version 1.0')
sys.exit()
# Degrees to move (float). Can be positive or negative value
if len(args) > 0 :
degrees = float(args[0])
else:
Usage()
sys.exit(-2)
    # Timing in milliseconds for each step. Default is 10.0 milliseconds
if len(args) > 1 :
timing = float(args[1])
if timing < 1.0 :
print('Timing value incorrect. Needs to be 1.0 milliseconds or greater')
sys.exit(-1)
main(degrees, timing)
| hankster/pi-eye | stepper.py | Python | mit | 4,952 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-03 22:23
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('backend', '0006_auto_20161129_1421'),
]
operations = [
migrations.CreateModel(
name='BattleHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_victorious', models.BooleanField(default=False)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('difficult_level', models.CharField(default='normal', max_length=10)),
('opponent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='opponents', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='battlehistories', to=settings.AUTH_USER_MODEL)),
],
),
]
| Petrole/MaturePyRobots | WebPyRobot/backend/migrations/0007_battlehistory.py | Python | gpl-3.0 | 1,190 |
# -*-coding:utf-8 -*
from django.db import models
from agency.models import Vehicle, Agency
from django.contrib.auth.models import User
EVENT_TYPE = (
('code', 'Session de code'),
('driving', 'Conduite en circulation'),
('plateau', 'Conduite sur plateau'),
('code_exam', 'Examen de code'),
('driving_exam', 'Examen de conduite'),
('initial_circuit', 'Conduite sur circuit (initial)'),
('medium_circuit', 'Conduite sur circuit (intermédiaire)'),
('improvement_circuit', 'Conduite sur circuit (perfectionnement)'),
)
class Event(models.Model):
start = models.DateTimeField()
end = models.DateTimeField()
slots = models.IntegerField()
type = models.CharField(max_length=255, choices=EVENT_TYPE)
vehicle = models.ManyToManyField(Vehicle, null=True, blank=True, related_name='events')
agency = models.ForeignKey(Agency)
supervisor = models.ForeignKey(User)
def __unicode__(self):
return 'Evenement du %s au %s' % (self.start, self.end)
| SBillion/aegroupware | planner/models.py | Python | mit | 954 |
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Osmo Salomaa
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pan.test
import threading
import time
class TestConnectionPool(pan.test.TestCase):
def setup_method(self, method):
self.pool = pan.http.ConnectionPool(2)
self.http_url = "http://otsaloma.io/"
self.https_url = "https://otsaloma.io/"
def teardown_method(self, method):
self.pool.terminate()
def test_get__2(self):
connection1 = self.pool.get(self.http_url)
connection2 = self.pool.get(self.http_url)
assert connection1 is not None
assert connection2 is not None
def test_get__http(self):
connection = self.pool.get(self.http_url)
assert connection is not None
def test_get__https(self):
connection = self.pool.get(self.https_url)
assert connection is not None
def test_get__terminate_blocking(self):
kwargs = dict(target=self.pool.get, args=(self.http_url,))
threading.Thread(**kwargs).start()
threading.Thread(**kwargs).start()
# The third of these calls should block, but gracefully exit
# by raising an exception when terminate is called.
threading.Thread(**kwargs).start()
self.pool.terminate()
time.sleep(3)
def test_is_alive(self):
assert self.pool.is_alive()
self.pool.terminate()
assert not self.pool.is_alive()
def test_put(self):
connection = self.pool.get(self.http_url)
assert connection is not None
self.pool.put(self.http_url, connection)
connection = self.pool.get(self.http_url)
assert connection is not None
def test_reset(self):
connection = self.pool.get(self.http_url)
assert connection is not None
self.pool.put(self.http_url, connection)
self.pool.reset(self.http_url)
connection = self.pool.get(self.http_url)
assert connection is not None
def test_terminate(self):
self.pool.terminate()
assert not self.pool.is_alive()
class TestModule(pan.test.TestCase):
def test_get(self):
url = "https://otsaloma.io/"
blob = pan.http.get(url, encoding="utf_8")
assert blob.strip().startswith("<!DOCTYPE html>")
def test_get__error(self):
url = "https://xxx.yyy.zzz/"
self.assert_raises(Exception, pan.http.get, url)
def test_get__non_200(self):
url = "https://otsaloma.io/xxx/yyy/zzz"
self.assert_raises(Exception, pan.http.get, url)
def test_get_json(self):
url = "https://otsaloma.io/pub/test.json"
assert isinstance(pan.http.get_json(url), dict)
def test_get_json__error(self):
url = "https://otsaloma.io/pub/test.xml"
self.assert_raises(Exception, pan.http.get_json, url)
| otsaloma/pan-bikes | pan/test/test_http.py | Python | gpl-3.0 | 3,443 |
from plyer.platforms.win.libs.batterystatus import battery_status
from plyer.facades import Battery
class WinBattery(Battery):
def _get_status(self):
status = {"connected": None, "percentage": None}
query = battery_status()
if (not query):
return status
status["connected"] = query["ACLineStatus"] == 1
status["percentage"] = query["BatteryLifePercent"]
return status
def instance():
return WinBattery()
| brousch/pyohio-kivy-2014 | plyer/platforms/win/battery.py | Python | mit | 480 |
import datetime
import logging
import os
from django.conf import settings
from django.core.files.base import ContentFile
from django.core.mail import EmailMultiAlternatives
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from . import defaults
from .utils import get_storage, upload_to
logger = logging.getLogger(__name__)
class MailerMessageManager(models.Manager):
def send_queued(self, limit=None):
if limit is None:
limit = getattr(settings, 'MAILQUEUE_LIMIT', defaults.MAILQUEUE_LIMIT)
for email in self.filter(sent=False)[:limit]:
email.send_mail()
def clear_sent_messages(self, offset=None):
""" Deletes sent MailerMessage records """
if offset is None:
offset = getattr(settings, 'MAILQUEUE_CLEAR_OFFSET', defaults.MAILQUEUE_CLEAR_OFFSET)
if type(offset) is int:
offset = datetime.timedelta(hours=offset)
delete_before = timezone.now() - offset
self.filter(sent=True, last_attempt__lte=delete_before).delete()
@python_2_unicode_compatible
class MailerMessage(models.Model):
created = models.DateTimeField(_('Created'), auto_now_add=True, auto_now=False,
editable=False, null=True)
subject = models.CharField(_('Subject'), max_length=250, blank=True)
to_address = models.TextField(_('To'))
cc_address = models.TextField(_('CC'), blank=True)
bcc_address = models.TextField(_('BCC'), blank=True)
from_address = models.EmailField(_('From'), max_length=250)
reply_to = models.TextField(_('Reply to'), max_length=250, blank=True, null=True)
content = models.TextField(_('Content'), blank=True)
html_content = models.TextField(_('HTML Content'), blank=True)
app = models.CharField(_('App'), max_length=250, blank=True)
sent = models.BooleanField(_('Sent'), default=False, editable=False)
last_attempt = models.DateTimeField(_('Last attempt'), auto_now=False, auto_now_add=False,
blank=True, null=True, editable=False)
objects = MailerMessageManager()
class Meta:
verbose_name = _('Message')
verbose_name_plural = _('Messages')
def __str__(self):
return self.subject
def add_attachment(self, attachment):
"""
Takes a Django `File` object and creates an attachment for this mailer message.
"""
if self.pk is None:
self._save_without_sending()
original_filename = attachment.file.name.split(os.sep)[-1]
file_content = ContentFile(attachment.read())
new_attachment = Attachment()
new_attachment.file_attachment.save(original_filename, file_content, save=False)
new_attachment.email = self
new_attachment.original_filename = original_filename
try:
new_attachment.save()
except Exception as e:
logger.error(e)
new_attachment.file_attachment.delete()
def _save_without_sending(self, *args, **kwargs):
"""
Saves the MailerMessage instance without sending the e-mail. This ensures
other models (e.g. `Attachment`) have something to relate to in the database.
"""
self.do_not_send = True
super(MailerMessage, self).save(*args, **kwargs)
def send_mail(self):
""" Public api to send mail. Makes the determinination
of using celery or not and then calls the appropriate methods.
"""
if getattr(settings, 'MAILQUEUE_CELERY', defaults.MAILQUEUE_CELERY):
from mailqueue.tasks import send_mail
send_mail.delay(self.pk)
else:
self._send()
def _send(self):
if not self.sent:
self.last_attempt = timezone.now()
subject, from_email = self.subject, self.from_address
text_content = self.content
msg = EmailMultiAlternatives(subject, text_content, from_email)
if self.reply_to:
msg.extra_headers.update({"reply-to": self.reply_to})
if self.html_content:
html_content = self.html_content
msg.attach_alternative(html_content, "text/html")
msg.to = [email.strip() for email in self.to_address.split(',') if email.strip()]
msg.cc = [email.strip() for email in self.cc_address.split(',') if email.strip()]
msg.bcc = [email.strip() for email in self.bcc_address.split(',') if email.strip()]
# Add any additional attachments
for attachment in self.attachment_set.all():
path = attachment.file_attachment.path
if os.path.isfile(path):
with open(path, 'rb') as f:
content = f.read()
msg.attach(attachment.original_filename, content, None)
try:
msg.send()
self.sent = True
except Exception as e:
self.do_not_send = True
logger.error('Mail Queue Exception: {0}'.format(e))
self.save()
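# Minimal usage sketch (values are illustrative; exact sending behaviour on
# save depends on how the surrounding project wires things up):
#   msg = MailerMessage(subject="Hello",
#                       to_address="to@example.com",
#                       from_address="from@example.com",
#                       content="plain text body")
#   msg.save()       # queue the message
#   msg.send_mail()  # send now, or defer to celery if MAILQUEUE_CELERY is set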
@python_2_unicode_compatible
class Attachment(models.Model):
file_attachment = models.FileField(storage=get_storage(), upload_to=upload_to,
blank=True, null=True)
original_filename = models.CharField(default=None, max_length=250, blank=False)
email = models.ForeignKey(MailerMessage, on_delete=models.CASCADE, blank=True, null=True)
class Meta:
verbose_name = _('Attachment')
verbose_name_plural = _('Attachments')
def __str__(self):
return self.original_filename
| Goury/django-mail-queue | mailqueue/models.py | Python | mit | 5,805 |
#
# serialize.py
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# from https://github.com/jgarzik/python-bitcoinlib/blob/master/bitcoin/script.py
# from __future__ import absolute_import, division, print_function, unicode_literals
import binascii
import struct
import inspect
from StringIO import StringIO
from enum import IntEnum
import types
from copy import deepcopy
from app import config as cfg
# Py3 compatibility
import sys
bchr = chr
if sys.version > '3':
bchr = lambda x: bytes([x])
def wrap_to_StringIO(f):
if isinstance(f, bytearray):
f = bytes(f)
if isinstance(f, str):
f = StringIO(f)
return f
class SerType(IntEnum):
# primary actions
SER_NETWORK = (1 << 0)
SER_DISK = (1 << 1)
SER_GETHASH = (1 << 2)
# modifiers
SER_SKIPSIG = (1 << 16)
SER_BLOCKHEADERONLY = (1 << 17)
def deser_str(f):
nit = struct.unpack(b"<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack(b"<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack(b"<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack(b"<Q", f.read(8))[0]
return f.read(nit)
def ser_str(s):
if len(s) < 253:
return bchr(len(s)) + s
    elif len(s) <= 0xffff:
        return bchr(253) + struct.pack(b"<H", len(s)) + s
    elif len(s) <= 0xffffffff:
        return bchr(254) + struct.pack(b"<I", len(s)) + s
return bchr(255) + struct.pack(b"<Q", len(s)) + s
def ser_flatdata(s, n=-1):
s_size = len(s)
if s_size < n:
s += (b'\x00' * (n - s_size))
elif s_size > n:
s = s[:n]
return s
def deser_flatdata(f, n):
return f.read(n)
def deser_uint256(f):
r = 0
if type(f) is str:
f = StringIO(f)
for i in range(8):
t = struct.unpack(b"<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack(b"<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def hexser_uint256(u, in_type=None):
if in_type != 'str':
u = ser_uint256(u)
return binascii.hexlify(u) # ''.join(["%02X" % ord(x) for x in u])
def deser_uint160(f):
r = 0
if type(f) is str or isinstance(f, bytearray):
f = StringIO(f)
for i in range(5):
t = struct.unpack(b"<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint160(u):
rs = b""
for i in range(5):
rs += struct.pack(b"<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def deser_int64(f):
return struct.unpack(b"<q", f.read(8))[0]
def ser_int64(u):
return struct.pack(b"<q", u)
def deser_uint64(f):
return struct.unpack(b"<Q", f.read(8))[0]
def ser_uint64(u):
return struct.pack(b"<Q", u)
def deser_uint(f, endian='small'):
if endian == 'big':
return struct.unpack(b">I", f.read(4))[0]
return struct.unpack(b"<I", f.read(4))[0]
def ser_uint(i, endian='small'):
if endian == 'big':
return struct.pack(b">I", i)
return struct.pack(b"<I", i)
def deser_int(f, endian='small'):
if endian == 'big':
return struct.unpack(b">i", f.read(4))[0]
return struct.unpack(b"<i", f.read(4))[0]
def ser_int(i, endian='small'):
if endian == 'big':
return struct.pack(b">i", i)
return struct.pack(b"<i", i)
def deser_short(f, endian='small'):
if endian == 'big':
return struct.unpack(b">h", f.read(2))[0]
return struct.unpack(b"<h", f.read(2))[0]
def ser_short(i, endian='small'):
if endian == 'big':
return struct.pack(b">h", i)
return struct.pack(b"<h", i)
def deser_ushort(f, endian='small'):
if endian == 'big':
return struct.unpack(b">H", f.read(2))[0]
return struct.unpack(b"<H", f.read(2))[0]
def ser_ushort(i, endian='small'):
if endian == 'big':
return struct.pack(b">H", i)
return struct.pack(b"<H", i)
def deser_char(f):
return struct.unpack(b"b", f.read(1))[0]
def ser_char(i):
return struct.pack(b"b", i)
def deser_uchar(f):
return struct.unpack(b"B", f.read(1))[0]
def ser_uchar(i):
return struct.pack(b"B", i)
# list
def deser_list(f, cls, arg1=None, nType=0, nVersion=cfg.VERSION):
nit = struct.unpack(b"<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack(b"<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack(b"<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack(b"<Q", f.read(8))[0]
r = []
for i in range(nit):
if isinstance(cls, types.FunctionType):
t = cls(f)
else:
if arg1 is not None:
t = cls(arg1)
else:
t = cls()
t.deserialize(f)
r.append(t)
return r
def ser_list(l, ser_func=None, cls=None, nType=0, nVersion=cfg.VERSION):
s = StringIO()
if len(l) < 253:
s.write(bchr(len(l)))
    elif len(l) <= 0xffff:
        s.write(bchr(253) + struct.pack(b"<H", len(l)))
    elif len(l) <= 0xffffffff:
        s.write(bchr(254) + struct.pack(b"<I", len(l)))
else:
s.write(bchr(255) + struct.pack(b"<Q", len(l)))
for i in l:
if cls is not None:
s.write(cls.serialize(i, nType=nType, nVersion=nVersion))
else:
s.write(i.serialize(nType=nType, nVersion=nVersion) if ser_func is None else ser_func(i))
return s.getvalue()
def deser_uint256_list(f):
return deser_list(f, deser_uint256)
def ser_uint256_list(l):
return ser_list(l, ser_func=ser_uint256)
def deser_str_list(f):
return deser_list(f, deser_str)
def ser_str_list(l):
return ser_list(l, ser_func=ser_str)
def deser_strpair_list(f):
nit = struct.unpack(b"<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack(b"<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack(b"<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack(b"<Q", f.read(8))[0]
r = []
for i in range(nit):
fir = deser_str(f)
sec = deser_str(f)
r.append((fir, sec))
return r
def ser_strpair_list(l):
r = b""
if len(l) < 253:
r = bchr(len(l))
    elif len(l) <= 0xffff:
        r = bchr(253) + struct.pack(b"<H", len(l))
    elif len(l) <= 0xffffffff:
        r = bchr(254) + struct.pack(b"<I", len(l))
else:
r = bchr(255) + struct.pack(b"<Q", len(l))
for sv in l:
r += ser_str(sv[0])
r += ser_str(sv[1])
return r
def deser_int_list(f):
return deser_list(f, deser_int)
def ser_int_list(l):
return ser_list(l, ser_func=ser_int)
def deser_str_dict(f):
return deser_dict(f, deser_str, deser_str)
def ser_str_dict(d):
return ser_dict(d, ser_str, ser_str)
def deser_dict(f, key_cls, value_cls, arg1=None, arg2=None):
nit = struct.unpack(b"<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack(b"<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack(b"<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack(b"<Q", f.read(8))[0]
r = dict()
for i in range(nit):
if isinstance(key_cls, types.FunctionType):
k = key_cls(f)
else:
if arg1 is not None:
k = key_cls(arg1)
else:
k = key_cls()
k.deserialize(f)
if isinstance(value_cls, types.FunctionType):
v = value_cls(f)
else:
if arg2 is not None:
v = value_cls(arg2)
else:
v = value_cls()
v.deserialize(f)
r[k] = v
return r
def ser_dict(d, key_ser_fuc=None, value_ser_fuc=None):
r = b""
dict_size = len(d)
if dict_size < 253:
r = bchr(len(d))
    elif dict_size <= 0xffff:
        r = bchr(253) + struct.pack(b"<H", dict_size)
    elif dict_size <= 0xffffffff:
        r = bchr(254) + struct.pack(b"<I", dict_size)
else:
r = bchr(255) + struct.pack(b"<Q", dict_size)
for k, v in d.items():
r += key_ser_fuc(k) if key_ser_fuc is not None else k.serialize()
r += value_ser_fuc(v) if value_ser_fuc is not None else v.serialize()
return r
class DataType(IntEnum):
SER_NETWORK = (1 << 0)
SER_DISK = (1 << 1)
SER_GETHASH = (1 << 2)
# modifiers
SER_SKIPSIG = (1 << 16)
SER_BLOCKHEADERONLY = (1 << 17)
class Stream(object):
def write(self, s):
pass
def read(self, n=-1):
pass
class Serializable(object):
def deserialize(self, f, nType=0, nVersion=cfg.VERSION):
raise NotImplementedError("must implement deserialize function")
def serialize(self, nType=0, nVersion=cfg.VERSION):
raise NotImplementedError("must implement serialize function")
def serialize_size(self, nType=0, nVersion=cfg.VERSION):
return len(self.serialize(nType, nVersion))
def serialize_hash(s, nType=SerType.SER_GETHASH, nVersion=cfg.VERSION):
"""
:param s:
:type s: Serializable
:return:
"""
return s.serialize(nType, nVersion=nVersion)
UCHAR_MAX = 0xff
def _get_size_of_compact_size(size):
if size < (UCHAR_MAX - 2):
return 1
elif size <= 0xffff:
return 1 + 2
elif size <= 0xffffffff:
return 1 + 4
else:
return 1 + 8
# same as serialize.ser_string()
def _write_compact_size(stream, size):
if size < (UCHAR_MAX - 2): # uchar_max-2
stream.write(struct.pack("<B", size)) # unsigned char
elif size <= 0xffff: # ushort_max
ch_str = struct.pack("<B", UCHAR_MAX - 2) # unsigned char
size_str = struct.pack("<H", size) # unsigned shor
stream.write(ch_str)
stream.write(size_str)
elif size <= 0xffffffff: # uint_max
ch_str = struct.pack("<B", UCHAR_MAX - 1) # unsigned char
size_str = struct.pack("<I", size) # unsigned int
stream.write(ch_str)
stream.write(size_str)
else:
ch_str = struct.pack("<B", UCHAR_MAX) # unsigned char
size_str = struct.pack("<Q", size) # unsigned long long
stream.write(ch_str)
stream.write(size_str)
# same as serialize.deser_string()
def _read_compact_size(stream):
s = stream.read(1) # read 1 byte
n = struct.unpack("<B", s)[0]
if n < (UCHAR_MAX - 2):
return n
elif n == UCHAR_MAX - 2:
s = stream.read(2) # read 2B
return struct.unpack("<H", s)[0]
elif n == UCHAR_MAX - 1:
s = stream.read(4) # read 4B
return struct.unpack("<I", s)[0]
else:
s = stream.read(8) # read 8B
return struct.unpack("<Q", s)[0]
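# For reference, the compact-size ("varint") layout implemented by the two
# helpers above:
#   n < 253          -> 1 byte:  n
#   n <= 0xffff      -> 3 bytes: 253 + uint16 (little-endian)
#   n <= 0xffffffff  -> 5 bytes: 254 + uint32 (little-endian)
#   otherwise        -> 9 bytes: 255 + uint64 (little-endian)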
def Serialize(stream, obj, in_type=None, nType=0, nVersion=cfg.VERSION):
"""
:param stream:
:type stream: Stream
:param obj:
:type obj: any
:param in_type:
:type in_type: str | unicode | tuple
:param nType:
:param nVersion:
:return:
"""
if in_type is None:
in_type = type(obj).__name__
if in_type not in ['str', 'unicode', 'int', 'long', 'tuple', 'list', 'dict', 'set']:
in_type = Serializable
if isinstance(obj, Serializable) or in_type == 'Serializable':
s = obj.serialize(nType=nType, nVersion=nVersion)
stream.write(s)
elif (isinstance(obj, tuple) or in_type == 'tuple') and len(obj) == 2:
Serialize(stream, obj[0], in_type=in_type[0], nType=nType, nVersion=nVersion)
Serialize(stream, obj[1], in_type=in_type[1], nType=nType, nVersion=nVersion)
elif isinstance(obj, (list, set)) or in_type == 'list' or in_type == 'set':
_write_compact_size(stream, len(obj))
for i in obj:
stream.write(Serialize(stream, i, in_type=in_type, nType=nType, nVersion=nVersion))
elif isinstance(obj, dict) or in_type == 'dict':
_write_compact_size(stream, len(obj))
for k, v in obj.items():
stream.write(Serialize(stream, k, in_type=in_type[0], nType=nType, nVersion=nVersion))
stream.write(Serialize(stream, v, in_type=in_type[1], nType=nType, nVersion=nVersion))
elif in_type == 'str':
_write_compact_size(stream, len(obj))
stream.write(obj)
elif in_type == 'char':
stream.write(ser_char(obj))
elif in_type == 'uchar':
stream.write(ser_uchar(obj))
elif in_type == 'short':
stream.write(ser_short(obj))
elif in_type == 'ushort':
stream.write(ser_ushort(obj))
elif in_type == 'int':
stream.write(ser_int(obj))
elif in_type == 'uint':
stream.write(ser_uint(obj))
elif in_type == 'int64':
stream.write(ser_int64(obj))
elif in_type == 'uint64':
stream.write(ser_uint64(obj))
elif in_type == 'uint160':
stream.write(ser_uint160(obj))
elif in_type == 'uint256':
stream.write(ser_uint256(obj))
else:
raise TypeError("Unsupported type")
def Unserialize(f, out_type='str', cls=None, nType=0, nVersion=cfg.VERSION):
"""
:param f:
:type f: Stream
:param out_type:
:type out_type: str | unicode | None
:param cls:
:type cls: type | tuple | None
:param nType:
:param nVersion:
:return:
"""
if out_type == "str" and cls is not None:
out_type = None
if (out_type == 'Serializable' or out_type is None) and \
(cls is not None and inspect.isclass(cls) and issubclass(cls, Serializable)):
ins = cls() # new the instance
ins.deserialize(f, nType, nVersion)
return ins
elif out_type == 'tuple':
if not isinstance(cls, tuple) or len(cls) != 2:
raise TypeError("when out_type is tuple, the cls must be the tuple like ((info1,cls1), (info2,cls2))")
if type(cls[0]) is tuple:
fir = Unserialize(f, cls[0][0], cls[0][1], nType=nType, nVersion=nVersion)
else:
if isinstance(cls[0], (str, unicode)):
fir = Unserialize(f, cls[0], None, nType=nType, nVersion=nVersion)
            elif inspect.isclass(cls[0]) and issubclass(cls[0], Serializable):
fir = Unserialize(f, None, cls[0], nType=nType, nVersion=nVersion)
else:
raise TypeError(
"when out_type is 'tuple', the cls must be (ele_type, ele_class) or ele_type or Serializable class")
if type(cls[1]) is tuple:
sec = Unserialize(f, cls[1][0], cls[1][1], nType=nType, nVersion=nVersion)
else:
if isinstance(cls[1], (str, unicode)):
sec = Unserialize(f, cls[1], None, nType=nType, nVersion=nVersion)
            elif inspect.isclass(cls[1]) and issubclass(cls[1], Serializable):
sec = Unserialize(f, None, cls[1], nType=nType, nVersion=nVersion)
else:
raise TypeError(
"when out_type is 'tuple', the cls must be ('type', Serializable) or 'type' or Serializable")
return fir, sec
elif out_type == 'list' or out_type == 'set':
size = _read_compact_size(f)
if isinstance(cls, tuple) and len(cls) == 2:
ret = [Unserialize(f, out_type=cls[0], cls=cls[1], nType=nType, nVersion=nVersion) for _ in range(size)]
elif isinstance(cls, (str, unicode)):
ret = [Unserialize(f, out_type=cls, cls=None, nType=nType, nVersion=nVersion) for _ in range(size)]
elif inspect.isclass(cls) and issubclass(cls, Serializable):
ret = [Unserialize(f, out_type=None, cls=cls, nType=nType, nVersion=nVersion) for _ in range(size)]
else:
raise TypeError(
"when out_type is 'list' or 'set', the cls must be ('type', Serializable) 'type' or Serializable")
return ret if out_type == 'list' else set(ret)
elif out_type == 'dict':
if not isinstance(cls, tuple) or len(cls) != 2:
raise TypeError(
"when out_type is tuple, the cls must be the tuple like ((key_info,key_cls), (value_info,value_cls))")
size = _read_compact_size(f)
r = dict()
for _ in range(size):
            if type(cls[0]) is tuple:
k = Unserialize(f, cls[0][0], cls[0][1], nType=nType, nVersion=nVersion)
else:
if isinstance(cls[0], (str, unicode)):
k = Unserialize(f, cls[0], None, nType=nType, nVersion=nVersion)
                elif inspect.isclass(cls[0]) and issubclass(cls[0], Serializable):
k = Unserialize(f, None, cls[0], nType=nType, nVersion=nVersion)
else:
raise TypeError(
"when out_type is 'dict', the cls must be ('key_type', Serializable) 'key_type' or Serializable")
            if type(cls[1]) is tuple:
v = Unserialize(f, cls[1][0], cls[1][1], nType=nType, nVersion=nVersion)
else:
if isinstance(cls[1], (str, unicode)):
v = Unserialize(f, cls[1], None, nType=nType, nVersion=nVersion)
                elif inspect.isclass(cls[1]) and issubclass(cls[1], Serializable):
v = Unserialize(f, None, cls[1], nType=nType, nVersion=nVersion)
else:
raise TypeError(
"when out_type is 'dict', the cls must be ('key_type', Serializable) 'key_type' or Serializable")
r[k] = v
return r
elif out_type == 'str':
size = _read_compact_size(f)
return f.read(size)
elif out_type == 'char':
return deser_char(f)
elif out_type == 'uchar':
return deser_uchar(f)
elif out_type == 'short':
return deser_short(f)
elif out_type == 'ushort':
return deser_ushort(f)
elif out_type == 'int':
return deser_int(f)
elif out_type == 'uint':
return deser_uint(f)
elif out_type == 'int64':
return deser_int64(f)
elif out_type == 'uint64':
return deser_uint64(f)
elif out_type == 'uint160':
return deser_uint160(f)
elif out_type == 'uint256':
return deser_uint256(f)
else:
raise TypeError("Unsupported type")
def GetSerializeSize(s):
return _get_size_of_compact_size(len(s)) + len(s)
def GetSizeOfCompactSize(size):
return _get_size_of_compact_size(size)
class PyFlatData(Serializable):
def __init__(self, data, size=-1):
self.__data = data
if size == -1:
self.__size = len(self.__data)
else:
self.__size = size
def serialize(self, nType=0, nVersion=cfg.VERSION):
return self.__data
pass
def deserialize(self, f, nType=0, nVersion=cfg.VERSION):
if self.__size == -1:
raise RuntimeError("FlatData must init with size")
self.__data = f.read(self.__size)
return self.__data
def serialize_size(self, nType=0, nVersion=cfg.VERSION):
return self.__size
pass
class PyDataStream(StringIO, Stream, Serializable):
def __init__(self, s=b'', nType=0, nVersion=cfg.VERSION):
super(PyDataStream, self).__init__(s)
self.nType = nType
self.nVersion = nVersion
pass
def __add__(self, other):
s = self.getvalue()
pos = self.tell()
s += other.unread_str()
self.buf = s
self.len = len(self.buf)
self.seek(pos)
return self
def __str__(self):
return self.getvalue()[self.pos:]
def __len__(self):
return len(self.getvalue()) - self.pos
def __getitem__(self, n):
pos = self.tell()
s = self.getvalue()
if isinstance(n, slice):
fir = pos + (n.start if n.start else 0)
sec = pos + (n.stop if n.stop else len(self))
if n.step is not None:
raise NotImplementedError("not impl for step")
return s[fir:sec]
elif isinstance(n, (int, long)):
if pos + n > len(s):
return ""
return s[pos + n]
else:
raise NotImplementedError("just impl for slice and int/long")
def deserialize(self, f, nType=0, nVersion=cfg.VERSION):
print "Warning! Can't use this function"
pass
def serialize(self, nType=0, nVersion=cfg.VERSION):
# Special case: stream << stream concatenates like stream += stream
return str(self)
def copy(self):
return deepcopy(self)
def begin_index(self):
return self.tell() # pos
def end_index(self):
return len(self.getvalue())
def write(self, s):
pos = self.tell()
self.seek(len(self.getvalue()))
super(PyDataStream, self).write(s)
self.seek(pos)
def raw_write(self, s, pos):
old_pos = self.tell()
self.seek(old_pos + pos)
super(PyDataStream, self).write(s)
self.seek(old_pos)
def raw_read(self, fir, size):
old_pos = self.tell()
fir += old_pos
return self.getvalue()[fir:fir + size]
def raw_read_buf(self, fir, size):
return self.getvalue()[fir:fir + size]
def unread_str(self):
return self.getvalue()[self.tell():]
def insert(self, index, b):
self.flush()
s = self.getvalue()
pos = self.tell()
if index >= pos:
s = s[:index] + b + s[index:]
self.buf = s
self.len = len(self.buf)
self.seek(pos)
def erase(self, first, last=-1):
if last == -1:
last = self.end_index()
if last < first:
last = first
s = self.getvalue()
if first == self.tell():
# special case for erasing from the front
if last == len(s):
self.seek(0)
self.buf = ''
self.len = 0
else:
self.seek(last)
else:
self.buf = s[:first] + s[last:]
self.len = len(self.buf)
# if last == -1:
# last = first + 1
# assert first <= last, "last index must larger than first index"
# if first == last:
# return
# s = self.getvalue()
# pos = self.tell()
# if last < pos:
# return
# elif first < pos <= last:
# first = pos
# elif pos <= first:
# pos = first
# pass
# s = s[:first] + s[last:]
# self.buf = s
# self.len = len(self.buf)
# self.seek(pos)
def ignore(self, size):
if size < 0:
return
read_pos_next = self.tell() + size
buff_str = self.getvalue()
buff_str_size = len(buff_str)
if read_pos_next >= buff_str_size:
self.clear()
return self
self.seek(read_pos_next)
return self
def compact(self):
pos = self.tell()
self.buf = self.getvalue()[pos:]
self.len = len(self.buf)
self.seek(0)
def clear(self):
self.getvalue()
self.seek(0)
self.buf = ''
self.len = 0
def stream_in(self, obj, in_type=None):
Serialize(self, obj, in_type, nType=self.nType, nVersion=self.nVersion)
return self
def stream_out(self, out_type="str", cls=None):
return Unserialize(self, out_type, cls, nType=self.nType, nVersion=self.nVersion)
pass
def main():
print(hexser_uint256(0xfffffffffffff))
# class SerType(IntEnum):
# # primary actions
# SER_NETWORK = (1 << 0)
# SER_DISK = (1 << 1)
# SER_GETHASH = (1 << 2)
#
# # modifiers
# SER_SKIPSIG = (1 << 16)
# SER_BLOCKHEADERONLY = (1 << 17)
#
# def __or__(self, other):
# ret = self.value | other.value
# return self._reflect(ret)
#
# def _reflect(self, value):
# d = dict(zip(SerType.__dict__.values(), SerType.__dict__.keys()))
# return getattr(SerType, d[value])
r = SerType.SER_DISK | SerType.SER_GETHASH
print(type(r))
s = PyDataStream()
s2 = s.copy()
s.stream_in(1, in_type='int').stream_in(100, in_type='uint256')
print(s.stream_out(out_type='int'))
# print(s.stream_out(out_type='uint256'))
s2.stream_in([1, 2, 3], in_type="int")
s3 = s2.copy()
print(repr(str(s)))
print(s.tell())
print(repr(str(s2)))
s2.stream_out(out_type="list", cls=b"int")
print(s2.tell())
print(repr(str(s3)))
print(s3.tell())
# print(repr(s.stream_out(out_type="list", cls=b"int")))
print(repr(ser_uint64(0)))
s = PyDataStream('12345678123456')
s.read(3)
s.read(1)
s.write("123")
# s.erase(10)
# s.erase(5)
print s[:]
pass
if __name__ == "__main__":
main()
| JKingdom/KingCoin | app/base/serialize.py | Python | gpl-3.0 | 24,827 |
#!/bin/env python2.7
# -*- coding: utf-8 -*-
# This file is part of EPlatform.
#
# EPlatform is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EPlatform is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EPlatform. If not, see <http://www.gnu.org/licenses/>.
import wxversion
wxversion.select('2.8')
import glob, os, time
import time
from random import shuffle
import wx
import wx.lib.buttons as bt
from pymouse import PyMouse
import Tkinter
import numpy as np
import subprocess as sp
import shlex
import pygame
from pygame import mixer
import check
#"nazwij obrazek"
class cwiczenia(wx.Frame):
def __init__(self, parent, id):
wx.Frame.__init__( self , parent , id , 'EMatch')
self.Maximize( True )
self.winWidth, self.winHeight = wx.DisplaySize( )
self.parent=parent
style = self.GetWindowStyle()
self.SetWindowStyle( style | wx.STAY_ON_TOP )
self.initializeParameters()
self.createGui()
self.initializeTimer()
self.Bind( wx.EVT_CLOSE , self.OnExit )
def initializeParameters(self):
self.pathToEPlatform = './'
with open( self.pathToEPlatform + 'parameters', 'r' ) as parametersFile:
for line in parametersFile:
if line[ :line.find('=')-1 ] == 'timeGap':
self.timeGap = int( line[ line.rfind('=')+2:-1 ] )
elif line[ :line.find('=')-1 ] == 'backgroundColour':
self.backgroundColour = line[ line.rfind('=')+2:-1 ]
elif line[ :line.find('=')-1 ] == 'textColour':
self.textColour = line[ line.rfind('=')+2:-1 ]
elif line[ :line.find('=')-1 ] == 'scanningColour':
self.scanningColour = line[ line.rfind('=')+2:-1 ]
elif line[ :line.find('=')-1 ] == 'selectionColour':
self.selectionColour = line[ line.rfind('=')+2:-1 ]
elif line[ :line.find('=')-1 ] == 'musicVolume':
pass
elif line[ :line.find('=')-1 ] == 'filmVolume':
pass
elif not line.isspace( ):
print '\nNiewłaściwie opisany parametr. Błąd w linii:\n%s' % line
self.timeGap = 1500
self.backgroundColour = 'white'
self.textColour = 'black'
self.scanningColour = '#E7FAFD'
self.selectionColour = '#9EE4EF'
with open( self.pathToEPlatform + 'parametersCW', 'r' ) as parametersFile:
for line in parametersFile:
if line[ :line.find('=')-1 ] == 'textSize':
self.textSize = int( line[ line.rfind('=')+2:-1 ])
elif line[ :line.find('=')-1 ] == 'checkTime':
pass
elif line[ :line.find('=')-1 ] == 'colorGrat':
pass
elif line[ :line.find('=')-1 ] == 'maxPoints':
self.maxPoints = int(line[ line.rfind('=')+2:-1 ])
elif line[ :line.find('=')-1 ] == 'colorNiest':
pass
elif line[ :line.find('=')-1 ] == 'ileLuk':
pass
elif not line.isspace( ):
print 'Niewłaściwie opisane parametry'
print 'Błąd w linii', line
self.textSize=80
                    self.maxPoints=2
self.flaga=0
self.PicNr=0
self.result=0
self.mouseCursor = PyMouse( )
self.WordsList=os.listdir(self.pathToEPlatform+'multimedia/pictures')
shuffle(self.WordsList)
self.poczatek=True
self.numberOfPresses = 1
self.czyBack=False
mixer.init()
self.numberOfExtraWords= 4
#self.ktorySizer='wyrazy'
def initializeTimer(self):
id1=wx.NewId()
wx.RegisterId(id1)
self.stoper = wx.Timer(self,id1)
self.Bind( wx.EVT_TIMER, self.timerUpdate, self.stoper,id1 )
#self.id2=wx.NewId()
#wx.RegisterId(self.id2)
#self.stoper2 = wx.Timer( self ,self.id2)
self.id3=wx.NewId()
wx.RegisterId(self.id3)
self.stoper3 = wx.Timer( self ,self.id3)
self.id4=wx.NewId()
wx.RegisterId(self.id4)
self.stoper4=wx.Timer(self,self.id4)
self.Bind(wx.EVT_TIMER, self.pomocniczyStoper, self.stoper4,self.id4 )
self.stoper.Start( self.timeGap )
def timerUpdate(self,event):
self.mouseCursor.move( self.winWidth - 12, self.winHeight - 12 )
self.numberOfPresses = 0
'''for i in range(5):
item = self.subSizer.GetChildren()
b=item[i].GetWindow()
b.SetBackgroundColour( self.backgroundColour )
b.SetFocus()
for i in range(self.numberOfExtraWords+1):
item = self.wordSizer.GetChildren()
b=item[i].GetWindow()
b.SetBackgroundColour( self.backgroundColour )
b.SetFocus()'''
if self.flaga<= self.numberOfExtraWords+1 and self.flaga>0:
item = self.wordSizer.GetChildren()
b = item[self.flaga-1].GetWindow()
b.SetBackgroundColour( self.backgroundColour )
b.SetFocus()
else:
if self.flaga==0:
item = self.subSizer.GetChildren()
b=item[len(item)-1].GetWindow()
b.SetBackgroundColour( self.backgroundColour )
b.SetFocus()
else:
item = self.subSizer.GetChildren()
b=item[self.flaga-self.numberOfExtraWords -2].GetWindow()
b.SetBackgroundColour( self.backgroundColour )
b.SetFocus()
if self.poczatek:
time.sleep(1)
self.stoper.Stop()
mixer.music.load(self.pathToEPlatform+'multimedia/voices/'+str(self.word)+'.ogg')
mixer.music.play()
time.sleep(2)
self.stoper.Start(self.timeGap)
self.poczatek=False
if self.flaga >= self.numberOfExtraWords+1:
item = self.subSizer.GetChildren()
b=item[self.flaga-self.numberOfExtraWords -1].GetWindow()
b.SetBackgroundColour( self.scanningColour )
b.SetFocus()
else:
item = self.wordSizer.GetChildren()
b = item[self.flaga].GetWindow()
b.SetBackgroundColour( self.scanningColour )
b.SetFocus()
if self.flaga== 4 +self.numberOfExtraWords+1:
self.flaga=0
else:
self.flaga+=1
def createGui(self):
if self.PicNr ==len(self.WordsList):
self.PicNr=0
self.picture=self.WordsList[self.PicNr]
self.PicNr+=1
self.path=self.pathToEPlatform+'multimedia/pictures/'
im=wx.ImageFromStream( open(self.path+self.picture, "rb"))
x=im.GetWidth()
y=im.GetHeight()
if x >y:
im=im.Scale(600,500)
elif x==y:
im=im.Scale(600,600)
else:
im=im.Scale(500,600)
picture=wx.BitmapFromImage(im)
self.word=self.picture[:self.picture.index('.')]
        self.extraWords=[] # pick the extra (distractor) words
while len(self.extraWords)<self.numberOfExtraWords:
slowo=self.WordsList[np.random.randint(0,len(self.WordsList),1)[0]]
slowo=slowo[:slowo.index('.')]
if slowo not in self.extraWords and slowo!= self.word:
self.extraWords.append(slowo)
b = bt.GenBitmapButton( self, -1, bitmap=picture,name='picture') #zdjecie
b.SetBackgroundColour( self.backgroundColour)
b.Bind( wx.EVT_LEFT_DOWN, self.onPress )
        obiekty_wyrazow=[] # word buttons
self.wyrazy_w_kolejnosci=[]
gdzie_poprawne=np.random.randint(0,self.numberOfExtraWords,1)[0]
for i,j in enumerate(self.extraWords):
be = bt.GenButton( self, -1, j)
be.SetFont( wx.Font(50, wx.FONTFAMILY_ROMAN, wx.FONTWEIGHT_LIGHT, False) )
be.SetBackgroundColour( self.backgroundColour)
be.Bind( wx.EVT_LEFT_DOWN, self.onPress )
obiekty_wyrazow.append(be)
self.wyrazy_w_kolejnosci.append(j)
be = bt.GenButton( self, -1, self.word)
be.SetFont( wx.Font(50, wx.FONTFAMILY_ROMAN, wx.FONTWEIGHT_LIGHT, False) )
be.SetBackgroundColour( self.backgroundColour)
be.Bind( wx.EVT_LEFT_DOWN, self.onPress )
obiekty_wyrazow.insert(gdzie_poprawne,be)
self.wyrazy_w_kolejnosci.insert(gdzie_poprawne,self.word)
        # score display
res = bt.GenButton( self, -1, u'twój wynik: '+str(self.result)+' / '+str(self.maxPoints))
res.SetFont( wx.Font(27, wx.FONTFAMILY_ROMAN, wx.FONTWEIGHT_LIGHT, False) )
res.SetBackgroundColour( self.backgroundColour)
res.Bind( wx.EVT_LEFT_DOWN, self.onPress )
self.wordSizer=wx.BoxSizer( wx.VERTICAL )
for i in obiekty_wyrazow:
self.wordSizer.Add( i, proportion=1, flag=wx.EXPAND )
try:
self.subSizerP.Hide(0)
self.subSizerP.Remove(0)
self.subSizerP.Add( res, 0,wx.EXPAND) #dodanie wyniku
self.subSizer0.Hide(0)
self.subSizer0.Remove(0)
self.subSizer0.Hide(0)
self.subSizer0.Remove(0)
self.subSizer0.Add( self.wordSizer, 0,wx.EXPAND ) #tutaj trzeba dodac caly zagniezdzony subsizer ze slowami
self.subSizer0.Add( b, 0,wx.EXPAND) #dodanie zdjecia
except AttributeError:
if self.czyBack:
self.czyBack=False
self.SetBackgroundColour((220, 220, 220, 255))
else:
self. mainSizer = wx.BoxSizer( wx.VERTICAL )
self.subSizerP=wx.GridSizer(1,1,3,3)
self.subSizer0 = wx.GridSizer(1,2,3,3)
self.subSizer=wx.GridSizer(1,5,3,3)
self.subSizerP.Add(res,0,wx.EXPAND)
self.subSizer0.Add( self.wordSizer, 0,wx.EXPAND )
self.subSizer0.Add( b, 0,wx.EXPAND )
self.icons=sorted(os.listdir(self.pathToEPlatform+'multimedia/icons'))
self.path=self.pathToEPlatform+'multimedia/icons/'
for idx,icon in enumerate(self.icons):
if icon[0].isdigit():
i=wx.BitmapFromImage( wx.ImageFromStream( open(self.path+icon, "rb")))
b = bt.GenBitmapButton( self, -1, bitmap=i)
b.SetBackgroundColour( self.backgroundColour)
b.Bind( wx.EVT_LEFT_DOWN, self.onPress )
self.subSizer.Add( b, 0,wx.EXPAND )
self. mainSizer.Add( self.subSizerP, proportion=1, flag=wx.EXPAND )
self. mainSizer.Add( self.subSizer0, proportion=7, flag=wx.EXPAND )
self. mainSizer.Add( self.subSizer, proportion=2, flag=wx.EXPAND )
self.SetSizer( self. mainSizer,deleteOld=True )
self.Layout()
self.Refresh()
self.Center()
self.MakeModal(True)
self.flaga=0
self.poczatek=True
def OnExit(self,event):
if self.parent:
self.parent.MakeModal(True)
self.parent.Show()
self.parent.stoper.Start(self.parent.timeGap)
else:
pass
self.MakeModal(False)
self.Destroy()
def onPress(self,event):
self.numberOfPresses += 1
if self.numberOfPresses == 1:
if self.flaga==0 :
items = self.subSizer.GetChildren()
item=items[ 4]
else:
if self.flaga >= self.numberOfExtraWords+2:
items = self.subSizer.GetChildren()
item=items[ self.flaga-self.numberOfExtraWords -2]
else:
items = self.wordSizer.GetChildren()
item=items[ self.flaga -1 ]
b = item.GetWindow()
b.SetBackgroundColour( self.selectionColour )
b.SetFocus()
b.Update()
if 'speller' in self.icons[self.flaga-self.numberOfExtraWords -2] and self.flaga>=self.numberOfExtraWords+2:
pass
#self.stoper.Stop()
#self.mainSizer.Clear(deleteWindows=True)
#self.spellerW = spellerMistake.speller( self)
#self.Bind( wx.EVT_TIMER, self.spellerW.timerUpdate, self.stoper2,self.id2 )
#self.stoper2.Start( self.spellerW.timeGap )
elif self.flaga == 0: #cancel or self.flaga==0:
if __name__ == '__main__':
self.stoper.Stop( )
self.Destroy( )
else:
self.stoper.Stop( )
self.MakeModal( False )
self.parent.Show( True )
self.parent.stoper.Start( self.parent.timeGap )
self.Destroy( )
elif 'speak' in self.icons[self.flaga-self.numberOfExtraWords -2] and self.flaga>=self.numberOfExtraWords+2:
time.sleep(1)
self.stoper.Stop()
mixer.music.load(self.pathToEPlatform+'multimedia/voices/'+str(self.word)+'.ogg')
mixer.music.play()
self.stoper4.Start(2000)
elif 'literuj' in self.icons[self.flaga-self.numberOfExtraWords -2]and self.flaga>=self.numberOfExtraWords+2 :
self.stoper.Stop()
if str(self.word)+'.ogg' not in os.listdir(self.pathToEPlatform+'multimedia/spelling/'):
command='sox -m '+self.pathToEPlatform+'sounds/phone/'+list(self.word)[0].swapcase()+'.wav'
ile=0
for l in list(self.word)[1:]:
ile+=2
command+=' "|sox '+self.pathToEPlatform+'sounds/phone/'+l.swapcase()+'.wav'+' -p pad '+str(ile)+'"'
command+=' '+self.pathToEPlatform+'multimedia/spelling/'+self.word+'.ogg'
wykonaj=sp.Popen(shlex.split(command))
time.sleep(1.5)
do_literowania=mixer.Sound(self.pathToEPlatform+'multimedia/spelling/'+self.word+'.ogg')
do_literowania.play()
self.stoper4.Start((do_literowania.get_length()+0.5 )* 1000)
elif 'undo' in self.icons[self.flaga-self.numberOfExtraWords -2]and self.flaga>=self.numberOfExtraWords+2 :
self.stoper.Stop()
self.createGui()
self.stoper.Start(self.timeGap)
else:
if self.wyrazy_w_kolejnosci[self.flaga-1] ==self.word:
self.ownWord=self.word
else:
self.ownWord=''
#self.MakeModal(False)
#self.Hide()
self.stoper.Stop()
self.check()
else:
event.Skip( )
def pomocniczyStoper(self,event):
self.stoper4.Stop()
self.stoper.Start( self.timeGap )
def check(self):
self.mainSizer.Clear(deleteWindows=True)
self.checkW=check.check( self )
self.Bind( wx.EVT_TIMER, self.checkW.zamknij, self.stoper3,self.id3 )
def back(self):
self.czyBack=True
del self.checkW
self.mainSizer.Clear(deleteWindows=True)
self.createGui()
self.stoper.Start(self.timeGap)
if __name__ == '__main__':
app = wx.PySimpleApp()
frame = cwiczenia(parent = None, id = -1)
frame.Show()
app.MainLoop()
| bjura/EPlatform | EMatch.py | Python | gpl-3.0 | 18,227 |
# -*- coding: utf-8 -*-
"""
by LW 03/20/2015
some wrapper functions for ophyd
"""
import Tkinter, tkFileDialog
def det_select(det):
"""
by LW 03/20/2015
function to select detector for scanning and ct
removes all existing detectors from ophyd function ascan.user_detectors
and replaces them with the list of names passed in this function
ONLY FOR SINGLE STRINGS (not lists) SO FAR! Only works if there is one default detector/trigger to start with
=> needs some work in the future
calling sequence: det_select(det)
"""
try:
rm_det=ascan.user_detectors[0].name
ascan.user_detectors.remove(session_mgr[rm_det])
ascan.default_triggers = []
ascan.default_detectors = []
print 'removed previous default detector: ',rm_det
except: print 'list of detectors appeared to be empty...'
ascan.user_detectors.append(det) # add detector
new_def_det=ascan.user_detectors[0].name
print ''
print 'new default detector: ',new_def_det
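# A hedged usage sketch (the detector and motor names below are hypothetical examples,
# not objects defined in this file; they stand in for whatever the ophyd session provides):
#   det_select(session_mgr['em1'])          # make 'em1' the default detector
#   cw_ascan(diff_xh, -1, 1, 21, acqt=0.1)  # scan a motor with a 0.1 s exposure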
def cw_ascan(mot,xmin,xmax,npoints,acqt='default',pos_ret=True):
"""
by LW 03/21/2015
    wrapper function for ophyd's ascan
added functionality:
- default option: returns scanned axis back to the position where it was prior to starting the scan
     - option to change acquisition times on detectors (default: use current setting from corresponding css screen)
- stop camera and set image mode to single
     - after the scan: restarts camera and sets image mode back to where it was prior to starting the scan
calling sequence: cw_ascan(det,xmin,xmax,npoints,acqt='default',pos_ret=True)
WILL NOT WORK FOR A LIST OF DETECTORS!
"""
# gather beamline information prior to starting the scan:
ini_motpos=caget(mot.record+'.RBV')
# current detector:
acq_pv=session_mgr[ascan.user_detectors[0].name].pvname
# set different exposure time for the scan, if requested:
if acqt!='default':
try:
ini_expt=caget(acq_pv[2]) # initial exposure time
session_mgr[ascan.user_detectors[0].name].acquire_time = acqt
print 'successfully set exposure time to [s]: ',acqt
except: print 'could not set exposure time to ',acqt
# execute the scan
ascan(mot,xmin,xmax,npoints)
# put beamline back into initial state
if pos_ret==True:
caput(mot.record+'.VAL',ini_motpos)
print 'returned axes to: ',ini_motpos
if acqt!='default':
try:
session_mgr[ascan.user_detectors[0].name].acquire_time = ini_expt
print 'successfully reset exposure time to [s]: ',ini_expt
except: print 'could not reset exposure time to ',ini_expt
def cw_dscan(mot,mdx,pdx,npoints,acqt='default',pos_ret=True):
"""
by LW 03/21/2015
wrapper function for ophyd's ascan
added functionality:
     - option to change acquisition times on detectors (default: use current setting from corresponding css screen)
calling sequence: cw_dscan(det,mdx,pdx,npoints,acqt='default',pos_ret=True)
WILL NOT WORK FOR A LIST OF DETECTORS!
"""
# current detector:
acq_pv=session_mgr[ascan.user_detectors[0].name].pvname
# set different exposure time for the scan, if requested:
if acqt!='default':
try:
ini_expt=caget(acq_pv[2]) # initial exposure time
session_mgr[ascan.user_detectors[0].name].acquire_time = acqt
print 'successfully set exposure time to [s]: ',acqt
except: print 'could not set exposure time to ',acqt
# execute the scan
dscan(mot,mdx,pdx,npoints)
#print 'finished scan'
if acqt!='default':
try:
session_mgr[ascan.user_detectors[0].name].acquire_time = ini_expt
print 'successfully reset exposure time to [s]: ',ini_expt
except: print 'could not reset exposure time to ',ini_expt
def cw_CCDseries(folder,filename,detector,imnum='default',startn=1,acqt='default',acqperiod='default'):
"""
by LW 04/06/2015
wrapper function to take a time series with a CCD detector
functionalities:
- select/create folder via GUI (name for new folder in the GUI, new folder will be created)
- select CCD
- gather current CCD acquisition parameters and restore these after the timeseries
     - select acquisition time and acquiring period ('default': use current parameters from the area detector)
- switch autosave on before the series and off when done
     - select start number for image series !! if the file already exists, it will be overwritten !!
- currently saving .tiff as default
calling sequence: cw_CCDseries(folder,filename,detector,imnum='default',startn=1,acqt='default',acqperiod='default')
"""
import time
import Tkinter, tkFileDialog
import os, stat, sys
# get the detector name::
detector.pvname.split('}')[0]+'}'
#get folder interactively:
if folder == 'ia':
root=Tkinter.Tk()
root.withdraw()
directory=tkFileDialog.askdirectory()
folder=directory
# check whether target directory exists and create it, if it doesn't
if not os.path.exists(folder):
os.mkdir(folder)
        os.chmod(folder,436) #make sure everybody can read and write
os.chmod(folder,stat.S_IRWXO)
print 'successfully created new directory: ',folder
# put folder:
r=caput(detector.pvname.split('}')[0]+'}TIFF1:FilePath', folder) # have some problem with syntax here...
if r==1:
print 'changed saving directory to: ',folder
else: print 'error: could not change directory for saving data files.'
# put the filename:
r=caput(detector.pvname.split('}')[0]+'}TIFF1:FileName', filename) # have some problem with syntax here...
if r==1:
print 'filename for saving: ',filename
else: print 'error: could not change file name for saving.'
# put start number
caput(detector.pvname.split('}')[0]+'}TIFF1:FileNumber',startn)
#gather information about current camera settings
acq_pv=session_mgr[ascan.user_detectors[0].name].pvname
ini_acq=caget(acq_pv.split('}')[0]+'}cam1:Acquire') # initial state: started or stopped
    ini_mode=caget(acq_pv.split('}')[0]+'}cam1:ImageMode') # initial image mode: single [0], multiple [1], continuous [2]
ini_expt=caget(acq_pv.split('}')[0]+'}cam1:AcquireTime') # initial exposure time
ini_acqperiod=caget(acq_pv.split('}')[0]+'}cam1:AcquirePeriod') # initial acquiring period
ini_imnum=caget(acq_pv.split('}')[0]+'}cam1:NumImages') # initial image number
if acqt!='default':
try:
caput(acq_pv.split('}')[0]+'}cam1:AcquireTime',acqt)
print 'successfully set exposure time to [s]: ',acqt
except: print 'could not set exposure time to ',acqt
    # stop camera:
try:
caput(acq_pv.split('}')[0]+'}cam1:Acquire',0)
print 'successfully stopped camera'
except: print 'could not stop camera'
# try to set image mode to multiple
try:
caput(acq_pv.split('}')[0]+'}cam1:ImageMode',1)
print 'successfully set ImageMode to "multiple"'
except: print 'could not set ImageMode to "multiple"'
if acqperiod!='default':
try:
caput(acq_pv.split('}')[0]+'}cam1:AcquirePeriod',acqperiod)
            print 'successfully set acquiring period to: ',acqperiod
        except: print 'could not set acquiring period to ',acqperiod
# set number of images to be taken:
if imnum!='default':
try:
caput(acq_pv.split('}')[0]+'}cam1:NumImages',imnum)
print 'successfully set number of images to: ',imnum
except: print 'could not set number of images to ',imnum
print 'going to start the acquisition...'
time.sleep(1)
# start the acquisition!!!
caput(acq_pv.split('}')[0]+'}cam1:ArrayCounter',0)#set Images acquired (image counter) to 0
caput(detector.pvname.split('}')[0]+'}TIFF1:AutoSave',1) # start auto save
caput(acq_pv.split('}')[0]+'}cam1:Acquire',1) # start series acquisition
counting=0
current_file=caget(detector.pvname.split('}')[0]+'}TIFF1:FullFileName_RBV',as_string=True)
while counting==0:
time.sleep(.5)
if caget(acq_pv.split('}')[0]+'}cam1:ArrayCounter_RBV')==imnum: counting=1
if caget(detector.pvname.split('}')[0]+'}TIFF1:FullFileName_RBV',as_string=True)!=current_file:
current_file=caget(detector.pvname.split('}')[0]+'}TIFF1:FullFileName_RBV',as_string=True)
print 'file written: ',current_file
time.sleep(.1)
print 'going to stop the acquisition...'
time.sleep(.5)
    caput(acq_pv.split('}')[0]+'}cam1:Acquire',0) # stop series acquisition (redundant - it should have stopped already)
caput(detector.pvname.split('}')[0]+'}TIFF1:AutoSave',0) # stop auto save
print 'going to reset camera...'
#time.sleep(5)
# put camera back into initial state:
if acqt!='default':
try:
caput(acq_pv.split('}')[0]+'}cam1:AcquireTime',ini_expt)
print 'successfully reset exposure time to [s]: ',ini_expt
except: print 'could not reset exposure time to ',ini_expt
try:
caput(acq_pv.split('}')[0]+'}cam1:ImageMode',ini_mode)
print 'successfully reset ImageMode'
except: print 'could not reset ImageMode'
try:
caput(acq_pv.split('}')[0]+'}cam1:Acquire',ini_acq)
        print 'successfully reset camera acquisition mode'
except: print 'could not reset camera acquisition mode'
if acqperiod!='default':
try:
caput(acq_pv.split('}')[0]+'}cam1:AcquirePeriod',ini_acqperiod)
print 'successfully reset acquisition period to [s]: ',ini_acqperiod
except: print 'could not reset acquisition period to ',ini_acqperiod
if imnum!='default':
try:
caput(acq_pv.split('}')[0]+'}cam1:NumImages',ini_imnum)
print 'successfully reset image numbers to: ',ini_imnum
except: print 'could not reset image numbers to ',ini_imnum
time.sleep(.5)
try:
caput(acq_pv.split('}')[0]+'}cam1:Acquire',ini_acq) # restart camera if it was running before taking the series
print 'restarted camera'
except: print 'could not restart camera...'
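# Illustrative call (the paths and the detector object are placeholders, not values defined here):
#   cw_CCDseries('ia', 'sample1_', ccd, imnum=100, startn=1, acqt=0.05, acqperiod=0.06)
# passing folder='ia' pops up the directory-selection dialog described in the docstring.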
| sameera2004/chxtools | chxtools/chx_wrapper.py | Python | bsd-3-clause | 10,326 |
#!/usr/bin/env python3
# Copyright (C) 2016 Kaspar Schleiser <[email protected]>
# Copyright (C) 2017 HAW Hamburg
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import os
import sys
def testfunc(child):
child.expect(r"MAIN: reply from T-\d+")
child.expect(r"MAIN: \d+! = \d+")
child.expect_exact("[SUCCESS]")
if __name__ == "__main__":
sys.path.append(os.path.join(os.environ['RIOTTOOLS'], 'testrunner'))
from testrunner import run
sys.exit(run(testfunc))
| MichelRottleuthner/RIOT | tests/thread_cooperation/tests/01-run.py | Python | lgpl-2.1 | 611 |
import numpy, pylab
D2S = 86400
PROT_SUN = 27.0
OMEGA_SUN = 2 * numpy.pi / (27.0 * D2S)
root = '/Users/aigrain/Data/Kepler/diffrot/'
X = numpy.genfromtxt('%snoise_free/regions_par.txt' % root).T
lmin = X[4]
lmax = X[5]
nsim = len(lmin)
incl = numpy.arcsin(numpy.sqrt(numpy.random.uniform(0, 1, nsim)))
n1 = nsim/10
n2 = nsim-n1
period_eq = 10.0**(numpy.append(numpy.random.uniform(0, 1, n1), \
numpy.random.uniform(1, 1.7, n2)))
numpy.random.shuffle(period_eq) # in days
omega_eq_solar = PROT_SUN / period_eq # relative to solar rotation rate
n1 = nsim/3
n2 = nsim-n1
delta_per_rel = numpy.append(10.0**(numpy.random.uniform(-1, 0, n2)), \
numpy.zeros(n1))
numpy.random.shuffle(delta_per_rel) # relative
period_pole = period_eq * (1 + delta_per_rel)
delta_omega_solar = delta_per_rel * omega_eq_solar # relative to solar rotation rate
omega_eq = omega_eq_solar * OMEGA_SUN # in radians
delta_omega = delta_omega_solar * OMEGA_SUN # in radians
omega_pole = omega_eq - delta_omega # in radians
period_eq_2 = 2 * numpy.pi / omega_eq / D2S # in days
period_pole_2 = 2 * numpy.pi / omega_pole / D2S # in days
x = period_pole_2
y = period_eq / (1 - period_eq * delta_per_rel / PROT_SUN)
pylab.clf()
pylab.plot(x, y, 'k.')
pylab.plot([0,200],[0,200])
pylab.xlim(0,200)
pylab.ylim(0,200)
for i in numpy.arange(nsim):
print 'Saved in output file:'
print 'Period_eq: ', period_eq[i]
print 'Period_pole: ', period_pole[i]
print 'Differential rotation in terms of relative period'
print 'Delta_per_rel: ', delta_per_rel[i]
print 'Rotation rate in solar units'
print 'Omega_eq_solar: ', omega_eq_solar[i]
print 'Delta_omega_solar: ', delta_omega_solar[i]
print 'Rotation rate in radians'
print 'Omega_eq: ', omega_eq[i]
print 'Delta_omega: ', delta_omega[i]
print 'Omega_pole: ', omega_pole[i]
print 'Computed from Omega_eq and Omega_pole'
print 'Period_eq: ', period_eq_2[i]
print 'Period_pole: ', period_pole_2[i]
print ''
raw_input('Next?')
| RuthAngus/LSST-max | code/soft/simlc_check.py | Python | mit | 2,065 |
#!/usr/bin/env python
"""
Network
(C) 2008 Michael 'Mickey' Lauer <[email protected]>
(C) 2008 Openmoko, Inc.
GPLv2 or later
Package: onetworkd
Module: network
"""
MODULE_NAME = "onetworkd"
__version__ = "0.0.1"
import gobject
import os
import socket
import fcntl
import struct
import logging
logger = logging.getLogger( MODULE_NAME )
#============================================================================#
class Network( dict ):
#============================================================================#
def __init__( self ):
gobject.idle_add( self._sync )
def _sync( self ):
# FIXME add listener so that this gets called whenever a change in
# interfaces occurs
interfaces = os.listdir( "/sys/class/net" )
# first pass: remove
for interface in self:
if interface not in interfaces:
logger.debug( "interface %s no longer present -- removing" % interface )
del self[interface]
# second pass: add
for interface in os.listdir( "/sys/class/net" ):
if interface not in self:
logger.debug( "new interface %s -- adding" % interface )
self[interface] = Interface( interface )
return False
#============================================================================#
class Interface( object ):
#============================================================================#
def __init__( self, name ):
self._name = name
def name( self ):
return self._name
def ipAddress4( self ):
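        # SIOCGIFADDR (0x8915) asks the kernel for the interface's IPv4 address; the
        # interface name is packed into a 256-byte buffer as the ioctl expects, and
        # bytes 20..24 of the returned ifreq structure hold the packed address.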
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', self._name[:15])
)[20:24])
#============================================================================#
theNetwork = Network()
#============================================================================#
if __name__ == "__main__":
pass
| freesmartphone/framework | framework/subsystems/onetworkd/network.py | Python | gpl-2.0 | 2,048 |
from __future__ import print_function
import re
import traceback
class MiniObject(object):
def __init__(self, py_object, **meta):
( "The following python types map to the following mini types:\n"
" bool -> boolean\n"
" str -> string\n"
" int -> integer\n"
" float -> float\n"
" tuple -> list (may contain different types)\n"
" list -> vector (may only contain one type)\n"
" dict -> map\n"
" MiniSymbol -> symbol\n"
" Pair -> pair"
"mini vectors and maps should be treated as though immutable"
"s-expressions should be parsed as tuples"
)
self.py_object = py_object
self.meta = meta
def __repr__(self):
if self.py_object == None:
return 'nil'
if isinstance(self.py_object,bool):
return 'true' if self.py_object else 'false'
return repr(self.py_object)
def __str__(self):
if isinstance(self.py_object,str):
return self.py_object
return repr(self)
class Identifier(object):
def __init__(self,symbol,**kwargs):
assert isinstance(symbol,str)
self.symbol = symbol
self.start = kwargs.get('start')
self.end = kwargs.get('end')
def __repr__(self):
return '<identifier {}>'.format(self.symbol)
def is_identifier(mini_object):
assert isinstance(mini_object, MiniObject)
if isinstance(mini_object.py_object, Identifier):
return TRUE
return FALSE
SYMBOLS = {}
class MiniSymbol(object):
def __init__(self,string):
self.string = string
def __eq__(self,other):
return self is other
def __repr__(self):
return '<symbol :{}>'.format(self.string)
class MiniPair(object):
def __init__(self, car, cdr):
assert isinstance(car, MiniObject)
assert isinstance(cdr, MiniObject)
self.car = car
self.cdr = cdr
def __repr__(self):
return '<pair {}, {}>'.format(self.car, self.cdr)
def evaluate_arguments(arguments_cons_list, environment):
if arguments_cons_list == NIL:
return NIL
return cons(
evaluate(car(arguments_cons_list), environment),
evaluate_arguments(cdr(arguments_cons_list), environment))
class MiniEnvironment(MiniObject):
'This acts like a dict in Python code and a cons-dict in mini code'
def __init__(self):
super(self.__class__, self).__init__(None)
def __getitem__(self,key):
assert isinstance(key,str)
key_symbol = create_symbol(key)
return cons_dict_get(self,key_symbol)
def __setitem__(self,key,value):
assert isinstance(key,str)
key_symbol = create_symbol(key)
assert isinstance(value, MiniObject)
self.py_object = cons_dict_set(self,key_symbol,value).py_object
def __contains__(self,key):
assert isinstance(key,str)
key_symbol = create_symbol(key)
return cons_dict_has_key(self,key_symbol) == TRUE
def get(self,key):
assert isinstance(key,str)
if key in self:
return self[key]
return None
def dict_to_environment(dictionary):
result = MiniEnvironment()
for key,value in dictionary.iteritems():
result[key] = value
return result
class MiniApplicative(object):
def __init__(self, operative):
assert callable(operative)
self.operative = operative
def __call__(self, pattern, environment):
assert isinstance(pattern, MiniObject)
return self.operative(pattern, environment)
class MiniWrapper(object):
def __init__(self, operative):
assert isinstance(operative,MiniObject)
assert isinstance(operative.py_object, MiniApplicative) or isinstance(operative.py_object, MiniWrapper)
self.operative = operative
def __call__(self, pattern, environment):
assert isinstance(pattern, MiniObject)
return self.operative.py_object(evaluate_arguments(pattern, environment), environment)
def __repr__(self):
return "<wrapper {}>".format(repr(self.operative))
def wrap(thing):
return MiniObject(MiniWrapper(thing))
def unwrap(thing):
if isinstance(thing.py_object, MiniWrapper):
return thing.py_object.operative
raise Exception('UnwrapError')
def create_symbol(string,**kwargs):
if string in SYMBOLS:
return SYMBOLS[string]
k = MiniObject(MiniSymbol(string), **kwargs)
SYMBOLS[string] = k
return k
def create_cons_collection(py_collection):
result = NIL
for item in reversed(py_collection):
result = MiniObject(MiniPair(item, result))
return result
def cons_collection_to_py_collection(cons_collection):
while cons_collection != NIL:
yield car(cons_collection)
cons_collection = cdr(cons_collection)
token_regex = re.compile(r'''(?mx)
(\s*|\#.*?\n)*(?:
(?P<open_parenthese>\()|
(?P<close_parenthese>\))|
(?P<number>\-?\d+\.\d+|\-?\d+)|
(?P<string>"[^"]*")|
(?P<identifier>[_A-Za-z\?\-\+\*/=\>\<]+)|
(?P<symbol>\:[_A-Za-z\?\-\+\*/=\>\<]*)
)''')
def parse_all(source):
def parse(matches, index_holder):
match = matches[index_holder[0]]
index_holder[0] += 1
if match.group('open_parenthese'):
r = []
while index_holder[0] < len(matches) and not matches[index_holder[0]].group('close_parenthese'):
r.append(parse(matches, index_holder))
if index_holder[0] == len(matches):
raise Exception('Unmatched parenthese (')
index_holder[0] += 1
return create_cons_collection(r)
if match.group('close_parenthese'):
raise Exception("Unmatched parenthese )")
if match.group('number'):
v = float(match.group('number'))
if v.is_integer(): v = int(v)
return MiniObject(
v,
start = match.start('number'),
end = match.end('number'))
if match.group('string'):
return MiniObject(
match.group('string')[1:-1],
start = match.start('string'),
end = match.end('string'))
if match.group('identifier'):
return MiniObject(Identifier(
match.group('identifier'),
start = match.start('identifier'),
end = match.end('identifier')))
if match.group('symbol'):
return create_symbol(
match.group('symbol')[1:],
start = match.start('symbol'),
end = match.end('symbol'))
assert False, "I'm not sure how this happened"
def parse_all_internal(matches, index_holder):
if index_holder[0] == len(matches):
return NIL
parsed_atom = parse(matches, index_holder)
return cons(parsed_atom, parse_all_internal(matches, index_holder))
matches = list(token_regex.finditer(source))
match_index_wrapped = [0]
return parse_all_internal(matches, match_index_wrapped)
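# Rough illustration (not exercised in this file): parse_all('(+ 1 2)') yields a cons
# list holding one parsed expression, itself a cons list whose car is the identifier
# `+` and whose cdr holds the numbers 1 and 2 as MiniObjects.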
NIL = MiniObject(None)
class Boolean(MiniObject):
def __init__(self, py_object, **kwargs):
super(Boolean,self).__init__(py_object, **kwargs)
TRUE = Boolean(True)
FALSE = Boolean(False)
def is_number(arg):
if isinstance(arg, float):
return True
# isinstance(True, int) returns True
return isinstance(arg, int) and not isinstance(arg, bool)
def py_to_mini(py_object):
assert callable(py_object)
def wrapped(pattern, environment):
result = py_object(*cons_collection_to_py_collection(pattern))
if is_number(result) or isinstance(result,MiniPair):
return MiniObject(result)
if isinstance(result,str):
return MiniObject(result)
return {
True : TRUE,
False : FALSE,
None : NIL,
}.get(result, result)
return MiniObject(MiniWrapper(MiniObject(MiniApplicative(wrapped))))
def apply(applicative, pattern, environment):
assert isinstance(applicative, MiniObject)
return applicative.py_object(pattern, environment)
def evaluate(expression, environment):
assert isinstance(expression, MiniObject)
if isinstance(expression.py_object, str) or is_number(expression.py_object):
return expression
if isinstance(expression.py_object, MiniSymbol):
return expression
if isinstance(expression.py_object, MiniPair):
applicative = evaluate(car(expression), environment)
arguments = cdr(expression)
assert isinstance(applicative, MiniObject)
assert isinstance(arguments, MiniObject)
if isinstance(applicative.py_object, MiniApplicative) or isinstance(applicative.py_object, MiniWrapper):
return apply(applicative, arguments, environment)
raise Exception("Expected applicative, got {}".format(applicative.py_object))
if isinstance(expression.py_object, Identifier):
parent_symbol = create_symbol('__parent__')
while environment != None:
if cons_dict_has_key(environment, create_symbol(expression.py_object.symbol)) == TRUE:
return cons_dict_get(environment, create_symbol(expression.py_object.symbol))
if cons_dict_has_key(environment, parent_symbol) == TRUE:
environment = cons_dict_get(environment, create_symbol('__parent__'))
else:
raise Exception('UndefinedIdentifierError: Undefined identifier {}'.format(expression.py_object.symbol))
def length(string):
assert isinstance(string, MiniObject)
if isinstance(string.py_object, str):
return len(string.py_object)
raise Exception("TypeError")
def concatenate(l,r):
# TODO Implement ropes: http://citeseer.ist.psu.edu/viewdoc/download?doi=10.1.1.14.9450&rep=rep1&type=pdf
# TODO Apply this to other collection types
if isinstance(l.py_object,str) and isinstance(r.py_object, str):
return MiniObject(l.py_object + r.py_object)
raise Exception('TypeError')
def is_integer(arg):
return isinstance(arg, int) and not isinstance(arg, bool)
def slice(string, start, end):
if not isinstance(string.py_object, str):
raise Exception('TypeError')
py_string = string.py_object
if is_integer(start.py_object):
py_start = start.py_object
elif start.py_object == None:
py_start = 0
else:
raise Exception('TypeError')
if is_integer(end.py_object):
py_end = end.py_object
elif end.py_object == None:
py_end = len(py_string)
else:
raise Exception('TypeError')
return MiniObject(py_string[py_start:py_end])
def _assert(pattern, environment):
def assert_internal(*arguments):
if len(arguments) == 0:
raise Exception("ArgumentError: assert expects 1 or more arguments, received none")
if len(arguments) == 1:
description = 'assertion failed'
assertion = arguments
else:
description = arguments[0].py_object
assertion = arguments[1:]
if not isinstance(assertion[-1].py_object, bool):
raise Exception("TypeError: `assert` expected Boolean assertion but received {} {}".format(type(assertion[-1].py_object), assertion[-1]))
if assertion[-1] is TRUE:
return None
if assertion[-1] is FALSE:
raise Exception("AssertionError: {}".format(description))
assert False
# Execute in nested scope
return py_to_mini(assert_internal).py_object(pattern, nest(environment))
def throws(pattern, environment):
if cons_collection_len(pattern) != 2:
raise Exception("throws? expects 2 argument, received {}".format(len(pattern)))
expression = car(pattern)
exception = evaluate(car(cdr(pattern)), environment)
if not isinstance(exception.py_object, str):
raise Exception('throws? expects a string as the second argument')
try:
evaluate(expression, environment)
return FALSE
except Exception as e:
if ':' in e.message:
exception_type, message = e.message.split(':',1)
else:
exception_type = e.message
if exception_type == exception.py_object:
return TRUE
raise
def _not(argument):
if not isinstance(argument, Boolean):
raise Exception('TypeError: Expected Boolean but received {}'.format(type(argument)))
if argument == TRUE:
return FALSE
if argument == FALSE:
return TRUE
assert False
def evaluate_expressions(expressions, environment):
result = NIL
while expressions != NIL:
result = evaluate(car(expressions), environment)
expressions = cdr(expressions)
return result
def cons_collection_len(cons_collection):
result = 0
while cons_collection != NIL:
result += 1
cons_collection = cdr(cons_collection)
return result
def define(pattern, environment):
if cons_collection_len(pattern) < 2:
raise Exception('DefineError: `define` expected two arguments, received {}'.format(cons_collection_len(pattern)))
head = car(pattern)
body = cdr(pattern)
if isinstance(head.py_object, Identifier):
identifier = head.py_object.symbol
if is_defined(head, environment) == TRUE:
raise Exception('AlreadyDefinedError: the identifier {} is already defined'.format(identifier))
environment[identifier] = evaluate_expressions(body, environment)
return NIL
elif isinstance(head.py_object, MiniPair):
raise Exception('NotImplementedError: Defining patterns is not yet implemented')
else:
raise Exception("DefineError")
def defined_p(pattern, environment):
if cons_collection_len(pattern) != 1:
raise Exception("ArgumentError: `defined?` expects 1 argument, received {}".format(len(pattern)))
if not isinstance(car(pattern).py_object, Identifier):
raise Exception("TypeError: Expected Identifier but got {}".format(type(car(pattern).py_object)))
return is_defined(car(pattern), environment)
def is_defined(identifier, environment):
assert isinstance(identifier, MiniObject)
assert isinstance(environment, MiniObject)
identifier_symbol = identifier_to_symbol(identifier)
parent_symbol = create_symbol('__parent__')
while True:
if cons_dict_has_key(environment, identifier_symbol) == TRUE:
return TRUE
elif cons_dict_has_key(environment, parent_symbol) == TRUE:
environment = cons_dict_get(environment, parent_symbol)
else:
return FALSE
def _if(pattern, environment):
if not cons_collection_len(pattern) in [2,3]:
raise Exception("ArgumentError")
condition = car(pattern)
if_result_true = car(cdr(pattern))
if_result_false = car(cdr(cdr(pattern)))
result = evaluate(condition, environment)
if result is TRUE:
return evaluate(if_result_true, environment)
if result is FALSE:
return evaluate(if_result_false, environment)
raise Exception("TypeError: `if` expects boolean, received {}".format(type(result)))
def nest(environment):
isinstance(environment,MiniEnvironment)
result = MiniEnvironment()
result['__parent__'] = environment
return result
# This is vau from John N. Shutt's seminal paper
# https://www.wpi.edu/Pubs/ETD/Available/etd-090110-124904/unrestricted/jshutt.pdf
# While Greek letters are appropriate for an academic, theoretical context, they make for
# poor variable names, so this is tentatively named `operative`
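# A small illustration in mini syntax (assuming only the builtins defined below): an
# operative receives its operands unevaluated together with the caller's environment, so
#   (define quote (operative (x) env x))
#   (quote (+ 1 2))   ; returns the cons list (+ 1 2) instead of 3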
def operative(pattern, defining_environment):
argument_list_identifier = None
argument_identifiers = None
calling_environment_identifier = car(cdr(pattern)).py_object.symbol
if isinstance(car(pattern).py_object, Identifier):
argument_list_identifier = car(pattern).py_object.symbol
if calling_environment_identifier == argument_list_identifier:
raise Exception("ArgumentError: Argument list identifier `{}` may not be the same as calling environment identifier".format(ai))
elif car(pattern).py_object == None or isinstance(car(pattern).py_object, MiniPair):
if not all([isinstance(arg.py_object, Identifier) for arg in cons_collection_to_py_collection(car(pattern))]):
raise Exception("ArgumentError: Unexpected {} {}".format(type(arg),arg))
argument_identifiers = [ai.py_object.symbol for ai in cons_collection_to_py_collection(car(pattern))]
existing = set()
for ai in argument_identifiers:
if ai in existing:
raise Exception("ArgumentError: Argument `{}` already defined".format(ai))
if calling_environment_identifier == ai:
raise Exception("ArgumentError: Argument `{}` may not be the same as calling environment identifier".format(ai))
existing.add(ai)
else:
raise Exception("ArgumentError: `operative` expected identifier or cons-list as first argument, received {}".format(type(car(pattern).py_object)))
if not isinstance(car(cdr(pattern)).py_object,Identifier):
raise Exception("ArgumentError: The second argument to `operative` should be an identifer")
def result(calling_pattern, calling_environment):
local_environment = nest(defining_environment)
assert (argument_list_identifier == None) != (argument_identifiers == None)
if argument_list_identifier != None:
local_environment[argument_list_identifier] = calling_pattern
if argument_identifiers != None:
if not cons_collection_len(calling_pattern) == len(argument_identifiers):
raise Exception("ArgumentError: operative expected {} arguments, received {}".format(len(argument_identifiers),len(calling_pattern)))
calling_pattern = list(cons_collection_to_py_collection(calling_pattern))
for i in range(len(argument_identifiers)):
local_environment[argument_identifiers[i]] = calling_pattern[i]
local_environment[calling_environment_identifier] = calling_environment
return evaluate_expressions(cdr(cdr(pattern)), local_environment)
return MiniObject(MiniApplicative(result))
def read_file(filename):
assert isinstance(filename, MiniObject)
with open(filename.py_object, 'r') as f:
return f.read()
def write_file(filename, string):
assert isinstance(filename, MiniObject)
assert isinstance(string, MiniObject)
with open(filename.py_object, 'w') as f:
f.write(string.py_object)
def add(l,r):
if isinstance(l, MiniObject) and isinstance(r, MiniObject):
l = l.py_object
r = r.py_object
if is_number(l) and is_number(r):
return l + r
    raise Exception('TypeError')
def subtract(l,r):
if isinstance(l, MiniObject) and isinstance(r, MiniObject):
l = l.py_object
r = r.py_object
if is_number(l) and is_number(r):
return l - r
    raise Exception('TypeError')
def multiply(l,r):
if isinstance(l, MiniObject) and isinstance(r, MiniObject):
l = l.py_object
r = r.py_object
if is_number(l) and is_number(r):
return l * r
    raise Exception('TypeError')
def divide(l,r):
if isinstance(l, MiniObject) and isinstance(r, MiniObject):
l = l.py_object
r = r.py_object
if is_number(l) and is_number(r):
if isinstance(l,int) and isinstance(r,int) and l % r != 0:
l = float(l)
return l / r
    raise Exception('TypeError')
def idivide(l,r):
if isinstance(l, MiniObject) and isinstance(r, MiniObject):
l = l.py_object
r = r.py_object
if is_number(l) and is_number(r):
return l // r
    raise Exception('TypeError')
def mod(l,r):
if isinstance(l, MiniObject) and isinstance(r, MiniObject):
l = l.py_object
r = r.py_object
if is_number(l) and is_number(r):
return l % r
    raise Exception('TypeError')
def eq(l,r):
assert isinstance(l,MiniObject)
assert isinstance(r,MiniObject)
return l.py_object == r.py_object
def lt(l,r):
assert isinstance(l,MiniObject)
assert isinstance(r,MiniObject)
if is_number(l.py_object) and is_number(r.py_object):
return l.py_object < r.py_object
if isinstance(l.py_object,str) and isinstance(r.py_object,str):
return l.py_object < r.py_object
if isinstance(l.py_object,MiniSymbol) and isinstance(r.py_object,MiniSymbol):
return l.py_object.string < r.py_object.string
raise TypeError('`<` expected number or string, received {} and {}'.format(l.py_object, r.py_object))
def gt(l,r):
assert isinstance(l,MiniObject)
assert isinstance(r,MiniObject)
if is_number(l.py_object) and is_number(r.py_object):
return l.py_object > r.py_object
if isinstance(l.py_object,str) and isinstance(r.py_object,str):
return l.py_object > r.py_object
if isinstance(l.py_object,MiniSymbol) and isinstance(r.py_object,MiniSymbol):
return l.py_object.string > r.py_object.string
raise TypeError('`>` expected number or string, received {} and {}'.format(l.py_object, r.py_object))
def le(l,r):
return lt(l,r) or eq(l,r)
def ge(l,r):
return gt(l,r) or eq(l,r)
def cons(l,r):
return MiniObject(MiniPair(l,r))
def car(p):
return p.py_object.car
def cdr(p):
return p.py_object.cdr
def is_cons_list(mini_object):
assert isinstance(mini_object,MiniObject)
if eq(mini_object,NIL) or isinstance(mini_object.py_object,MiniPair):
return TRUE
return FALSE
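# The cons-dict used by the environment is a plain (unbalanced) binary search tree made
# of pairs: each node is cons(cons(key, value), cons(left-subtree, right-subtree)), with
# NIL as the empty dictionary. For example (illustrative only):
#   d = cons_dict_set(NIL, create_symbol('a'), MiniObject(1))
#   cons_dict_get(d, create_symbol('a'))    # => MiniObject(1)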
def cons_dict_set(dictionary,key,value):
assert isinstance(dictionary,MiniObject)
assert isinstance(key,MiniObject)
assert isinstance(value,MiniObject)
if eq(dictionary,NIL):
return cons(cons(key,value),cons(NIL,NIL))
current_node_key = car(car(dictionary))
if lt(key,current_node_key):
return cons(
car(dictionary),
cons(
cons_dict_set(car(cdr(dictionary)), key, value),
cdr(cdr(dictionary))))
if gt(key,current_node_key):
return cons(
car(dictionary),
cons(
car(cdr(dictionary)),
cons_dict_set(cdr(cdr(dictionary)), key, value)))
if eq(key,current_node_key):
return cons(cons(key,value), cdr(dictionary))
assert False
def cons_dict_get(dictionary,key):
assert isinstance(dictionary, MiniObject)
assert isinstance(key, MiniObject)
if eq(dictionary,NIL):
raise Exception('KeyError: Dictionary does not contain key "{}"'.format(key))
current_node_key = car(car(dictionary))
if lt(key, current_node_key):
return cons_dict_get(car(cdr(dictionary)), key)
if gt(key, current_node_key):
return cons_dict_get(cdr(cdr(dictionary)), key)
if eq(key, current_node_key):
return cdr(car(dictionary))
def cons_dict_has_key(dictionary,key):
assert isinstance(dictionary, MiniObject)
assert isinstance(key, MiniObject)
if eq(dictionary,NIL):
return FALSE
current_node_key = car(car(dictionary))
if lt(key, current_node_key):
return cons_dict_has_key(car(cdr(dictionary)), key)
if gt(key, current_node_key):
return cons_dict_has_key(cdr(cdr(dictionary)), key)
if eq(key, current_node_key):
return TRUE
def identifier_to_symbol(identifier):
assert isinstance(identifier, MiniObject)
if not isinstance(identifier.py_object, Identifier):
raise Exception('`identifier->symbol` expected identifier, received {}'.format(type(identifier.py_object)))
return create_symbol(identifier.py_object.symbol)
def read(string):
assert isinstance(string,MiniObject)
if not isinstance(string.py_object,str):
raise Exception("TypeError: `read` expected string, got {}".format(type(strin.py_object)))
result = parse_all(string.py_object)
assert cdr(result) == NIL
return car(result)
builtins = {
# Builtin constants
'true' : TRUE,
'false' : FALSE,
'nil' : NIL,
# Builtin comparison functions
'=' : py_to_mini(eq),
'<' : py_to_mini(lt),
'>' : py_to_mini(gt),
'<=' : py_to_mini(le),
'>=' : py_to_mini(ge),
# Builtin conversion functions
'identifier->symbol' : py_to_mini(identifier_to_symbol),
# Builtin type test functions
'cons-list?' : py_to_mini(is_cons_list),
'identifier?' : py_to_mini(is_identifier),
# Builtin general functions
'evaluate' : py_to_mini(evaluate),
'evaluate-expressions' : py_to_mini(evaluate_expressions),
'print' : py_to_mini(print),
'prompt' : py_to_mini(raw_input),
'read-file' : py_to_mini(read_file),
'write-file' : py_to_mini(write_file),
'read' : py_to_mini(read),
'wrap' : py_to_mini(wrap),
'unwrap' : py_to_mini(unwrap),
# Builtin number functions
'+' : py_to_mini(add),
'-' : py_to_mini(subtract),
'*' : py_to_mini(multiply),
'/' : py_to_mini(divide),
'//' : py_to_mini(idivide),
'mod' : py_to_mini(mod),
# Builtin pair functions
'cons' : py_to_mini(cons),
'car' : py_to_mini(car),
'cdr' : py_to_mini(cdr),
# Builtin cons dictionary functions
'cons-dict-set' : py_to_mini(cons_dict_set),
'cons-dict-get' : py_to_mini(cons_dict_get),
# Builtin string functions
'concatenate' : py_to_mini(concatenate),
'length' : py_to_mini(length),
'slice' : py_to_mini(slice),
# Builtin boolean functions
'not' : py_to_mini(_not),
# Builtin special forms
'assert' : MiniObject(MiniApplicative(_assert)),
'define' : MiniObject(MiniApplicative(define)),
'defined?' : MiniObject(MiniApplicative(defined_p)),
'if' : MiniObject(MiniApplicative(_if)),
'operative' : MiniObject(MiniApplicative(operative)),
'throws?' : MiniObject(MiniApplicative(throws)),
}
builtins = dict_to_environment(builtins)
if __name__ == '__main__':
import os.path
import sys
arguments = sys.argv[1:]
predefineds = nest(builtins)
predefineds_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'predefineds.mini')
with open(predefineds_filename, 'r') as predefineds_file:
predefineds_source = predefineds_file.read()
try:
evaluate_expressions(parse_all(predefineds_source), predefineds)
except:
traceback.print_exc()
if len(arguments) == 0:
environment = nest(predefineds)
while True:
source = raw_input('>>> ')
try:
print(evaluate_expressions(parse_all(source), environment))
except:
traceback.print_exc()
else:
filename = arguments[0]
arguments = arguments[1:]
environment = nest(predefineds)
environment['__file__'] = MiniObject(os.path.join(os.path.realpath(filename)))
environment['__arguments__'] = create_cons_collection(map(MiniObject,arguments))
with open(filename,'r') as f:
source = f.read()
try:
print(evaluate_expressions(parse_all(source), environment))
except:
traceback.print_exc()
| kerkeslager/sandbox | mini/mini.py | Python | mit | 27,970 |
from PyQt4.QtCore import QSettings
__author__ = 'Igor Maculan <[email protected]>'
config = QSettings('pushbullet','qpushbullet') | n3wtron/qpushbullet | qpushbullet/__init__.py | Python | gpl-3.0 | 131 |
# This flexes PR#170 which is caused by a suspected JIT bug. Said bug for
# some reason breaks exception matching. There is now a workaround that seems
# to do the trick most of the time.
try:
int("foo")
except ValueError:
pass
| DarioGT/OMS-PluginXML | org.modelsphere.sms/lib/jython-2.2.1/Lib/test/bugs/pr170.py | Python | gpl-3.0 | 245 |
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import jmespath
from jsonschema import Draft4Validator
import pytest
import botocore.session
from botocore.exceptions import UnknownServiceError
from botocore.utils import ArgumentGenerator
WAITER_SCHEMA = {
"type": "object",
"properties": {
"version": {"type": "number"},
"waiters": {
"type": "object",
"additionalProperties": {
"type": "object",
"properties": {
"type": {
"type": "string",
"enum": ["api"]
},
"operation": {"type": "string"},
"description": {"type": "string"},
"delay": {
"type": "number",
"minimum": 0,
},
"maxAttempts": {
"type": "integer",
"minimum": 1
},
"acceptors": {
"type": "array",
"items": {
"type": "object",
"properties": {
"state": {
"type": "string",
"enum": ["success", "retry", "failure"]
},
"matcher": {
"type": "string",
"enum": [
"path", "pathAll", "pathAny",
"status", "error"
]
},
"argument": {"type": "string"},
"expected": {
"oneOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"}
]
}
},
"required": [
"state", "matcher", "expected"
],
"additionalProperties": False
}
}
},
"required": ["operation", "delay", "maxAttempts", "acceptors"],
"additionalProperties": False
}
}
},
"additionalProperties": False
}
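# For orientation, a minimal waiter document that satisfies the schema above could look
# like this (purely illustrative; the operation name and JMESPath argument are made up):
#
# {
#     "version": 2,
#     "waiters": {
#         "ThingAvailable": {
#             "operation": "DescribeThings",
#             "delay": 15,
#             "maxAttempts": 40,
#             "acceptors": [
#                 {"state": "success", "matcher": "path",
#                  "argument": "Things[0].State", "expected": "available"}
#             ]
#         }
#     }
# }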
def _waiter_configs():
session = botocore.session.get_session()
validator = Draft4Validator(WAITER_SCHEMA)
for service_name in session.get_available_services():
client = session.create_client(service_name, 'us-east-1')
try:
# We use the loader directly here because we need the entire
# json document, not just the portions exposed (either
# internally or externally) by the WaiterModel class.
loader = session.get_component('data_loader')
waiter_model = loader.load_service_model(
service_name, 'waiters-2')
except UnknownServiceError:
# The service doesn't have waiters
continue
yield validator, waiter_model, client
@pytest.mark.parametrize("validator, waiter_model, client", _waiter_configs())
def test_lint_waiter_configs(validator, waiter_model, client):
_validate_schema(validator, waiter_model)
for waiter_name in client.waiter_names:
_lint_single_waiter(client, waiter_name, client.meta.service_model)
def _lint_single_waiter(client, waiter_name, service_model):
try:
waiter = client.get_waiter(waiter_name)
# The 'acceptors' property is dynamic and will create
# the acceptor configs when first accessed. This is still
# considered a failure to construct the waiter which is
# why it's in this try/except block.
# This catches things like:
# * jmespath expression compiles
# * matcher has a known value
acceptors = waiter.config.acceptors
except Exception as e:
raise AssertionError("Could not create waiter '%s': %s"
% (waiter_name, e))
operation_name = waiter.config.operation
# Needs to reference an existing operation name.
if operation_name not in service_model.operation_names:
raise AssertionError("Waiter config references unknown "
"operation: %s" % operation_name)
# Needs to have at least one acceptor.
if not waiter.config.acceptors:
raise AssertionError("Waiter config must have at least "
"one acceptor state: %s" % waiter.name)
op_model = service_model.operation_model(operation_name)
for acceptor in acceptors:
_validate_acceptor(acceptor, op_model, waiter.name)
if not waiter.name.isalnum():
raise AssertionError(
"Waiter name %s is not alphanumeric." % waiter_name
)
def _validate_schema(validator, waiter_json):
errors = list(e.message for e in validator.iter_errors(waiter_json))
if errors:
raise AssertionError('\n'.join(errors))
def _validate_acceptor(acceptor, op_model, waiter_name):
if acceptor.matcher.startswith('path'):
expression = acceptor.argument
# The JMESPath expression should have the potential to match something
# in the response shape.
output_shape = op_model.output_shape
assert output_shape is not None, (
"Waiter '%s' has JMESPath expression with no output shape: %s"
% (waiter_name, op_model))
# We want to check if the JMESPath expression makes sense.
# To do this, we'll generate sample output and evaluate the
# JMESPath expression against the output. We'll then
# check a few things about this returned search result.
search_result = _search_jmespath_expression(expression, op_model)
if search_result is None:
raise AssertionError(
f"JMESPath expression did not match anything for waiter "
f"'{waiter_name}': {expression}"
)
if acceptor.matcher in ['pathAll', 'pathAny']:
assert isinstance(search_result, list), (
f"Attempted to use '{acceptor.matcher}' matcher in waiter "
f"'{waiter_name}' with non list result in JMESPath expression: "
f"{expression}"
)
def _search_jmespath_expression(expression, op_model):
arg_gen = ArgumentGenerator(use_member_names=True)
sample_output = arg_gen.generate_skeleton(op_model.output_shape)
search_result = jmespath.search(expression, sample_output)
return search_result
| boto/botocore | tests/functional/test_waiter_config.py | Python | apache-2.0 | 7,458 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class RouteFilterRulePaged(Paged):
"""
A paging container for iterating over a list of :class:`RouteFilterRule <azure.mgmt.network.v2017_06_01.models.RouteFilterRule>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[RouteFilterRule]'}
}
def __init__(self, *args, **kwargs):
super(RouteFilterRulePaged, self).__init__(*args, **kwargs)
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/route_filter_rule_paged.py | Python | mit | 967 |
#! /usr/bin/env python
import eris.server as server
if __name__ == "__main__":
server.run()
| jedisct1/eris | eris-server.py | Python | bsd-2-clause | 95 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
import math
import os
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class RGBToHSVTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to HSV and back, as a batch and individually
with self.test_session(use_gpu=True) as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_hsv(batch0)
batch2 = image_ops.hsv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_hsv, split0))
split2 = list(map(image_ops.hsv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1)
self.assertAllClose(batch2, join2)
self.assertAllClose(batch2, inp)
def testRGBToHSVRoundTrip(self):
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for nptype in [np.float32, np.float64]:
rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
with self.test_session(use_gpu=True):
hsv = image_ops.rgb_to_hsv(rgb_np)
rgb = image_ops.hsv_to_rgb(hsv)
rgb_tf = rgb.eval()
self.assertAllClose(rgb_tf, rgb_np)
class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
def _RGBToGrayscale(self, images):
is_batch = True
if len(images.shape) == 3:
is_batch = False
images = np.expand_dims(images, axis=0)
out_shape = images.shape[0:3] + (1,)
out = np.zeros(shape=out_shape, dtype=np.uint8)
for batch in xrange(images.shape[0]):
for y in xrange(images.shape[1]):
for x in xrange(images.shape[2]):
red = images[batch, y, x, 0]
green = images[batch, y, x, 1]
blue = images[batch, y, x, 2]
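          # ITU-R BT.601 luma weights; presumably the same coefficients
          # image_ops.rgb_to_grayscale applies internally.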
gray = 0.2989 * red + 0.5870 * green + 0.1140 * blue
out[batch, y, x, 0] = int(gray)
if not is_batch:
out = np.squeeze(out, axis=0)
return out
def _TestRGBToGrayscale(self, x_np):
y_np = self._RGBToGrayscale(x_np)
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.rgb_to_grayscale(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testBasicRGBToGrayscale(self):
# 4-D input with batch dimension.
x_np = np.array(
[[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 1, 2, 3])
self._TestRGBToGrayscale(x_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 2, 3])
self._TestRGBToGrayscale(x_np)
def testBasicGrayscaleToRGB(self):
# 4-D input with batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2, 1])
y_np = np.array(
[[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 1, 2, 3])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 2, 1])
y_np = np.array([[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 2, 3])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testShapeInference(self):
# Shape inference works and produces expected output where possible
rgb_shape = [7, None, 19, 3]
gray_shape = rgb_shape[:-1] + [1]
with self.test_session(use_gpu=True):
rgb_tf = array_ops.placeholder(dtypes.uint8, shape=rgb_shape)
gray = image_ops.rgb_to_grayscale(rgb_tf)
self.assertEqual(gray_shape, gray.get_shape().as_list())
with self.test_session(use_gpu=True):
gray_tf = array_ops.placeholder(dtypes.uint8, shape=gray_shape)
rgb = image_ops.grayscale_to_rgb(gray_tf)
self.assertEqual(rgb_shape, rgb.get_shape().as_list())
# Shape inference does not break for unknown shapes
with self.test_session(use_gpu=True):
rgb_tf_unknown = array_ops.placeholder(dtypes.uint8)
gray_unknown = image_ops.rgb_to_grayscale(rgb_tf_unknown)
self.assertFalse(gray_unknown.get_shape())
with self.test_session(use_gpu=True):
gray_tf_unknown = array_ops.placeholder(dtypes.uint8)
rgb_unknown = image_ops.grayscale_to_rgb(gray_tf_unknown)
self.assertFalse(rgb_unknown.get_shape())
class AdjustGamma(test_util.TensorFlowTestCase):
def test_adjust_gamma_one(self):
"""Same image should be returned for gamma equal to one"""
with self.test_session():
x_data = np.random.uniform(0, 255, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=1)
y_tf = y.eval()
y_np = x_np
self.assertAllClose(y_tf, y_np, 1e-6)
def test_adjust_gamma_zero(self):
"""White image should be returned for gamma equal to zero"""
with self.test_session():
x_data = np.random.uniform(0, 255, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=0)
y_tf = y.eval()
dtype = x.dtype.as_numpy_dtype
y_np = np.array([dtypes.dtype_range[dtype][1]] * x_np.size)
y_np = y_np.reshape((8, 8))
self.assertAllClose(y_tf, y_np, 1e-6)
def test_adjust_gamma_less_one(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to half"""
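    # The expected array below appears to follow trunc(255 * (x / 255.) ** gamma)
    # element-wise; e.g. x = 4 with gamma = 0.5 gives trunc(255 * sqrt(4 / 255.)) = 31.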
with self.test_session():
x_np = np.arange(0, 255, 4, np.uint8).reshape(8, 8)
y = image_ops.adjust_gamma(x_np, gamma=0.5)
y_tf = np.trunc(y.eval())
y_np = np.array(
[[0, 31, 45, 55, 63, 71, 78, 84],
[90, 95, 100, 105, 110, 115, 119, 123],
[127, 131, 135, 139, 142, 146, 149, 153],
[156, 159, 162, 165, 168, 171, 174, 177],
[180, 183, 186, 188, 191, 194, 196, 199],
[201, 204, 206, 209, 211, 214, 216, 218],
[221, 223, 225, 228, 230, 232, 234, 236],
[238, 241, 243, 245, 247, 249, 251, 253]],
dtype=np.float32)
self.assertAllClose(y_tf, y_np, 1e-6)
def test_adjust_gamma_greater_one(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to two"""
with self.test_session():
x_np = np.arange(0, 255, 4, np.uint8).reshape(8, 8)
y = image_ops.adjust_gamma(x_np, gamma=2)
y_tf = np.trunc(y.eval())
y_np = np.array(
[[0, 0, 0, 0, 1, 1, 2, 3],
[4, 5, 6, 7, 9, 10, 12, 14],
[16, 18, 20, 22, 25, 27, 30, 33],
[36, 39, 42, 45, 49, 52, 56, 60],
[64, 68, 72, 76, 81, 85, 90, 95],
[100, 105, 110, 116, 121, 127, 132, 138],
[144, 150, 156, 163, 169, 176, 182, 189],
[196, 203, 211, 218, 225, 233, 241, 249]],
dtype=np.float32)
self.assertAllClose(y_tf, y_np, 1e-6)
class AdjustHueTest(test_util.TensorFlowTestCase):
def testAdjustNegativeHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = -0.25
y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testAdjustPositiveHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def _adjustHueNp(self, x_np, delta_h):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
h += delta_h
h = math.fmod(h + 10.0, 1.0)
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def _adjustHueTf(self, x_np, delta_h):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np)
y = image_ops.adjust_hue(x, delta_h)
y_tf = y.eval()
return y_tf
def testAdjustRandomHue(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
'all_random',
'rg_same',
'rb_same',
'gb_same',
'rgb_same',
]
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
if test_style == 'all_random':
pass
elif test_style == 'rg_same':
x_np[..., 1] = x_np[..., 0]
elif test_style == 'rb_same':
x_np[..., 2] = x_np[..., 0]
elif test_style == 'gb_same':
x_np[..., 2] = x_np[..., 1]
elif test_style == 'rgb_same':
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError('Invalid test style: %s' % (test_style))
y_np = self._adjustHueNp(x_np, delta_h)
y_tf = self._adjustHueTf(x_np, delta_h)
self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-5)
def testInvalidShapes(self):
fused = False
if not fused:
# The tests are known to pass with the fused adjust_hue. We will enable
# them when the fused implementation is the default.
return
x_np = np.random.rand(2, 3) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesRegexp(ValueError, 'Shape must be at least rank 3'):
self._adjustHueTf(x_np, delta_h)
x_np = np.random.rand(4, 2, 4) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError('input must have 3 channels'):
self._adjustHueTf(x_np, delta_h)
class AdjustHueBenchmark(test.Benchmark):
def _benchmarkAdjustHue(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session('', graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(
image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_hue(inputs, delta)
run_op = control_flow_ops.group(outputs)
sess.run(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = '%s' % (cpu_count) if cpu_count is not None else '_all'
print('benchmarkAdjustHue_299_299_3_cpu%s step_time: %.2f us' %
(tag, step_time * 1e6))
self.report_benchmark(
name='benchmarkAdjustHue_299_299_3_cpu%s' % (tag),
iters=benchmark_rounds,
wall_time=step_time)
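  # The first `warmup_rounds` iterations are excluded from timing so that
  # one-off first-run overhead does not skew the reported per-step wall time.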
def benchmarkAdjustHueCpu1(self):
self._benchmarkAdjustHue('/cpu:0', 1)
def benchmarkAdjustHueCpuAll(self):
self._benchmarkAdjustHue('/cpu:0', None)
def benchmarkAdjustHueGpu(self):
self._benchmarkAdjustHue(test.gpu_device_name(), None)
class AdjustSaturationBenchmark(test.Benchmark):
def _benchmarkAdjustSaturation(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session('', graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(
image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_saturation(inputs, delta)
run_op = control_flow_ops.group(outputs)
sess.run(variables.global_variables_initializer())
for _ in xrange(warmup_rounds):
sess.run(run_op)
start = time.time()
for _ in xrange(benchmark_rounds):
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = '%s' % (cpu_count) if cpu_count is not None else '_all'
      print('benchmarkAdjustSaturation_299_299_3_cpu%s step_time: %.2f us' %
(tag, step_time * 1e6))
self.report_benchmark(
          name='benchmarkAdjustSaturation_299_299_3_cpu%s' % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustSaturationCpu1(self):
self._benchmarkAdjustSaturation('/cpu:0', 1)
def benchmarkAdjustSaturationCpuAll(self):
self._benchmarkAdjustSaturation('/cpu:0', None)
def benchmarkAdjustSaturationGpu(self):
self._benchmarkAdjustSaturation(test.gpu_device_name(), None)
class ResizeBilinearBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name='img')
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bilinear(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
name=('resize_bilinear_%s_%s_%s' %
(image_size[0], image_size[1], num_channels)))
print('%s : %.2f ms/img' % (results['name'], 1000 * results['wall_time']
/ (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
class ResizeBicubicBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name='img')
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bicubic(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
min_iters=20,
name=('resize_bicubic_%s_%s_%s' % (image_size[0], image_size[1],
num_channels)))
print('%s : %.2f ms/img' % (results['name'], 1000 * results['wall_time']
/ (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
def benchmarkSimilar4Channel(self):
self._benchmarkResize((183, 229), 4)
def benchmarkScaleUp4Channel(self):
self._benchmarkResize((141, 186), 4)
def benchmarkScaleDown4Channel(self):
self._benchmarkResize((749, 603), 4)
class ResizeAreaBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal([batch_size, image_size[0],
image_size[1], num_channels]),
name='img')
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_area(img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess, benchmark_op,
name=('resize_area_%s_%s_%s' %
(image_size[0], image_size[1], num_channels)))
print('%s : %.2f ms/img' % (
results['name'],
1000*results['wall_time'] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
class AdjustSaturationTest(test_util.TensorFlowTestCase):
def testHalfSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 2.0
y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def _adjust_saturation(self, image, saturation_factor):
image = ops.convert_to_tensor(image, name='image')
orig_dtype = image.dtype
flt_image = image_ops.convert_image_dtype(image, dtypes.float32)
saturation_adjusted_image = gen_image_ops.adjust_saturation(
flt_image, saturation_factor)
return image_ops.convert_image_dtype(saturation_adjusted_image,
orig_dtype)
def testHalfSaturationFused(self):
x_shape = [2, 2, 3]
x_rgb_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_rgb_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_rgb_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_rgb_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = self._adjust_saturation(x, saturation_factor)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturationFused(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 2.0
y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = self._adjust_saturation(x, saturation_factor)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def _adjustSaturationNp(self, x_np, scale):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
    pixel_count = x_v.shape[0]
    for i in xrange(pixel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
s *= scale
s = min(1.0, max(0.0, s))
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
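  # The reference above scales saturation in HSV space and clamps it to
  # [0, 1] before converting back to RGB, which is what the fused op is
  # expected to match in testAdjustRandomSaturation below.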
def testAdjustRandomSaturation(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
'all_random',
'rg_same',
'rb_same',
'gb_same',
'rgb_same',
]
with self.test_session():
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
scale = np.random.rand()
if test_style == 'all_random':
pass
elif test_style == 'rg_same':
x_np[..., 1] = x_np[..., 0]
elif test_style == 'rb_same':
x_np[..., 2] = x_np[..., 0]
elif test_style == 'gb_same':
x_np[..., 2] = x_np[..., 1]
elif test_style == 'rgb_same':
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError('Invalid test style: %s' % (test_style))
y_baseline = self._adjustSaturationNp(x_np, scale)
y_fused = self._adjust_saturation(x_np, scale).eval()
self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5)
class FlipTransposeRotateTest(test_util.TensorFlowTestCase):
def testIdempotentLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testRandomFlipLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_left_right(x_tf)
count_flipped = 0
count_unflipped = 0
for _ in range(50):
y_tf = y.eval()
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
self.assertGreaterEqual(count_flipped, 1)
self.assertGreaterEqual(count_unflipped, 1)
def testIdempotentUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testRandomFlipUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_up_down(x_tf)
count_flipped = 0
count_unflipped = 0
for _ in range(50):
y_tf = y.eval()
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
self.assertGreaterEqual(count_flipped, 1)
self.assertGreaterEqual(count_unflipped, 1)
def testIdempotentTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose_image(image_ops.transpose_image(x_tf))
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose_image(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testPartialShapes(self):
p_unknown_rank = array_ops.placeholder(dtypes.uint8)
p_unknown_dims = array_ops.placeholder(
dtypes.uint8, shape=[None, None, None])
p_unknown_width = array_ops.placeholder(dtypes.uint8, shape=[64, None, 3])
p_wrong_rank = array_ops.placeholder(dtypes.uint8, shape=[None, None])
p_zero_dim = array_ops.placeholder(dtypes.uint8, shape=[64, 0, 3])
for op in [
image_ops.flip_left_right, image_ops.flip_up_down,
image_ops.random_flip_left_right, image_ops.random_flip_up_down,
image_ops.transpose_image, image_ops.rot90
]:
transformed_unknown_rank = op(p_unknown_rank)
self.assertEqual(3, transformed_unknown_rank.get_shape().ndims)
transformed_unknown_dims = op(p_unknown_dims)
self.assertEqual(3, transformed_unknown_dims.get_shape().ndims)
transformed_unknown_width = op(p_unknown_width)
self.assertEqual(3, transformed_unknown_width.get_shape().ndims)
with self.assertRaisesRegexp(ValueError, 'must be three-dimensional'):
op(p_wrong_rank)
with self.assertRaisesRegexp(ValueError, 'must be > 0'):
op(p_zero_dim)
def testRot90GroupOrder(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.test_session(use_gpu=True):
rotated = image
for _ in xrange(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, rotated.eval())
def testRot90NumpyEquivalence(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.test_session(use_gpu=True):
k_placeholder = array_ops.placeholder(dtypes.int32, shape=[])
y_tf = image_ops.rot90(image, k_placeholder)
for k in xrange(4):
y_np = np.rot90(image, k=k)
self.assertAllEqual(y_np, y_tf.eval({k_placeholder: k}))
class RandomFlipTest(test_util.TensorFlowTestCase):
def testRandomLeftRight(self):
x_np = np.array([0, 1], dtype=np.uint8).reshape([1, 2, 1])
num_iterations = 500
hist = [0, 0]
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_left_right(x_tf)
for _ in xrange(num_iterations):
y_np = y.eval().flatten()[0]
hist[y_np] += 1
# Ensure that each entry is observed within 4 standard deviations.
four_stddev = 4.0 * np.sqrt(num_iterations / 2.0)
self.assertAllClose(hist, [num_iterations / 2.0] * 2, atol=four_stddev)
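  # With a fair coin the flipped count over `num_iterations` trials is
  # Binomial(n, 0.5), whose standard deviation is sqrt(n) / 2; the sqrt(n / 2)
  # used in these tests is a somewhat looser bound, so spurious failures
  # should be very rare.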
def testRandomUpDown(self):
x_np = np.array([0, 1], dtype=np.uint8).reshape([2, 1, 1])
num_iterations = 500
hist = [0, 0]
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_up_down(x_tf)
for _ in xrange(num_iterations):
y_np = y.eval().flatten()[0]
hist[y_np] += 1
# Ensure that each entry is observed within 4 standard deviations.
four_stddev = 4.0 * np.sqrt(num_iterations / 2.0)
self.assertAllClose(hist, [num_iterations / 2.0] * 2, atol=four_stddev)
class AdjustContrastTest(test_util.TensorFlowTestCase):
def _testContrast(self, x_np, y_np, contrast_factor):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = y.eval()
self.assertAllClose(y_tf, y_np, 1e-6)
def testDoubleContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 62, 169, 255, 28, 0, 255, 135, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testDoubleContrastFloat(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float).reshape(x_shape) / 255.
y_data = [
-45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,
134.75, 409.25, -116.5
]
y_np = np.array(y_data, dtype=np.float).reshape(x_shape) / 255.
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testHalfContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [22, 52, 65, 49, 118, 172, 41, 54, 176, 67, 178, 59]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=0.5)
def testBatchDoubleContrast(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def _adjustContrastNp(self, x_np, contrast_factor):
mean = np.mean(x_np, (1, 2), keepdims=True)
y_np = mean + contrast_factor * (x_np - mean)
return y_np
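  # Contrast adjustment interpolates (or extrapolates) every pixel away from
  # the per-channel spatial mean of its image:
  #   y = mean + contrast_factor * (x - mean)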
def _adjustContrastTf(self, x_np, contrast_factor):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = y.eval()
return y_tf
def testRandomContrast(self):
x_shapes = [
[1, 2, 2, 3],
[2, 1, 2, 3],
[1, 2, 2, 3],
[2, 5, 5, 3],
[2, 1, 1, 3],
]
for x_shape in x_shapes:
x_np = np.random.rand(*x_shape) * 255.
contrast_factor = np.random.rand() * 2.0 + 0.1
y_np = self._adjustContrastNp(x_np, contrast_factor)
y_tf = self._adjustContrastTf(x_np, contrast_factor)
self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)
class AdjustBrightnessTest(test_util.TensorFlowTestCase):
def _testBrightness(self, x_np, y_np, delta):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_brightness(x, delta)
y_tf = y.eval()
self.assertAllClose(y_tf, y_np, 1e-6)
def testPositiveDeltaUint8(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 255, 11]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testPositiveDeltaFloat(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 245, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 255, 100, 255, 11]
y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testNegativeDeltaFloat(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 10, 135, 226, 37, 8, 245, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.
y_data = [0, 0, 3, 0, 125, 216, 27, 0, 235, 80, 245, 0]
y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.
self._testBrightness(x_np, y_np, delta=-10. / 255.)
def testNegativeDelta(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 3, 44, 125, 216, 27, 0, 224, 80, 245, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=-10. / 255.)
class PerImageWhiteningTest(test_util.TensorFlowTestCase):
def _NumpyPerImageWhitening(self, x):
num_pixels = np.prod(x.shape)
x2 = np.square(x).astype(np.float32)
mn = np.mean(x)
vr = np.mean(x2) - (mn * mn)
stddev = max(math.sqrt(vr), 1.0 / math.sqrt(num_pixels))
y = x.astype(np.float32)
y -= mn
y /= stddev
return y
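  # Per-image standardization: y = (x - mean) / adjusted_stddev with
  # adjusted_stddev = max(stddev, 1 / sqrt(num_pixels)). The lower bound on
  # the denominator keeps uniform (zero-variance) images finite, which is what
  # testUniformImage below relies on.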
def testBasic(self):
x_shape = [13, 9, 3]
x_np = np.arange(0, np.prod(x_shape), dtype=np.int32).reshape(x_shape)
y_np = self._NumpyPerImageWhitening(x_np)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.per_image_standardization(x)
y_tf = y.eval()
self.assertAllClose(y_tf, y_np, atol=1e-4)
def testUniformImage(self):
im_np = np.ones([19, 19, 3]).astype(np.float32) * 249
im = constant_op.constant(im_np)
whiten = image_ops.per_image_standardization(im)
with self.test_session(use_gpu=True):
whiten_np = whiten.eval()
self.assertFalse(np.any(np.isnan(whiten_np)))
class CropToBoundingBoxTest(test_util.TensorFlowTestCase):
def _CropToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.crop_to_bounding_box(x_tensor, offset_height, offset_width,
target_height, target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.test_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._CropToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._CropToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError('Exception not raised: %s' % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.crop_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, 0, 0, x, x_shape)
def testCrop(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y_shape = [2, 3, 1]
y = [4, 5, 6, 7, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y_shape = [3, 2, 1]
y = [2, 3, 5, 6, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [2, 3, 1]
y = [1, 2, 3, 4, 5, 6]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [3, 2, 1]
y = [1, 2, 4, 5, 7, 8]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
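  # In each case above the returned window is
  #   image[offset_height:offset_height + target_height,
  #         offset_width:offset_width + target_width, :]
  # which is what the expected values encode.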
def testShapeInference(self):
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([1, 3, 5, 1], [3, 5]):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width, "'image' must be three-dimensional")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 1, 1), ([2, 0, 2], 1, 1), ([2, 2, 0], 1, 1),
([0, 2, 2], 0, 1), ([2, 0, 2], 1, 0))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # Multiple assertions could fail, but the evaluation order is arbitrary.
      # Match against a generic pattern.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
'assertion failed:',
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# (offset_height, offset_width, target_height, target_width), err_msg
test_config = (([-1, 0, 3, 3], 'offset_height must be >= 0'),
([0, -1, 3, 3], 'offset_width must be >= 0'),
([0, 0, 0, 3], 'target_height must be > 0'),
([0, 0, 3, 0], 'target_width must be > 0'),
([2, 0, 3, 3], 'height must be >= target + offset'),
([0, 2, 3, 3], 'width must be >= target + offset'))
for params, err_msg in test_config:
self._assertRaises(x, x_shape, *params, err_msg=err_msg)
class CentralCropTest(test_util.TensorFlowTestCase):
def _assertShapeInference(self, pre_shape, fraction, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.central_crop(image, fraction)
if post_shape is None:
self.assertEqual(y.get_shape().dims, None)
else:
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 1.0)
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
self.assertEqual(y.op.name, x.op.name)
def testCropping(self):
x_shape = [4, 8, 1]
x_np = np.array([[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8]],
dtype=np.int32).reshape(x_shape)
y_np = np.array([[3, 4, 5, 6], [3, 4, 5, 6]]).reshape([2, 4, 1])
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 0.5)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
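  # With fraction 0.5 the crop keeps the central half of each spatial
  # dimension, so the 4x8 input above yields its middle 2x4 window.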
def testShapeInference(self):
# Test no-op fraction=1.0
self._assertShapeInference([50, 60, 3], 1.0, [50, 60, 3])
self._assertShapeInference([None, 60, 3], 1.0, [None, 60, 3])
self._assertShapeInference([50, None, 3], 1.0, [50, None, 3])
self._assertShapeInference([None, None, 3], 1.0, [None, None, 3])
self._assertShapeInference([50, 60, None], 1.0, [50, 60, None])
self._assertShapeInference([None, None, None], 1.0, [None, None, None])
self._assertShapeInference(None, 1.0, None)
# TODO(toddw): Currently central_crop() doesn't infer the result shape even
# when it's possible. If we change it to do so, we can test as follows:
#
# self._assertShapeInference([50, 60, 3], 0.5, [25, 30, 3])
# self._assertShapeInference([None, 60, 3], 0.5, [None, 30, 3])
# self._assertShapeInference([50, None, 3], 0.5, [25, None, 3])
# self._assertShapeInference([None, None, 3], 0.5, [None, None, 3])
# self._assertShapeInference([50, 60, None], 0.5, [25, 30, None])
# self._assertShapeInference([None, None, None], 0.5, [None, None, None])
# self._assertShapeInference(None, 0.5, None)
def testError(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 0.0)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 1.01)
class PadToBoundingBoxTest(test_util.TensorFlowTestCase):
def _PadToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.pad_to_bounding_box(x_tensor, offset_height, offset_width,
target_height, target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.test_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._PadToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._PadToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError('Exception not raised: %s' % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.pad_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
offset_height, offset_width = [0, 0]
self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape)
def testPadding(self):
x = [1, 2, 3,
4, 5, 6,
7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y = [0, 0, 0,
1, 2, 3,
4, 5, 6,
7, 8, 9]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y = [0, 1, 2, 3,
0, 4, 5, 6,
0, 7, 8, 9]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3,
4, 5, 6,
7, 8, 9,
0, 0, 0]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 0,
4, 5, 6, 0,
7, 8, 9, 0]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
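  # pad_to_bounding_box places the image with its top-left corner at
  # (offset_height, offset_width) inside a zero-filled canvas of size
  # target_height x target_width, as the expected outputs above illustrate.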
def testShapeInference(self):
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([1, 3, 5, 1], [3, 5]):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width, "'image' must be three-dimensional")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 2, 2),
([2, 0, 2], 2, 2),
([2, 2, 0], 2, 2))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # The original error message does not contain backslashes. However, they
      # are added by either the assert op or the runtime. If this behaviour
      # changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"all dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [3, 3, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# offset_height, offset_width, target_height, target_width, err_msg
test_config = ((-1, 0, 4, 4, 'offset_height must be >= 0'),
( 0,-1, 4, 4, 'offset_width must be >= 0'),
( 2, 0, 4, 4, 'height must be <= target - offset'),
( 0, 2, 4, 4, 'width must be <= target - offset'))
for config_item in test_config:
self._assertRaises(x, x_shape, *config_item)
class SelectDistortedCropBoxTest(test_util.TensorFlowTestCase):
def _testSampleDistortedBoundingBox(self, image, bounding_box,
min_object_covered, aspect_ratio_range,
area_range):
original_area = float(np.prod(image.shape))
bounding_box_area = float((bounding_box[3] - bounding_box[1]) *
(bounding_box[2] - bounding_box[0]))
image_size_np = np.array(image.shape, dtype=np.int32)
bounding_box_np = (np.array(
bounding_box, dtype=np.float32).reshape([1, 1, 4]))
aspect_ratios = []
area_ratios = []
fraction_object_covered = []
num_iter = 1000
with self.test_session(use_gpu=True):
image_tf = constant_op.constant(image, shape=image.shape)
image_size_tf = constant_op.constant(
image_size_np, shape=image_size_np.shape)
bounding_box_tf = constant_op.constant(
bounding_box_np, dtype=dtypes.float32, shape=bounding_box_np.shape)
begin, size, _ = image_ops.sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
for _ in xrange(num_iter):
y_tf = y.eval()
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratios.append(area / original_area)
fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
# Ensure that each entry is observed within 3 standard deviations.
# num_bins = 10
# aspect_ratio_hist, _ = np.histogram(aspect_ratios,
# bins=num_bins,
# range=aspect_ratio_range)
# mean = np.mean(aspect_ratio_hist)
# stddev = np.sqrt(mean)
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# TODO(irving): Since the rejection probability is not independent of the
# aspect ratio, the aspect_ratio random value is not exactly uniformly
# distributed in [min_aspect_ratio, max_aspect_ratio). This test should be
# fixed to reflect the true statistical property, then tightened to enforce
      # a stricter bound. Or, ideally, the sample_distorted_bounding_box Op
      # should be fixed to not use rejection sampling and to generate
      # correctly uniform aspect ratios.
# self.assertAllClose(aspect_ratio_hist,
# [mean] * num_bins, atol=3.6 * stddev)
# The resulting crop will not be uniformly distributed in area. In practice,
# we find that the area skews towards the small sizes. Instead, we perform
# a weaker test to ensure that the area ratios are merely within the
# specified bounds.
self.assertLessEqual(max(area_ratios), area_range[1])
self.assertGreaterEqual(min(area_ratios), area_range[0])
      # For reference, here is what the distribution of area ratios looks like.
area_ratio_hist, _ = np.histogram(area_ratios, bins=10, range=area_range)
print('area_ratio_hist ', area_ratio_hist)
# Ensure that fraction_object_covered is satisfied.
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# self.assertGreaterEqual(min(fraction_object_covered), min_object_covered)
def testWholeImageBoundingBox(self):
height = 40
width = 50
image_size = [height, width, 1]
bounding_box = [0.0, 0.0, 1.0, 1.0]
image = np.arange(
0, np.prod(image_size), dtype=np.int32).reshape(image_size)
self._testSampleDistortedBoundingBox(
image,
bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
def testWithBoundingBox(self):
height = 40
width = 50
x_shape = [height, width, 1]
image = np.zeros(x_shape, dtype=np.int32)
# Create an object with 1's in a region with area A and require that
# the total pixel values >= 0.1 * A.
min_object_covered = 0.1
xmin = 2
ymin = 3
xmax = 12
ymax = 13
for x in np.arange(xmin, xmax + 1, 1):
for y in np.arange(ymin, ymax + 1, 1):
image[x, y] = 1
# Bounding box is specified as (ymin, xmin, ymax, xmax) in
# relative coordinates.
bounding_box = (float(ymin) / height, float(xmin) / width,
float(ymax) / height, float(xmax) / width)
self._testSampleDistortedBoundingBox(
image,
bounding_box=bounding_box,
min_object_covered=min_object_covered,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
def testSampleDistortedBoundingBoxShape(self):
with self.test_session(use_gpu=True):
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[0.0, 0.0, 1.0, 1.0],
shape=[4],
dtype=dtypes.float32,)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
class ResizeImagesTest(test_util.TensorFlowTestCase):
OPTIONS = [image_ops.ResizeMethod.BILINEAR,
image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC,
image_ops.ResizeMethod.AREA]
TYPES = [np.uint8, np.int8, np.int16, np.int32, np.int64,
np.float32, np.float64]
def _assertShapeInference(self, pre_shape, size, post_shape):
# Try single image resize
single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_images(single_image, size)
self.assertEqual(y.get_shape().as_list(), post_shape)
# Try batch images resize with known batch size
images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
# Try batch images resize with unknown batch size
images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
def shouldRunOnGPU(self, opt, nptype):
if opt == image_ops.ResizeMethod.NEAREST_NEIGHBOR \
and nptype in [np.float32, np.float64]:
return True
else:
return False
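  # shouldRunOnGPU gates the GPU runs below to the method/dtype combinations
  # that are expected to have GPU kernels here (nearest-neighbor resize with
  # floating-point inputs); everything else stays on the CPU path.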
def testNoOp(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [127, 127, 64, 64,
127, 127, 64, 64,
64, 64, 127, 127,
64, 64, 127, 127,
50, 50, 100, 100,
50, 50, 100, 100]
target_height = 6
target_width = 4
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for opt in self.OPTIONS:
if test.is_gpu_available() and self.shouldRunOnGPU(opt, nptype):
with self.test_session(use_gpu=True) as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
opt)
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
    # Resizing a single image must also leave the shape unchanged.
with self.test_session(use_gpu=True):
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images(image, [target_height, target_width],
self.OPTIONS[0])
yshape = array_ops.shape(y)
newshape = yshape.eval()
self.assertAllEqual(single_shape, newshape)
def testTensorArguments(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [127, 127, 64, 64,
127, 127, 64, 64,
64, 64, 127, 127,
64, 64, 127, 127,
50, 50, 100, 100,
50, 50, 100, 100]
new_size = array_ops.placeholder(dtypes.int32, shape=(2))
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for opt in self.OPTIONS:
with self.test_session(use_gpu=True) as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, new_size, opt)
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
    # Resizing a single image must also leave the shape unchanged.
with self.test_session(use_gpu=True):
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images(image, new_size, self.OPTIONS[0])
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(single_shape, newshape)
self.assertAllClose(resized, img_single, atol=1e-5)
# Incorrect shape.
with self.assertRaises(ValueError):
new_size = constant_op.constant(4)
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([4])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([1, 2, 3])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
# Incorrect dtypes.
with self.assertRaises(ValueError):
new_size = constant_op.constant([6.0, 4])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [6, 4.0],
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [None, 4],
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [6, None],
image_ops.ResizeMethod.BILINEAR)
def testSumTensor(self):
img_shape = [1, 6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [127, 127, 64, 64,
127, 127, 64, 64,
64, 64, 127, 127,
64, 64, 127, 127,
50, 50, 100, 100,
50, 50, 100, 100]
# Test size where width is specified as a tensor which is a sum
# of two tensors.
width_1 = constant_op.constant(1)
width_2 = constant_op.constant(3)
width = math_ops.add(width_1, width_2)
height = constant_op.constant(6)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for opt in self.OPTIONS:
with self.test_session() as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [height, width], opt)
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
def testResizeDown(self):
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [127, 127, 64, 64,
127, 127, 64, 64,
64, 64, 127, 127,
64, 64, 127, 127,
50, 50, 100, 100,
50, 50, 100, 100]
expected_data = [127, 64,
64, 127,
50, 100]
target_height = 3
target_width = 2
# Test out 3-D and 4-D image shapes.
img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
target_shapes = [[1, target_height, target_width, 1],
[target_height, target_width, 1]]
for target_shape, img_shape in zip(target_shapes, img_shapes):
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for opt in self.OPTIONS:
if test.is_gpu_available() and self.shouldRunOnGPU(opt, nptype):
with self.test_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
opt)
expected = np.array(expected_data).reshape(target_shape)
resized = y.eval()
self.assertAllClose(resized, expected, atol=1e-5)
def testResizeUp(self):
img_shape = [1, 3, 2, 1]
data = [64, 32,
32, 64,
50, 100]
target_height = 6
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethod.BILINEAR] = [
64.0, 48.0, 32.0, 32.0,
48.0, 48.0, 48.0, 48.0,
32.0, 48.0, 64.0, 64.0,
41.0, 61.5, 82.0, 82.0,
50.0, 75.0, 100.0, 100.0,
50.0, 75.0, 100.0, 100.0]
expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [
64.0, 64.0, 32.0, 32.0,
64.0, 64.0, 32.0, 32.0,
32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0,
50.0, 50.0, 100.0, 100.0,
50.0, 50.0, 100.0, 100.0]
expected_data[image_ops.ResizeMethod.AREA] = [
64.0, 64.0, 32.0, 32.0,
64.0, 64.0, 32.0, 32.0,
32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0,
50.0, 50.0, 100.0, 100.0,
50.0, 50.0, 100.0, 100.0]
for nptype in self.TYPES:
for opt in [
image_ops.ResizeMethod.BILINEAR,
image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.AREA]:
if test.is_gpu_available() and self.shouldRunOnGPU(opt, nptype):
with self.test_session(use_gpu=True):
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(
image, [target_height, target_width], opt)
resized = y.eval()
expected = np.array(expected_data[opt]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
def testResizeUpBicubic(self):
img_shape = [1, 6, 6, 1]
data = [128, 128, 64, 64, 128, 128, 64, 64,
64, 64, 128, 128, 64, 64, 128, 128,
50, 50, 100, 100, 50, 50, 100, 100,
50, 50, 100, 100, 50, 50, 100, 100,
50, 50, 100, 100]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 8
target_width = 8
expected_data = [128, 135, 96, 55, 64, 114, 134, 128,
78, 81, 68, 52, 57, 118, 144, 136,
55, 49, 79, 109, 103, 89, 83, 84,
74, 70, 95, 122, 115, 69, 49, 55,
100, 105, 75, 43, 50, 89, 105, 100,
57, 54, 74, 96, 91, 65, 55, 58,
70, 69, 75, 81, 80, 72, 69, 70,
105, 112, 75, 36, 45, 92, 111, 105]
with self.test_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethod.BICUBIC)
resized = y.eval()
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1)
def testResizeDownArea(self):
img_shape = [1, 6, 6, 1]
data = [128, 64, 32, 16, 8, 4,
4, 8, 16, 32, 64, 128,
128, 64, 32, 16, 8, 4,
5, 10, 15, 20, 25, 30,
30, 25, 20, 15, 10, 5,
5, 10, 15, 20, 25, 30]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 4
target_width = 4
expected_data = [73, 33, 23, 39,
73, 33, 23, 39,
14, 16, 19, 21,
14, 16, 19, 21]
with self.test_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethod.AREA)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
resized = y.eval()
self.assertAllClose(resized, expected, atol=1)
def testCompareNearestNeighbor(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
with self.test_session(use_gpu=True):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=align_corners)
gpu_val = out_op.eval()
with self.test_session(use_gpu=False):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=align_corners)
cpu_val = out_op.eval()
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
def testCompareBilinear(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
value = {}
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethod.BILINEAR,
align_corners=align_corners)
value[use_gpu] = out_op.eval()
self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
def testShapeInference(self):
self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):
def _ResizeImageWithCropOrPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.resize_image_with_crop_or_pad(x_tensor, target_height,
target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.test_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError('Exception not raised: %s' % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_crop_or_pad(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
def testPad(self):
# Pad even along col.
x = [1, 2, 3, 4,
5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0,
0, 5, 6, 7, 8, 0]
y_shape = [2, 6, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along col.
x = [1, 2, 3, 4,
5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0, 0,
0, 5, 6, 7, 8, 0, 0]
y_shape = [2, 7, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad even along row.
x = [1, 2, 3, 4,
5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0,
1, 2, 3, 4,
5, 6, 7, 8,
0, 0, 0, 0]
y_shape = [4, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along row.
x = [1, 2, 3, 4,
5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0,
1, 2, 3, 4,
5, 6, 7, 8,
0, 0, 0, 0,
0, 0, 0, 0]
y_shape = [5, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testCrop(self):
# Crop even along col.
x = [1, 2, 3, 4,
5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [2, 3,
6, 7]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along col.
x = [1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12]
x_shape = [2, 6, 1]
y = [2, 3, 4,
8, 9, 10]
y_shape = [2, 3, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop even along row.
x = [1, 2,
3, 4,
5, 6,
7, 8]
x_shape = [4, 2, 1]
y = [3, 4,
5, 6]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along row.
x = [1, 2,
3, 4,
5, 6,
7, 8,
9, 10,
11, 12,
13, 14,
15, 16]
x_shape = [8, 2, 1]
y = [3, 4,
5, 6,
7, 8,
9, 10,
11, 12]
y_shape = [5, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
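  # Cropping and padding are centered; when the total amount to remove or add
  # is odd, the extra row or column falls on the bottom/right, as the expected
  # values in testPad and testCrop show.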
def testCropAndPad(self):
# Pad along row but crop along col.
x = [1, 2, 3, 4,
5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0,
2, 3,
6, 7,
0, 0]
y_shape = [4, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop along row but pad along col.
x = [1, 2,
3, 4,
5, 6,
7, 8]
x_shape = [4, 2, 1]
y = [0, 3, 4, 0,
0, 5, 6, 0]
y_shape = [2, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testShapeInference(self):
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
target_height, target_width = [4, 4]
for x_shape in ([1, 3, 5, 1], [3, 5]):
self._assertRaises(x, x_shape, target_height, target_width,
"'image' must be three-dimensional")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
target_height, target_width = [1, 1]
x = []
for x_shape in ([0, 2, 2], [2, 0, 2], [2, 2, 0]):
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
    # The original error message does not contain backslashes. However, they
    # are added by either the assert op or the runtime. If this behaviour
    # changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"all dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# target_height <= 0
target_height, target_width = [0, 5]
self._assertRaises(x, x_shape, target_height, target_width,
'target_height must be > 0')
# target_width <= 0
target_height, target_width = [5, 0]
self._assertRaises(x, x_shape, target_height, target_width,
'target_width must be > 0')
def _SimpleColorRamp():
"""Build a simple color ramp RGB image."""
w, h = 256, 200
i = np.arange(h)[:, None]
j = np.arange(w)
image = np.empty((h, w, 3), dtype=np.uint8)
image[:, :, 0] = i
image[:, :, 1] = j
image[:, :, 2] = (i + j) >> 1
return image
class JpegTest(test_util.TensorFlowTestCase):
# TODO(irving): Add self.assertAverageLess or similar to test_util
def averageError(self, image0, image1):
self.assertEqual(image0.shape, image1.shape)
image0 = image0.astype(int) # Avoid overflow
return np.abs(image0 - image1).sum() / np.prod(image0.shape)
def testExisting(self):
# Read a real jpeg and verify shape
path = ('tensorflow/core/lib/jpeg/testdata/'
'jpeg_merge_test1.jpg')
with self.test_session(use_gpu=True) as sess:
jpeg0 = io_ops.read_file(path)
image0 = image_ops.decode_jpeg(jpeg0)
image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0))
jpeg0, image0, image1 = sess.run([jpeg0, image0, image1])
self.assertEqual(len(jpeg0), 3771)
self.assertEqual(image0.shape, (256, 128, 3))
self.assertLess(self.averageError(image0, image1), 1.4)
def testCmyk(self):
# Confirm that CMYK reads in as RGB
base = 'tensorflow/core/lib/jpeg/testdata'
rgb_path = os.path.join(base, 'jpeg_merge_test1.jpg')
cmyk_path = os.path.join(base, 'jpeg_merge_test1_cmyk.jpg')
shape = 256, 128, 3
for channels in 3, 0:
with self.test_session(use_gpu=True) as sess:
rgb = image_ops.decode_jpeg(
io_ops.read_file(rgb_path), channels=channels)
cmyk = image_ops.decode_jpeg(
io_ops.read_file(cmyk_path), channels=channels)
rgb, cmyk = sess.run([rgb, cmyk])
self.assertEqual(rgb.shape, shape)
self.assertEqual(cmyk.shape, shape)
error = self.averageError(rgb, cmyk)
self.assertLess(error, 4)
def testSynthetic(self):
with self.test_session(use_gpu=True) as sess:
# Encode it, then decode it, then encode it
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method='INTEGER_ACCURATE')
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method='INTEGER_ACCURATE')
jpeg0, image0, image1, image2 = sess.run([jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input
self.assertLess(self.averageError(image0, image1), 0.6)
# We should be very close to a fixpoint
self.assertLess(self.averageError(image1, image2), 0.02)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testSyntheticFasterAlgorithm(self):
with self.test_session(use_gpu=True) as sess:
# Encode it, then decode it, then encode it
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method='INTEGER_FAST')
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method='INTEGER_FAST')
jpeg0, image0, image1, image2 = sess.run([jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input, but
# note this is worse than the slower algorithm because it is
# less accurate.
self.assertLess(self.averageError(image0, image1), 0.95)
# Repeated compression / decompression will have a higher error
# with a lossier algorithm.
self.assertLess(self.averageError(image1, image2), 1.05)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testDefaultDCTMethodIsIntegerFast(self):
with self.test_session(use_gpu=True) as sess:
      # Compare decoding with both dct_method='INTEGER_FAST' and the
      # default. They should be the same.
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method='INTEGER_FAST')
image2 = image_ops.decode_jpeg(jpeg0)
image1, image2 = sess.run([image1, image2])
# The images should be the same.
self.assertAllClose(image1, image2)
def testShape(self):
with self.test_session(use_gpu=True) as sess:
jpeg = constant_op.constant('nonsense')
for channels in 0, 1, 3:
image = image_ops.decode_jpeg(jpeg, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
class PngTest(test_util.TensorFlowTestCase):
def testExisting(self):
# Read some real PNGs, converting to different channel numbers
prefix = 'tensorflow/core/lib/png/testdata/'
inputs = (1, 'lena_gray.png'), (4, 'lena_rgba.png')
for channels_in, filename in inputs:
for channels in 0, 1, 3, 4:
with self.test_session(use_gpu=True) as sess:
png0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_png(png0, channels=channels)
png0, image0 = sess.run([png0, image0])
self.assertEqual(image0.shape, (26, 51, channels or channels_in))
if channels == channels_in:
image1 = image_ops.decode_png(image_ops.encode_png(image0))
self.assertAllEqual(image0, image1.eval())
def testSynthetic(self):
with self.test_session(use_gpu=True) as sess:
# Encode it, then decode it
image0 = constant_op.constant(_SimpleColorRamp())
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = sess.run([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 400)
self.assertLessEqual(len(png0), 750)
def testSyntheticUint16(self):
with self.test_session(use_gpu=True) as sess:
# Encode it, then decode it
image0 = constant_op.constant(_SimpleColorRamp(), dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = sess.run([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 800)
self.assertLessEqual(len(png0), 1500)
def testSyntheticTwoChannel(self):
with self.test_session(use_gpu=True) as sess:
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = _SimpleColorRamp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = sess.run([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
def testSyntheticTwoChannelUint16(self):
with self.test_session(use_gpu=True) as sess:
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = _SimpleColorRamp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha, dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = sess.run([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
def testShape(self):
with self.test_session(use_gpu=True):
png = constant_op.constant('nonsense')
for channels in 0, 1, 3:
image = image_ops.decode_png(png, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
class GifTest(test_util.TensorFlowTestCase):
def testValid(self):
# Read some real GIFs
prefix = 'tensorflow/core/lib/gif/testdata/'
filename = 'scan.gif'
WIDTH = 20
HEIGHT = 40
STRIDE = 5
shape = (12, HEIGHT, WIDTH, 3)
with self.test_session(use_gpu=True) as sess:
gif0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_gif(gif0)
gif0, image0 = sess.run([gif0, image0])
self.assertEqual(image0.shape, shape)
for frame_idx, frame in enumerate(image0):
gt = np.zeros(shape[1:], dtype=np.uint8)
start = frame_idx * STRIDE
end = (frame_idx + 1) * STRIDE
print(frame_idx)
if end <= WIDTH:
gt[:, start:end, :] = 255
else:
start -= WIDTH
end -= WIDTH
gt[start:end, :, :] = 255
self.assertAllClose(frame, gt)
def testInValid(self):
# Read some real GIFs
prefix = 'tensorflow/core/lib/gif/testdata/'
filename = 'optimized.gif'
with self.test_session(use_gpu=True) as sess:
gif0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_gif(gif0)
with self.assertRaises(errors.InvalidArgumentError):
gif0, image0 = sess.run([gif0, image0])
def testShape(self):
with self.test_session(use_gpu=True) as sess:
gif = constant_op.constant('nonsense')
image = image_ops.decode_gif(gif)
self.assertEqual(image.get_shape().as_list(), [None, None, None, 3])
class ConvertImageTest(test_util.TensorFlowTestCase):
def _convert(self, original, original_dtype, output_dtype, expected):
x_np = np.array(original, dtype=original_dtype.as_numpy_dtype())
y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype())
with self.test_session(use_gpu=True):
image = constant_op.constant(x_np)
y = image_ops.convert_image_dtype(image, output_dtype)
self.assertTrue(y.dtype == output_dtype)
self.assertAllClose(y.eval(), y_np, atol=1e-5)
def testNoConvert(self):
# Make sure converting to the same data type creates only an identity op
with self.test_session(use_gpu=True):
image = constant_op.constant([1], dtype=dtypes.uint8)
image_ops.convert_image_dtype(image, dtypes.uint8)
y = image_ops.convert_image_dtype(image, dtypes.uint8)
self.assertEquals(y.op.type, 'Identity')
self.assertEquals(y.op.inputs[0], image)
def testConvertBetweenInteger(self):
    # Make sure converting between integer types scales appropriately
with self.test_session(use_gpu=True):
self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])
self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])
def testConvertBetweenFloat(self):
    # Make sure converting between float types does nothing interesting
with self.test_session(use_gpu=True):
self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64,
[-1.0, 0, 1.0, 200000])
self._convert([-1.0, 0, 1.0, 200000], dtypes.float64, dtypes.float32,
[-1.0, 0, 1.0, 200000])
def testConvertBetweenIntegerAndFloat(self):
# Make sure converting from and to a float type scales appropriately
with self.test_session(use_gpu=True):
self._convert([0, 1, 255], dtypes.uint8, dtypes.float32,
[0, 1.0 / 255.0, 1])
self._convert([0, 1.1 / 255.0, 1], dtypes.float32, dtypes.uint8,
[0, 1, 255])
def testConvertBetweenInt16AndInt8(self):
with self.test_session(use_gpu=True):
# uint8, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.uint8,
[0, 255])
self._convert([0, 255], dtypes.uint8, dtypes.uint16,
[0, 255 * 256])
# int8, uint16
self._convert([0, 127 * 2 * 256], dtypes.uint16, dtypes.int8,
[0, 127])
self._convert([0, 127], dtypes.int8, dtypes.uint16,
[0, 127 * 2 * 256])
# int16, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.int16,
[0, 255 * 128])
self._convert([0, 255 * 128], dtypes.int16, dtypes.uint16,
[0, 255 * 256])
class TotalVariationTest(test_util.TensorFlowTestCase):
"""Tests the function total_variation() in image_ops.
We test a few small handmade examples, as well as
some larger examples using an equivalent numpy
implementation of the total_variation() function.
We do NOT test for overflows and invalid / edge-case arguments.
"""
def _test(self, x_np, y_np):
"""Test that the TensorFlow implementation of
total_variation(x_np) calculates the values in y_np.
Note that these may be float-numbers so we only test
for approximate equality within some narrow error-bound.
"""
# Create a TensorFlow session.
with self.test_session(use_gpu=True):
# Add a constant to the TensorFlow graph that holds the input.
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# Add ops for calculating the total variation using TensorFlow.
y = image_ops.total_variation(images=x_tf)
# Run the TensorFlow session to calculate the result.
y_tf = y.eval()
# Assert that the results are as expected within
# some small error-bound in case they are float-values.
self.assertAllClose(y_tf, y_np)
def _total_variation_np(self, x_np):
"""Calculate the total variation of x_np using numpy.
This implements the same function as TensorFlow but
using numpy instead.
Args:
x_np: Numpy array with 3 or 4 dimensions.
"""
dim = len(x_np.shape)
if dim == 3:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[1:, :, :] - x_np[:-1, :, :]
dif2 = x_np[:, 1:, :] - x_np[:, :-1, :]
# Sum for all axis.
sum_axis = None
elif dim == 4:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[:, 1:, :, :] - x_np[:, :-1, :, :]
dif2 = x_np[:, :, 1:, :] - x_np[:, :, :-1, :]
# Only sum for the last 3 axis.
sum_axis = (1, 2, 3)
else:
# This should not occur in this test-code.
pass
tot_var = np.sum(np.abs(dif1), axis=sum_axis) + \
np.sum(np.abs(dif2), axis=sum_axis)
return tot_var
def _test_tensorflow_vs_numpy(self, x_np):
"""Test the TensorFlow implementation against a numpy implementation.
Args:
x_np: Numpy array with 3 or 4 dimensions.
"""
# Calculate the y-values using the numpy implementation.
y_np = self._total_variation_np(x_np)
self._test(x_np, y_np)
def _generateArray(self, shape):
"""Generate an array of the given shape for use in testing.
The numbers are calculated as the cumulative sum, which
causes the difference between neighboring numbers to vary."""
# Flattened length of the array.
flat_len = np.prod(shape)
a = np.array(range(flat_len), dtype=int)
a = np.cumsum(a)
a = a.reshape(shape)
return a
def testTotalVariationNumpy(self):
"""Test the TensorFlow implementation against a numpy implementation.
The two implementations are very similar so it is possible that both
have the same bug, which would not be detected by this test. It is
therefore necessary to test with manually crafted data as well."""
# Generate a test-array.
# This is an 'image' with 100x80 pixels and 3 color channels.
a = self._generateArray(shape=(100, 80, 3))
# Test the TensorFlow implementation vs. numpy implementation.
# We use a numpy implementation to check the results that are
# calculated using TensorFlow are correct.
self._test_tensorflow_vs_numpy(a)
self._test_tensorflow_vs_numpy(a + 1)
self._test_tensorflow_vs_numpy(-a)
self._test_tensorflow_vs_numpy(1.1 * a)
# Expand to a 4-dim array.
b = a[np.newaxis, :]
# Combine several variations of the image into a single 4-dim array.
multi = np.vstack((b, b + 1, -b, 1.1 * b))
# Test that the TensorFlow function can also handle 4-dim arrays.
self._test_tensorflow_vs_numpy(multi)
def testTotalVariationHandmade(self):
"""Test the total variation for a few handmade examples."""
# We create an image that is 2x2 pixels with 3 color channels.
# The image is very small so we can check the result by hand.
# Red color channel.
# The following are the sum of absolute differences between the pixels.
# sum row dif = (4-1) + (7-2) = 3 + 5 = 8
# sum col dif = (2-1) + (7-4) = 1 + 3 = 4
r = [[1, 2],
[4, 7]]
    # Green color channel.
# sum row dif = 18 + 29 = 47
# sum col dif = 7 + 18 = 25
g = [[11, 18],
[29, 47]]
    # Blue color channel.
# sum row dif = 120 + 193 = 313
# sum col dif = 47 + 120 = 167
b = [[73, 120],
[193, 313]]
# Combine the 3 color channels into a single 3-dim array.
# The shape is (2, 2, 3) corresponding to (height, width and color).
a = np.dstack((r, g, b))
# Total variation for this image.
# Sum of all pixel differences = 8 + 4 + 47 + 25 + 313 + 167 = 564
tot_var = 564
# Calculate the total variation using TensorFlow and assert it is correct.
self._test(a, tot_var)
# If we add 1 to all pixel-values then the total variation is unchanged.
self._test(a + 1, tot_var)
# If we negate all pixel-values then the total variation is unchanged.
self._test(-a, tot_var)
# Scale the pixel-values by a float. This scales the total variation as well.
b = 1.1 * a
self._test(b, 1.1 * tot_var)
# Scale by another float.
c = 1.2 * a
self._test(c, 1.2 * tot_var)
# Combine these 3 images into a single array of shape (3, 2, 2, 3)
# where the first dimension is for the image-number.
multi = np.vstack((a[np.newaxis, :],
b[np.newaxis, :],
c[np.newaxis, :]))
# Check that TensorFlow correctly calculates the total variation
# for each image individually and returns the correct array.
self._test(multi, tot_var * np.array([1.0, 1.1, 1.2]))
if __name__ == '__main__':
googletest.main()
| strint/tensorflow | tensorflow/python/ops/image_ops_test.py | Python | apache-2.0 | 100,652 |
import logging
from bs4 import BeautifulSoup as BS
class ResponseParser(object):
#def __init__(self):
# print('nothing')
"""
    Attempt to write a generic parser for the perverted, non-uniform HTML tables.
Works with the following URLs
"""
def generic_parse(self, html_code):
        ### TODO write data to the backend instead of only logging debug messages
soup = BS(html_code)
tables = soup.find_all('table')
for table in tables:
table_name = table.get('summary')
            logging.debug('Table Name is "%s"', table_name)
# TODO parse thead and tfoot for description
trs = table.find('thead').find_all('tr')
logging.debug('More captions: ')
for tr in trs:
ths = tr.find_all('th')
for th in ths:
logging.debug(' ** TH: %s', th.contents[0].strip())
trs = table.find('tbody').find_all('tr')
for tr in trs:
th = tr.find('th')
logging.debug('- TH: %s',th.contents[0].strip())
tds = tr.find_all('td')
for td in tds:
logging.debug(' ** TD: %s', td.contents[0].strip())
def parse_average_age(self, html_code):
self.generic_parse(html_code)
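# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal, self-contained way to exercise generic_parse() with an HTML table
# that carries a ``summary`` attribute. The markup below is hypothetical; in the
# real project the HTML would come from an HTTP response.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    sample_html = """
    <table summary="Average age by year">
      <thead><tr><th>Year</th><th>Average age</th></tr></thead>
      <tbody>
        <tr><th>2012</th><td>42.1</td></tr>
        <tr><th>2013</th><td>42.4</td></tr>
      </tbody>
    </table>
    """
    ResponseParser().generic_parse(sample_html)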
| UlmApi/bw-srdb-parser | lib/ResponseParser.py | Python | mit | 1,327 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from docutils import nodes
from docutils.parsers.rst import Directive
from sphinx import addnodes
from sphinx.domains import Domain
from sphinx.domains import Index
from sphinx.domains import ObjType
from sphinx.roles import XRefRole
from sphinx.util import logging
from sphinx.util import ws_re
from sphinx.util.docfields import DocFieldTransformer
from sphinx.util.docfields import Field
from sphinx.util.docfields import TypedField
from sphinx.util.nodes import make_refnode
logger = logging.getLogger(__name__)
class BBRefTargetDirective(Directive):
"""
A directive that can be a target for references. Attributes:
@cvar ref_type: same as directive name
@cvar indextemplates: templates for main index entries, if any
"""
has_content = False
name_annotation = None
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
domain = 'bb'
doc_field_types = []
def get_field_type_map(self):
# This is the same as DocFieldTransformer.preprocess_fieldtype which got removed in
# Sphinx 4.0
typemap = {}
for fieldtype in self.doc_field_types:
for name in fieldtype.names:
typemap[name] = fieldtype, False
if fieldtype.is_typed:
for name in fieldtype.typenames:
typemap[name] = fieldtype, True
return typemap
def run(self):
self.env = env = self.state.document.settings.env
# normalize whitespace in fullname like XRefRole does
fullname = ws_re.sub(' ', self.arguments[0].strip())
targetname = '{}-{}'.format(self.ref_type, fullname)
# keep the target; this may be used to generate a BBIndex later
targets = env.domaindata['bb']['targets'].setdefault(self.ref_type, {})
targets[fullname] = env.docname, targetname
# make up the descriptor: a target and potentially an index descriptor
node = nodes.target('', '', ids=[targetname])
ret = [node]
# add the target to the document
self.state.document.note_explicit_target(node)
# append the index node if necessary
entries = []
for tpl in self.indextemplates:
colon = tpl.find(':')
if colon != -1:
indextype = tpl[:colon].strip()
indexentry = tpl[colon + 1:].strip() % (fullname,)
else:
indextype = 'single'
indexentry = tpl % (fullname,)
entries.append(
(indextype, indexentry, targetname, targetname, None))
if entries:
inode = addnodes.index(entries=entries)
ret.insert(0, inode)
# if the node has content, set up a signature and parse the content
if self.has_content:
descnode = addnodes.desc()
descnode['domain'] = 'bb'
descnode['objtype'] = self.ref_type
descnode['noindex'] = True
signode = addnodes.desc_signature(fullname, '')
if self.name_annotation:
annotation = "{} ".format(self.name_annotation)
signode += addnodes.desc_annotation(annotation, annotation)
signode += addnodes.desc_name(fullname, fullname)
descnode += signode
contentnode = addnodes.desc_content()
self.state.nested_parse(self.content, 0, contentnode)
DocFieldTransformer(self).transform_all(contentnode)
descnode += contentnode
ret.append(descnode)
return ret
@classmethod
def resolve_ref(cls, domain, env, fromdocname, builder, typ, target, node,
contnode):
"""
Resolve a reference to a directive of this class
"""
targets = domain.data['targets'].get(cls.ref_type, {})
try:
todocname, targetname = targets[target]
except KeyError:
env.warn(fromdocname, "Missing BB reference: bb:{}:{}".format(cls.ref_type, target),
node.line)
return None
return make_refnode(builder, fromdocname,
todocname, targetname,
contnode, target)
def make_ref_target_directive(ref_type, indextemplates=None, **kwargs):
"""
Create and return a L{BBRefTargetDirective} subclass.
"""
class_vars = dict(ref_type=ref_type, indextemplates=indextemplates)
class_vars.update(kwargs)
return type("BB{}RefTargetDirective".format(ref_type.capitalize()),
(BBRefTargetDirective,), class_vars)
class BBIndex(Index):
"""
A Buildbot-specific index.
@cvar name: same name as the directive and xref role
@cvar localname: name of the index document
"""
def generate(self, docnames=None):
content = {}
idx_targets = self.domain.data['targets'].get(self.name, {})
for name, (docname, targetname) in idx_targets.items():
letter = name[0].upper()
content.setdefault(letter, []).append(
(name, 0, docname, targetname, '', '', ''))
content = [(l, sorted(content[l], key=lambda tup: tup[0].lower()))
for l in sorted(content.keys())]
return (content, False)
@classmethod
def resolve_ref(cls, domain, env, fromdocname, builder, typ, target, node,
contnode):
"""
Resolve a reference to an index to the document containing the index,
using the index's C{localname} as the content of the link.
"""
# indexes appear to be automatically generated at doc DOMAIN-NAME
todocname = "bb-{}".format(target)
node = nodes.reference('', '', internal=True)
node['refuri'] = builder.get_relative_uri(fromdocname, todocname)
node['reftitle'] = cls.localname
node.append(nodes.emphasis(cls.localname, cls.localname))
return node
def make_index(name, localname):
"""
Create and return a L{BBIndex} subclass, for use in the domain's C{indices}
"""
return type("BB{}Index".format(name.capitalize()),
(BBIndex,),
dict(name=name, localname=localname))
class BBDomain(Domain):
name = 'bb'
label = 'Buildbot'
object_types = {
'cfg': ObjType('cfg', 'cfg'),
'sched': ObjType('sched', 'sched'),
'chsrc': ObjType('chsrc', 'chsrc'),
'step': ObjType('step', 'step'),
'reporter': ObjType('reporter', 'reporter'),
'configurator': ObjType('configurator', 'configurator'),
'worker': ObjType('worker', 'worker'),
'cmdline': ObjType('cmdline', 'cmdline'),
'msg': ObjType('msg', 'msg'),
'event': ObjType('event', 'event'),
'rtype': ObjType('rtype', 'rtype'),
'rpath': ObjType('rpath', 'rpath'),
'raction': ObjType('raction', 'raction'),
}
directives = {
'cfg': make_ref_target_directive('cfg',
indextemplates=[
'single: Buildmaster Config; %s',
'single: %s (Buildmaster Config)',
]),
'sched': make_ref_target_directive('sched',
indextemplates=[
'single: Schedulers; %s',
'single: %s Scheduler',
]),
'chsrc': make_ref_target_directive('chsrc',
indextemplates=[
'single: Change Sources; %s',
'single: %s Change Source',
]),
'step': make_ref_target_directive('step',
indextemplates=[
'single: Build Steps; %s',
'single: %s Build Step',
]),
'reporter': make_ref_target_directive('reporter',
indextemplates=[
'single: Reporter Targets; %s',
'single: %s Reporter Target',
]),
'configurator': make_ref_target_directive('configurator',
indextemplates=[
'single: Configurators; %s',
'single: %s Configurators',
]),
'worker': make_ref_target_directive('worker',
indextemplates=[
'single: Build Workers; %s',
'single: %s Build Worker',
]),
'cmdline': make_ref_target_directive('cmdline',
indextemplates=[
'single: Command Line Subcommands; %s',
'single: %s Command Line Subcommand',
]),
'msg': make_ref_target_directive('msg',
indextemplates=[
'single: Message Schema; %s',
],
has_content=True,
name_annotation='routing key:',
doc_field_types=[
TypedField('key', label='Keys', names=('key',),
typenames=('type',), can_collapse=True),
Field('var', label='Variable',
names=('var',)),
]),
'event': make_ref_target_directive('event',
indextemplates=[
'single: event; %s',
],
has_content=True,
name_annotation='event:',
doc_field_types=[
]),
'rtype': make_ref_target_directive('rtype',
indextemplates=[
'single: Resource Type; %s',
],
has_content=True,
name_annotation='resource type:',
doc_field_types=[
TypedField('attr', label='Attributes',
names=('attr',),
typenames=('type',), can_collapse=True),
]),
'rpath': make_ref_target_directive('rpath',
indextemplates=[
'single: Resource Path; %s',
],
name_annotation='path:',
has_content=True,
doc_field_types=[
TypedField('pathkey', label='Path Keys',
names=('pathkey',), typenames=('type',),
can_collapse=True),
]),
'raction': make_ref_target_directive('raction',
indextemplates=[
'single: Resource Action; %s',
],
name_annotation='POST with method:',
has_content=True,
doc_field_types=[
TypedField('body', label='Body keys',
names=('body',), typenames=('type',),
can_collapse=True),
]),
}
roles = {
'cfg': XRefRole(),
'sched': XRefRole(),
'chsrc': XRefRole(),
'step': XRefRole(),
'reporter': XRefRole(),
'configurator': XRefRole(),
'worker': XRefRole(),
'cmdline': XRefRole(),
'msg': XRefRole(),
'event': XRefRole(),
'rtype': XRefRole(),
'rpath': XRefRole(),
'index': XRefRole()
}
initial_data = {
'targets': {}, # type -> target -> (docname, targetname)
}
indices = [
make_index("cfg", "Buildmaster Configuration Index"),
make_index("sched", "Scheduler Index"),
make_index("chsrc", "Change Source Index"),
make_index("step", "Build Step Index"),
make_index("reporter", "Reporter Target Index"),
make_index("configurator", "Configurator Target Index"),
make_index("worker", "Build Worker Index"),
make_index("cmdline", "Command Line Index"),
make_index("msg", "MQ Routing Key Index"),
make_index("event", "Data API Event Index"),
make_index("rtype", "REST/Data API Resource Type Index"),
make_index("rpath", "REST/Data API Path Index"),
make_index("raction", "REST/Data API Actions Index"),
]
def resolve_xref(self, env, fromdocname, builder, typ, target, node,
contnode):
if typ == 'index':
for idx in self.indices:
if idx.name == target:
break
else:
raise KeyError("no index named '{}'".format(target))
return idx.resolve_ref(self, env, fromdocname, builder, typ,
target, node, contnode)
elif typ in self.directives:
dir = self.directives[typ]
return dir.resolve_ref(self, env, fromdocname, builder, typ,
target, node, contnode)
def merge_domaindata(self, docnames, otherdata):
for typ in self.object_types:
if typ not in otherdata['targets']:
continue
if typ not in self.data['targets']:
self.data['targets'][typ] = otherdata['targets'][typ]
continue
self_data = self.data['targets'][typ]
other_data = otherdata['targets'][typ]
for target_name, target_data in other_data.items():
if target_name in self_data:
# for some reason we end up with multiple references to the same things in
# multiple domains. If both instances point to the same location, ignore it,
# otherwise issue a warning.
if other_data[target_name] == self_data[target_name]:
continue
self_path = '{0}#{1}'.format(self.env.doc2path(self_data[target_name][0]),
self_data[target_name][1])
other_path = '{0}#{1}'.format(self.env.doc2path(other_data[target_name][0]),
other_data[target_name][1])
logger.warning(('Duplicate index {} reference {} in {}, '
'other instance in {}').format(typ, target_name,
self_path, other_path))
else:
self_data[target_name] = target_data
def setup(app):
app.add_domain(BBDomain)
return {'parallel_read_safe': True, 'parallel_write_safe': True}
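# --- Illustrative usage sketch (added; not part of the original module) ---
# Assuming this extension is enabled in a Sphinx conf.py, e.g.
#   extensions = ['bbdocs.ext']
# documents can define and cross-reference Buildbot objects roughly as follows
# (hypothetical .rst snippets):
#
#   .. bb:cfg:: workers
#
#   Later, link back with :bb:cfg:`workers`, or point at a generated index
#   with :bb:index:`cfg`.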
| anish/buildbot | master/docs/bbdocs/ext.py | Python | gpl-2.0 | 17,319 |
from base import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": "bakehouse",
"USER": "",
"PASSWORD": "",
"HOST": "localhost",
"PORT": "",
}
}
BROKER_URL = 'amqp://guest:guest@localhost:5672/'
GITHUB_PRIVATE_KEY = '/tmp/tmp_rsa'
| rollstudio/djangodash-2013 | bakehouse/settings/patrick_local.py | Python | mit | 366 |
# Copyright (c) 2013 Alan McIntyre
import urllib
import hashlib
import hmac
import warnings
from datetime import datetime
import common
class TradeAccountInfo(object):
'''An instance of this class will be returned by
a successful call to TradeAPI.getInfo.'''
def __init__(self, info):
funds = info.get(u'funds')
for c in common.all_currencies:
setattr(self, "balance_%s" % c, funds.get(unicode(c), 0))
self.open_orders = info.get(u'open_orders')
self.server_time = datetime.fromtimestamp(info.get(u'server_time'))
self.transaction_count = info.get(u'transaction_count')
rights = info.get(u'rights')
self.info_rights = (rights.get(u'info') == 1)
self.withdraw_rights = (rights.get(u'withdraw') == 1)
self.trade_rights = (rights.get(u'trade') == 1)
class TransactionHistoryItem(object):
'''A list of instances of this class will be returned by
a successful call to TradeAPI.transHistory.'''
def __init__(self, transaction_id, info):
self.transaction_id = transaction_id
items = ("type", "amount", "currency", "desc",
"status", "timestamp")
for n in items:
setattr(self, n, info.get(n))
self.timestamp = datetime.fromtimestamp(self.timestamp)
class TradeHistoryItem(object):
'''A list of instances of this class will be returned by
a successful call to TradeAPI.tradeHistory.'''
def __init__(self, transaction_id, info):
self.transaction_id = transaction_id
items = ("pair", "type", "amount", "rate", "order_id",
"is_your_order", "timestamp")
for n in items:
setattr(self, n, info.get(n))
self.timestamp = datetime.fromtimestamp(self.timestamp)
class OrderItem(object):
'''A list of instances of this class will be returned by
a successful call to TradeAPI.orderList.'''
def __init__(self, order_id, info):
self.order_id = int(order_id)
for n in ("pair", "type", "amount", "rate", "timestamp_created", "status"):
setattr(self, n, info.get(n))
self.timestamp_created = datetime.fromtimestamp(self.timestamp_created)
class TradeResult(object):
'''An instance of this class will be returned by
a successful call to TradeAPI.trade.'''
def __init__(self, info):
self.received = info.get(u"received")
self.remains = info.get(u"remains")
self.order_id = info.get(u"order_id")
funds = info.get(u'funds')
for c in common.all_currencies:
setattr(self, "balance_%s" % c, funds.get(unicode(c), 0))
class CancelOrderResult(object):
'''An instance of this class will be returned by
a successful call to TradeAPI.cancelOrder.'''
def __init__(self, info):
self.order_id = info.get(u"order_id")
funds = info.get(u'funds')
for c in common.all_currencies:
setattr(self, "balance_%s" % c, funds.get(unicode(c), 0))
def setHistoryParams(params, from_number, count_number, from_id, end_id,
order, since, end):
if from_number is not None:
params["from"] = "%d" % from_number
if count_number is not None:
params["count"] = "%d" % count_number
if from_id is not None:
params["from_id"] = "%d" % from_id
if end_id is not None:
params["end_id"] = "%d" % end_id
if order is not None:
if order not in ("ASC", "DESC"):
raise Exception("Unexpected order parameter: %r" % order)
params["order"] = order
if since is not None:
params["since"] = "%d" % since
if end is not None:
params["end"] = "%d" % end
class TradeAPI(object):
def __init__(self, key, secret = None, nonce = 1, handler=None):
self.key = key
self.handler = handler
if self.handler is None:
#warnings.warn("Using TradeAPI without a key handler will be deprecated soon.")
self.secret = secret
self.nonce = nonce
else:
# When a handler is given, use it to obtain the secret.
self.secret = handler.getSecret(key)
def next_nonce(self):
        # If a handler is available, use it to obtain the next nonce.
if self.handler is not None:
return self.handler.getNextNonce(self.key)
n = self.nonce
self.nonce += 1
return n
def _post(self, params):
params["nonce"] = self.next_nonce()
encoded_params = urllib.urlencode(params)
# Hash the params string to produce the Sign header value
H = hmac.new(self.secret, digestmod=hashlib.sha512)
H.update(encoded_params)
sign = H.hexdigest()
headers = {"Key":self.key, "Sign":sign}
result = common.makeJSONRequest("/tapi", headers, encoded_params)
success = result.get(u'success')
if not success:
if "method" in params:
raise Exception("%s call failed with error: %s" \
% (params["method"], result.get(u'error')))
raise Exception("Call failed with error: %s" % result.get(u'error'))
if u'return' not in result:
raise Exception("Response does not contain a 'return' item.")
return result.get(u'return')
def getInfo(self):
params = {"method":"getInfo"}
return TradeAccountInfo(self._post(params))
def transHistory(self, from_number = None, count_number = None,
from_id = None, end_id = None, order = "DESC",
since = None, end = None):
params = {"method":"TransHistory"}
setHistoryParams(params, from_number, count_number, from_id, end_id,
order, since, end)
orders = self._post(params)
result = []
for k, v in orders.items():
result.append(TransactionHistoryItem(int(k), v))
# We have to sort items here because the API returns a dict
if "ASC" == order:
result.sort(key=lambda a: a.transaction_id, reverse=False)
elif "DESC" == order:
result.sort(key=lambda a: a.transaction_id, reverse=True)
return result
def tradeHistory(self, from_number = None, count_number = None,
from_id = None, end_id = None, order = None,
since = None, end = None, pair = None):
params = {"method":"TradeHistory"}
setHistoryParams(params, from_number, count_number, from_id, end_id,
order, since, end)
if pair is not None:
common.validatePair(pair)
params["pair"] = pair
orders = self._post(params)
result = []
for k, v in orders.items():
result.append(TradeHistoryItem(k, v))
return result
def orderList(self, from_number = None, count_number = None,
from_id = None, end_id = None, order = None,
since = None, end = None, pair = None, active = None):
params = {"method":"OrderList"}
setHistoryParams(params, from_number, count_number, from_id, end_id,
order, since, end)
if pair is not None:
common.validatePair(pair)
params["pair"] = pair
if active is not None:
if active not in (0, 1, True, False):
raise Exception("Unexpected active parameter: %r" % active)
params["active"] = int(active)
orders = self._post(params)
result = []
for k, v in orders.items():
result.append(OrderItem(k, v))
return result
def trade(self, pair, trade_type, rate, amount):
common.validatePair(pair)
if trade_type not in ("buy", "sell"):
raise Exception("Unrecognized trade type: %r" % trade_type)
params = {"method":"Trade",
"pair":pair,
"type":trade_type,
"rate":common.formatCurrency(rate, pair),
"amount":common.formatCurrency(amount, pair)}
return TradeResult(self._post(params))
def cancelOrder(self, order_id):
params = {"method":"CancelOrder",
"order_id":order_id}
return CancelOrderResult(self._post(params))
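# --- Illustrative usage sketch (added; not part of the original module) ---
# The key/secret below are placeholders; real BTC-e API credentials and network
# access would be required for these calls to succeed.
if __name__ == "__main__":
    api = TradeAPI(key="YOUR-API-KEY", secret="YOUR-API-SECRET", nonce=1)
    info = api.getInfo()
    print "server time:", info.server_time
    print "open orders:", info.open_orders
    # Balances are exposed as info.balance_<currency> for each entry in
    # common.all_currencies (e.g. info.balance_btc).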
| dashscar/btc-e_Trade_bot | btceapi/btceapi/trade.py | Python | mit | 8,660 |
import json
from decimal import Decimal
from jwt.algorithms import Algorithm
from jwt.api_jws import PyJWS
from jwt.exceptions import (
DecodeError, InvalidAlgorithmError, InvalidSignatureError,
InvalidTokenError
)
from jwt.utils import base64url_decode, force_bytes, force_unicode
import pytest
from .compat import string_types, text_type
try:
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import (
load_pem_private_key, load_pem_public_key, load_ssh_public_key
)
has_crypto = True
except ImportError:
has_crypto = False
@pytest.fixture
def jws():
return PyJWS()
@pytest.fixture
def payload():
""" Creates a sample jws claimset for use as a payload during tests """
return force_bytes('hello world')
class TestJWS:
def test_register_algo_does_not_allow_duplicate_registration(self, jws):
jws.register_algorithm('AAA', Algorithm())
with pytest.raises(ValueError):
jws.register_algorithm('AAA', Algorithm())
def test_register_algo_rejects_non_algorithm_obj(self, jws):
with pytest.raises(TypeError):
jws.register_algorithm('AAA123', {})
def test_unregister_algo_removes_algorithm(self, jws):
supported = jws.get_algorithms()
assert 'none' in supported
assert 'HS256' in supported
jws.unregister_algorithm('HS256')
supported = jws.get_algorithms()
assert 'HS256' not in supported
def test_unregister_algo_throws_error_if_not_registered(self, jws):
with pytest.raises(KeyError):
jws.unregister_algorithm('AAA')
def test_algo_parameter_removes_alg_from_algorithms_list(self, jws):
assert 'none' in jws.get_algorithms()
assert 'HS256' in jws.get_algorithms()
jws = PyJWS(algorithms=['HS256'])
assert 'none' not in jws.get_algorithms()
assert 'HS256' in jws.get_algorithms()
def test_override_options(self):
jws = PyJWS(options={'verify_signature': False})
assert not jws.options['verify_signature']
def test_non_object_options_dont_persist(self, jws, payload):
token = jws.encode(payload, 'secret')
jws.decode(token, 'secret', options={'verify_signature': False})
assert jws.options['verify_signature']
def test_options_must_be_dict(self, jws):
pytest.raises(TypeError, PyJWS, options=object())
pytest.raises(TypeError, PyJWS, options=('something'))
def test_encode_decode(self, jws, payload):
secret = 'secret'
jws_message = jws.encode(payload, secret)
decoded_payload = jws.decode(jws_message, secret)
assert decoded_payload == payload
def test_decode_fails_when_alg_is_not_on_method_algorithms_param(self, jws, payload):
secret = 'secret'
jws_token = jws.encode(payload, secret, algorithm='HS256')
jws.decode(jws_token, secret)
with pytest.raises(InvalidAlgorithmError):
jws.decode(jws_token, secret, algorithms=['HS384'])
def test_decode_works_with_unicode_token(self, jws):
secret = 'secret'
unicode_jws = text_type(
'eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9'
'.eyJoZWxsbyI6ICJ3b3JsZCJ9'
'.tvagLDLoaiJKxOKqpBXSEGy7SYSifZhjntgm9ctpyj8')
jws.decode(unicode_jws, secret)
def test_decode_missing_segments_throws_exception(self, jws):
secret = 'secret'
example_jws = ('eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9'
'.eyJoZWxsbyI6ICJ3b3JsZCJ9'
'') # Missing segment
with pytest.raises(DecodeError) as context:
jws.decode(example_jws, secret)
exception = context.value
assert str(exception) == 'Not enough segments'
def test_decode_invalid_token_type_is_none(self, jws):
example_jws = None
example_secret = 'secret'
with pytest.raises(DecodeError) as context:
jws.decode(example_jws, example_secret)
exception = context.value
assert 'Invalid token type' in str(exception)
def test_decode_invalid_token_type_is_int(self, jws):
example_jws = 123
example_secret = 'secret'
with pytest.raises(DecodeError) as context:
jws.decode(example_jws, example_secret)
exception = context.value
assert 'Invalid token type' in str(exception)
def test_decode_with_non_mapping_header_throws_exception(self, jws):
secret = 'secret'
example_jws = ('MQ' # == 1
'.eyJoZWxsbyI6ICJ3b3JsZCJ9'
'.tvagLDLoaiJKxOKqpBXSEGy7SYSifZhjntgm9ctpyj8')
with pytest.raises(DecodeError) as context:
jws.decode(example_jws, secret)
exception = context.value
assert str(exception) == 'Invalid header string: must be a json object'
def test_encode_algorithm_param_should_be_case_sensitive(self, jws, payload):
jws.encode(payload, 'secret', algorithm='HS256')
with pytest.raises(NotImplementedError) as context:
jws.encode(payload, None, algorithm='hs256')
exception = context.value
assert str(exception) == 'Algorithm not supported'
def test_decode_algorithm_param_should_be_case_sensitive(self, jws):
example_jws = ('eyJhbGciOiJoczI1NiIsInR5cCI6IkpXVCJ9' # alg = hs256
'.eyJoZWxsbyI6IndvcmxkIn0'
'.5R_FEPE7SW2dT9GgIxPgZATjFGXfUDOSwo7TtO_Kd_g')
with pytest.raises(InvalidAlgorithmError) as context:
jws.decode(example_jws, 'secret')
exception = context.value
assert str(exception) == 'Algorithm not supported'
def test_bad_secret(self, jws, payload):
right_secret = 'foo'
bad_secret = 'bar'
jws_message = jws.encode(payload, right_secret)
with pytest.raises(DecodeError) as excinfo:
# Backward compat for ticket #315
jws.decode(jws_message, bad_secret)
assert 'Signature verification failed' == str(excinfo.value)
with pytest.raises(InvalidSignatureError) as excinfo:
jws.decode(jws_message, bad_secret)
assert 'Signature verification failed' == str(excinfo.value)
def test_decodes_valid_jws(self, jws, payload):
example_secret = 'secret'
example_jws = (
b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.'
b'aGVsbG8gd29ybGQ.'
b'gEW0pdU4kxPthjtehYdhxB9mMOGajt1xCKlGGXDJ8PM')
decoded_payload = jws.decode(example_jws, example_secret)
assert decoded_payload == payload
# 'Control' Elliptic Curve jws created by another library.
# Used to test for regressions that could affect both
# encoding / decoding operations equally (causing tests
# to still pass).
@pytest.mark.skipif(not has_crypto, reason="Can't run without cryptography library")
def test_decodes_valid_es384_jws(self, jws):
example_payload = {'hello': 'world'}
with open('tests/keys/testkey_ec.pub', 'r') as fp:
example_pubkey = fp.read()
example_jws = (
b'eyJhbGciOiJFUzM4NCIsInR5cCI6IkpXVCJ9'
b'.eyJoZWxsbyI6IndvcmxkIn0'
b'.AGtlemKghaIaYh1yeeekFH9fRuNY7hCaw5hUgZ5aG1N'
b'2F8FIbiKLaZKr8SiFdTimXFVTEmxpBQ9sRmdsDsnrM-1'
b'HAG0_zxxu0JyINOFT2iqF3URYl9HZ8kZWMeZAtXmn6Cw'
b'PXRJD2f7N-f7bJ5JeL9VT5beI2XD3FlK3GgRvI-eE-2Ik')
decoded_payload = jws.decode(example_jws, example_pubkey)
json_payload = json.loads(force_unicode(decoded_payload))
assert json_payload == example_payload
# 'Control' RSA jws created by another library.
# Used to test for regressions that could affect both
# encoding / decoding operations equally (causing tests
# to still pass).
@pytest.mark.skipif(not has_crypto, reason="Can't run without cryptography library")
def test_decodes_valid_rs384_jws(self, jws):
example_payload = {'hello': 'world'}
with open('tests/keys/testkey_rsa.pub', 'r') as fp:
example_pubkey = fp.read()
example_jws = (
b'eyJhbGciOiJSUzM4NCIsInR5cCI6IkpXVCJ9'
b'.eyJoZWxsbyI6IndvcmxkIn0'
b'.yNQ3nI9vEDs7lEh-Cp81McPuiQ4ZRv6FL4evTYYAh1X'
b'lRTTR3Cz8pPA9Stgso8Ra9xGB4X3rlra1c8Jz10nTUju'
b'O06OMm7oXdrnxp1KIiAJDerWHkQ7l3dlizIk1bmMA457'
b'W2fNzNfHViuED5ISM081dgf_a71qBwJ_yShMMrSOfxDx'
b'mX9c4DjRogRJG8SM5PvpLqI_Cm9iQPGMvmYK7gzcq2cJ'
b'urHRJDJHTqIdpLWXkY7zVikeen6FhuGyn060Dz9gYq9t'
b'uwmrtSWCBUjiN8sqJ00CDgycxKqHfUndZbEAOjcCAhBr'
b'qWW3mSVivUfubsYbwUdUG3fSRPjaUPcpe8A')
decoded_payload = jws.decode(example_jws, example_pubkey)
json_payload = json.loads(force_unicode(decoded_payload))
assert json_payload == example_payload
def test_load_verify_valid_jws(self, jws, payload):
example_secret = 'secret'
example_jws = (
b'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.'
b'aGVsbG8gd29ybGQ.'
b'SIr03zM64awWRdPrAM_61QWsZchAtgDV3pphfHPPWkI'
)
decoded_payload = jws.decode(example_jws, key=example_secret)
assert decoded_payload == payload
def test_allow_skip_verification(self, jws, payload):
right_secret = 'foo'
jws_message = jws.encode(payload, right_secret)
decoded_payload = jws.decode(jws_message, verify=False)
assert decoded_payload == payload
def test_verify_false_deprecated(self, jws, recwarn):
example_jws = (
b'eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9'
b'.eyJoZWxsbyI6ICJ3b3JsZCJ9'
b'.tvagLDLoaiJKxOKqpBXSEGy7SYSifZhjntgm9ctpyj8')
pytest.deprecated_call(jws.decode, example_jws, verify=False)
def test_decode_with_optional_algorithms(self, jws):
example_secret = 'secret'
example_jws = (
b'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.'
b'aGVsbG8gd29ybGQ.'
b'SIr03zM64awWRdPrAM_61QWsZchAtgDV3pphfHPPWkI'
)
pytest.deprecated_call(jws.decode, example_jws, key=example_secret)
def test_decode_no_algorithms_verify_signature_false(self, jws):
example_secret = 'secret'
example_jws = (
b'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.'
b'aGVsbG8gd29ybGQ.'
b'SIr03zM64awWRdPrAM_61QWsZchAtgDV3pphfHPPWkI'
)
try:
pytest.deprecated_call(
jws.decode, example_jws, key=example_secret,
options={'verify_signature': False},
)
except pytest.fail.Exception:
pass
else:
assert False, "Unexpected DeprecationWarning raised."
def test_load_no_verification(self, jws, payload):
right_secret = 'foo'
jws_message = jws.encode(payload, right_secret)
decoded_payload = jws.decode(jws_message, key=None, verify=False)
assert decoded_payload == payload
def test_no_secret(self, jws, payload):
right_secret = 'foo'
jws_message = jws.encode(payload, right_secret)
with pytest.raises(DecodeError):
jws.decode(jws_message)
def test_verify_signature_with_no_secret(self, jws, payload):
right_secret = 'foo'
jws_message = jws.encode(payload, right_secret)
with pytest.raises(DecodeError) as exc:
jws.decode(jws_message)
assert 'Signature verification' in str(exc.value)
def test_verify_signature_with_no_algo_header_throws_exception(self, jws, payload):
example_jws = (
b'e30'
b'.eyJhIjo1fQ'
b'.KEh186CjVw_Q8FadjJcaVnE7hO5Z9nHBbU8TgbhHcBY'
)
with pytest.raises(InvalidAlgorithmError):
jws.decode(example_jws, 'secret')
def test_invalid_crypto_alg(self, jws, payload):
with pytest.raises(NotImplementedError):
jws.encode(payload, 'secret', algorithm='HS1024')
@pytest.mark.skipif(has_crypto, reason='Scenario requires cryptography to not be installed')
def test_missing_crypto_library_better_error_messages(self, jws, payload):
with pytest.raises(NotImplementedError) as excinfo:
jws.encode(payload, 'secret', algorithm='RS256')
assert 'cryptography' in str(excinfo.value)
def test_unicode_secret(self, jws, payload):
secret = '\xc2'
jws_message = jws.encode(payload, secret)
decoded_payload = jws.decode(jws_message, secret)
assert decoded_payload == payload
def test_nonascii_secret(self, jws, payload):
secret = '\xc2' # char value that ascii codec cannot decode
jws_message = jws.encode(payload, secret)
decoded_payload = jws.decode(jws_message, secret)
assert decoded_payload == payload
def test_bytes_secret(self, jws, payload):
secret = b'\xc2' # char value that ascii codec cannot decode
jws_message = jws.encode(payload, secret)
decoded_payload = jws.decode(jws_message, secret)
assert decoded_payload == payload
def test_decode_invalid_header_padding(self, jws):
example_jws = (
'aeyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9'
'.eyJoZWxsbyI6ICJ3b3JsZCJ9'
'.tvagLDLoaiJKxOKqpBXSEGy7SYSifZhjntgm9ctpyj8')
example_secret = 'secret'
with pytest.raises(DecodeError) as exc:
jws.decode(example_jws, example_secret)
assert 'header padding' in str(exc.value)
def test_decode_invalid_header_string(self, jws):
example_jws = (
'eyJhbGciOiAiSFMyNTbpIiwgInR5cCI6ICJKV1QifQ=='
'.eyJoZWxsbyI6ICJ3b3JsZCJ9'
'.tvagLDLoaiJKxOKqpBXSEGy7SYSifZhjntgm9ctpyj8')
example_secret = 'secret'
with pytest.raises(DecodeError) as exc:
jws.decode(example_jws, example_secret)
assert 'Invalid header' in str(exc.value)
def test_decode_invalid_payload_padding(self, jws):
example_jws = (
'eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9'
'.aeyJoZWxsbyI6ICJ3b3JsZCJ9'
'.tvagLDLoaiJKxOKqpBXSEGy7SYSifZhjntgm9ctpyj8')
example_secret = 'secret'
with pytest.raises(DecodeError) as exc:
jws.decode(example_jws, example_secret)
assert 'Invalid payload padding' in str(exc.value)
def test_decode_invalid_crypto_padding(self, jws):
example_jws = (
'eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9'
'.eyJoZWxsbyI6ICJ3b3JsZCJ9'
'.aatvagLDLoaiJKxOKqpBXSEGy7SYSifZhjntgm9ctpyj8')
example_secret = 'secret'
with pytest.raises(DecodeError) as exc:
jws.decode(example_jws, example_secret)
assert 'Invalid crypto padding' in str(exc.value)
def test_decode_with_algo_none_should_fail(self, jws, payload):
jws_message = jws.encode(payload, key=None, algorithm=None)
with pytest.raises(DecodeError):
jws.decode(jws_message)
def test_decode_with_algo_none_and_verify_false_should_pass(self, jws, payload):
jws_message = jws.encode(payload, key=None, algorithm=None)
jws.decode(jws_message, verify=False)
def test_get_unverified_header_returns_header_values(self, jws, payload):
jws_message = jws.encode(payload, key='secret', algorithm='HS256',
headers={'kid': 'toomanysecrets'})
header = jws.get_unverified_header(jws_message)
assert 'kid' in header
assert header['kid'] == 'toomanysecrets'
def test_get_unverified_header_fails_on_bad_header_types(self, jws, payload):
# Contains a bad kid value (int 123 instead of string)
example_jws = (
'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6MTIzfQ'
'.eyJzdWIiOiIxMjM0NTY3ODkwIn0'
'.vs2WY54jfpKP3JGC73Vq5YlMsqM5oTZ1ZydT77SiZSk')
with pytest.raises(InvalidTokenError) as exc:
jws.get_unverified_header(example_jws)
assert 'Key ID header parameter must be a string' == str(exc.value)
@pytest.mark.skipif(not has_crypto, reason='Not supported without cryptography library')
def test_encode_decode_with_rsa_sha256(self, jws, payload):
# PEM-formatted RSA key
with open('tests/keys/testkey_rsa', 'r') as rsa_priv_file:
priv_rsakey = load_pem_private_key(force_bytes(rsa_priv_file.read()),
password=None, backend=default_backend())
jws_message = jws.encode(payload, priv_rsakey, algorithm='RS256')
with open('tests/keys/testkey_rsa.pub', 'r') as rsa_pub_file:
pub_rsakey = load_ssh_public_key(force_bytes(rsa_pub_file.read()),
backend=default_backend())
jws.decode(jws_message, pub_rsakey)
# string-formatted key
with open('tests/keys/testkey_rsa', 'r') as rsa_priv_file:
priv_rsakey = rsa_priv_file.read()
jws_message = jws.encode(payload, priv_rsakey, algorithm='RS256')
with open('tests/keys/testkey_rsa.pub', 'r') as rsa_pub_file:
pub_rsakey = rsa_pub_file.read()
jws.decode(jws_message, pub_rsakey)
@pytest.mark.skipif(not has_crypto, reason='Not supported without cryptography library')
def test_encode_decode_with_rsa_sha384(self, jws, payload):
# PEM-formatted RSA key
with open('tests/keys/testkey_rsa', 'r') as rsa_priv_file:
priv_rsakey = load_pem_private_key(force_bytes(rsa_priv_file.read()),
password=None, backend=default_backend())
jws_message = jws.encode(payload, priv_rsakey, algorithm='RS384')
with open('tests/keys/testkey_rsa.pub', 'r') as rsa_pub_file:
pub_rsakey = load_ssh_public_key(force_bytes(rsa_pub_file.read()),
backend=default_backend())
jws.decode(jws_message, pub_rsakey)
# string-formatted key
with open('tests/keys/testkey_rsa', 'r') as rsa_priv_file:
priv_rsakey = rsa_priv_file.read()
jws_message = jws.encode(payload, priv_rsakey, algorithm='RS384')
with open('tests/keys/testkey_rsa.pub', 'r') as rsa_pub_file:
pub_rsakey = rsa_pub_file.read()
jws.decode(jws_message, pub_rsakey)
@pytest.mark.skipif(not has_crypto, reason='Not supported without cryptography library')
def test_encode_decode_with_rsa_sha512(self, jws, payload):
# PEM-formatted RSA key
with open('tests/keys/testkey_rsa', 'r') as rsa_priv_file:
priv_rsakey = load_pem_private_key(force_bytes(rsa_priv_file.read()),
password=None, backend=default_backend())
jws_message = jws.encode(payload, priv_rsakey, algorithm='RS512')
with open('tests/keys/testkey_rsa.pub', 'r') as rsa_pub_file:
pub_rsakey = load_ssh_public_key(force_bytes(rsa_pub_file.read()),
backend=default_backend())
jws.decode(jws_message, pub_rsakey)
# string-formatted key
with open('tests/keys/testkey_rsa', 'r') as rsa_priv_file:
priv_rsakey = rsa_priv_file.read()
jws_message = jws.encode(payload, priv_rsakey, algorithm='RS512')
with open('tests/keys/testkey_rsa.pub', 'r') as rsa_pub_file:
pub_rsakey = rsa_pub_file.read()
jws.decode(jws_message, pub_rsakey)
def test_rsa_related_algorithms(self, jws):
jws = PyJWS()
jws_algorithms = jws.get_algorithms()
if has_crypto:
assert 'RS256' in jws_algorithms
assert 'RS384' in jws_algorithms
assert 'RS512' in jws_algorithms
assert 'PS256' in jws_algorithms
assert 'PS384' in jws_algorithms
assert 'PS512' in jws_algorithms
else:
assert 'RS256' not in jws_algorithms
assert 'RS384' not in jws_algorithms
assert 'RS512' not in jws_algorithms
assert 'PS256' not in jws_algorithms
assert 'PS384' not in jws_algorithms
assert 'PS512' not in jws_algorithms
@pytest.mark.skipif(not has_crypto, reason="Can't run without cryptography library")
def test_encode_decode_with_ecdsa_sha256(self, jws, payload):
# PEM-formatted EC key
with open('tests/keys/testkey_ec', 'r') as ec_priv_file:
priv_eckey = load_pem_private_key(force_bytes(ec_priv_file.read()),
password=None, backend=default_backend())
jws_message = jws.encode(payload, priv_eckey, algorithm='ES256')
with open('tests/keys/testkey_ec.pub', 'r') as ec_pub_file:
pub_eckey = load_pem_public_key(force_bytes(ec_pub_file.read()),
backend=default_backend())
jws.decode(jws_message, pub_eckey)
# string-formatted key
with open('tests/keys/testkey_ec', 'r') as ec_priv_file:
priv_eckey = ec_priv_file.read()
jws_message = jws.encode(payload, priv_eckey, algorithm='ES256')
with open('tests/keys/testkey_ec.pub', 'r') as ec_pub_file:
pub_eckey = ec_pub_file.read()
jws.decode(jws_message, pub_eckey)
@pytest.mark.skipif(not has_crypto, reason="Can't run without cryptography library")
def test_encode_decode_with_ecdsa_sha384(self, jws, payload):
# PEM-formatted EC key
with open('tests/keys/testkey_ec', 'r') as ec_priv_file:
priv_eckey = load_pem_private_key(force_bytes(ec_priv_file.read()),
password=None, backend=default_backend())
jws_message = jws.encode(payload, priv_eckey, algorithm='ES384')
with open('tests/keys/testkey_ec.pub', 'r') as ec_pub_file:
pub_eckey = load_pem_public_key(force_bytes(ec_pub_file.read()),
backend=default_backend())
jws.decode(jws_message, pub_eckey)
# string-formatted key
with open('tests/keys/testkey_ec', 'r') as ec_priv_file:
priv_eckey = ec_priv_file.read()
jws_message = jws.encode(payload, priv_eckey, algorithm='ES384')
with open('tests/keys/testkey_ec.pub', 'r') as ec_pub_file:
pub_eckey = ec_pub_file.read()
jws.decode(jws_message, pub_eckey)
@pytest.mark.skipif(not has_crypto, reason="Can't run without cryptography library")
def test_encode_decode_with_ecdsa_sha512(self, jws, payload):
# PEM-formatted EC key
with open('tests/keys/testkey_ec', 'r') as ec_priv_file:
priv_eckey = load_pem_private_key(force_bytes(ec_priv_file.read()),
password=None, backend=default_backend())
jws_message = jws.encode(payload, priv_eckey, algorithm='ES521')
with open('tests/keys/testkey_ec.pub', 'r') as ec_pub_file:
pub_eckey = load_pem_public_key(force_bytes(ec_pub_file.read()), backend=default_backend())
jws.decode(jws_message, pub_eckey)
# string-formatted key
with open('tests/keys/testkey_ec', 'r') as ec_priv_file:
priv_eckey = ec_priv_file.read()
jws_message = jws.encode(payload, priv_eckey, algorithm='ES521')
with open('tests/keys/testkey_ec.pub', 'r') as ec_pub_file:
pub_eckey = ec_pub_file.read()
jws.decode(jws_message, pub_eckey)
def test_ecdsa_related_algorithms(self, jws):
jws = PyJWS()
jws_algorithms = jws.get_algorithms()
if has_crypto:
assert 'ES256' in jws_algorithms
assert 'ES384' in jws_algorithms
assert 'ES521' in jws_algorithms
else:
assert 'ES256' not in jws_algorithms
assert 'ES384' not in jws_algorithms
assert 'ES521' not in jws_algorithms
def test_skip_check_signature(self, jws):
token = ("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9"
".eyJzb21lIjoicGF5bG9hZCJ9"
".4twFt5NiznN84AWoo1d7KO1T_yoc0Z6XOpOVswacPZA")
jws.decode(token, 'secret', options={'verify_signature': False})
def test_decode_options_must_be_dict(self, jws, payload):
token = jws.encode(payload, 'secret')
with pytest.raises(TypeError):
jws.decode(token, 'secret', options=object())
with pytest.raises(TypeError):
jws.decode(token, 'secret', options='something')
def test_custom_json_encoder(self, jws, payload):
class CustomJSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, Decimal):
return 'it worked'
return super(CustomJSONEncoder, self).default(o)
data = {
'some_decimal': Decimal('2.2')
}
with pytest.raises(TypeError):
jws.encode(payload, 'secret', headers=data)
token = jws.encode(payload, 'secret', headers=data,
json_encoder=CustomJSONEncoder)
header = force_bytes(force_unicode(token).split('.')[0])
header = json.loads(force_unicode(base64url_decode(header)))
assert 'some_decimal' in header
assert header['some_decimal'] == 'it worked'
def test_encode_headers_parameter_adds_headers(self, jws, payload):
headers = {'testheader': True}
token = jws.encode(payload, 'secret', headers=headers)
if not isinstance(token, string_types):
token = token.decode()
header = token[0:token.index('.')].encode()
header = base64url_decode(header)
if not isinstance(header, text_type):
header = header.decode()
header_obj = json.loads(header)
assert 'testheader' in header_obj
assert header_obj['testheader'] == headers['testheader']
def test_encode_fails_on_invalid_kid_types(self, jws, payload):
with pytest.raises(InvalidTokenError) as exc:
jws.encode(payload, 'secret', headers={'kid': 123})
assert 'Key ID header parameter must be a string' == str(exc.value)
with pytest.raises(InvalidTokenError) as exc:
jws.encode(payload, 'secret', headers={'kid': None})
assert 'Key ID header parameter must be a string' == str(exc.value)
| cloudera/hue | desktop/core/ext-py/PyJWT-1.7.1/tests/test_api_jws.py | Python | apache-2.0 | 26,697 |
from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
        message = ("No other fields than 'username' and 'email' "
                   "are supported by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
| xsunfeng/cir | password_reset/forms.py | Python | mit | 4,276 |
#!/usr/bin/env python3
import numpy as np
import cv2
import argparse
import glob
from os.path import splitext, basename
debug = False
debug_dir = '.'
def get_output_path(out_dir, file_ext):
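    # Return the next sequential, zero-padded file name (00000.png, 00001.png, ...)
    # in out_dir, based on the numerically largest name already present.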
imgs = glob.glob('{}/*.{}'.format(out_dir, file_ext))
if len(imgs) == 0:
next_num = 0
else:
nums = [int(splitext(basename(i))[0]) for i in imgs]
next_num = 1 + max(nums)
return '{}/{}.{}'.format(out_dir, str(next_num).zfill(5), file_ext)
def write_img(img, path):
cv2.imwrite(path, img)
print('wrote to {}'.format(path))
def write_segment_img(img, out_dir, file_ext):
path = get_output_path(out_dir, file_ext)
write_img(img, path)
def write_debug_img(img, key):
global debug_dir
path = '{}/{}.png'.format(debug_dir, key)
write_img(img, path)
def process_contour(contour, img):
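    # Isolate one die from the thresholded image: reject contours covering less
    # than 10% of the frame, crop to the min-area rectangle, rotate it upright,
    # trim to the non-zero pixels and centre the result on a frame-sized canvas.
    # Returns that canvas, or None if the contour is too small or degenerate.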
area_contour = cv2.contourArea(contour)
area_img = img.shape[0] * img.shape[1]
area_ratio = area_contour / area_img
if area_ratio < 0.1:
return None
# dice min area (rotated rect)
rect_min_area = cv2.minAreaRect(contour)
rect_min_points = cv2.boxPoints(rect_min_area)
# bounding rect of the *min area rect*
rrb = cv2.boundingRect(rect_min_points)
rrb_tl = rrb[0:2]
rrb_br = tuple([sum(x) for x in zip(rrb_tl, rrb[2:4])])
# crop to bounding rect
cropped = img[rrb_tl[1]:rrb_br[1], rrb_tl[0]:rrb_br[0]]
if debug:
write_debug_img(cropped, 'cropped')
# straighten image
angle = rect_min_area[2]
keep = angle > -45. # if the angle is less than -45 we need to swap w/h
rrb_width = rrb_br[0] - rrb_tl[0]
rrb_height = rrb_br[1] - rrb_tl[1]
width = rrb_width if keep else rrb_height
height = rrb_height if keep else rrb_width
angle += (0 if keep else 90)
center = (width / 2, height / 2)
dsize = (width, height)
matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
if 0 in cropped.shape:
return None
straight = cv2.warpAffine(cropped, matrix, dsize)
if debug:
write_debug_img(straight, 'straight')
# crop based on nonzero values
nonzero = straight.nonzero()
if len(nonzero[0]) == 0 or len(nonzero[1]) == 0:
return None
y_start = min(nonzero[0])
y_end = max(nonzero[0])
x_start = min(nonzero[1])
x_end = max(nonzero[1])
straight_crop = straight[y_start:y_end, x_start:x_end]
if debug:
write_debug_img(straight_crop, 'straight_crop')
square = np.zeros(img.shape[0:2])
y_len = y_end - y_start
x_len = x_end - x_start
y_start_sq = int((square.shape[0] - y_len) / 2)
y_end_sq = y_start_sq + y_len
x_start_sq = int((square.shape[1] - x_len) / 2)
x_end_sq = x_start_sq + x_len
square[y_start_sq:y_end_sq, x_start_sq:x_end_sq] = straight_crop
if debug:
write_debug_img(square, 'square')
if 0 in square.shape:
return
return square
def get_segmented(img, threshold):
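    # Segmentation pipeline: grayscale -> proportional box blur -> inverse binary
    # threshold (dark dice on a light background) -> external contours -> one
    # straightened, centred image per die via process_contour().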
global debug
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
if debug:
write_debug_img(img_gray, '01-gray')
    # blur kernel proportional to the image size (at least 1 px per dimension)
    coeff = 0.012
    blur_size = tuple([max(1, int(d * coeff)) for d in img_gray.shape[0:2]])
blurred = cv2.blur(img_gray, blur_size)
if debug:
write_debug_img(blurred, '02-blurred')
# white dice on black
# retval, threshold = cv2.threshold(blurred, 150, 255, cv2.THRESH_BINARY)
# black dice on white
retval, threshold = cv2.threshold(blurred, threshold, 255,
cv2.THRESH_BINARY_INV)
if debug:
write_debug_img(threshold, '03-threshold')
threshold, contours, hierarchy = cv2.findContours(threshold,
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)
if debug:
cimg = np.zeros(threshold.shape)
cv2.drawContours(cimg, contours, -1, 255)
write_debug_img(cimg, '04-contours')
processed = [process_contour(c, threshold) for c in contours]
return [p for p in processed if p is not None]
def process(in_path, out_dir, file_ext, threshold=127):
img = cv2.imread(in_path)
if img is None:
        print("couldn't read image: {}".format(in_path))
return
for i, segment in enumerate(get_segmented(img, threshold)):
write_segment_img(segment, out_dir, file_ext)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('in_path', help='path to input image', type=str)
parser.add_argument('out_dir', help='directory to output results', type=str)
parser.add_argument('--ext', help='output file extension', type=str,
default='png')
parser.add_argument('--threshold', help='threshold 0-255', type=int,
default=160)
parser.add_argument('--debug', help='if True then output debug images',
type=bool, default=False)
parser.add_argument('--debug_dir', help='output dir for debug images',
type=str, default='.')
args = parser.parse_args()
global debug, debug_dir
debug = args.debug
debug_dir = args.debug_dir
    process(args.in_path, args.out_dir, args.ext, args.threshold)
if __name__ == '__main__':
main()
| acarabott/ten-sided-dice-dataset | 04-processing/segment.py | Python | mit | 5,087 |
from django.db import models
import mixins
import query
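# Each manager combines the shared mixin behaviour with the model-specific
# QuerySet class from query.py (returned via get_query_set), so chainable
# query logic lives on the querysets themselves.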
class ArticleManager(models.Manager, mixins.ArticleManagerMixin):
def get_query_set(self):
return query.ArticleQuerySet(self.model, using=self._db)
class ImageManager(models.Manager, mixins.ImageManagerMixin):
def get_query_set(self):
return query.ImageQuerySet(self.model, using=self._db)
| megaprojectske/megaprojects.co.ke | megaprojects/articles/managers.py | Python | apache-2.0 | 377 |
# -*- coding: utf-8 -*-
#
# Copyright 2013, Not All Rights Reserved.
# Author: Windriver
# Email: [email protected]
# Created: 2013-09-26 01:13
#
# Description:
from evernote.edam.userstore import UserStore
from evernote.edam.notestore import NoteStore
import thrift.protocol.TBinaryProtocol as TBinaryProtocol
import thrift.transport.THttpClient as THttpClient
print "This demo shows how to call the Chinese Evernote (Yinxiang Biji) API to access the developer's own notes. "\
    "The tricky part is understanding Thrift RPC and the two core concepts, UserStore and NoteStore."
print "Once you have the UserStore and the NoteStore, the rest is straightforward. "\
    "See: http://dev.yinxiang.com/documentation/reference/"
# Use the developer token and note_store_url issued to you by Evernote; see
# http://dev.yinxiang.com/documentation/cloud/chapters/Authentication.php#devtoken
# user_store_url is the same for everyone
dev_token = "Filled it with your own token"
user_store_url = "https://app.yinxiang.com/edam/user"
note_store_url = "https://app.yinxiang.com/shard/s9/notestore"
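# The same Thrift pattern is used for each service below:
# HTTP transport -> binary protocol -> generated service Client.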
# Build the UserStore client
user_store_client = THttpClient.THttpClient(user_store_url)
user_store_proto = TBinaryProtocol.TBinaryProtocol(user_store_client)
user_store = UserStore.Client(user_store_proto, user_store_proto)
print "\nUser information:"
user = user_store.getUser(dev_token)
print user.username, user.id
# Build the NoteStore client
note_store_client = THttpClient.THttpClient(note_store_url)
note_store_proto = TBinaryProtocol.TBinaryProtocol(note_store_client)
note_store = NoteStore.Client(note_store_proto, note_store_proto)
print "\nNotebook information:"
notebooks = note_store.listNotebooks(dev_token)
for notebook in notebooks:
print notebook.name, notebook.serviceCreated
| Windriver/codelab | evernote/visit_my_evernote.py | Python | apache-2.0 | 1,795 |
# Choregraphe bezier export in Python.
from naoqi import ALProxy
names = list()
times = list()
keys = list()
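# Choregraphe exports one entry per joint: `times` holds the keyframe times in
# seconds and `keys` holds [angle_rad, leftTangent, rightTangent] triples, where
# each tangent is [interpolationType, dTime, dAngle] (type 3 = bezier).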
names.append("LShoulderPitch")
times.append([ 2.00000, 4.00000])
keys.append([ [ 1.39626, [ 3, -0.66667, 0.00000], [ 3, 0.66667, 0.00000]], [ 1.39626, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LShoulderRoll")
times.append([ 2.00000, 4.00000])
keys.append([ [ 0.34907, [ 3, -0.66667, 0.00000], [ 3, 0.66667, 0.00000]], [ 0.34907, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LElbowYaw")
times.append([ 2.00000, 4.00000])
keys.append([ [ -1.39626, [ 3, -0.66667, 0.00000], [ 3, 0.66667, 0.00000]], [ -1.39626, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LElbowRoll")
times.append([ 2.00000, 4.00000])
keys.append([ [ -1.04720, [ 3, -0.66667, 0.00000], [ 3, 0.66667, 0.00000]], [ -1.04720, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LWristYaw")
times.append([ 2.00000, 4.00000])
keys.append([ [ -0.00000, [ 3, -0.66667, 0.00000], [ 3, 0.66667, 0.00000]], [ -0.00000, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LHand")
times.append([ 2.00000, 4.00000])
keys.append([ [ 0.00000, [ 3, -0.66667, 0.00000], [ 3, 0.66667, 0.00000]], [ 0.00000, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RShoulderPitch")
times.append([ 2.00000, 4.00000])
keys.append([ [ 0.82030, [ 3, -0.66667, 0.00000], [ 3, 0.66667, 0.00000]], [ 1.39626, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RShoulderRoll")
times.append([ 2.00000, 4.00000])
keys.append([ [ -0.03840, [ 3, -0.66667, 0.00000], [ 3, 0.66667, 0.00000]], [ -0.34907, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RElbowYaw")
times.append([ 2.00000, 4.00000])
keys.append([ [ 1.51669, [ 3, -0.66667, 0.00000], [ 3, 0.66667, 0.00000]], [ 1.39626, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RElbowRoll")
times.append([ 2.00000, 4.00000])
keys.append([ [ 1.56207, [ 3, -0.66667, 0.00000], [ 3, 0.66667, 0.00000]], [ 1.04720, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RWristYaw")
times.append([ 2.00000, 4.00000])
keys.append([ [ -1.45910, [ 3, -0.66667, 0.00000], [ 3, 0.66667, 0.00000]], [ 0.00000, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RHand")
times.append([ 2.00000, 4.00000])
keys.append([ [ 0.01745, [ 3, -0.66667, 0.00000], [ 3, 0.66667, 0.00000]], [ 0.00000, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
try:
# uncomment the following line and modify the IP if you use this script outside Choregraphe.
# motion = ALProxy("ALMotion", IP, 9559)
motion = ALProxy("ALMotion")
motion.angleInterpolationBezier(names, times, keys);
except BaseException, err:
print err
| Rctue/nao-lib | gestures/HappyGesture_4.py | Python | gpl-2.0 | 2,776 |
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from envisage.ui.tasks.preferences_pane import PreferencesPane
from traits.api import Str
from traitsui.api import View, FileEditor, VGroup, Item
# ============= standard library imports ========================
import os
# ============= local library imports ==========================
from pychron.paths import paths
from pychron.envisage.tasks.base_preferences_helper import BasePreferencesHelper
class LDEOFurnacePreferences(BasePreferencesHelper):
preferences_path = "pychron.ldeofurnace"
class LDEOFurnacePreferencesPane(PreferencesPane):
category = "LDEO Furnace"
model_factory = LDEOFurnacePreferences
def traits_view(self):
v = View()
return v
class LDEOFurnaceControlPreferences(BasePreferencesHelper):
preferences_path = "pychron.ldeofurnace.control"
canvas_path = Str
canvas_config_path = Str
valves_path = Str
class LDEOFurnaceControlPreferencesPane(PreferencesPane):
category = "LDEO Furnace"
model_factory = LDEOFurnaceControlPreferences
def traits_view(self):
p_grp = VGroup(
Item(
"canvas_path",
label="Canvas Path",
editor=FileEditor(
root_path=os.path.join(paths.canvas2D_dir, "canvas.xml")
),
),
Item("canvas_config_path", label="Config Path", editor=FileEditor()),
Item(
"valves_path",
label="Valves Path",
editor=FileEditor(
root_path=os.path.join(paths.extraction_line_dir, "valves.xml")
),
),
show_border=True,
label="Paths",
)
v = View(p_grp)
return v
# ============= EOF =============================================
| USGSDenverPychron/pychron | pychron/furnace/tasks/ldeo/preferences.py | Python | apache-2.0 | 2,603 |
from sympy.integrals.transforms import (mellin_transform,
inverse_mellin_transform, laplace_transform, inverse_laplace_transform,
fourier_transform, inverse_fourier_transform,
sine_transform, inverse_sine_transform,
cosine_transform, inverse_cosine_transform,
hankel_transform, inverse_hankel_transform,
LaplaceTransform, FourierTransform, SineTransform, CosineTransform,
InverseLaplaceTransform, InverseFourierTransform,
InverseSineTransform, InverseCosineTransform, IntegralTransformError)
from sympy import (
gamma, exp, oo, Heaviside, symbols, Symbol, re, factorial, pi,
cos, S, Abs, And, Or, sin, sqrt, I, log, tan, hyperexpand, meijerg,
EulerGamma, erf, erfc, besselj, bessely, besseli, besselk,
exp_polar, polar_lift, unpolarify, Function, expint, expand_mul,
combsimp, trigsimp, atan, sinh, cosh, Ne, periodic_argument, atan2, Abs)
from sympy.utilities.pytest import XFAIL, slow, skip, raises
from sympy.matrices import Matrix, eye
from sympy.abc import x, s, a, b, c, d
nu, beta, rho = symbols('nu beta rho')
def test_undefined_function():
from sympy import Function, MellinTransform
f = Function('f')
assert mellin_transform(f(x), x, s) == MellinTransform(f(x), x, s)
assert mellin_transform(f(x) + exp(-x), x, s) == \
(MellinTransform(f(x), x, s) + gamma(s), (0, oo), True)
assert laplace_transform(2*f(x), x, s) == 2*LaplaceTransform(f(x), x, s)
# TODO test derivative and other rules when implemented
def test_free_symbols():
from sympy import Function
f = Function('f')
assert mellin_transform(f(x), x, s).free_symbols == set([s])
assert mellin_transform(f(x)*a, x, s).free_symbols == set([s, a])
def test_as_integral():
from sympy import Function, Integral
f = Function('f')
assert mellin_transform(f(x), x, s).rewrite('Integral') == \
Integral(x**(s - 1)*f(x), (x, 0, oo))
assert fourier_transform(f(x), x, s).rewrite('Integral') == \
Integral(f(x)*exp(-2*I*pi*s*x), (x, -oo, oo))
assert laplace_transform(f(x), x, s).rewrite('Integral') == \
Integral(f(x)*exp(-s*x), (x, 0, oo))
assert str(inverse_mellin_transform(f(s), s, x, (a, b)).rewrite('Integral')) \
== "Integral(x**(-s)*f(s), (s, _c - oo*I, _c + oo*I))"
assert str(inverse_laplace_transform(f(s), s, x).rewrite('Integral')) == \
"Integral(f(s)*exp(s*x), (s, _c - oo*I, _c + oo*I))"
assert inverse_fourier_transform(f(s), s, x).rewrite('Integral') == \
Integral(f(s)*exp(2*I*pi*s*x), (s, -oo, oo))
# NOTE this is stuck in risch because meijerint cannot handle it
@slow
@XFAIL
def test_mellin_transform_fail():
skip("Risch takes forever.")
MT = mellin_transform
bpos = symbols('b', positive=True)
bneg = symbols('b', negative=True)
expr = (sqrt(x + b**2) + b)**a/sqrt(x + b**2)
# TODO does not work with bneg, argument wrong. Needs changes to matching.
assert MT(expr.subs(b, -bpos), x, s) == \
((-1)**(a + 1)*2**(a + 2*s)*bpos**(a + 2*s - 1)*gamma(a + s)
*gamma(1 - a - 2*s)/gamma(1 - s),
(-re(a), -re(a)/2 + S(1)/2), True)
expr = (sqrt(x + b**2) + b)**a
assert MT(expr.subs(b, -bpos), x, s) == \
(
2**(a + 2*s)*a*bpos**(a + 2*s)*gamma(-a - 2*
s)*gamma(a + s)/gamma(-s + 1),
(-re(a), -re(a)/2), True)
# Test exponent 1:
assert MT(expr.subs({b: -bpos, a: 1}), x, s) == \
(-bpos**(2*s + 1)*gamma(s)*gamma(-s - S(1)/2)/(2*sqrt(pi)),
(-1, -S(1)/2), True)
@slow
def test_mellin_transform():
from sympy import Max, Min
MT = mellin_transform
bpos = symbols('b', positive=True)
# 8.4.2
assert MT(x**nu*Heaviside(x - 1), x, s) == \
(-1/(nu + s), (-oo, -re(nu)), True)
assert MT(x**nu*Heaviside(1 - x), x, s) == \
(1/(nu + s), (-re(nu), oo), True)
assert MT((1 - x)**(beta - 1)*Heaviside(1 - x), x, s) == \
(gamma(beta)*gamma(s)/gamma(beta + s), (0, oo), re(-beta) < 0)
assert MT((x - 1)**(beta - 1)*Heaviside(x - 1), x, s) == \
(gamma(beta)*gamma(1 - beta - s)/gamma(1 - s),
(-oo, -re(beta) + 1), re(-beta) < 0)
assert MT((1 + x)**(-rho), x, s) == \
(gamma(s)*gamma(rho - s)/gamma(rho), (0, re(rho)), True)
# TODO also the conditions should be simplified
assert MT(abs(1 - x)**(-rho), x, s) == (
cos(pi*(rho/2 - s))*gamma(s)*gamma(rho - s)/(cos(pi*rho/2)*gamma(rho)),
(0, re(rho)), And(re(rho) - 1 < 0, re(rho) < 1))
mt = MT((1 - x)**(beta - 1)*Heaviside(1 - x)
+ a*(x - 1)**(beta - 1)*Heaviside(x - 1), x, s)
assert mt[1], mt[2] == ((0, -re(beta) + 1), True)
assert MT((x**a - b**a)/(x - b), x, s)[0] == \
pi*b**(a + s - 1)*sin(pi*a)/(sin(pi*s)*sin(pi*(a + s)))
assert MT((x**a - bpos**a)/(x - bpos), x, s) == \
(pi*bpos**(a + s - 1)*sin(pi*a)/(sin(pi*s)*sin(pi*(a + s))),
(Max(-re(a), 0), Min(1 - re(a), 1)), True)
expr = (sqrt(x + b**2) + b)**a
assert MT(expr.subs(b, bpos), x, s) == \
(-a*(2*bpos)**(a + 2*s)*gamma(s)*gamma(-a - 2*s)/gamma(-a - s + 1),
(0, -re(a)/2), True)
expr = (sqrt(x + b**2) + b)**a/sqrt(x + b**2)
assert MT(expr.subs(b, bpos), x, s) == \
(2**(a + 2*s)*bpos**(a + 2*s - 1)*gamma(s)
*gamma(1 - a - 2*s)/gamma(1 - a - s),
(0, -re(a)/2 + S(1)/2), True)
# 8.4.2
assert MT(exp(-x), x, s) == (gamma(s), (0, oo), True)
assert MT(exp(-1/x), x, s) == (gamma(-s), (-oo, 0), True)
# 8.4.5
assert MT(log(x)**4*Heaviside(1 - x), x, s) == (24/s**5, (0, oo), True)
assert MT(log(x)**3*Heaviside(x - 1), x, s) == (6/s**4, (-oo, 0), True)
assert MT(log(x + 1), x, s) == (pi/(s*sin(pi*s)), (-1, 0), True)
assert MT(log(1/x + 1), x, s) == (pi/(s*sin(pi*s)), (0, 1), True)
assert MT(log(abs(1 - x)), x, s) == (pi/(s*tan(pi*s)), (-1, 0), True)
assert MT(log(abs(1 - 1/x)), x, s) == (pi/(s*tan(pi*s)), (0, 1), True)
# TODO we cannot currently do these (needs summation of 3F2(-1))
# this also implies that they cannot be written as a single g-function
# (although this is possible)
mt = MT(log(x)/(x + 1), x, s)
assert mt[1:] == ((0, 1), True)
assert not hyperexpand(mt[0], allow_hyper=True).has(meijerg)
mt = MT(log(x)**2/(x + 1), x, s)
assert mt[1:] == ((0, 1), True)
assert not hyperexpand(mt[0], allow_hyper=True).has(meijerg)
mt = MT(log(x)/(x + 1)**2, x, s)
assert mt[1:] == ((0, 2), True)
assert not hyperexpand(mt[0], allow_hyper=True).has(meijerg)
# 8.4.14
assert MT(erf(sqrt(x)), x, s) == \
(-gamma(s + S(1)/2)/(sqrt(pi)*s), (-S(1)/2, 0), True)
@slow
def test_mellin_transform_bessel():
from sympy import Max
MT = mellin_transform
# 8.4.19
assert MT(besselj(a, 2*sqrt(x)), x, s) == \
(gamma(a/2 + s)/gamma(a/2 - s + 1), (-re(a)/2, S(3)/4), True)
assert MT(sin(sqrt(x))*besselj(a, sqrt(x)), x, s) == \
(2**a*gamma(-2*s + S(1)/2)*gamma(a/2 + s + S(1)/2)/(
gamma(-a/2 - s + 1)*gamma(a - 2*s + 1)), (
-re(a)/2 - S(1)/2, S(1)/4), True)
assert MT(cos(sqrt(x))*besselj(a, sqrt(x)), x, s) == \
(2**a*gamma(a/2 + s)*gamma(-2*s + S(1)/2)/(
gamma(-a/2 - s + S(1)/2)*gamma(a - 2*s + 1)), (
-re(a)/2, S(1)/4), True)
assert MT(besselj(a, sqrt(x))**2, x, s) == \
(gamma(a + s)*gamma(S(1)/2 - s)
/ (sqrt(pi)*gamma(1 - s)*gamma(1 + a - s)),
(-re(a), S(1)/2), True)
assert MT(besselj(a, sqrt(x))*besselj(-a, sqrt(x)), x, s) == \
(gamma(s)*gamma(S(1)/2 - s)
/ (sqrt(pi)*gamma(1 - a - s)*gamma(1 + a - s)),
(0, S(1)/2), True)
# NOTE: prudnikov gives the strip below as (1/2 - re(a), 1). As far as
# I can see this is wrong (since besselj(z) ~ 1/sqrt(z) for z large)
assert MT(besselj(a - 1, sqrt(x))*besselj(a, sqrt(x)), x, s) == \
(gamma(1 - s)*gamma(a + s - S(1)/2)
/ (sqrt(pi)*gamma(S(3)/2 - s)*gamma(a - s + S(1)/2)),
(S(1)/2 - re(a), S(1)/2), True)
assert MT(besselj(a, sqrt(x))*besselj(b, sqrt(x)), x, s) == \
(4**s*gamma(1 - 2*s)*gamma((a + b)/2 + s)
/ (gamma(1 - s + (b - a)/2)*gamma(1 - s + (a - b)/2)
*gamma( 1 - s + (a + b)/2)),
(-(re(a) + re(b))/2, S(1)/2), True)
assert MT(besselj(a, sqrt(x))**2 + besselj(-a, sqrt(x))**2, x, s)[1:] == \
((Max(re(a), -re(a)), S(1)/2), True)
# Section 8.4.20
assert MT(bessely(a, 2*sqrt(x)), x, s) == \
(-cos(pi*(a/2 - s))*gamma(s - a/2)*gamma(s + a/2)/pi,
(Max(-re(a)/2, re(a)/2), S(3)/4), True)
assert MT(sin(sqrt(x))*bessely(a, sqrt(x)), x, s) == \
(-4**s*sin(pi*(a/2 - s))*gamma(S(1)/2 - 2*s)
* gamma((1 - a)/2 + s)*gamma((1 + a)/2 + s)
/ (sqrt(pi)*gamma(1 - s - a/2)*gamma(1 - s + a/2)),
(Max(-(re(a) + 1)/2, (re(a) - 1)/2), S(1)/4), True)
assert MT(cos(sqrt(x))*bessely(a, sqrt(x)), x, s) == \
(-4**s*cos(pi*(a/2 - s))*gamma(s - a/2)*gamma(s + a/2)*gamma(S(1)/2 - 2*s)
/ (sqrt(pi)*gamma(S(1)/2 - s - a/2)*gamma(S(1)/2 - s + a/2)),
(Max(-re(a)/2, re(a)/2), S(1)/4), True)
assert MT(besselj(a, sqrt(x))*bessely(a, sqrt(x)), x, s) == \
(-cos(pi*s)*gamma(s)*gamma(a + s)*gamma(S(1)/2 - s)
/ (pi**S('3/2')*gamma(1 + a - s)),
(Max(-re(a), 0), S(1)/2), True)
assert MT(besselj(a, sqrt(x))*bessely(b, sqrt(x)), x, s) == \
(-4**s*cos(pi*(a/2 - b/2 + s))*gamma(1 - 2*s)
* gamma(a/2 - b/2 + s)*gamma(a/2 + b/2 + s)
/ (pi*gamma(a/2 - b/2 - s + 1)*gamma(a/2 + b/2 - s + 1)),
(Max((-re(a) + re(b))/2, (-re(a) - re(b))/2), S(1)/2), True)
# NOTE bessely(a, sqrt(x))**2 and bessely(a, sqrt(x))*bessely(b, sqrt(x))
# are a mess (no matter what way you look at it ...)
assert MT(bessely(a, sqrt(x))**2, x, s)[1:] == \
((Max(-re(a), 0, re(a)), S(1)/2), True)
# Section 8.4.22
# TODO we can't do any of these (delicate cancellation)
# Section 8.4.23
assert MT(besselk(a, 2*sqrt(x)), x, s) == \
(gamma(
s - a/2)*gamma(s + a/2)/2, (Max(-re(a)/2, re(a)/2), oo), True)
assert MT(besselj(a, 2*sqrt(2*sqrt(x)))*besselk(
a, 2*sqrt(2*sqrt(x))), x, s) == (4**(-s)*gamma(2*s)*
gamma(a/2 + s)/(2*gamma(a/2 - s + 1)), (Max(0, -re(a)/2), oo), True)
# TODO bessely(a, x)*besselk(a, x) is a mess
assert MT(besseli(a, sqrt(x))*besselk(a, sqrt(x)), x, s) == \
(gamma(s)*gamma(
a + s)*gamma(-s + S(1)/2)/(2*sqrt(pi)*gamma(a - s + 1)),
(Max(-re(a), 0), S(1)/2), True)
assert MT(besseli(b, sqrt(x))*besselk(a, sqrt(x)), x, s) == \
(2**(2*s - 1)*gamma(-2*s + 1)*gamma(-a/2 + b/2 + s)* \
gamma(a/2 + b/2 + s)/(gamma(-a/2 + b/2 - s + 1)* \
gamma(a/2 + b/2 - s + 1)), (Max(-re(a)/2 - re(b)/2, \
re(a)/2 - re(b)/2), S(1)/2), True)
# TODO products of besselk are a mess
mt = MT(exp(-x/2)*besselk(a, x/2), x, s)
mt0 = combsimp((trigsimp(combsimp(mt[0].expand(func=True)))))
assert mt0 == 2*pi**(S(3)/2)*cos(pi*s)*gamma(-s + S(1)/2)/(
(cos(2*pi*a) - cos(2*pi*s))*gamma(-a - s + 1)*gamma(a - s + 1))
assert mt[1:] == ((Max(-re(a), re(a)), oo), True)
# TODO exp(x/2)*besselk(a, x/2) [etc] cannot currently be done
# TODO various strange products of special orders
@slow
def test_expint():
from sympy import E1, expint, Max, re, lerchphi, Symbol, simplify, Si, Ci, Ei
aneg = Symbol('a', negative=True)
u = Symbol('u', polar=True)
assert mellin_transform(E1(x), x, s) == (gamma(s)/s, (0, oo), True)
assert inverse_mellin_transform(gamma(s)/s, s, x,
(0, oo)).rewrite(expint).expand() == E1(x)
assert mellin_transform(expint(a, x), x, s) == \
(gamma(s)/(a + s - 1), (Max(1 - re(a), 0), oo), True)
    # XXX IMT has hiccups with complicated strips ...
assert simplify(unpolarify(
inverse_mellin_transform(gamma(s)/(aneg + s - 1), s, x,
(1 - aneg, oo)).rewrite(expint).expand(func=True))) == \
expint(aneg, x)
assert mellin_transform(Si(x), x, s) == \
(-2**s*sqrt(pi)*gamma(s/2 + S(1)/2)/(
2*s*gamma(-s/2 + 1)), (-1, 0), True)
assert inverse_mellin_transform(-2**s*sqrt(pi)*gamma((s + 1)/2)
/(2*s*gamma(-s/2 + 1)), s, x, (-1, 0)) \
== Si(x)
assert mellin_transform(Ci(sqrt(x)), x, s) == \
(-2**(2*s - 1)*sqrt(pi)*gamma(s)/(s*gamma(-s + S(1)/2)), (0, 1), True)
assert inverse_mellin_transform(
-4**s*sqrt(pi)*gamma(s)/(2*s*gamma(-s + S(1)/2)),
s, u, (0, 1)).expand() == Ci(sqrt(u))
# TODO LT of Si, Shi, Chi is a mess ...
assert laplace_transform(Ci(x), x, s) == (-log(1 + s**2)/2/s, 0, True)
assert laplace_transform(expint(a, x), x, s) == \
(lerchphi(s*polar_lift(-1), 1, a), 0, S(0) < re(a))
assert laplace_transform(expint(1, x), x, s) == (log(s + 1)/s, 0, True)
assert laplace_transform(expint(2, x), x, s) == \
((s - log(s + 1))/s**2, 0, True)
assert inverse_laplace_transform(-log(1 + s**2)/2/s, s, u).expand() == \
Heaviside(u)*Ci(u)
assert inverse_laplace_transform(log(s + 1)/s, s, x).rewrite(expint) == \
Heaviside(x)*E1(x)
assert inverse_laplace_transform((s - log(s + 1))/s**2, s,
x).rewrite(expint).expand() == \
(expint(2, x)*Heaviside(x)).rewrite(Ei).rewrite(expint).expand()
@slow
def test_inverse_mellin_transform():
from sympy import (sin, simplify, Max, Min, expand,
powsimp, exp_polar, cos, cot)
IMT = inverse_mellin_transform
assert IMT(gamma(s), s, x, (0, oo)) == exp(-x)
assert IMT(gamma(-s), s, x, (-oo, 0)) == exp(-1/x)
assert simplify(IMT(s/(2*s**2 - 2), s, x, (2, oo))) == \
(x**2 + 1)*Heaviside(1 - x)/(4*x)
# test passing "None"
assert IMT(1/(s**2 - 1), s, x, (-1, None)) == \
-x*Heaviside(-x + 1)/2 - Heaviside(x - 1)/(2*x)
assert IMT(1/(s**2 - 1), s, x, (None, 1)) == \
-x*Heaviside(-x + 1)/2 - Heaviside(x - 1)/(2*x)
# test expansion of sums
assert IMT(gamma(s) + gamma(s - 1), s, x, (1, oo)) == (x + 1)*exp(-x)/x
# test factorisation of polys
r = symbols('r', real=True)
assert IMT(1/(s**2 + 1), s, exp(-x), (None, oo)
).subs(x, r).rewrite(sin).simplify() \
== sin(r)*Heaviside(1 - exp(-r))
# test multiplicative substitution
_a, _b = symbols('a b', positive=True)
assert IMT(_b**(-s/_a)*factorial(s/_a)/s, s, x, (0, oo)) == exp(-_b*x**_a)
assert IMT(factorial(_a/_b + s/_b)/(_a + s), s, x, (-_a, oo)) == x**_a*exp(-x**_b)
def simp_pows(expr):
return simplify(powsimp(expand_mul(expr, deep=False), force=True)).replace(exp_polar, exp)
# Now test the inverses of all direct transforms tested above
# Section 8.4.2
nu = symbols('nu', real=True, finite=True)
assert IMT(-1/(nu + s), s, x, (-oo, None)) == x**nu*Heaviside(x - 1)
assert IMT(1/(nu + s), s, x, (None, oo)) == x**nu*Heaviside(1 - x)
assert simp_pows(IMT(gamma(beta)*gamma(s)/gamma(s + beta), s, x, (0, oo))) \
== (1 - x)**(beta - 1)*Heaviside(1 - x)
assert simp_pows(IMT(gamma(beta)*gamma(1 - beta - s)/gamma(1 - s),
s, x, (-oo, None))) \
== (x - 1)**(beta - 1)*Heaviside(x - 1)
assert simp_pows(IMT(gamma(s)*gamma(rho - s)/gamma(rho), s, x, (0, None))) \
== (1/(x + 1))**rho
assert simp_pows(IMT(d**c*d**(s - 1)*sin(pi*c)
*gamma(s)*gamma(s + c)*gamma(1 - s)*gamma(1 - s - c)/pi,
s, x, (Max(-re(c), 0), Min(1 - re(c), 1)))) \
== (x**c - d**c)/(x - d)
assert simplify(IMT(1/sqrt(pi)*(-c/2)*gamma(s)*gamma((1 - c)/2 - s)
*gamma(-c/2 - s)/gamma(1 - c - s),
s, x, (0, -re(c)/2))) == \
(1 + sqrt(x + 1))**c
assert simplify(IMT(2**(a + 2*s)*b**(a + 2*s - 1)*gamma(s)*gamma(1 - a - 2*s)
/gamma(1 - a - s), s, x, (0, (-re(a) + 1)/2))) == \
b**(a - 1)*(sqrt(1 + x/b**2) + 1)**(a - 1)*(b**2*sqrt(1 + x/b**2) +
b**2 + x)/(b**2 + x)
assert simplify(IMT(-2**(c + 2*s)*c*b**(c + 2*s)*gamma(s)*gamma(-c - 2*s)
/ gamma(-c - s + 1), s, x, (0, -re(c)/2))) == \
b**c*(sqrt(1 + x/b**2) + 1)**c
# Section 8.4.5
assert IMT(24/s**5, s, x, (0, oo)) == log(x)**4*Heaviside(1 - x)
assert expand(IMT(6/s**4, s, x, (-oo, 0)), force=True) == \
log(x)**3*Heaviside(x - 1)
assert IMT(pi/(s*sin(pi*s)), s, x, (-1, 0)) == log(x + 1)
assert IMT(pi/(s*sin(pi*s/2)), s, x, (-2, 0)) == log(x**2 + 1)
assert IMT(pi/(s*sin(2*pi*s)), s, x, (-S(1)/2, 0)) == log(sqrt(x) + 1)
assert IMT(pi/(s*sin(pi*s)), s, x, (0, 1)) == log(1 + 1/x)
# TODO
def mysimp(expr):
from sympy import expand, logcombine, powsimp
return expand(
powsimp(logcombine(expr, force=True), force=True, deep=True),
force=True).replace(exp_polar, exp)
assert mysimp(mysimp(IMT(pi/(s*tan(pi*s)), s, x, (-1, 0)))) in [
log(1 - x)*Heaviside(1 - x) + log(x - 1)*Heaviside(x - 1),
log(x)*Heaviside(x - 1) + log(1 - 1/x)*Heaviside(x - 1) + log(-x +
1)*Heaviside(-x + 1)]
# test passing cot
assert mysimp(IMT(pi*cot(pi*s)/s, s, x, (0, 1))) in [
log(1/x - 1)*Heaviside(1 - x) + log(1 - 1/x)*Heaviside(x - 1),
-log(x)*Heaviside(-x + 1) + log(1 - 1/x)*Heaviside(x - 1) + log(-x +
1)*Heaviside(-x + 1), ]
# 8.4.14
assert IMT(-gamma(s + S(1)/2)/(sqrt(pi)*s), s, x, (-S(1)/2, 0)) == \
erf(sqrt(x))
# 8.4.19
assert simplify(IMT(gamma(a/2 + s)/gamma(a/2 - s + 1), s, x, (-re(a)/2, S(3)/4))) \
== besselj(a, 2*sqrt(x))
assert simplify(IMT(2**a*gamma(S(1)/2 - 2*s)*gamma(s + (a + 1)/2)
/ (gamma(1 - s - a/2)*gamma(1 - 2*s + a)),
s, x, (-(re(a) + 1)/2, S(1)/4))) == \
sin(sqrt(x))*besselj(a, sqrt(x))
assert simplify(IMT(2**a*gamma(a/2 + s)*gamma(S(1)/2 - 2*s)
/ (gamma(S(1)/2 - s - a/2)*gamma(1 - 2*s + a)),
s, x, (-re(a)/2, S(1)/4))) == \
cos(sqrt(x))*besselj(a, sqrt(x))
# TODO this comes out as an amazing mess, but simplifies nicely
assert simplify(IMT(gamma(a + s)*gamma(S(1)/2 - s)
/ (sqrt(pi)*gamma(1 - s)*gamma(1 + a - s)),
s, x, (-re(a), S(1)/2))) == \
besselj(a, sqrt(x))**2
assert simplify(IMT(gamma(s)*gamma(S(1)/2 - s)
/ (sqrt(pi)*gamma(1 - s - a)*gamma(1 + a - s)),
s, x, (0, S(1)/2))) == \
besselj(-a, sqrt(x))*besselj(a, sqrt(x))
assert simplify(IMT(4**s*gamma(-2*s + 1)*gamma(a/2 + b/2 + s)
/ (gamma(-a/2 + b/2 - s + 1)*gamma(a/2 - b/2 - s + 1)
*gamma(a/2 + b/2 - s + 1)),
s, x, (-(re(a) + re(b))/2, S(1)/2))) == \
besselj(a, sqrt(x))*besselj(b, sqrt(x))
# Section 8.4.20
# TODO this can be further simplified!
assert simplify(IMT(-2**(2*s)*cos(pi*a/2 - pi*b/2 + pi*s)*gamma(-2*s + 1) *
gamma(a/2 - b/2 + s)*gamma(a/2 + b/2 + s) /
(pi*gamma(a/2 - b/2 - s + 1)*gamma(a/2 + b/2 - s + 1)),
s, x,
(Max(-re(a)/2 - re(b)/2, -re(a)/2 + re(b)/2), S(1)/2))) == \
besselj(a, sqrt(x))*-(besselj(-b, sqrt(x)) -
besselj(b, sqrt(x))*cos(pi*b))/sin(pi*b)
# TODO more
# for coverage
assert IMT(pi/cos(pi*s), s, x, (0, S(1)/2)) == sqrt(x)/(x + 1)
@slow
def test_laplace_transform():
from sympy import fresnels, fresnelc
LT = laplace_transform
a, b, c, = symbols('a b c', positive=True)
t = symbols('t')
w = Symbol("w")
f = Function("f")
# Test unevaluated form
assert laplace_transform(f(t), t, w) == LaplaceTransform(f(t), t, w)
assert inverse_laplace_transform(
f(w), w, t, plane=0) == InverseLaplaceTransform(f(w), w, t, 0)
# test a bug
spos = symbols('s', positive=True)
assert LT(exp(t), t, spos)[:2] == (1/(spos - 1), True)
# basic tests from wikipedia
assert LT((t - a)**b*exp(-c*(t - a))*Heaviside(t - a), t, s) == \
((s + c)**(-b - 1)*exp(-a*s)*gamma(b + 1), -c, True)
assert LT(t**a, t, s) == (s**(-a - 1)*gamma(a + 1), 0, True)
assert LT(Heaviside(t), t, s) == (1/s, 0, True)
assert LT(Heaviside(t - a), t, s) == (exp(-a*s)/s, 0, True)
assert LT(1 - exp(-a*t), t, s) == (a/(s*(a + s)), 0, True)
assert LT((exp(2*t) - 1)*exp(-b - t)*Heaviside(t)/2, t, s, noconds=True) \
== exp(-b)/(s**2 - 1)
assert LT(exp(t), t, s)[:2] == (1/(s - 1), 1)
assert LT(exp(2*t), t, s)[:2] == (1/(s - 2), 2)
assert LT(exp(a*t), t, s)[:2] == (1/(s - a), a)
assert LT(log(t/a), t, s) == ((log(a*s) + EulerGamma)/s/-1, 0, True)
assert LT(erf(t), t, s) == ((erfc(s/2))*exp(s**2/4)/s, 0, True)
assert LT(sin(a*t), t, s) == (a/(a**2 + s**2), 0, True)
assert LT(cos(a*t), t, s) == (s/(a**2 + s**2), 0, True)
# TODO would be nice to have these come out better
assert LT(
exp(-a*t)*sin(b*t), t, s) == (b/(b**2 + (a + s)**2), -a, True)
assert LT(exp(-a*t)*cos(b*t), t, s) == \
((a + s)/(b**2 + (a + s)**2), -a, True)
assert LT(besselj(0, t), t, s) == (1/sqrt(1 + s**2), 0, True)
assert LT(besselj(1, t), t, s) == (1 - 1/sqrt(1 + 1/s**2), 0, True)
# TODO general order works, but is a *mess*
# TODO besseli also works, but is an even greater mess
# test a bug in conditions processing
# TODO the auxiliary condition should be recognised/simplified
assert LT(exp(t)*cos(t), t, s)[:-1] in [
((s - 1)/(s**2 - 2*s + 2), -oo),
((s - 1)/((s - 1)**2 + 1), -oo),
]
# Fresnel functions
assert laplace_transform(fresnels(t), t, s) == \
((-sin(s**2/(2*pi))*fresnels(s/pi) + sin(s**2/(2*pi))/2 -
cos(s**2/(2*pi))*fresnelc(s/pi) + cos(s**2/(2*pi))/2)/s, 0, True)
assert laplace_transform(fresnelc(t), t, s) == (
(sin(s**2/(2*pi))*fresnelc(s/pi)/s - cos(s**2/(2*pi))*fresnels(s/pi)/s
+ sqrt(2)*cos(s**2/(2*pi) + pi/4)/(2*s), 0, True))
assert LT(Matrix([[exp(t), t*exp(-t)], [t*exp(-t), exp(t)]]), t, s) ==\
Matrix([
[(1/(s - 1), 1, True), ((s + 1)**(-2), 0, True)],
[((s + 1)**(-2), 0, True), (1/(s - 1), 1, True)]
])
def test_issue_8368_7173():
LT = laplace_transform
# hyperbolic
assert LT(sinh(x), x, s) == (1/(s**2 - 1), 1, True)
assert LT(cosh(x), x, s) == (s/(s**2 - 1), 1, True)
assert LT(sinh(x + 3), x, s) == (
(-s + (s + 1)*exp(6) + 1)*exp(-3)/(s - 1)/(s + 1)/2, 1, True)
assert LT(sinh(x)*cosh(x), x, s) == (1/(s**2 - 4), 2, Ne(s/2, 1))
# trig (make sure they are not being rewritten in terms of exp)
assert LT(cos(x + 3), x, s) == ((s*cos(3) - sin(3))/(s**2 + 1), 0, True)
def test_inverse_laplace_transform():
from sympy import sinh, cosh, besselj, besseli, simplify, factor_terms
ILT = inverse_laplace_transform
a, b, c, = symbols('a b c', positive=True, finite=True)
t = symbols('t')
def simp_hyp(expr):
return factor_terms(expand_mul(expr)).rewrite(sin)
# just test inverses of all of the above
assert ILT(1/s, s, t) == Heaviside(t)
assert ILT(1/s**2, s, t) == t*Heaviside(t)
assert ILT(1/s**5, s, t) == t**4*Heaviside(t)/24
assert ILT(exp(-a*s)/s, s, t) == Heaviside(t - a)
assert ILT(exp(-a*s)/(s + b), s, t) == exp(b*(a - t))*Heaviside(-a + t)
assert ILT(a/(s**2 + a**2), s, t) == sin(a*t)*Heaviside(t)
assert ILT(s/(s**2 + a**2), s, t) == cos(a*t)*Heaviside(t)
# TODO is there a way around simp_hyp?
assert simp_hyp(ILT(a/(s**2 - a**2), s, t)) == sinh(a*t)*Heaviside(t)
assert simp_hyp(ILT(s/(s**2 - a**2), s, t)) == cosh(a*t)*Heaviside(t)
assert ILT(a/((s + b)**2 + a**2), s, t) == exp(-b*t)*sin(a*t)*Heaviside(t)
assert ILT(
(s + b)/((s + b)**2 + a**2), s, t) == exp(-b*t)*cos(a*t)*Heaviside(t)
# TODO sinh/cosh shifted come out a mess. also delayed trig is a mess
# TODO should this simplify further?
assert ILT(exp(-a*s)/s**b, s, t) == \
(t - a)**(b - 1)*Heaviside(t - a)/gamma(b)
assert ILT(exp(-a*s)/sqrt(1 + s**2), s, t) == \
Heaviside(t - a)*besselj(0, a - t) # note: besselj(0, x) is even
# XXX ILT turns these branch factor into trig functions ...
assert simplify(ILT(a**b*(s + sqrt(s**2 - a**2))**(-b)/sqrt(s**2 - a**2),
s, t).rewrite(exp)) == \
Heaviside(t)*besseli(b, a*t)
assert ILT(a**b*(s + sqrt(s**2 + a**2))**(-b)/sqrt(s**2 + a**2),
s, t).rewrite(exp) == \
Heaviside(t)*besselj(b, a*t)
assert ILT(1/(s*sqrt(s + 1)), s, t) == Heaviside(t)*erf(sqrt(t))
# TODO can we make erf(t) work?
assert ILT(1/(s**2*(s**2 + 1)),s,t) == (t - sin(t))*Heaviside(t)
assert ILT( (s * eye(2) - Matrix([[1, 0], [0, 2]])).inv(), s, t) ==\
Matrix([[exp(t)*Heaviside(t), 0], [0, exp(2*t)*Heaviside(t)]])
def test_fourier_transform():
from sympy import simplify, expand, expand_complex, factor, expand_trig
FT = fourier_transform
IFT = inverse_fourier_transform
def simp(x):
return simplify(expand_trig(expand_complex(expand(x))))
def sinc(x):
return sin(pi*x)/(pi*x)
k = symbols('k', real=True)
f = Function("f")
# TODO for this to work with real a, need to expand abs(a*x) to abs(a)*abs(x)
a = symbols('a', positive=True)
b = symbols('b', positive=True)
posk = symbols('posk', positive=True)
# Test unevaluated form
assert fourier_transform(f(x), x, k) == FourierTransform(f(x), x, k)
assert inverse_fourier_transform(
f(k), k, x) == InverseFourierTransform(f(k), k, x)
# basic examples from wikipedia
assert simp(FT(Heaviside(1 - abs(2*a*x)), x, k)) == sinc(k/a)/a
# TODO IFT is a *mess*
assert simp(FT(Heaviside(1 - abs(a*x))*(1 - abs(a*x)), x, k)) == sinc(k/a)**2/a
# TODO IFT
assert factor(FT(exp(-a*x)*Heaviside(x), x, k), extension=I) == \
1/(a + 2*pi*I*k)
# NOTE: the ift comes out in pieces
assert IFT(1/(a + 2*pi*I*x), x, posk,
noconds=False) == (exp(-a*posk), True)
assert IFT(1/(a + 2*pi*I*x), x, -posk,
noconds=False) == (0, True)
assert IFT(1/(a + 2*pi*I*x), x, symbols('k', negative=True),
noconds=False) == (0, True)
# TODO IFT without factoring comes out as meijer g
assert factor(FT(x*exp(-a*x)*Heaviside(x), x, k), extension=I) == \
1/(a + 2*pi*I*k)**2
assert FT(exp(-a*x)*sin(b*x)*Heaviside(x), x, k) == \
b/(b**2 + (a + 2*I*pi*k)**2)
assert FT(exp(-a*x**2), x, k) == sqrt(pi)*exp(-pi**2*k**2/a)/sqrt(a)
assert IFT(sqrt(pi/a)*exp(-(pi*k)**2/a), k, x) == exp(-a*x**2)
assert FT(exp(-a*abs(x)), x, k) == 2*a/(a**2 + 4*pi**2*k**2)
# TODO IFT (comes out as meijer G)
# TODO besselj(n, x), n an integer > 0 actually can be done...
# TODO are there other common transforms (no distributions!)?
def test_sine_transform():
from sympy import EulerGamma
t = symbols("t")
w = symbols("w")
a = symbols("a")
f = Function("f")
# Test unevaluated form
assert sine_transform(f(t), t, w) == SineTransform(f(t), t, w)
assert inverse_sine_transform(
f(w), w, t) == InverseSineTransform(f(w), w, t)
assert sine_transform(1/sqrt(t), t, w) == 1/sqrt(w)
assert inverse_sine_transform(1/sqrt(w), w, t) == 1/sqrt(t)
assert sine_transform(
(1/sqrt(t))**3, t, w) == sqrt(w)*gamma(S(1)/4)/(2*gamma(S(5)/4))
assert sine_transform(t**(-a), t, w) == 2**(
-a + S(1)/2)*w**(a - 1)*gamma(-a/2 + 1)/gamma((a + 1)/2)
assert inverse_sine_transform(2**(-a + S(
1)/2)*w**(a - 1)*gamma(-a/2 + 1)/gamma(a/2 + S(1)/2), w, t) == t**(-a)
assert sine_transform(
exp(-a*t), t, w) == sqrt(2)*w/(sqrt(pi)*(a**2 + w**2))
assert inverse_sine_transform(
sqrt(2)*w/(sqrt(pi)*(a**2 + w**2)), w, t) == exp(-a*t)
assert sine_transform(
log(t)/t, t, w) == -sqrt(2)*sqrt(pi)*(log(w**2) + 2*EulerGamma)/4
assert sine_transform(
t*exp(-a*t**2), t, w) == sqrt(2)*w*exp(-w**2/(4*a))/(4*a**(S(3)/2))
assert inverse_sine_transform(
sqrt(2)*w*exp(-w**2/(4*a))/(4*a**(S(3)/2)), w, t) == t*exp(-a*t**2)
def test_cosine_transform():
from sympy import Si, Ci
t = symbols("t")
w = symbols("w")
a = symbols("a")
f = Function("f")
# Test unevaluated form
assert cosine_transform(f(t), t, w) == CosineTransform(f(t), t, w)
assert inverse_cosine_transform(
f(w), w, t) == InverseCosineTransform(f(w), w, t)
assert cosine_transform(1/sqrt(t), t, w) == 1/sqrt(w)
assert inverse_cosine_transform(1/sqrt(w), w, t) == 1/sqrt(t)
assert cosine_transform(1/(
a**2 + t**2), t, w) == sqrt(2)*sqrt(pi)*exp(-a*w)/(2*a)
assert cosine_transform(t**(
-a), t, w) == 2**(-a + S(1)/2)*w**(a - 1)*gamma((-a + 1)/2)/gamma(a/2)
assert inverse_cosine_transform(2**(-a + S(
1)/2)*w**(a - 1)*gamma(-a/2 + S(1)/2)/gamma(a/2), w, t) == t**(-a)
assert cosine_transform(
exp(-a*t), t, w) == sqrt(2)*a/(sqrt(pi)*(a**2 + w**2))
assert inverse_cosine_transform(
sqrt(2)*a/(sqrt(pi)*(a**2 + w**2)), w, t) == exp(-a*t)
assert cosine_transform(exp(-a*sqrt(t))*cos(a*sqrt(
t)), t, w) == a*exp(-a**2/(2*w))/(2*w**(S(3)/2))
assert cosine_transform(1/(a + t), t, w) == sqrt(2)*(
(-2*Si(a*w) + pi)*sin(a*w)/2 - cos(a*w)*Ci(a*w))/sqrt(pi)
assert inverse_cosine_transform(sqrt(2)*meijerg(((S(1)/2, 0), ()), (
(S(1)/2, 0, 0), (S(1)/2,)), a**2*w**2/4)/(2*pi), w, t) == 1/(a + t)
assert cosine_transform(1/sqrt(a**2 + t**2), t, w) == sqrt(2)*meijerg(
((S(1)/2,), ()), ((0, 0), (S(1)/2,)), a**2*w**2/4)/(2*sqrt(pi))
assert inverse_cosine_transform(sqrt(2)*meijerg(((S(1)/2,), ()), ((0, 0), (S(1)/2,)), a**2*w**2/4)/(2*sqrt(pi)), w, t) == 1/(t*sqrt(a**2/t**2 + 1))
def test_hankel_transform():
from sympy import gamma, sqrt, exp
r = Symbol("r")
k = Symbol("k")
nu = Symbol("nu")
m = Symbol("m")
a = symbols("a")
assert hankel_transform(1/r, r, k, 0) == 1/k
assert inverse_hankel_transform(1/k, k, r, 0) == 1/r
assert hankel_transform(
1/r**m, r, k, 0) == 2**(-m + 1)*k**(m - 2)*gamma(-m/2 + 1)/gamma(m/2)
assert inverse_hankel_transform(
2**(-m + 1)*k**(m - 2)*gamma(-m/2 + 1)/gamma(m/2), k, r, 0) == r**(-m)
assert hankel_transform(1/r**m, r, k, nu) == (
2*2**(-m)*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2))
assert inverse_hankel_transform(2**(-m + 1)*k**(
m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2), k, r, nu) == r**(-m)
assert hankel_transform(r**nu*exp(-a*r), r, k, nu) == \
2**(nu + 1)*a*k**(-nu - 3)*(a**2/k**2 + 1)**(-nu - S(
3)/2)*gamma(nu + S(3)/2)/sqrt(pi)
assert inverse_hankel_transform(
2**(nu + 1)*a*k**(-nu - 3)*(a**2/k**2 + 1)**(-nu - S(3)/2)*gamma(
nu + S(3)/2)/sqrt(pi), k, r, nu) == r**nu*exp(-a*r)
def test_issue_7181():
assert mellin_transform(1/(1 - x), x, s) != None
def test_issue_8882():
# This is the original test.
# from sympy import diff, Integral, integrate
# r = Symbol('r')
# psi = 1/r*sin(r)*exp(-(a0*r))
# h = -1/2*diff(psi, r, r) - 1/r*psi
# f = 4*pi*psi*h*r**2
# assert integrate(f, (r, -oo, 3), meijerg=True).has(Integral) == True
# To save time, only the critical part is included.
F = -a**(-s + 1)*(4 + 1/a**2)**(-s/2)*sqrt(1/a**2)*exp(-s*I*pi)* \
sin(s*atan(sqrt(1/a**2)/2))*gamma(s)
raises(IntegralTransformError, lambda:
inverse_mellin_transform(F, s, x, (-1, oo),
**{'as_meijerg': True, 'needeval': True}))
def test_issue_7173():
assert laplace_transform(sinh(a*x)*cosh(a*x), x, s) == \
(a/(s**2 - 4*a**2), 0,
And(Or(Abs(periodic_argument(exp_polar(I*pi)*polar_lift(a), oo)) <
pi/2, Abs(periodic_argument(exp_polar(I*pi)*polar_lift(a), oo)) <=
pi/2), Or(Abs(periodic_argument(a, oo)) < pi/2,
Abs(periodic_argument(a, oo)) <= pi/2)))
def test_issue_8514():
from sympy import simplify, refine
a, b, c, = symbols('a b c', positive=True)
t = symbols('t', positive=True)
ft = simplify(inverse_laplace_transform(1/(a*s**2+b*s+c),s, t))
assert ft == ((exp(t*(exp(I*atan2(0, -4*a*c + b**2)/2) -
exp(-I*atan2(0, -4*a*c + b**2)/2))*
sqrt(refine(Abs(4*a*c - b**2)))/(4*a))*exp(t*cos(atan2(0, -4*a*c + b**2)/2)
*sqrt(refine(Abs(4*a*c - b**2)))/a) + I*sin(t*sin(atan2(0, -4*a*c + b**2)/2)
*sqrt(refine(Abs(4*a*c - b**2)))/(2*a)) - cos(t*sin(atan2(0, -4*a*c + b**2)/2)
*sqrt(refine(Abs(4*a*c - b**2)))/(2*a)))*exp(-t*(b + cos(atan2(0, -4*a*c + b**2)/2)
*sqrt(refine(Abs(4*a*c - b**2))))/(2*a))/sqrt(-4*a*c + b**2))
| kaichogami/sympy | sympy/integrals/tests/test_transforms.py | Python | bsd-3-clause | 33,356 |
from setuptools import setup, find_packages
import os
name = "revot"
version = "0.1"
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
setup(
name=name,
version=version,
description="revot software",
long_description=read('README'),
# Get strings from http://www.python.org/pypi?%3Aaction=list_classifiers
classifiers=[],
keywords="",
author="",
author_email='',
url='',
license='',
package_dir={'': '.'},
packages=find_packages('.'),
include_package_data=True,
zip_safe=False,
install_requires=[
'Babel==2.1.1',
'Flask==0.10.1',
'Flask-Babel==0.9',
'Flask-Bootstrap==3.3.5.6',
'Flask-Mail==0.9.1',
'Flask-Moment==0.5.1',
'Flask-SQLAlchemy==2.0',
'Flask-Script==2.0.5',
'Flask-WTF==0.12',
'Jinja2==2.8',
'MarkupSafe==0.23',
'Pygments==2.0.2',
'SQLAlchemy==1.0.8',
'Sphinx==1.3.1',
'WTForms==2.0.2',
'Werkzeug==0.10.4',
'alabaster==0.7.6',
'amqp==1.4.7',
'anyjson==0.3.3',
'argparse==1.2.1',
'billiard==3.3.0.20',
'blinker==1.4',
'celery==3.1.18',
'docutils==0.12',
'dominate==2.1.16',
'flask-nav==0.4',
'itsdangerous==0.24',
'kombu==3.0.26',
'python-dateutil==2.4.2',
'pytz==2015.6',
'redis==2.10.3',
'six==1.10.0',
'snowballstemmer==1.2.0',
'speaklater==1.3',
'sphinx-rtd-theme==0.1.9',
'visitor==0.1.2',
'wsgiref==0.1.2',
'Flask-Login==0.3.2',
        'hmac'  # NOTE: hmac ships with the Python standard library; this entry is likely unnecessary
],
entry_points="""
[console_scripts]
flask-ctl = revot.script:run
[paste.app_factory]
main = revot.script:make_app
debug = revot.script:make_debug
""",
)
| iskracat/revot | setup.py | Python | apache-2.0 | 1,864 |
# -*- coding: utf-8 -*-
#
# odmlviz documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 17 13:22:00 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../../src'))
sys.path.insert(0, os.path.abspath('../../../../python-odml/'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinxcontrib.exceltable'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'odmlviz'
copyright = u'2015, Jana Pick'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'odmlvizdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'odmlviz.tex', u'odmlviz Documentation',
u'Jana Pick', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'odmlviz', u'odmlviz Documentation',
[u'Jana Pick'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'odmlviz', u'odmlviz Documentation',
u'Jana Pick', 'odmlviz', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| j-pick/python-odmlviz | doc/source/conf.py | Python | bsd-2-clause | 8,318 |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter, MaxNLocator
import numpy as np
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from matplotlib.finance import quotes_historical_yahoo_ochl
from matplotlib.dates import YearLocator, MonthLocator, DateFormatter, WeekdayLocator, MONDAY
import datetime
__version__ = '0.0.1'
__license__ = 'MIT'
__author__ = 'Joshua Guo ([email protected])'
'''
Python to plot using matplotlib and numpy
'''
def main():
    # tick_labels_plot()
# major_minor_use()
stock_year_plot()
# stock_month_plot()
def tick_labels_plot():
'''
Basic demo showing how to set tick labels to values of a series.
Using ax.set_xticks causes the tick labels to be set on the currently chosen ticks.
However, you may want to allow matplotlib to dynamically choose the number of ticks and their spacing.
In this case it may be better to determine the tick label from the value at the tick. The following example shows how to do this.
NB: The MaxNLocator is used here to ensure that the tick values take integer values.
'''
fig = plt.figure()
ax = fig.add_subplot(111)
xs = range(26)
ys = range(26)
labels = list('abcdefghijklmnopqrstuvwxyz')
def format_fn(tick_val, tick_pos):
if int(tick_val) in xs:
return labels[int(tick_val)]
else:
return ''
# Locators determine where the ticks are and formatters control the formatting of ticks.
ax.xaxis.set_major_formatter(FuncFormatter(format_fn))
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.plot(xs, ys)
plt.show()
def major_minor_use():
'''
Demonstrate how to use major and minor tickers.
'''
majorLocator = MultipleLocator(20)
majorFormatter = FormatStrFormatter('%d')
minorLocator = MultipleLocator(5)
t = np.arange(0.0, 100.0, 0.1)
s = np.sin(0.1*np.pi*t)*np.exp(-t*0.01)
fig, ax = plt.subplots()
plt.plot(t, s)
ax.xaxis.set_major_locator(majorLocator)
ax.xaxis.set_major_formatter(majorFormatter)
# for the minor ticks, use no labels; default NullFormatter
ax.xaxis.set_minor_locator(minorLocator)
plt.show()
def stock_year_plot():
'''
Show how to make date plots in matplotlib using date tick locators and formatters.
'''
date1 = datetime.date(1995, 1, 1)
date2 = datetime.date(2004, 4, 12)
years = YearLocator() # every year
months = MonthLocator() # every month
yearsFmt = DateFormatter('%Y')
quotes = quotes_historical_yahoo_ochl('INTC', date1, date2)
if len(quotes) == 0:
print('Found no quotes')
raise SystemExit
dates = [q[0] for q in quotes]
opens = [q[1] for q in quotes]
fig, ax = plt.subplots()
ax.plot_date(dates, opens, '-')
# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(months)
ax.autoscale_view()
# format the coords message box
def price(x):
return '$%1.2f' % x
ax.fmt_xdata = DateFormatter('%Y-%m-%d')
ax.fmt_ydata = price
ax.grid(True)
fig.autofmt_xdate()
plt.show()
def stock_month_plot():
'''
Show how to make date plots in matplotlib using date tick locators and formatters.
'''
date1 = datetime.date(2002, 1, 5)
date2 = datetime.date(2003, 12, 1)
# every monday
mondays = WeekdayLocator(MONDAY)
# every 3rd month
months = MonthLocator(range(1, 13), bymonthday=1, interval=3)
monthsFmt = DateFormatter("%b '%y")
quotes = quotes_historical_yahoo_ochl('INTC', date1, date2)
if len(quotes) == 0:
print('Found no quotes')
raise SystemExit
dates = [q[0] for q in quotes]
opens = [q[1] for q in quotes]
fig, ax = plt.subplots()
ax.plot_date(dates, opens, '-')
ax.xaxis.set_major_locator(months)
ax.xaxis.set_major_formatter(monthsFmt)
ax.xaxis.set_minor_locator(mondays)
ax.autoscale_view()
#ax.xaxis.grid(False, 'major')
#ax.xaxis.grid(True, 'minor')
ax.grid(True)
fig.autofmt_xdate()
plt.show()
if __name__ == '__main__':
main()
| JoshuaMichaelKing/MyLearning | learn-python2.7/matplotlib/plot_demos.py | Python | mit | 4,309 |
# -*- coding: utf-8 -*-
"""The Elastic Search output module CLI arguments helper."""
from __future__ import unicode_literals
import getpass
import os
from uuid import uuid4
from plaso.cli.helpers import interface
from plaso.cli.helpers import manager
from plaso.cli.helpers import server_config
from plaso.cli import logger
from plaso.lib import errors
from plaso.output import elastic
class ElasticSearchServerArgumentsHelper(server_config.ServerArgumentsHelper):
"""Elastic Search server CLI arguments helper."""
_DEFAULT_SERVER = '127.0.0.1'
_DEFAULT_PORT = 9200
class ElasticSearchOutputArgumentsHelper(interface.ArgumentsHelper):
"""Elastic Search output module CLI arguments helper."""
NAME = 'elastic'
CATEGORY = 'output'
DESCRIPTION = 'Argument helper for the Elastic Search output modules.'
_DEFAULT_INDEX_NAME = uuid4().hex
_DEFAULT_DOCUMENT_TYPE = 'plaso_event'
_DEFAULT_FLUSH_INTERVAL = 1000
_DEFAULT_RAW_FIELDS = False
@classmethod
def AddArguments(cls, argument_group):
"""Adds command line arguments the helper supports to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group.
"""
argument_group.add_argument(
'--index_name', dest='index_name', type=str, action='store',
default=cls._DEFAULT_INDEX_NAME, help=(
'Name of the index in ElasticSearch.'))
argument_group.add_argument(
'--doc_type', dest='document_type', type=str,
action='store', default=cls._DEFAULT_DOCUMENT_TYPE, help=(
'Name of the document type that will be used in ElasticSearch.'))
argument_group.add_argument(
'--flush_interval', dest='flush_interval', type=int,
action='store', default=cls._DEFAULT_FLUSH_INTERVAL, help=(
'Events to queue up before bulk insert to ElasticSearch.'))
argument_group.add_argument(
'--raw_fields', dest='raw_fields', action='store_true',
default=cls._DEFAULT_RAW_FIELDS, help=(
'Export string fields that will not be analyzed by Lucene.'))
argument_group.add_argument(
'--elastic_user', dest='elastic_user', action='store',
default=None, help='Username to use for Elasticsearch authentication.')
argument_group.add_argument(
'--elastic_password', dest='elastic_password', action='store',
default=None, help=(
'Password to use for Elasticsearch authentication. WARNING: use '
'with caution since this can expose the password to other users '
'on the system. The password can also be set with the environment '
'variable PLASO_ELASTIC_PASSWORD. '))
argument_group.add_argument(
'--use_ssl', dest='use_ssl', action='store_true',
help='Enforces use of ssl.')
argument_group.add_argument(
'--ca_certificates_file_path', dest='ca_certificates_file_path',
action='store', type=str, default=None, help=(
'Path to a file containing a list of root certificates to trust.'))
argument_group.add_argument(
'--elastic_url_prefix', dest='elastic_url_prefix', type=str,
action='store', default=None, help='URL prefix for elastic search.')
ElasticSearchServerArgumentsHelper.AddArguments(argument_group)
# pylint: disable=arguments-differ
@classmethod
def ParseOptions(cls, options, output_module):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
output_module (OutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation.
"""
    if not isinstance(output_module, elastic.ElasticsearchOutputModule):
raise errors.BadConfigObject(
'Output module is not an instance of ElasticsearchOutputModule')
index_name = cls._ParseStringOption(
options, 'index_name', default_value=cls._DEFAULT_INDEX_NAME)
document_type = cls._ParseStringOption(
options, 'document_type', default_value=cls._DEFAULT_DOCUMENT_TYPE)
flush_interval = cls._ParseNumericOption(
options, 'flush_interval', default_value=cls._DEFAULT_FLUSH_INTERVAL)
raw_fields = getattr(options, 'raw_fields', cls._DEFAULT_RAW_FIELDS)
elastic_user = cls._ParseStringOption(options, 'elastic_user')
elastic_password = cls._ParseStringOption(options, 'elastic_password')
use_ssl = getattr(options, 'use_ssl', False)
ca_certificates_path = cls._ParseStringOption(
options, 'ca_certificates_file_path')
elastic_url_prefix = cls._ParseStringOption(options, 'elastic_url_prefix')
if elastic_password is None:
elastic_password = os.getenv('PLASO_ELASTIC_PASSWORD', None)
if elastic_password is not None:
logger.warning(
'Note that specifying your Elasticsearch password via '
'--elastic_password or the environment PLASO_ELASTIC_PASSWORD can '
'expose the password to other users on the system.')
if elastic_user is not None and elastic_password is None:
elastic_password = getpass.getpass('Enter your Elasticsearch password: ')
ElasticSearchServerArgumentsHelper.ParseOptions(options, output_module)
output_module.SetIndexName(index_name)
output_module.SetDocumentType(document_type)
output_module.SetFlushInterval(flush_interval)
output_module.SetRawFields(raw_fields)
output_module.SetUsername(elastic_user)
output_module.SetPassword(elastic_password)
output_module.SetUseSSL(use_ssl)
output_module.SetCACertificatesPath(ca_certificates_path)
output_module.SetURLPrefix(elastic_url_prefix)
manager.ArgumentHelperManager.RegisterHelper(ElasticSearchOutputArgumentsHelper)
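# A minimal sketch of how this helper is typically exercised (the parser and the
# `output_module` name below are illustrative, not taken from plaso itself):
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   ElasticSearchOutputArgumentsHelper.AddArguments(parser)
#   options = parser.parse_args(['--index_name', 'my_index', '--flush_interval', '500'])
#   # with `output_module` an elastic.ElasticsearchOutputModule instance:
#   ElasticSearchOutputArgumentsHelper.ParseOptions(options, output_module)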
| rgayon/plaso | plaso/cli/helpers/elastic_output.py | Python | apache-2.0 | 6,071 |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from oslo.serialization import jsonutils
import webob
from nova.api.openstack.compute.contrib import flavor_disabled
from nova.compute import flavors
from nova import test
from nova.tests.unit.api.openstack import fakes
FAKE_FLAVORS = {
'flavor 1': {
"flavorid": '1',
"name": 'flavor 1',
"memory_mb": '256',
"root_gb": '10',
"swap": 512,
"vcpus": 1,
"ephemeral_gb": 1,
"disabled": False,
},
'flavor 2': {
"flavorid": '2',
"name": 'flavor 2',
"memory_mb": '512',
"root_gb": '20',
"swap": None,
"vcpus": 1,
"ephemeral_gb": 1,
"disabled": True,
},
}
def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
return FAKE_FLAVORS['flavor %s' % flavorid]
def fake_get_all_flavors_sorted_list(context=None, inactive=False,
filters=None, sort_key='flavorid',
sort_dir='asc', limit=None, marker=None):
return [
fake_flavor_get_by_flavor_id(1),
fake_flavor_get_by_flavor_id(2)
]
class FlavorDisabledTestV21(test.NoDBTestCase):
base_url = '/v2/fake/flavors'
content_type = 'application/json'
prefix = "OS-FLV-DISABLED:"
def setUp(self):
super(FlavorDisabledTestV21, self).setUp()
ext = ('nova.api.openstack.compute.contrib'
'.flavor_disabled.Flavor_disabled')
self.flags(osapi_compute_extension=[ext])
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(flavors, "get_all_flavors_sorted_list",
fake_get_all_flavors_sorted_list)
self.stubs.Set(flavors,
"get_flavor_by_flavor_id",
fake_flavor_get_by_flavor_id)
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
res = req.get_response(fakes.wsgi_app_v21(init_only=('flavors')))
return res
def _get_flavor(self, body):
return jsonutils.loads(body).get('flavor')
def _get_flavors(self, body):
return jsonutils.loads(body).get('flavors')
def assertFlavorDisabled(self, flavor, disabled):
self.assertEqual(str(flavor.get('%sdisabled' % self.prefix)), disabled)
def test_show(self):
url = self.base_url + '/1'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
self.assertFlavorDisabled(self._get_flavor(res.body), 'False')
def test_detail(self):
url = self.base_url + '/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
flavors = self._get_flavors(res.body)
self.assertFlavorDisabled(flavors[0], 'False')
self.assertFlavorDisabled(flavors[1], 'True')
class FlavorDisabledTestV2(FlavorDisabledTestV21):
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
res = req.get_response(fakes.wsgi_app())
return res
class FlavorDisabledXmlTest(FlavorDisabledTestV2):
content_type = 'application/xml'
prefix = '{%s}' % flavor_disabled.Flavor_disabled.namespace
def _get_flavor(self, body):
return etree.XML(body)
def _get_flavors(self, body):
return etree.XML(body).getchildren()
| luzheqi1987/nova-annotation | nova/tests/unit/api/openstack/compute/contrib/test_flavor_disabled.py | Python | apache-2.0 | 4,015 |
import cProfile
import ProfilingTest
import pstats
import os
print "starting profiling"
path = 'ProfilingTest.stats'
cProfile.run('ProfilingTest.run()', path)
p = pstats.Stats(path)
p.sort_stats('cumulative').print_stats(10)
p.sort_stats('time').print_stats(10)
os.system("gprof2dot -f pstats {0} | dot -Tsvg -o callgraph.svg".format(path))
os.system("rsvg-convert -h 2000 callgraph.svg > callgraph.png".format(path))
| lcameron05/PCWG | Profiling.py | Python | mit | 425 |
from sys import stdin
import string
def reversewords(line):
line.reverse()
print " ".join(line)
map((lambda l: reversewords(string.split(l.strip()))), stdin.readlines())
| aqibmushtaq/codeeval | reverse-words/reverse.py | Python | mit | 180 |
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Tests the pooled server
:license: Apache License 2.0
"""
# JSON-RPC library
from jsonrpclib import ServerProxy
from jsonrpclib.SimpleJSONRPCServer import PooledJSONRPCServer
from jsonrpclib.threadpool import ThreadPool
# Standard library
import random
import threading
import unittest
# ------------------------------------------------------------------------------
def add(a, b):
return a+b
class PooledServerTests(unittest.TestCase):
"""
These tests verify that the pooled server works correctly
"""
def test_default_pool(self, pool=None):
"""
Tests the default pool
"""
# Setup server
server = PooledJSONRPCServer(("localhost", 0), thread_pool=pool)
server.register_function(add)
# Serve in a thread
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
# Find its port
port = server.socket.getsockname()[1]
# Make the client
client = ServerProxy("http://localhost:{0}".format(port))
# Check calls
for _ in range(10):
a, b = random.random(), random.random()
result = client.add(a, b)
self.assertEqual(result, a+b)
# Close server
server.server_close()
thread.join()
def test_custom_pool(self):
"""
Tests the ability to have a custom pool
"""
# Setup the pool
pool = ThreadPool(2)
pool.start()
self.test_default_pool(pool)
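# To run these tests directly, a standard unittest entry point could be added
# (a minimal sketch):
#
#   if __name__ == '__main__':
#       unittest.main()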
| CloudI/CloudI | src/service_api/python/jsonrpclib/tests/test_server.py | Python | mit | 1,597 |
def _mask_border_keypoints(image, keypoints, dist):
"""Removes keypoints that are within dist pixels from the image border."""
width = image.shape[0]
height = image.shape[1]
keypoints_filtering_mask = ((dist - 1 < keypoints[:, 0]) &
(keypoints[:, 0] < width - dist + 1) &
(dist - 1 < keypoints[:, 1]) &
(keypoints[:, 1] < height - dist + 1))
return keypoints_filtering_mask
def pairwise_hamming_distance(array1, array2):
"""**Experimental function**.
Calculate hamming dissimilarity measure between two sets of
vectors.
Parameters
----------
array1 : (P1, D) array
P1 vectors of size D.
array2 : (P2, D) array
P2 vectors of size D.
Returns
-------
distance : (P1, P2) array of dtype float
2D ndarray with value at an index (i, j) representing the hamming
distance in the range [0, 1] between ith vector in array1 and jth
vector in array2.
"""
distance = (array1[:, None] != array2[None]).mean(axis=2)
return distance
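# A minimal usage sketch for pairwise_hamming_distance (values are illustrative):
#
#   import numpy as np
#   a = np.array([[0, 1, 1, 0], [1, 1, 1, 1]])
#   b = np.array([[1, 0, 0, 1]])
#   pairwise_hamming_distance(a, b)   # -> [[1.0], [0.5]]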
| almarklein/scikit-image | skimage/feature/util.py | Python | bsd-3-clause | 1,135 |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import sys
from py4j.java_gateway import get_method
from pyflink.java_gateway import get_gateway
from pyflink.table.table_schema import TableSchema
from pyflink.table.window import GroupWindow
from pyflink.util.utils import to_jarray
if sys.version > '3':
xrange = range
__all__ = ['Table', 'GroupedTable', 'GroupWindowedTable', 'OverWindowedTable', 'WindowGroupedTable']
class Table(object):
"""
A :class:`Table` is the core component of the Table API.
Similar to how the batch and streaming APIs have DataSet and DataStream,
the Table API is built around :class:`Table`.
Use the methods of :class:`Table` to transform data.
Example:
::
>>> env = StreamExecutionEnvironment.get_execution_environment()
>>> env.set_parallelism(1)
>>> t_env = StreamTableEnvironment.create(env)
>>> ...
>>> t_env.register_table_source("source", ...)
>>> t = t_env.scan("source")
>>> t.select(...)
>>> ...
>>> t_env.register_table_sink("result", ...)
>>> t.insert_into("result")
>>> env.execute()
Operations such as :func:`~pyflink.table.Table.join`, :func:`~pyflink.table.Table.select`,
:func:`~pyflink.table.Table.where` and :func:`~pyflink.table.Table.group_by`
take arguments in an expression string. Please refer to the documentation for
the expression syntax.
"""
def __init__(self, j_table):
self._j_table = j_table
def select(self, fields):
"""
Performs a selection operation. Similar to a SQL SELECT statement. The field expressions
can contain complex expressions.
Example:
::
>>> tab.select("key, value + 'hello'")
:param fields: Expression string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.select(fields))
def alias(self, fields):
"""
Renames the fields of the expression result. Use this to disambiguate fields before
        joining two operations.
Example:
::
>>> tab.alias("a, b")
:param fields: Field list expression string.
:return: The result :class:`Table`.
"""
return Table(get_method(self._j_table, "as")(fields))
def filter(self, predicate):
"""
Filters out elements that don't pass the filter predicate. Similar to a SQL WHERE
clause.
Example:
::
>>> tab.filter("name = 'Fred'")
:param predicate: Predicate expression string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.filter(predicate))
def where(self, predicate):
"""
Filters out elements that don't pass the filter predicate. Similar to a SQL WHERE
clause.
Example:
::
>>> tab.where("name = 'Fred'")
:param predicate: Predicate expression string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.where(predicate))
def group_by(self, fields):
"""
Groups the elements on some grouping keys. Use this before a selection with aggregations
to perform the aggregation on a per-group basis. Similar to a SQL GROUP BY statement.
Example:
::
>>> tab.group_by("key").select("key, value.avg")
:param fields: Group keys.
:return: The grouped :class:`Table`.
"""
return GroupedTable(self._j_table.groupBy(fields))
def distinct(self):
"""
Removes duplicate values and returns only distinct (different) values.
Example:
::
>>> tab.select("key, value").distinct()
:return: The result :class:`Table`.
"""
return Table(self._j_table.distinct())
def join(self, right, join_predicate=None):
"""
Joins two :class:`Table`. Similar to a SQL join. The fields of the two joined
operations must not overlap, use :func:`~pyflink.table.Table.alias` to rename fields if
necessary. You can use where and select clauses after a join to further specify the
behaviour of the join.
.. note::
Both tables must be bound to the same :class:`TableEnvironment` .
Example:
::
>>> left.join(right).where("a = b && c > 3").select("a, b, d")
>>> left.join(right, "a = b")
:param right: Right table.
:param join_predicate: Optional, the join predicate expression string.
:return: The result :class:`Table`.
"""
if join_predicate is not None:
return Table(self._j_table.join(right._j_table, join_predicate))
else:
return Table(self._j_table.join(right._j_table))
def left_outer_join(self, right, join_predicate=None):
"""
Joins two :class:`Table`. Similar to a SQL left outer join. The fields of the two joined
operations must not overlap, use :func:`~pyflink.table.Table.alias` to rename fields if
necessary.
.. note::
Both tables must be bound to the same :class:`TableEnvironment` and its
:class:`TableConfig` must have null check enabled (default).
Example:
::
>>> left.left_outer_join(right).select("a, b, d")
>>> left.left_outer_join(right, "a = b").select("a, b, d")
:param right: Right table.
:param join_predicate: Optional, the join predicate expression string.
:return: The result :class:`Table`.
"""
if join_predicate is None:
return Table(self._j_table.leftOuterJoin(right._j_table))
else:
return Table(self._j_table.leftOuterJoin(right._j_table, join_predicate))
def right_outer_join(self, right, join_predicate):
"""
Joins two :class:`Table`. Similar to a SQL right outer join. The fields of the two joined
operations must not overlap, use :func:`~pyflink.table.Table.alias` to rename fields if
necessary.
.. note::
Both tables must be bound to the same :class:`TableEnvironment` and its
:class:`TableConfig` must have null check enabled (default).
Example:
::
>>> left.right_outer_join(right, "a = b").select("a, b, d")
:param right: Right table.
:param join_predicate: The join predicate expression string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.rightOuterJoin(right._j_table, join_predicate))
def full_outer_join(self, right, join_predicate):
"""
Joins two :class:`Table`. Similar to a SQL full outer join. The fields of the two joined
operations must not overlap, use :func:`~pyflink.table.Table.alias` to rename fields if
necessary.
.. note::
Both tables must be bound to the same :class:`TableEnvironment` and its
:class:`TableConfig` must have null check enabled (default).
Example:
::
>>> left.full_outer_join(right, "a = b").select("a, b, d")
:param right: Right table.
:param join_predicate: The join predicate expression string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.fullOuterJoin(right._j_table, join_predicate))
def minus(self, right):
"""
Minus of two :class:`Table` with duplicate records removed.
Similar to a SQL EXCEPT clause. Minus returns records from the left table that do not
exist in the right table. Duplicate records in the left table are returned
exactly once, i.e., duplicates are removed. Both tables must have identical field types.
.. note::
Both tables must be bound to the same :class:`TableEnvironment`.
Example:
::
>>> left.minus(right)
:param right: Right table.
:return: The result :class:`Table`.
"""
return Table(self._j_table.minus(right._j_table))
def minus_all(self, right):
"""
        Minus of two :class:`Table`. Similar to a SQL EXCEPT ALL clause.
        MinusAll returns the records that do not exist in
the right table. A record that is present n times in the left table and m times
in the right table is returned (n - m) times, i.e., as many duplicates as are present
in the right table are removed. Both tables must have identical field types.
.. note::
Both tables must be bound to the same :class:`TableEnvironment`.
Example:
::
>>> left.minus_all(right)
:param right: Right table.
:return: The result :class:`Table`.
"""
return Table(self._j_table.minusAll(right._j_table))
def union(self, right):
"""
Unions two :class:`Table` with duplicate records removed.
Similar to a SQL UNION. The fields of the two union operations must fully overlap.
.. note::
Both tables must be bound to the same :class:`TableEnvironment`.
Example:
::
>>> left.union(right)
:param right: Right table.
:return: The result :class:`Table`.
"""
return Table(self._j_table.union(right._j_table))
def union_all(self, right):
"""
Unions two :class:`Table`. Similar to a SQL UNION ALL. The fields of the two union
operations must fully overlap.
.. note::
Both tables must be bound to the same :class:`TableEnvironment`.
Example:
::
>>> left.union_all(right)
:param right: Right table.
:return: The result :class:`Table`.
"""
return Table(self._j_table.unionAll(right._j_table))
def intersect(self, right):
"""
Intersects two :class:`Table` with duplicate records removed. Intersect returns records
that exist in both tables. If a record is present in one or both tables more than once,
it is returned just once, i.e., the resulting table has no duplicate records. Similar to a
SQL INTERSECT. The fields of the two intersect operations must fully overlap.
.. note::
Both tables must be bound to the same :class:`TableEnvironment`.
Example:
::
>>> left.intersect(right)
:param right: Right table.
:return: The result :class:`Table`.
"""
return Table(self._j_table.intersect(right._j_table))
def intersect_all(self, right):
"""
Intersects two :class:`Table`. IntersectAll returns records that exist in both tables.
If a record is present in both tables more than once, it is returned as many times as it
is present in both tables, i.e., the resulting table might have duplicate records. Similar
to an SQL INTERSECT ALL. The fields of the two intersect operations must fully overlap.
.. note::
Both tables must be bound to the same :class:`TableEnvironment`.
Example:
::
>>> left.intersect_all(right)
:param right: Right table.
:return: The result :class:`Table`.
"""
return Table(self._j_table.intersectAll(right._j_table))
def order_by(self, fields):
"""
Sorts the given :class:`Table`. Similar to SQL ORDER BY.
        The resulting Table is globally sorted across all parallel partitions.
Example:
::
>>> tab.order_by("name.desc")
:param fields: Order fields expression string,
:return: The result :class:`Table`.
"""
return Table(self._j_table.orderBy(fields))
def offset(self, offset):
"""
Limits a sorted result from an offset position.
Similar to a SQL OFFSET clause. Offset is technically part of the Order By operator and
thus must be preceded by it.
:func:`~pyflink.table.Table.offset` can be combined with a subsequent
:func:`~pyflink.table.Table.fetch` call to return n rows after skipping the first o rows.
Example:
::
# skips the first 3 rows and returns all following rows.
>>> tab.order_by("name.desc").offset(3)
# skips the first 10 rows and returns the next 5 rows.
>>> tab.order_by("name.desc").offset(10).fetch(5)
:param offset: Number of records to skip.
:return: The result :class:`Table`.
"""
return Table(self._j_table.offset(offset))
def fetch(self, fetch):
"""
Limits a sorted result to the first n rows.
Similar to a SQL FETCH clause. Fetch is technically part of the Order By operator and
thus must be preceded by it.
        :func:`~pyflink.table.Table.fetch` can be combined with a preceding
        :func:`~pyflink.table.Table.offset` call to return n rows after skipping the first o rows.
Example:
Returns the first 3 records.
::
>>> tab.order_by("name.desc").fetch(3)
Skips the first 10 rows and returns the next 5 rows.
::
>>> tab.order_by("name.desc").offset(10).fetch(5)
:param fetch: The number of records to return. Fetch must be >= 0.
:return: The result :class:`Table`.
"""
return Table(self._j_table.fetch(fetch))
def window(self, window):
"""
Defines group window on the records of a table.
A group window groups the records of a table by assigning them to windows defined by a time
or row interval.
For streaming tables of infinite size, grouping into windows is required to define finite
groups on which group-based aggregates can be computed.
For batch tables of finite size, windowing essentially provides shortcuts for time-based
groupBy.
.. note::
Computing windowed aggregates on a streaming table is only a parallel operation
if additional grouping attributes are added to the
:func:`~pyflink.table.GroupWindowedTable.group_by` clause.
If the :func:`~pyflink.table.GroupWindowedTable.group_by` only references a GroupWindow
alias, the streamed table will be processed by a single task, i.e., with parallelism 1.
Example:
::
>>> tab.window(Tumble.over("10.minutes").on("rowtime").alias("w")) \\
... .group_by("w") \\
... .select("a.sum as a, w.start as b, w.end as c, w.rowtime as d")
:param window: A :class:`pyflink.table.window.GroupWindow` created from
:class:`pyflink.table.window.Tumble`, :class:`pyflink.table.window.Session`
or :class:`pyflink.table.window.Slide`.
:return: A :class:`GroupWindowedTable`.
"""
# type: (GroupWindow) -> GroupWindowedTable
return GroupWindowedTable(self._j_table.window(window._java_window))
def over_window(self, *over_windows):
"""
Defines over-windows on the records of a table.
An over-window defines for each record an interval of records over which aggregation
functions can be computed.
Example:
::
>>> table.window(Over.partition_by("c").order_by("rowTime") \\
... .preceding("10.seconds").alias("ow")) \\
... .select("c, b.count over ow, e.sum over ow")
.. note::
Computing over window aggregates on a streaming table is only a parallel
operation if the window is partitioned. Otherwise, the whole stream will be processed
by a single task, i.e., with parallelism 1.
.. note::
Over-windows for batch tables are currently not supported.
:param over_windows: :class:`OverWindow`s created from :class:`Over`.
:return: A :class:`OverWindowedTable`.
"""
gateway = get_gateway()
window_array = to_jarray(gateway.jvm.OverWindow,
[item._java_over_window for item in over_windows])
return OverWindowedTable(self._j_table.window(window_array))
def add_columns(self, fields):
"""
Adds additional columns. Similar to a SQL SELECT statement. The field expressions
can contain complex expressions, but can not contain aggregations. It will throw an
exception if the added fields already exist.
Example:
::
>>> tab.add_columns("a + 1 as a1, concat(b, 'sunny') as b1")
:param fields: Column list string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.addColumns(fields))
def add_or_replace_columns(self, fields):
"""
Adds additional columns. Similar to a SQL SELECT statement. The field expressions
can contain complex expressions, but can not contain aggregations. Existing fields will be
        replaced if the added column's name is the same as an existing column name. Moreover, if the
        added fields have duplicate field names, then the last one is used.
Example:
::
>>> tab.add_or_replace_columns("a + 1 as a1, concat(b, 'sunny') as b1")
:param fields: Column list string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.addOrReplaceColumns(fields))
def rename_columns(self, fields):
"""
Renames existing columns. Similar to a field alias statement. The field expressions
should be alias expressions, and only the existing fields can be renamed.
Example:
::
>>> tab.rename_columns("a as a1, b as b1")
:param fields: Column list string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.renameColumns(fields))
def drop_columns(self, fields):
"""
Drops existing columns. The field expressions should be field reference expressions.
Example:
::
>>> tab.drop_columns("a, b")
:param fields: Column list string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.dropColumns(fields))
def insert_into(self, table_path, *table_path_continued):
"""
Writes the :class:`Table` to a :class:`TableSink` that was registered under
the specified name. For the path resolution algorithm see
:func:`~TableEnvironment.use_database`.
Example:
::
>>> tab.insert_into("sink")
:param table_path: The first part of the path of the registered :class:`TableSink` to which
the :class:`Table` is written. This is to ensure at least the name of the
:class:`Table` is provided.
:param table_path_continued: The remaining part of the path of the registered
:class:`TableSink` to which the :class:`Table` is written.
"""
gateway = get_gateway()
j_table_path = to_jarray(gateway.jvm.String, table_path_continued)
self._j_table.insertInto(table_path, j_table_path)
def get_schema(self):
"""
Returns the :class:`TableSchema` of this table.
:return: The schema of this table.
"""
return TableSchema(j_table_schema=self._j_table.getSchema())
def print_schema(self):
"""
Prints the schema of this table to the console in a tree format.
"""
self._j_table.printSchema()
def __str__(self):
return self._j_table.toString()
class GroupedTable(object):
"""
A table that has been grouped on a set of grouping keys.
"""
def __init__(self, java_table):
self._j_table = java_table
def select(self, fields):
"""
Performs a selection operation on a grouped table. Similar to an SQL SELECT statement.
The field expressions can contain complex expressions and aggregations.
Example:
::
>>> tab.group_by("key").select("key, value.avg + ' The average' as average")
:param fields: Expression string that contains group keys and aggregate function calls.
:return: The result :class:`Table`.
"""
return Table(self._j_table.select(fields))
class GroupWindowedTable(object):
"""
A table that has been windowed for :class:`pyflink.table.window.GroupWindow`.
"""
def __init__(self, java_group_windowed_table):
self._j_table = java_group_windowed_table
def group_by(self, fields):
"""
Groups the elements by a mandatory window and one or more optional grouping attributes.
The window is specified by referring to its alias.
If no additional grouping attribute is specified and if the input is a streaming table,
the aggregation will be performed by a single task, i.e., with parallelism 1.
Aggregations are performed per group and defined by a subsequent
:func:`~pyflink.table.WindowGroupedTable.select` clause similar to SQL SELECT-GROUP-BY
query.
Example:
::
>>> tab.window(group_window.alias("w")).group_by("w, key").select("key, value.avg")
:param fields: Group keys.
:return: A :class:`WindowGroupedTable`.
"""
return WindowGroupedTable(self._j_table.groupBy(fields))
class WindowGroupedTable(object):
"""
A table that has been windowed and grouped for :class:`pyflink.table.window.GroupWindow`.
"""
def __init__(self, java_window_grouped_table):
self._j_table = java_window_grouped_table
def select(self, fields):
"""
Performs a selection operation on a window grouped table. Similar to an SQL SELECT
statement.
The field expressions can contain complex expressions and aggregations.
Example:
::
>>> window_grouped_table.select("key, window.start, value.avg as valavg")
:param fields: Expression string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.select(fields))
class OverWindowedTable(object):
"""
A table that has been windowed for :class:`pyflink.table.window.OverWindow`.
Unlike group windows, which are specified in the GROUP BY clause, over windows do not collapse
rows. Instead over window aggregates compute an aggregate for each input row over a range of
its neighboring rows.
"""
def __init__(self, java_over_windowed_table):
self._j_table = java_over_windowed_table
def select(self, fields):
"""
Performs a selection operation on a over windowed table. Similar to an SQL SELECT
statement.
The field expressions can contain complex expressions and aggregations.
Example:
::
>>> over_windowed_table.select("c, b.count over ow, e.sum over ow")
:param fields: Expression string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.select(fields))
| shaoxuan-wang/flink | flink-python/pyflink/table/table.py | Python | apache-2.0 | 24,237 |
from netfields import MACAddressField
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from nodeshot.core.base.models import BaseAccessLevel
from choices import INTERFACE_TYPE_CHOICES
from django_hstore.fields import DictionaryField, ReferencesField
from nodeshot.core.base.managers import HStoreAccessLevelManager as InterfaceManager
class Interface(BaseAccessLevel):
""" Interface model """
device = models.ForeignKey('net.Device')
type = models.IntegerField(_('type'), choices=INTERFACE_TYPE_CHOICES, blank=True)
name = models.CharField(_('name'), max_length=10, blank=True, null=True)
mac = MACAddressField(_('mac address'), max_length=17, unique=True, default=None, null=True, blank=True)
mtu = models.IntegerField(_('MTU'), blank=True, null=True, default=1500,
help_text=_('Maximum Trasmission Unit'))
tx_rate = models.IntegerField(_('TX Rate'), null=True, default=None, blank=True)
rx_rate = models.IntegerField(_('RX Rate'), null=True, default=None, blank=True)
# extra data
data = DictionaryField(_('extra data'), null=True, blank=True,
help_text=_('store extra attributes in JSON string'))
shortcuts = ReferencesField(null=True, blank=True)
objects = InterfaceManager()
class Meta:
app_label = 'net'
def __unicode__(self):
return '%s %s' % (self.get_type_display(), self.mac)
def save(self, *args, **kwargs):
"""
Custom save method does the following:
* save shortcuts if HSTORE is enabled
"""
if 'node' not in self.shortcuts:
self.shortcuts['node'] = self.device.node
if 'user' not in self.shortcuts and self.device.node.user:
self.shortcuts['user'] = self.device.node.user
if 'layer' not in self.shortcuts and 'nodeshot.core.layers' in settings.INSTALLED_APPS:
self.shortcuts['layer'] = self.device.node.layer
super(Interface, self).save(*args, **kwargs)
@property
def owner(self):
if 'user' not in self.shortcuts:
if self.device or self.device_id:
self.save()
else:
raise Exception('Instance does not have a device set yet')
return self.shortcuts['user']
@property
def node(self):
if 'node' not in self.shortcuts:
if self.device or self.device_id:
self.save()
else:
raise Exception('Instance does not have a device set yet')
return self.shortcuts['node']
@property
def layer(self):
if 'nodeshot.core.layers' not in settings.INSTALLED_APPS:
return False
if 'layer' not in self.shortcuts:
if self.device or self.device_id:
self.save()
else:
raise Exception('Instance does not have a device set yet')
return self.shortcuts['layer']
@property
def ip_addresses(self):
try:
addresses = self.data.get('ip_addresses', '')
# self.data might be none, hence self.data['ip_addresses'] will raise an exception
except AttributeError:
addresses = ''
return addresses.replace(' ', '').split(',') if addresses else []
@ip_addresses.setter
def ip_addresses(self, value):
""" :param value: a list of ip addresses """
if not isinstance(value, list):
raise ValueError('ip_addresses value must be a list')
        # in some cases self.data might be None, so instantiate an empty dict
if self.data is None:
self.data = {}
# update field
self.data['ip_addresses'] = ', '.join(value)
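    # A minimal usage sketch for the property above (values are illustrative and
    # assume an existing, saved Device instance named `device`):
    #
    #   interface = Interface(device=device, mac='00:11:22:33:44:55')
    #   interface.ip_addresses = ['10.0.0.1', '10.0.0.2']
    #   interface.ip_addresses          # -> ['10.0.0.1', '10.0.0.2']
    #   interface.data['ip_addresses']  # -> '10.0.0.1, 10.0.0.2'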
if 'grappelli' in settings.INSTALLED_APPS:
@staticmethod
def autocomplete_search_fields():
return ('mac__icontains', 'data__icontains')
| SCORE42/nodeshot | nodeshot/networking/net/models/interface.py | Python | gpl-3.0 | 3,968 |
import math, re
from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from pyramid.response import Response
from pyramid.view import view_defaults, view_config
from jinja2 import Environment, Template
from pymongo import MongoClient
client = MongoClient()
db = client.blog
db.authenticate("blog", "harderpass123")
MAX_POST_LENGTH = 200
MAX_PER_PAGE = 10
REGEX_POST_LENGTH = re.compile(".{1,%d}[^ ]*" % MAX_POST_LENGTH);
def get_post(id):
if type(id) == int or id.isdigit():
post = db.posts.find_one({"id": int(id)})
return post
def get_latest():
return db.posts.find_one(sort=[("id", -1)])
def get_count():
return db.posts.count()
@view_config(route_name="index", renderer="template.jinja2")
def index(request):
posts = []
post_count = get_count()
	# use true division so the page count rounds up even on Python 2
	page_count = int(math.ceil(post_count / float(MAX_PER_PAGE)))
current_page = 1
offset = 0
if request.matchdict["offset"].isdigit() and int(request.matchdict["offset"]) > 0:
current_page = int(request.matchdict["offset"])
offset = MAX_PER_PAGE*(current_page-1)
if post_count > 0:
current_id = get_latest()["id"]-offset
while len(posts) < MAX_PER_PAGE and current_id > 0:
post = get_post(current_id)
posts.append(post)
current_id -= 1
for post in posts:
if post and "text" in post:
if len(post["text"]) > MAX_POST_LENGTH or "\n" in post["text"]:
post["text"] = "%s..." % re.match(REGEX_POST_LENGTH, post["text"].split("\n", 1)[0]).group(0)
return dict(header_text="Blog", posts=posts, page_count=page_count, current_page=current_page)
@view_config(route_name="post", renderer="template.jinja2")
def post(request):
id = request.matchdict["id"]
post = get_post(id)
return dict(header_text="Blog", post=post)
if __name__ == '__main__':
config = Configurator()
config.include('pyramid_jinja2')
config.add_static_view(name="static", path="static")
config.add_route("post", "/post/{id}")
config.add_route("index", "/{offset:.*}")
config.scan()
app = config.make_wsgi_app()
server = make_server('127.0.0.1', 8081, app)
server.serve_forever()
| jesopo/python-blog | blog.py | Python | gpl-3.0 | 2,242 |
# -*- coding: utf-8 -*-
# This code was generously pilfered from https://bitbucket.org/Jeffrey/gevent-websocket
# written by Jeffrey Gelens (http://noppo.pro/) and licensed under the Apache License, Version 2.0
import six
import struct
from socket import error as socket_error
from django.core.handlers.wsgi import logger
from .utf8validator import Utf8Validator
from .exceptions import WebSocketError, FrameTooLargeException
class WebSocket(object):
__slots__ = ('_closed', 'stream', 'utf8validator', 'utf8validate_last')
OPCODE_CONTINUATION = 0x00
OPCODE_TEXT = 0x01
OPCODE_BINARY = 0x02
OPCODE_CLOSE = 0x08
OPCODE_PING = 0x09
OPCODE_PONG = 0x0a
def __init__(self, wsgi_input):
self._closed = False
self.stream = Stream(wsgi_input)
self.utf8validator = Utf8Validator()
def __del__(self):
try:
self.close()
except:
# close() may fail if __init__ didn't complete
pass
def _decode_bytes(self, bytestring):
"""
Internal method used to convert the utf-8 encoded bytestring into unicode.
If the conversion fails, the socket will be closed.
"""
if not bytestring:
return u''
try:
return bytestring.decode('utf-8')
except UnicodeDecodeError:
self.close(1007)
raise
def _encode_bytes(self, text):
"""
:returns: The utf-8 byte string equivalent of `text`.
"""
if isinstance(text, six.binary_type):
return text
if not isinstance(text, six.text_type):
text = six.text_type(text or '')
return text.encode('utf-8')
def _is_valid_close_code(self, code):
"""
:returns: Whether the returned close code is a valid hybi return code.
"""
if code < 1000:
return False
if 1004 <= code <= 1006:
return False
if 1012 <= code <= 1016:
return False
if code == 1100:
# not sure about this one but the autobahn fuzzer requires it.
return False
if 2000 <= code <= 2999:
return False
return True
def get_file_descriptor(self):
"""Return the file descriptor for the given websocket"""
return self.stream.fileno
@property
def closed(self):
return self._closed
def handle_close(self, header, payload):
"""
Called when a close frame has been decoded from the stream.
:param header: The decoded `Header`.
:param payload: The bytestring payload associated with the close frame.
"""
if not payload:
self.close(1000, None)
return
if len(payload) < 2:
raise WebSocketError('Invalid close frame: {0} {1}'.format(header, payload))
code = struct.unpack('!H', str(payload[:2]))[0]
payload = payload[2:]
if payload:
validator = Utf8Validator()
val = validator.validate(payload)
if not val[0]:
raise UnicodeError
if not self._is_valid_close_code(code):
raise WebSocketError('Invalid close code {0}'.format(code))
self.close(code, payload)
def handle_ping(self, header, payload):
self.send_frame(payload, self.OPCODE_PONG)
def handle_pong(self, header, payload):
pass
def read_frame(self):
"""
Block until a full frame has been read from the socket.
This is an internal method as calling this will not cleanup correctly
if an exception is called. Use `receive` instead.
:return: The header and payload as a tuple.
"""
header = Header.decode_header(self.stream)
if header.flags:
raise WebSocketError
if not header.length:
return header, ''
try:
payload = self.stream.read(header.length)
except socket_error:
payload = ''
except Exception:
# TODO log out this exception
payload = ''
if len(payload) != header.length:
raise WebSocketError('Unexpected EOF reading frame payload')
if header.mask:
payload = header.unmask_payload(payload)
return header, payload
def validate_utf8(self, payload):
# Make sure the frames are decodable independently
self.utf8validate_last = self.utf8validator.validate(payload)
if not self.utf8validate_last[0]:
raise UnicodeError("Encountered invalid UTF-8 while processing "
"text message at payload octet index "
"{0:d}".format(self.utf8validate_last[3]))
def read_message(self):
"""
Return the next text or binary message from the socket.
This is an internal method as calling this will not cleanup correctly
if an exception is called. Use `receive` instead.
"""
opcode = None
message = ""
while True:
header, payload = self.read_frame()
f_opcode = header.opcode
if f_opcode in (self.OPCODE_TEXT, self.OPCODE_BINARY):
# a new frame
if opcode:
raise WebSocketError("The opcode in non-fin frame is expected to be zero, got {0!r}".format(f_opcode))
# Start reading a new message, reset the validator
self.utf8validator.reset()
self.utf8validate_last = (True, True, 0, 0)
opcode = f_opcode
elif f_opcode == self.OPCODE_CONTINUATION:
if not opcode:
raise WebSocketError("Unexpected frame with opcode=0")
elif f_opcode == self.OPCODE_PING:
self.handle_ping(header, payload)
continue
elif f_opcode == self.OPCODE_PONG:
self.handle_pong(header, payload)
continue
elif f_opcode == self.OPCODE_CLOSE:
self.handle_close(header, payload)
return
else:
raise WebSocketError("Unexpected opcode={0!r}".format(f_opcode))
if opcode == self.OPCODE_TEXT:
self.validate_utf8(payload)
message += payload
if header.fin:
break
if opcode == self.OPCODE_TEXT:
self.validate_utf8(message)
return message
else:
return bytearray(message)
def receive(self):
"""
Read and return a message from the stream. If `None` is returned, then
the socket is considered closed/errored.
"""
if self._closed:
raise WebSocketError("Connection is already closed")
try:
return self.read_message()
except UnicodeError as e:
logger.info('websocket.receive: UnicodeError {}'.format(e))
self.close(1007)
except WebSocketError as e:
logger.info('websocket.receive: WebSocketError {}'.format(e))
self.close(1002)
except Exception as e:
logger.info('websocket.receive: Unknown error {}'.format(e))
raise e
def flush(self):
"""
Flush a websocket. In this implementation intentionally it does nothing.
"""
pass
def send_frame(self, message, opcode):
"""
Send a frame over the websocket with message as its payload
"""
if self._closed:
raise WebSocketError("Connection is already closed")
if opcode == self.OPCODE_TEXT:
message = self._encode_bytes(message)
elif opcode == self.OPCODE_BINARY:
message = six.binary_type(message)
header = Header.encode_header(True, opcode, '', len(message), 0)
try:
self.stream.write(header + message)
except socket_error:
raise WebSocketError("Socket is dead")
def send(self, message, binary=False):
"""
Send a frame over the websocket with message as its payload
"""
if binary is None:
binary = not isinstance(message, six.string_types)
opcode = self.OPCODE_BINARY if binary else self.OPCODE_TEXT
try:
self.send_frame(message, opcode)
        except WebSocketError as e:
logger.info('Socket is dead {}'.format(e))
#raise WebSocketError("Socket is dead %s" % e.message)
def close(self, code=1000, message=''):
"""
Close the websocket and connection, sending the specified code and
message. The underlying socket object is _not_ closed, that is the
responsibility of the initiator.
"""
try:
message = self._encode_bytes(message)
self.send_frame(
struct.pack('!H%ds' % len(message), code, message),
opcode=self.OPCODE_CLOSE)
except WebSocketError:
# Failed to write the closing frame but it's ok because we're
# closing the socket anyway.
logger.debug("Failed to write closing frame -> closing socket")
finally:
logger.debug("Closed WebSocket")
self._closed = True
self.stream = None
class Stream(object):
"""
Wraps the handler's socket/rfile attributes and makes it in to a file like
object that can be read from/written to by the lower level websocket api.
"""
__slots__ = ('read', 'write', 'fileno')
def __init__(self, wsgi_input):
if six.PY2:
self.read = wsgi_input._sock.recv
self.write = wsgi_input._sock.sendall
else:
self.read = wsgi_input.raw._sock.recv
self.write = wsgi_input.raw._sock.sendall
self.fileno = wsgi_input.fileno()
class Header(object):
__slots__ = ('fin', 'mask', 'opcode', 'flags', 'length')
FIN_MASK = 0x80
OPCODE_MASK = 0x0f
MASK_MASK = 0x80
LENGTH_MASK = 0x7f
RSV0_MASK = 0x40
RSV1_MASK = 0x20
RSV2_MASK = 0x10
# bitwise mask that will determine the reserved bits for a frame header
HEADER_FLAG_MASK = RSV0_MASK | RSV1_MASK | RSV2_MASK
def __init__(self, fin=0, opcode=0, flags=0, length=0):
self.mask = ''
self.fin = fin
self.opcode = opcode
self.flags = flags
self.length = length
def mask_payload(self, payload):
payload = bytearray(payload)
mask = bytearray(self.mask)
for i in xrange(self.length):
payload[i] ^= mask[i % 4]
return str(payload)
# it's the same operation
unmask_payload = mask_payload
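    # Masking is a symmetric XOR with the 4-byte mask, so applying it twice
    # restores the payload (illustrative, Python 2 byte strings):
    #
    #   h = Header(length=3)
    #   h.mask = '\x01\x02\x03\x04'
    #   h.mask_payload(h.mask_payload('abc')) == 'abc'   # True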
def __repr__(self):
return ("<Header fin={0} opcode={1} length={2} flags={3} at "
"0x{4:x}>").format(self.fin, self.opcode, self.length,
self.flags, id(self))
@classmethod
def decode_header(cls, stream):
"""
Decode a WebSocket header.
:param stream: A file like object that can be 'read' from.
:returns: A `Header` instance.
"""
read = stream.read
data = read(2)
if len(data) != 2:
raise WebSocketError("Unexpected EOF while decoding header")
first_byte, second_byte = struct.unpack('!BB', data)
header = cls(
fin=first_byte & cls.FIN_MASK == cls.FIN_MASK,
opcode=first_byte & cls.OPCODE_MASK,
flags=first_byte & cls.HEADER_FLAG_MASK,
length=second_byte & cls.LENGTH_MASK)
has_mask = second_byte & cls.MASK_MASK == cls.MASK_MASK
if header.opcode > 0x07:
if not header.fin:
raise WebSocketError('Received fragmented control frame: {0!r}'.format(data))
# Control frames MUST have a payload length of 125 bytes or less
if header.length > 125:
raise FrameTooLargeException('Control frame cannot be larger than 125 bytes: {0!r}'.format(data))
if header.length == 126:
# 16 bit length
data = read(2)
if len(data) != 2:
raise WebSocketError('Unexpected EOF while decoding header')
header.length = struct.unpack('!H', data)[0]
elif header.length == 127:
# 64 bit length
data = read(8)
if len(data) != 8:
raise WebSocketError('Unexpected EOF while decoding header')
header.length = struct.unpack('!Q', data)[0]
if has_mask:
mask = read(4)
if len(mask) != 4:
raise WebSocketError('Unexpected EOF while decoding header')
header.mask = mask
return header
@classmethod
def encode_header(cls, fin, opcode, mask, length, flags):
"""
Encodes a WebSocket header.
:param fin: Whether this is the final frame for this opcode.
:param opcode: The opcode of the payload, see `OPCODE_*`
:param mask: Whether the payload is masked.
:param length: The length of the frame.
:param flags: The RSV* flags.
:return: A bytestring encoded header.
"""
first_byte = opcode
second_byte = 0
extra = ''
if fin:
first_byte |= cls.FIN_MASK
if flags & cls.RSV0_MASK:
first_byte |= cls.RSV0_MASK
if flags & cls.RSV1_MASK:
first_byte |= cls.RSV1_MASK
if flags & cls.RSV2_MASK:
first_byte |= cls.RSV2_MASK
# now deal with length complexities
if length < 126:
second_byte += length
elif length <= 0xffff:
second_byte += 126
extra = struct.pack('!H', length)
elif length <= 0xffffffffffffffff:
second_byte += 127
extra = struct.pack('!Q', length)
else:
raise FrameTooLargeException
if mask:
second_byte |= cls.MASK_MASK
extra += mask
return chr(first_byte) + chr(second_byte) + extra
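# A minimal sketch of the header produced for a small text frame (illustrative):
#
#   Header.encode_header(fin=True, opcode=WebSocket.OPCODE_TEXT,
#                        mask='', length=5, flags=0)
#   # -> '\x81\x05' (FIN bit + text opcode, unmasked, 5-byte payload length)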
| dalou/django-flow | django_flow/redis_ws/websocket.py | Python | bsd-3-clause | 14,168 |
# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
# module imports
from .py3k import asbytes
from .filename_parser import types_filenames, splitext_addext
from . import volumeutils as vu
from . import spm2analyze as spm2
from . import nifti1
from .freesurfer import MGHImage
from .fileholders import FileHolderError
from .spatialimages import ImageFileError
from .imageclasses import class_map, ext_map
def load(filename):
''' Load file given filename, guessing at file type
Parameters
----------
filename : string
specification of file to load
Returns
-------
img : ``SpatialImage``
Image of guessed type
'''
froot, ext, trailing = splitext_addext(filename, ('.gz', '.bz2'))
try:
img_type = ext_map[ext]
except KeyError:
raise ImageFileError('Cannot work out file type of "%s"' %
filename)
if ext in ('.nii', '.mnc', '.mgh', '.mgz'):
klass = class_map[img_type]['class']
else:
# might be nifti pair or analyze of some sort
files_types = (('image','.img'), ('header','.hdr'))
filenames = types_filenames(filename, files_types)
hdr = nifti1.Nifti1Header.from_fileobj(
vu.allopen(filenames['header']),
check=False)
if hdr['magic'] in (asbytes('ni1'), asbytes('n+1')):
# allow goofy nifti single magic for pair
klass = nifti1.Nifti1Pair
else:
klass = spm2.Spm2AnalyzeImage
return klass.from_filename(filename)
def save(img, filename):
''' Save an image to file adapting format to `filename`
Parameters
----------
img : ``SpatialImage``
image to save
filename : str
filename (often implying filenames) to which to save `img`.
Returns
-------
None
'''
try:
img.to_filename(filename)
except ImageFileError:
pass
else:
return
froot, ext, trailing = splitext_addext(filename, ('.gz', '.bz2'))
img_type = ext_map[ext]
klass = class_map[img_type]['class']
converted = klass.from_image(img)
converted.to_filename(filename)
def read_img_data(img, prefer='scaled'):
""" Read data from image associated with files
Parameters
----------
img : ``SpatialImage``
Image with valid image file in ``img.file_map``. Unlike the
``img.get_data()`` method, this function returns the data read
from the image file, as specified by the *current* image header
and *current* image files.
prefer : str, optional
Can be 'scaled' - in which case we return the data with the
scaling suggested by the format, or 'unscaled', in which case we
return, if we can, the raw data from the image file, without the
scaling applied.
Returns
-------
arr : ndarray
array as read from file, given parameters in header
Notes
-----
Summary: please use the ``get_data`` method of `img` instead of this
function unless you are sure what you are doing.
In general, you will probably prefer ``prefer='scaled'``, because
this gives the data as the image format expects to return it.
Use `prefer` == 'unscaled' with care; the modified Analyze-type
formats such as SPM formats, and nifti1, specify that the image data
array is given by the raw data on disk, multiplied by a scalefactor
and maybe with the addition of a constant. This function, with
    ``unscaled``, returns the data on the disk, without these
    format-specific scalings applied. Please use this function only if
you absolutely need the unscaled data, and the magnitude of the
data, as given by the scalefactor, is not relevant to your
application. The Analyze-type formats have a single scalefactor +/-
offset per image on disk. If you do not care about the absolute
values, and will be removing the mean from the data, then the
unscaled values will have preserved intensity ratios compared to the
mean-centered scaled data. However, this is not necessarily true of
other formats with more complicated scaling - such as MINC.
"""
image_fileholder = img.file_map['image']
try:
fileobj = image_fileholder.get_prepare_fileobj()
except FileHolderError:
raise ImageFileError('No image file specified for this image')
if prefer not in ('scaled', 'unscaled'):
raise ValueError('Invalid string "%s" for "prefer"' % prefer)
hdr = img.get_header()
if prefer == 'unscaled':
try:
return hdr.raw_data_from_fileobj(fileobj)
except AttributeError:
pass
return hdr.data_from_fileobj(fileobj)
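# A minimal usage sketch for the functions above (the filename is illustrative):
#
#   img = load('example.nii')
#   scaled = read_img_data(img)                   # data as the format defines it
#   raw = read_img_data(img, prefer='unscaled')   # raw on-disk values, no scaling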
| ME-ICA/me-ica | meica.libs/nibabel/loadsave.py | Python | lgpl-2.1 | 5,048 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import pipes
import sys
from telemetry.core import util
util.AddDirToPythonPath(util.GetChromiumSrcDir(), 'build', 'android')
from pylib.device import device_errors # pylint: disable=F0401
def _QuoteIfNeeded(arg):
# Properly escape "key=valueA valueB" to "key='valueA valueB'"
# Values without spaces, or that seem to be quoted are left untouched.
# This is required so CommandLine.java can parse valueB correctly rather
# than as a separate switch.
params = arg.split('=', 1)
if len(params) != 2:
return arg
key, values = params
if ' ' not in values:
return arg
if values[0] in '"\'' and values[-1] == values[0]:
return arg
return '%s=%s' % (key, pipes.quote(values))
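# Illustrative sketch (not part of the original module): expected behaviour of
# _QuoteIfNeeded on a few representative switches.
#
#     _QuoteIfNeeded('--enable-logging')           -> '--enable-logging'
#     _QuoteIfNeeded('--args=valueA valueB')       -> "--args='valueA valueB'"
#     _QuoteIfNeeded("--args='already quoted'")    -> returned unchanged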
class SetUpCommandLineFlags(object):
"""A context manager for setting up the android command line flags.
This provides a readable way of using the android command line backend class.
Example usage:
with android_command_line_backend.SetUpCommandLineFlags(
device, backend_settings, startup_args):
# Something to run while the command line flags are set appropriately.
"""
def __init__(self, device, backend_settings, startup_args):
self._android_command_line_backend = _AndroidCommandLineBackend(
device, backend_settings, startup_args)
def __enter__(self):
self._android_command_line_backend.SetUpCommandLineFlags()
def __exit__(self, *args):
self._android_command_line_backend.RestoreCommandLineFlags()
class _AndroidCommandLineBackend(object):
"""The backend for providing command line flags on android.
There are command line flags that Chromium accept in order to enable
particular features or modify otherwise default functionality. To set the
flags for Chrome on Android, specific files on the device must be updated
with the flags to enable. This class provides a wrapper around this
functionality.
"""
def __init__(self, device, backend_settings, startup_args):
self._device = device
self._backend_settings = backend_settings
self._startup_args = startup_args
self._saved_command_line_file_contents = None
@property
def command_line_file(self):
return self._backend_settings.GetCommandLineFile(self._device.IsUserBuild())
def SetUpCommandLineFlags(self):
args = [self._backend_settings.pseudo_exec_name]
args.extend(self._startup_args)
content = ' '.join(_QuoteIfNeeded(arg) for arg in args)
try:
# Save the current command line to restore later, except if it appears to
# be a Telemetry created one. This is to prevent a common bug where
# --host-resolver-rules borks people's browsers if something goes wrong
# with Telemetry.
self._saved_command_line_file_contents = self._ReadFile()
if '--host-resolver-rules' in self._saved_command_line_file_contents:
self._saved_command_line_file_contents = None
except device_errors.CommandFailedError:
self._saved_command_line_file_contents = None
try:
self._WriteFile(content)
except device_errors.CommandFailedError as exc:
logging.critical(exc)
logging.critical('Cannot set Chrome command line. '
'Fix this by flashing to a userdebug build.')
sys.exit(1)
def RestoreCommandLineFlags(self):
if self._saved_command_line_file_contents is None:
self._RemoveFile()
else:
self._WriteFile(self._saved_command_line_file_contents)
def _ReadFile(self):
return self._device.ReadFile(self.command_line_file, as_root=True)
def _WriteFile(self, contents):
self._device.WriteFile(self.command_line_file, contents, as_root=True)
def _RemoveFile(self):
self._device.RunShellCommand(['rm', '-f', self.command_line_file],
as_root=True, check_return=True)
| SaschaMester/delicium | tools/telemetry/telemetry/internal/backends/android_command_line_backend.py | Python | bsd-3-clause | 3,990 |
import re
import json
import os
import logging
from logging.handlers import RotatingFileHandler
from logging import Formatter
from ngram import NGram
from hashlib import md5
from flask import Flask, send_from_directory, send_file, render_template, request, redirect
import imgur
from memegenerator import gen_meme
APP_ROOT = os.path.dirname(__file__)
MEME_PATH = os.path.join(APP_ROOT, 'static/memes/')
TEMPLATES_PATH = os.path.join(APP_ROOT, 'templates/memes/')
IMAGE_EXTENSIONS = ('png', 'jpeg', 'jpg', 'gif')
SUPPORTED_EXTENSIONS = IMAGE_EXTENSIONS + ('json',)
ERROR_BACKGROUND = 'blank-colored-background'
app = Flask(__name__)
# Logging
handler = RotatingFileHandler('urlmeme.log', maxBytes=10000, backupCount=1)
handler.setFormatter(Formatter('%(asctime)s %(levelname)s: %(message)s'))
handler.setLevel(logging.INFO)
app.logger.setLevel(logging.INFO)
app.logger.addHandler(handler)
# Maps meme's file name to its common names
with open(os.path.join(APP_ROOT, 'memes.json')) as data_file:
MEMES = json.load(data_file)
def replace_underscore(string):
return re.sub(r'_', ' ', string)
def tokenize(string):
return re.sub(r' ', '', string.lower())
def parse_meme_url(path):
"""
Given a URL path, returns a named tuple representing the meme in question
(meme_name, top_text, bottom_text, extension)
"""
ext = 'jpg' # Default extension
if path.endswith(tuple('.%s' % e for e in SUPPORTED_EXTENSIONS)):
path, ext = os.path.splitext(path)
ext = ext[1:]
path = replace_underscore(path)
path_parts = path.split('/')[:3]
while(len(path_parts) < 3):
path_parts.append('')
path_parts.append(ext)
return tuple(path_parts)
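# Illustrative sketch (not part of the original module; the paths are made up):
#
#     parse_meme_url('one_does_not_simply/top_text/bottom_text.png')
#     # -> ('one does not simply', 'top text', 'bottom text', 'png')
#     parse_meme_url('success_kid')
#     # -> ('success kid', '', '', 'jpg')   # missing parts default to ''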
def guess_meme_image(meme_name):
'''
Guess which meme image they mean by finding the alias with greatest ngram
similarity
'''
meme_name = tokenize(meme_name)
best = ''
best_score = None
for guess_image, names in MEMES.items():
for guess in names:
guess = tokenize(guess)
score = NGram.compare(guess, meme_name)
if best_score is None or score > best_score:
best_score = score
best = guess_image
app.logger.debug('New best meme for "%s": "%s" (Score: %s)', meme_name, guess, score)
app.logger.info('Picked meme "%s" for name "%s" (Score: %s)', best, meme_name, best_score)
return best
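# Illustrative note (the alias is hypothetical -- real aliases come from
# memes.json): both the requested name and each alias are lower-cased and
# stripped of spaces by tokenize() before NGram.compare(), so a URL name like
# "One_Does_Not_Simply" (underscores already replaced by parse_meme_url)
# matches an alias spelled "one does not simply" with a perfect score of 1.0.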
def derive_meme_path(meme_image, top, bottom, ext):
""" Generate a hash filename for this meme image """
token = "%s|%s|%s" % (meme_image, top, bottom)
meme_id = md5(token.encode('utf-8')).hexdigest()
file_path = '%s.%s' % (meme_id, ext)
return MEME_PATH + file_path
def meme_image_path(meme_image, top, bottom, ext):
file_path = derive_meme_path(meme_image, top, bottom, ext)
app.logger.debug('Looking for file: "%s"', file_path)
try:
open(file_path)
app.logger.info('Found meme in cache: "%s"', file_path)
except IOError:
app.logger.info('Generating "%s"', file_path)
meme_path = os.path.join(TEMPLATES_PATH, meme_image)
gen_meme(meme_path + '.jpg', top, bottom, file_path)
return file_path
def error_image_response(top, bottom, status=500):
app.logger.error('Sending error response: %s, %s (%s)', top, bottom, status)
image_path = meme_image_path(ERROR_BACKGROUND, top, bottom, 'jpg')
return send_file(image_path), status
@app.route("/")
def help():
return render_template('help.html', base_url=request.base_url)
@app.route('/favicon.ico')
def favicon():
path = os.path.join(app.root_path, 'static')
mimetype = 'image/vnd.microsoft.icon'
return send_from_directory(path, 'favicon.ico', mimetype=mimetype)
@app.route('/<path:path>')
def meme(path):
app.logger.info('New request for meme: "%s"', path)
meme_name, top, bottom, ext = parse_meme_url(path)
meme_image = guess_meme_image(meme_name)
app.logger.info('Meme: "%s" / "%s" / "%s" . "%s"', meme_image, top, bottom, ext)
if ext == 'json':
app.logger.info('Serving JSON')
return json.dumps({'image': meme_image, 'top': top, 'bottom': bottom})
elif ext in IMAGE_EXTENSIONS:
image_path = meme_image_path(meme_image, top, bottom, ext)
host = request.args.get('host', None)
if host == 'imgur':
try:
imgur_url = imgur.upload(image_path)
app.logger.info('Uploaded: "%s" as "%s"', image_path, imgur_url)
app.logger.info('Redirecting to: "%s"', imgur_url)
return redirect(imgur_url, code=301)
except imgur.ImgurException as e:
                return error_image_response('Error uploading "%s" to Imgur' % image_path, e.message)
app.logger.info('Serving: "%s"', image_path)
return send_file(image_path)
if __name__ == "__main__":
""" Only runs in dev """
app.logger.setLevel(logging.DEBUG)
app.run(debug=True)
| monu27mr/gret | url.py | Python | mit | 4,989 |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
BASE_SCRIPTS= [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'wallet-hd.py',
'walletbackup.py',
# vv Tests less than 5m vv
'p2p-fullblocktest.py',
'fundrawtransaction.py',
'p2p-compactblocks.py',
'segwit.py',
# vv Tests less than 2m vv
'wallet.py',
'wallet-accounts.py',
'p2p-segwit.py',
'wallet-dump.py',
'listtransactions.py',
# vv Tests less than 60s vv
'sendheaders.py',
'zapwallettxes.py',
'importmulti.py',
'mempool_limit.py',
'merkle_blocks.py',
'receivedby.py',
'abandonconflict.py',
'bip68-112-113-p2p.py',
'rawtransactions.py',
'address_types.py',
'reindex.py',
# vv Tests less than 30s vv
'keypool-topup.py',
'zmq_test.py',
'bitcoin_cli.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'txn_clone.py --segwit',
'getchaintips.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'multiwallet.py',
'httpbasics.py',
'multi_rpc.py',
'proxy_test.py',
'signrawtransactions.py',
'disconnect_ban.py',
'decodescript.py',
'blockchain.py',
'deprecated_rpc.py',
'disablewallet.py',
'net.py',
'keypool.py',
'p2p-mempool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
'invalidtxrequest.py',
'p2p-versionbits-warning.py',
'preciousblock.py',
'importprunedfunds.py',
'signmessages.py',
'nulldummy.py',
'import-rescan.py',
'mining.py',
'bumpfee.py',
'rpcnamedargs.py',
'listsinceblock.py',
'p2p-leaktests.py',
'wallet-encryption.py',
'bipdersig-p2p.py',
'bip65-cltv-p2p.py',
'uptime.py',
'resendwallettransactions.py',
'minchainwork.py',
'p2p-fingerprint.py',
'uacomment.py',
'p2p-acceptblock.py',
'feature_logging.py',
'node_network_limited.py',
'conf_args.py',
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'pruning.py',
# vv Tests less than 20m vv
'smartfees.py',
# vv Tests less than 5m vv
'maxuploadtarget.py',
'mempool_packages.py',
'dbcrash.py',
# vv Tests less than 2m vv
'bip68-sequence.py',
'getblocktemplate_longpoll.py',
'p2p-timeouts.py',
# vv Tests less than 60s vv
'bip9-softforks.py',
'p2p-feefilter.py',
'rpcbind_test.py',
# vv Tests less than 30s vv
'assumevalid.py',
'example_test.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'notifications.py',
'invalidateblock.py',
'replace-by-fee.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
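# Illustrative invocations (run from the source tree root; the flags are the
# ones defined in main() below):
#
#   test/functional/test_runner.py                      # run the base suite
#   test/functional/test_runner.py --extended           # base + extended tests
#   test/functional/test_runner.py wallet-hd.py -j 8    # one script, 8 parallel jobs
#   test/functional/test_runner.py --exclude pruning.py,dbcrash.py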
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/bitcoin_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_bitcoind):
print("No functional tests to run. Wallet, utils, and bitcoind must all be enabled")
print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
sys.exit(0)
# Build list of tests
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
        tests = [re.sub(r"\.py$", "", t) + ".py" for t in tests]
test_list = []
for t in tests:
if t in ALL_SCRIPTS:
test_list.append(t)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], t))
else:
# No individual tests have been specified.
# Run all base tests, and optionally run extended tests.
test_list = BASE_SCRIPTS
if args.extended:
# place the EXTENDED_SCRIPTS first since the three longest ones
# are there and the list is shorter
test_list = EXTENDED_SCRIPTS + test_list
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
        tests_excl = [re.sub(r"\.py$", "", t) + ".py" for t in args.exclude.split(',')]
for exclude_test in tests_excl:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h'])
sys.exit(0)
check_script_list(config["environment"]["SRCDIR"])
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args, args.combinedlogslen)
def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=[], combined_logs_len=0):
# Warn if bitcoind is already running (unix only)
try:
if subprocess.check_output(["pidof", "bitcoind"]) is not None:
print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
#Set env vars
if "BITCOIND" not in os.environ:
os.environ["BITCOIND"] = build_dir + '/src/bitcoind' + exeext
os.environ["BITCOINCLI"] = build_dir + '/src/bitcoin-cli' + exeext
tests_dir = src_dir + '/test/functional/'
flags = ["--srcdir={}/src".format(build_dir)] + args
flags.append("--cachedir=%s" % cache_dir)
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
try:
subprocess.check_output([tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
except Exception as e:
print(e.output)
raise e
#Run Tests
job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
time0 = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
for _ in range(len(test_list)):
test_result, testdir, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
if test_result.status == "Passed":
logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
elif test_result.status == "Skipped":
logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
else:
print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs, _ = subprocess.Popen([os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
print_results(test_results, max_len_name, (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=lambda result: result.name.lower())
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
portseed = len(self.test_list) + self.portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = t.split()
testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
self.jobs.append((t,
time.time(),
subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
testdir,
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, testdir, log_out, log_err) = j
if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60:
                    # In travis, timeout individual tests after 20 minutes (to stop tests hanging
                    # and not providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(j)
return TestResult(name, status, int(time.time() - time0)), testdir, stdout, stderr
print('.', end='', flush=True)
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_list(src_dir):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
    not being run by test_runner.py."""
script_dir = src_dir + '/test/functional/'
python_files = set([t for t in os.listdir(script_dir) if t[-3:] == ".py"])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if os.getenv('TRAVIS') == 'true':
# On travis this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
        # This is shared from `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
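# Illustrative sketch of the bookkeeping described above (file contents are
# made up): if rpc_interface.txt lists
#     getblockcount
#     getbestblockhash
#     stop
# and the per-test coverage.* files only ever record getblockcount, then
# _get_uncovered_rpc_commands() returns {'getbestblockhash', 'stop'} and
# report_rpc_coverage() prints those two commands as uncovered.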
if __name__ == '__main__':
main()
| laudaa/bitcoin | test/functional/test_runner.py | Python | mit | 21,842 |
# Copyright (c) 2015 Tanium Inc
#
# Generated from console.wsdl version 0.0.1
#
#
from .base import BaseType
class PluginSchedule(BaseType):
_soap_tag = 'plugin_schedule'
def __init__(self):
BaseType.__init__(
self,
simple_properties={'id': int,
'name': str,
'plugin_name': str,
'plugin_bundle': str,
'plugin_server': str,
'start_hour': int,
'end_hour': int,
'start_date': int,
'end_date': int,
'run_on_days': str,
'run_interval_seconds': int,
'enabled': int,
'deleted_flag': int,
'input': str,
'last_run_time': str,
'last_exit_code': int,
'last_run_text': str},
complex_properties={'arguments': PluginArgumentList,
'user': User,
'last_run_sql': PluginSql},
list_properties={},
)
self.id = None
self.name = None
self.plugin_name = None
self.plugin_bundle = None
self.plugin_server = None
self.start_hour = None
self.end_hour = None
self.start_date = None
self.end_date = None
self.run_on_days = None
self.run_interval_seconds = None
self.enabled = None
self.deleted_flag = None
self.input = None
self.last_run_time = None
self.last_exit_code = None
self.last_run_text = None
self.arguments = None
self.user = None
self.last_run_sql = None
from plugin_argument_list import PluginArgumentList
from user import User
from plugin_sql import PluginSql
| tanium/pytan | lib/taniumpy/object_types/plugin_schedule.py | Python | mit | 1,932 |
from __future__ import division # allows floating point division from integers
import globalVars as gv
import datetime
import os
# Resistance is in ohms per 100 feet
# for 0 to 40 gauge wire
wireResistance = {0: 0.009827, 1: 0.01239, 2: 0.01563, 3: 0.01970, 4: 0.02485, 5: 0.03133, 6: 0.03951, 7: 0.04982,
8: 0.06282, 9: 0.07921, 10: 0.09989, 11: 0.1260, 12: 0.1588, 13: 0.2003, 14: 0.2525, 15: 0.3184,
16: 0.4016, 17: 0.5064, 18: 0.6385, 19: 0.8051, 20: 1.015, 21: 1.280, 22: 1.614, 23: 2.036,
24: 2.567, 25: 3.237, 26: 4.081, 27: 5.147, 28: 6.490, 29: 8.183, 30: 10.32, 31: 13.01, 32: 16.41,
33: 20.69, 34: 26.09, 35: 32.90, 36: 41.48, 37: 52.31, 38: 65.96, 39: 83.18, 40: 104.90}
def computeWireResistance(wireGauge, wireLength):
if (wireGauge <= 40) and (wireGauge >= 0) and (wireLength >= 0):
        # table value is ohms per 100 ft (= 30.48 m): convert to ohms per
        # metre, then scale by the requested length in metres
res = wireLength * wireResistance[wireGauge] / 30.48
        res = float(int(res * 1000)) / 1000  # truncate to 3 decimal places
return res
else:
return -1
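# Illustrative values (not part of the original module): the table gives 30 AWG
# as 10.32 ohms per 100 ft (30.48 m), so a single metre works out to roughly
# 10.32 / 30.48 ~= 0.34 ohm:
#     computeWireResistance(30, 1) # ~0.34 ohm per metre of 30 AWG
#     computeWireResistance(50, 1) # -1, gauge outside the 0-40 table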
voltage = gv.voltage
amperage = gv.amperage
gauge = gv.gauge
def design():
# By Ohms Law
goalResistance = float(voltage/amperage)
# Get how many meters are needed
wireLength = goalResistance/computeWireResistance(gauge, 1)
# print gv.printableWidth
# print gv.printableLength
printableLength = str(gv.printableLength)
printableWidth = str(gv.printableWidth)
nHoles = int(float(round((int(float(printableLength))-30)/10.5)))
print nHoles
# Make dateString and add it to the directory string
date = datetime.date.today().strftime("%m_%d_%Y")
printerDir = gv.printerDir+"Printer_"+date+"/"
filename = os.path.join(printerDir, 'Parts', 'Heated Bed Wire Diagram.svg')
print filename
svg = open(filename, 'w')
svg.write('<svg width="'+printableWidth+'mm" height="'+printableLength+'mm">')
svg.write('<circle cx="15mm" cy="15mm" r="1.5mm" fill="white" stroke="black" stroke-width=".5mm" />')
svg.write('<circle cx="'+str(int(float(printableWidth))-15)+'mm" cy="15mm" r="1.5mm" fill="white" stroke="black" stroke-width=".5mm" />')
for x in range(1,nHoles):
svg.write('<circle cx="15mm" cy="'+str(15+(10.5*x))+'mm" r="1.5mm" fill="white" stroke="black" stroke-width=".5mm" />')
svg.write('<circle cx="'+str(int(float(printableWidth))-15)+'mm" cy="'+str(15+(10.5*x))+'mm" r="1.5mm" fill="white" stroke="black" stroke-width=".5mm" />')
svg.write('<text x = "30mm" y = "30mm" fill = "black" font-size = "50">'+str(round(wireLength, 1))+' Meters of Wire Needed</text>')
svg.write('</svg>')
svg.close()
| masterperson40/retr3d | old/heatedbed.py | Python | gpl-3.0 | 2,664 |
# utility functions
import fcntl, os, subprocess
class PipeException(Exception):
def __init__(self, message, errno):
self.errno = errno
message = '%s: %d, %s' % (message, errno, os.strerror(errno))
Exception.__init__(self, message)
class Lock(object):
"""advisory lock"""
def __init__(self, filename):
"""lock using filename for synchronization"""
self.filename = filename + '.lock'
self.fd = None
self.lock()
def __del__(self):
self.unlock()
def lock(self):
if self.fd:
return
self.fd = open(self.filename, 'w')
fcntl.lockf(self.fd, fcntl.LOCK_EX)
def unlock(self):
if not self.fd:
return
fcntl.lockf(self.fd, fcntl.LOCK_UN)
self.fd = None
try:
os.remove(self.filename)
except OSError:
# harmless race
pass
def canonifymac(mac):
return ':'.join(['%02x' % int(field, 16) for field in mac.split(':')])
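# Illustrative example (not part of the original module): each octet is parsed
# as hex, zero-padded and lower-cased, e.g.
#     canonifymac('0:1A:2b:3:04:5') # -> '00:1a:2b:03:04:05'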
def checkpid(pid):
"""return True if pid is live"""
try:
os.kill(pid, 0)
return True
except OSError:
return False
def runcmd(args, cwd=None):
# TODO: stdin handling
if type(args) == str:
args = args.split(' ')
try:
proc = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True,
cwd=cwd)
stdout = proc.stdout.read()
stderr = proc.stderr.read()
proc.wait()
if proc.returncode:
print ' '.join(args)
errmsg = stderr.strip()
print errmsg
raise PipeException('%s failed (errmsg: %s)' % (args[0], errmsg),
proc.returncode)
return stdout
except (OSError, IOError), inst:
raise PipeException('could not run %s' % args[0], inst.errno)
def modprobe(modname):
"""attempt to load kernel module modname"""
try:
runcmd(['modprobe', '-q', modname])
return True
except PipeException:
return False
| YongMan/Xen-4.3.1 | tools/python/xen/remus/util.py | Python | gpl-2.0 | 2,139 |
# -*- coding:utf-8 -*-
# 206. Reverse Linked List QuestionEditorial Solution My Submissions
# Difficulty: Easy
# Reverse a singly linked list.
#
# click to show more hints.
#
# Hint:
# A linked list can be reversed either iteratively or recursively. Could you implement both?
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def reverseList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head is None:
return head
cur = head.next
pre = head
head.next = None
while cur is not None:
tmp = cur.next
cur.next = pre
pre = cur
cur = tmp
return pre
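# Illustrative sketch (not part of the submitted solution): the recursive
# variant mentioned in the hint. It reverses the remainder of the list first,
# then hooks the current head onto the end of that reversed remainder.
class RecursiveSolution(object):
    def reverseList(self, head):
        """
        :type head: ListNode
        :rtype: ListNode
        """
        if head is None or head.next is None:
            return head
        new_head = self.reverseList(head.next)  # reverse everything after head
        head.next.next = head                   # old successor points back to head
        head.next = None                        # head becomes the new tail
        return new_head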
if __name__ == '__main__':
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
head.next.next.next = ListNode(4)
s = Solution()
new_list = s.reverseList(head)
print new_list.val
| Baz2013/blog_demo | leetcode/easy/reverse_linked_list.py | Python | gpl-3.0 | 1,048 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Run data tests for cclib."""
import importlib
import logging
import os
import sys
import unittest
import cclib
__filedir__ = os.path.realpath(os.path.dirname(__file__))
# We need this in Python3 for importing things from the same directory
# within the unit test files.
sys.path.insert(1, os.path.join(__filedir__, 'data'))
parser_names = [
"ADF", "DALTON", "FChk", "GAMESS", "GAMESSUK", "Gaussian", "Jaguar",
"Molpro", "Molcas", "MOPAC", "NWChem", "ORCA", "Psi4", "QChem",
"Turbomole",
]
all_parsers = {name: getattr(cclib.parser, name) for name in parser_names}
# Not used currently, but keeping in a list to keep track of which parsers
# are in the legacy bin.
legacy_parser_names = ["Psi3"]
module_names = [
"SP", "SPun", "GeoOpt", "Basis", "Core", # Basic calculations.
"MP", "CC", "CI", "TD", "TDun", # Post-SCF calculations.
"BOMD", "NMR", "Polar", "Scan", "vib" # Other property calculations.
]
all_modules = {tn: importlib.import_module('.data.test' + tn, package='test')
for tn in module_names}
def gettestdata():
"""Return a dict of the test file data."""
testdatadir = os.path.dirname(os.path.realpath(__file__))
with open(testdatadir + '/testdata') as testdatafile:
lines = testdatafile.readlines()
# Remove blank lines and those starting with '#'.
lines = [line for line in lines if (line.strip() and line[0] != '#')]
# Remove comment at end of lines (everything after a '#').
lines = [line.split('#')[0] for line in lines]
# Transform remaining lines into dictionaries.
cols = [line.split() for line in lines]
labels = ('module', 'parser', 'class', 'subdir', 'files')
testdata = [dict(zip(labels, (c[0], c[1], c[2], c[3], c[4:]))) for c in cols]
return testdata
def get_program_dir(parser_name):
"""Return a directory name given a parser name.
In at least one case (GAMESS-UK) the directory is named differently.
"""
if parser_name == "GAMESSUK":
return "GAMESS-UK"
return parser_name
def getdatafile(parser, subdir, files, stream=None, loglevel=logging.ERROR, datatype=None):
"""Returns a parsed logfile.
Inputs:
parser - a logfile parser class (subclass of LogFile)
subdir - subdirectory containing data files (program version)
files - data filename(s)
stream - where to log to (sys.stdout by default)
loglevel - what level to log at
datatype - ccData or child class
Outputs:
data - the resulting data object
logfile - the parser object used for parsing
"""
# Convert any string into the parser object we will be using.
if isinstance(parser, str):
parser = all_parsers[parser]
datadir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "data"))
programdir = os.path.join(get_program_dir(parser.__name__), subdir)
inputs = [os.path.join(datadir, programdir, fn) for fn in files]
# We should be able to pass a list of length one here, but for some reason
# this does not work with some parsers and we get errors.
if len(inputs) == 1:
inputs = inputs[0]
stream = stream or sys.stdout
logfile = parser(inputs, logstream=stream, loglevel=loglevel,
datatype=datatype or cclib.parser.data.ccData)
data = logfile.parse()
return data, logfile
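# Illustrative usage sketch (the data file is one referenced in the visual
# tests below):
#
#     data, logfile = getdatafile("Gaussian", "basicGaussian16", ["dvb_gopt.out"])
#     print(data.natom, data.homos) # attributes parsed onto the ccData object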
def ccdata_getattribute_with_coverage(self, attr):
"""A bookkeeping version of __getattribute__ for ccData objects."""
if attr != '_attrlist' and attr in self._attrlist:
if not hasattr(self, 'coverage'):
self.coverage = {}
self.coverage[attr] = self.coverage.get(attr, 0) + 1
return object.__getattribute__(self, attr)
class DataSuite:
"""Suite containing data (logfile) tests in cclib.
This is supposed to represent a single run of the entire data test suite in cclib or
a subset of it. The main functions are to load data, run test cases in the data/
subdirectory, and do some basic bookkeeping.
"""
def __init__(self, parsers, modules, terse=False, silent=False, loglevel=logging.ERROR, stream=sys.stdout):
self.parsers = parsers
self.modules = modules
self.terse = terse or silent
self.silent = silent
self.loglevel = loglevel
self.stream = stream
# Load the test data and filter with parsers and modules.
self.testdata = gettestdata()
self.testdata = [td for td in self.testdata if td['parser'] in self.parsers]
self.testdata = [td for td in self.testdata if td['module'] in self.modules]
# We want to gather the unit tests and results in several lists/dicts,
# in order to easily generate summaries at the end.
self.errors = []
self.failures = []
self.alltests = []
self.perpackage = {p: [0, 0, 0, 0] for p in self.parsers}
def testall(self):
"""Run all unittests in all modules.
Run unit tests for all or a subset of parsers and modules. Arguments:
stream - stream used for all output
"""
stream_test = self.stream
if self.terse:
devnull = open(os.devnull, 'w')
stream_test = devnull
for td in self.testdata:
module = self.modules[td['module']]
parser = self.parsers[td['parser']]
test = getattr(module, td['class'])
description = ''
if not self.silent:
print("", file=stream_test)
description = "%s/%s: %s" % (td['subdir'], ",".join(td['files']), test.__doc__)
print("*** %s ***" % description, file=self.stream)
test.data, test.logfile = getdatafile(
parser, td['subdir'], td['files'], stream=self.stream, loglevel=self.loglevel,
datatype=test.datatype if hasattr(test, 'datatype') else None
)
# By overriding __getattribute__ temporarily with a custom method, we collect
# coverage information for data attributes while the tests are run. This slightly
# hacky approach is very convenient since it is self-contained and we don't
# need to worry about it when writing the actual test cases.
test.data.__class__.__getattribute__ = ccdata_getattribute_with_coverage
# Here we actually run the tests for this line in testdata.
myunittest = unittest.makeSuite(test)
results = unittest.TextTestRunner(stream=stream_test, verbosity=2).run(myunittest)
# We don't want to collect coverage stats beyond this point, so set __getattribute__
# back to its original value. Note that we are setting the class method.
test.data.__class__.__getattribute__ = object.__getattribute__
self.perpackage[td['parser']][0] += results.testsRun
self.perpackage[td['parser']][1] += len(results.errors)
self.perpackage[td['parser']][2] += len(results.failures)
self.perpackage[td['parser']][3] += len(getattr(results, 'skipped', []))
self.alltests.append(test)
self.errors.extend([description + "\n" + "".join(map(str, e)) for e in results.errors])
self.failures.extend([description + "\n" + "".join(map(str, f)) for f in results.failures])
if self.terse:
devnull.close()
return self.errors or self.failures
def summary(self):
"""Prints a summary of the suite after it has been run."""
if self.errors:
print("\n********* SUMMARY OF ERRORS *********\n", file=self.stream)
print("\n".join(self.errors), file=self.stream)
if self.failures:
print("\n********* SUMMARY OF FAILURES *********\n", file=self.stream)
print("\n".join(self.failures), file=self.stream)
print("\n********* SUMMARY PER PACKAGE ****************", file=self.stream)
names = sorted(self.perpackage.keys())
total = [0, 0, 0, 0]
print(" "*14, "\t".join(["Total", "Passed", "Failed", "Errors", "Skipped"]), file=self.stream)
fmt = "%3d\t%3d\t%3d\t%3d\t%3d"
for name in names:
l = self.perpackage[name]
args = (l[0], l[0]-l[1]-l[2]-l[3], l[2], l[1], l[3])
print(name.ljust(15), fmt % args, file=self.stream)
for i in range(4):
total[i] += l[i]
print("\n********* SUMMARY OF EVERYTHING **************", file=self.stream)
print("TOTAL: %d\tPASSED: %d\tFAILED: %d\tERRORS: %d\tSKIPPED: %d" \
%(total[0], total[0]-(total[1]+total[2]+total[3]), total[2], total[1], total[3]), file=self.stream)
def visualtests(self, stream=sys.stdout):
"""These are not formal tests -- but they should be eyeballed."""
parsers_to_test = {
'ADF2013.01' : getdatafile('ADF', "basicADF2013.01", ["dvb_gopt.adfout"])[0],
'DALTON2015' : getdatafile('DALTON', "basicDALTON-2015", ["dvb_gopt_ks.out"])[0],
'Firefly8.0' : getdatafile('GAMESS', "basicFirefly8.0", ["dvb_gopt_a.out"])[0],
'Gaussian16' : getdatafile('Gaussian', "basicGaussian16", ["dvb_gopt.out"])[0],
'GAMESS-US2018' : getdatafile('GAMESS', "basicGAMESS-US2018", ["dvb_gopt_a.out"])[0],
'Jaguar8.0' : getdatafile('Jaguar', "basicJaguar8.3", ["dvb_gopt_ks.out"])[0],
'Molpro2012' : getdatafile('Molpro', "basicMolpro2012", ["dvb_gopt.out", "dvb_gopt.log"])[0],
# Note that it doesn't make sense to put MOPAC here, as it
# is a semiempirical-only program.
'NWChem6.5' : getdatafile('NWChem', "basicNWChem6.5", ["dvb_gopt_ks.out"])[0],
'ORCA4.2' : getdatafile('ORCA', "basicORCA4.2", ["dvb_gopt.out"])[0],
'Psi4-1.3.1' : getdatafile('Psi4', "basicPsi4-1.3.1", ["dvb_gopt_rks.out"])[0],
'QChem5.4' : getdatafile('QChem', "basicQChem5.4", ["dvb_gopt.out"])[0],
}
parser_names = sorted(parsers_to_test.keys())
output = [parsers_to_test[pn] for pn in parser_names]
print("\n*** Visual tests ***", file=self.stream)
print("MO energies of optimised dvb", file=self.stream)
print(" ", "".join(["%-12s" % pn for pn in parser_names]), file=self.stream)
print("HOMO", " ".join(["%+9.4f" % out.moenergies[0][out.homos[0]] for out in output]), file=self.stream)
print("LUMO", " ".join(["%+9.4f" % out.moenergies[0][out.homos[0]+1] for out in output]), file=self.stream)
print("H-L ", " ".join(["%9.4f" % (out.moenergies[0][out.homos[0]+1]-out.moenergies[0][out.homos[0]],) for out in output]), file=self.stream)
def test_all(parsers, modules, terse, silent, loglevel, summary, visual_tests):
parsers = parsers or all_parsers
modules = modules or all_modules
data_suite = DataSuite(parsers, modules, terse=terse, silent=silent, loglevel=loglevel)
errors_or_failures = data_suite.testall()
if summary and not silent:
data_suite.summary()
if visual_tests and not silent:
data_suite.visualtests()
if errors_or_failures:
sys.exit(1)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--debug", action="store_true")
parser.add_argument("--terse", action="store_true")
parser.add_argument("--silent", action="store_true")
parser.add_argument(
"parser_or_module",
nargs="*",
help="Limit the test to the packages/parsers passed as arguments. "
"No arguments implies all parsers."
)
args = parser.parse_args()
loglevel = logging.DEBUG if args.debug else logging.ERROR
# No matching parsers/modules implies all of them.
parsers = {p: all_parsers[p] for p in parser_names
if p in args.parser_or_module} or None
modules = {m: all_modules[m] for m in module_names
if m in args.parser_or_module} or None
test_all(parsers, modules, terse=args.terse, silent=args.silent, loglevel=loglevel, summary=True, visual_tests=True)
| cclib/cclib | test/test_data.py | Python | bsd-3-clause | 12,415 |
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pprint
import re
import socket
import sys
import types
import uuid
import eventlet
import greenlet
from oslo.config import cfg
import six
from six import moves
from oslo.log.openstack.common import excutils
from oslo.log.openstack.common.gettextutils import _, _LE, _LI
from oslo.log.openstack.common import importutils
from oslo.log.openstack.common import jsonutils
from oslo.log.openstack.common.rpc import common as rpc_common
zmq = importutils.try_import('eventlet.green.zmq')
# Aliased here for convenience; these are not modified.
pformat = pprint.pformat
Timeout = eventlet.timeout.Timeout
LOG = rpc_common.LOG
RemoteError = rpc_common.RemoteError
RPCException = rpc_common.RPCException
zmq_opts = [
cfg.StrOpt('rpc_zmq_bind_address', default='*',
help='ZeroMQ bind address. Should be a wildcard (*), '
'an ethernet interface, or IP. '
'The "host" option should point or resolve to this '
'address.'),
# The module.Class to use for matchmaking.
cfg.StrOpt(
'rpc_zmq_matchmaker',
default=('oslo.log.openstack.common.rpc.'
'matchmaker.MatchMakerLocalhost'),
help='MatchMaker driver',
),
# The following port is unassigned by IANA as of 2012-05-21
cfg.IntOpt('rpc_zmq_port', default=9501,
help='ZeroMQ receiver listening port'),
cfg.IntOpt('rpc_zmq_contexts', default=1,
help='Number of ZeroMQ contexts, defaults to 1'),
cfg.IntOpt('rpc_zmq_topic_backlog',
help='Maximum number of ingress messages to locally buffer '
'per topic. Default is unlimited.'),
cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
help='Directory for holding IPC sockets'),
cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
help='Name of this node. Must be a valid hostname, FQDN, or '
'IP address. Must match "host" option, if running Nova.')
]
CONF = cfg.CONF
CONF.register_opts(zmq_opts)
ZMQ_CTX = None # ZeroMQ Context, must be global.
matchmaker = None  # memoized matchmaker object
def _serialize(data):
"""Serialization wrapper.
We prefer using JSON, but it cannot encode all types.
Error if a developer passes us bad data.
"""
try:
return jsonutils.dumps(data, ensure_ascii=True)
except TypeError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("JSON serialization failed."))
def _deserialize(data):
"""Deserialization wrapper."""
LOG.debug("Deserializing: %s", data)
return jsonutils.loads(data)
class ZmqSocket(object):
"""A tiny wrapper around ZeroMQ.
Simplifies the send/recv protocol and connection management.
Can be used as a Context (supports the 'with' statement).
"""
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
self.sock = _get_ctxt().socket(zmq_type)
self.addr = addr
self.type = zmq_type
self.subscriptions = []
# Support failures on sending/receiving on wrong socket type.
self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
self.can_sub = zmq_type in (zmq.SUB, )
# Support list, str, & None for subscribe arg (cast to list)
do_sub = {
list: subscribe,
str: [subscribe],
type(None): []
}[type(subscribe)]
for f in do_sub:
self.subscribe(f)
str_data = {'addr': addr, 'type': self.socket_s(),
'subscribe': subscribe, 'bind': bind}
LOG.debug("Connecting to %(addr)s with %(type)s", str_data)
LOG.debug("-> Subscribed to %(subscribe)s", str_data)
LOG.debug("-> bind: %(bind)s", str_data)
try:
if bind:
self.sock.bind(addr)
else:
self.sock.connect(addr)
except Exception:
raise RPCException(_("Could not open socket."))
def socket_s(self):
"""Get socket type as string."""
t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
'DEALER')
return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]
def subscribe(self, msg_filter):
"""Subscribe."""
if not self.can_sub:
raise RPCException("Cannot subscribe on this socket.")
LOG.debug("Subscribing to %s", msg_filter)
try:
self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
except Exception:
return
self.subscriptions.append(msg_filter)
def unsubscribe(self, msg_filter):
"""Unsubscribe."""
if msg_filter not in self.subscriptions:
return
self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
self.subscriptions.remove(msg_filter)
def close(self):
if self.sock is None or self.sock.closed:
return
# We must unsubscribe, or we'll leak descriptors.
if self.subscriptions:
for f in self.subscriptions:
try:
self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
except Exception:
pass
self.subscriptions = []
try:
# Default is to linger
self.sock.close()
except Exception:
# While this is a bad thing to happen,
# it would be much worse if some of the code calling this
# were to fail. For now, lets log, and later evaluate
# if we can safely raise here.
LOG.error(_LE("ZeroMQ socket could not be closed."))
self.sock = None
def recv(self, **kwargs):
if not self.can_recv:
raise RPCException(_("You cannot recv on this socket."))
return self.sock.recv_multipart(**kwargs)
def send(self, data, **kwargs):
if not self.can_send:
raise RPCException(_("You cannot send on this socket."))
self.sock.send_multipart(data, **kwargs)
class ZmqClient(object):
"""Client for ZMQ sockets."""
def __init__(self, addr):
self.outq = ZmqSocket(addr, zmq.PUSH, bind=False)
def cast(self, msg_id, topic, data, envelope):
msg_id = msg_id or 0
if not envelope:
self.outq.send(map(bytes,
(msg_id, topic, 'cast', _serialize(data))))
return
rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
zmq_msg = moves.reduce(lambda x, y: x + y, rpc_envelope.items())
self.outq.send(map(bytes,
(msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))
def close(self):
self.outq.close()
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call."""
def __init__(self, **kwargs):
self.replies = []
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['replies'] = self.replies
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False):
if ending:
return
self.replies.append(reply)
@classmethod
def marshal(self, ctx):
ctx_data = ctx.to_dict()
return _serialize(ctx_data)
@classmethod
def unmarshal(self, data):
return RpcContext.from_dict(_deserialize(data))
class InternalContext(object):
"""Used by ConsumerBase as a private context for - methods."""
def __init__(self, proxy):
self.proxy = proxy
self.msg_waiter = None
def _get_response(self, ctx, proxy, topic, data):
"""Process a curried message and cast the result to topic."""
LOG.debug("Running func with context: %s", ctx.to_dict())
data.setdefault('version', None)
data.setdefault('args', {})
try:
result = proxy.dispatch(
ctx, data['version'], data['method'],
data.get('namespace'), **data['args'])
return ConsumerBase.normalize_reply(result, ctx.replies)
except greenlet.GreenletExit:
# ignore these since they are just from shutdowns
pass
except rpc_common.ClientException as e:
LOG.debug("Expected exception during message handling (%s)" %
e._exc_info[1])
return {'exc':
rpc_common.serialize_remote_exception(e._exc_info,
log_failure=False)}
except Exception:
LOG.error(_LE("Exception during message handling"))
return {'exc':
rpc_common.serialize_remote_exception(sys.exc_info())}
def reply(self, ctx, proxy,
msg_id=None, context=None, topic=None, msg=None):
"""Reply to a casted call."""
# NOTE(ewindisch): context kwarg exists for Grizzly compat.
# this may be able to be removed earlier than
# 'I' if ConsumerBase.process were refactored.
if type(msg) is list:
payload = msg[-1]
else:
payload = msg
response = ConsumerBase.normalize_reply(
self._get_response(ctx, proxy, topic, payload),
ctx.replies)
LOG.debug("Sending reply")
_multi_send(_cast, ctx, topic, {
'method': '-process_reply',
'args': {
'msg_id': msg_id, # Include for Folsom compat.
'response': response
}
}, _msg_id=msg_id)
class ConsumerBase(object):
"""Base Consumer."""
def __init__(self):
self.private_ctx = InternalContext(None)
@classmethod
def normalize_reply(self, result, replies):
# TODO(ewindisch): re-evaluate and document this method.
if isinstance(result, types.GeneratorType):
return list(result)
elif replies:
return replies
else:
return [result]
def process(self, proxy, ctx, data):
data.setdefault('version', None)
data.setdefault('args', {})
        # Methods starting with '-' are
        # processed internally (not a valid method name).
method = data.get('method')
if not method:
LOG.error(_LE("RPC message did not include method."))
return
# Internal method
# uses internal context for safety.
if method == '-reply':
self.private_ctx.reply(ctx, proxy, **data['args'])
return
proxy.dispatch(ctx, data['version'],
data['method'], data.get('namespace'), **data['args'])
class ZmqBaseReactor(ConsumerBase):
"""A consumer class implementing a centralized casting broker (PULL-PUSH).
Used for RoundRobin requests.
"""
def __init__(self, conf):
super(ZmqBaseReactor, self).__init__()
self.proxies = {}
self.threads = []
self.sockets = []
self.subscribe = {}
self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)
def register(self, proxy, in_addr, zmq_type_in,
in_bind=True, subscribe=None):
LOG.info(_LI("Registering reactor"))
if zmq_type_in not in (zmq.PULL, zmq.SUB):
raise RPCException("Bad input socktype")
# Items push in.
inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
subscribe=subscribe)
self.proxies[inq] = proxy
self.sockets.append(inq)
LOG.info(_LI("In reactor registered"))
def consume_in_thread(self):
@excutils.forever_retry_uncaught_exceptions
def _consume(sock):
LOG.info(_LI("Consuming socket"))
while True:
self.consume(sock)
for k in self.proxies.keys():
self.threads.append(
self.pool.spawn(_consume, k)
)
def wait(self):
for t in self.threads:
t.wait()
def close(self):
for s in self.sockets:
s.close()
for t in self.threads:
t.kill()
class ZmqProxy(ZmqBaseReactor):
"""A consumer class implementing a topic-based proxy.
Forwards to IPC sockets.
"""
def __init__(self, conf):
super(ZmqProxy, self).__init__(conf)
pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\'))
self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep)))
self.topic_proxy = {}
def consume(self, sock):
ipc_dir = CONF.rpc_zmq_ipc_dir
data = sock.recv(copy=False)
topic = data[1].bytes
if topic.startswith('fanout~'):
sock_type = zmq.PUB
topic = topic.split('.', 1)[0]
elif topic.startswith('zmq_replies'):
sock_type = zmq.PUB
else:
sock_type = zmq.PUSH
if topic not in self.topic_proxy:
def publisher(waiter):
LOG.info(_LI("Creating proxy for topic: %s"), topic)
try:
# The topic is received over the network,
# don't trust this input.
if self.badchars.search(topic) is not None:
emsg = _("Topic contained dangerous characters.")
LOG.warn(emsg)
raise RPCException(emsg)
out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
(ipc_dir, topic),
sock_type, bind=True)
except RPCException:
waiter.send_exception(*sys.exc_info())
return
self.topic_proxy[topic] = eventlet.queue.LightQueue(
CONF.rpc_zmq_topic_backlog)
self.sockets.append(out_sock)
# It takes some time for a pub socket to open,
# before we can have any faith in doing a send() to it.
if sock_type == zmq.PUB:
eventlet.sleep(.5)
waiter.send(True)
while(True):
data = self.topic_proxy[topic].get()
out_sock.send(data, copy=False)
wait_sock_creation = eventlet.event.Event()
eventlet.spawn(publisher, wait_sock_creation)
try:
wait_sock_creation.wait()
except RPCException:
LOG.error(_LE("Topic socket file creation failed."))
return
try:
self.topic_proxy[topic].put_nowait(data)
except eventlet.queue.Full:
LOG.error(_LE("Local per-topic backlog buffer full for topic "
"%(topic)s. Dropping message.") % {'topic': topic})
def consume_in_thread(self):
"""Runs the ZmqProxy service."""
ipc_dir = CONF.rpc_zmq_ipc_dir
consume_in = "tcp://%s:%s" % \
(CONF.rpc_zmq_bind_address,
CONF.rpc_zmq_port)
consumption_proxy = InternalContext(None)
try:
os.makedirs(ipc_dir)
except os.error:
if not os.path.isdir(ipc_dir):
with excutils.save_and_reraise_exception():
LOG.error(_LE("Required IPC directory does not exist at"
" %s") % (ipc_dir, ))
try:
self.register(consumption_proxy,
consume_in,
zmq.PULL)
except zmq.ZMQError:
if os.access(ipc_dir, os.X_OK):
with excutils.save_and_reraise_exception():
LOG.error(_LE("Permission denied to IPC directory at"
" %s") % (ipc_dir, ))
with excutils.save_and_reraise_exception():
LOG.error(_LE("Could not create ZeroMQ receiver daemon. "
"Socket may already be in use."))
super(ZmqProxy, self).consume_in_thread()
def unflatten_envelope(packenv):
"""Unflattens the RPC envelope.
Takes a list and returns a dictionary.
i.e. [1,2,3,4] => {1: 2, 3: 4}
"""
i = iter(packenv)
h = {}
try:
while True:
k = six.next(i)
h[k] = six.next(i)
except StopIteration:
return h
class ZmqReactor(ZmqBaseReactor):
"""A consumer class implementing a consumer for messages.
Can also be used as a 1:1 proxy
"""
def __init__(self, conf):
super(ZmqReactor, self).__init__(conf)
def consume(self, sock):
# TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
LOG.debug("CONSUMER RECEIVED DATA: %s", data)
proxy = self.proxies[sock]
if data[2] == 'cast': # Legacy protocol
packenv = data[3]
ctx, msg = _deserialize(packenv)
request = rpc_common.deserialize_msg(msg)
ctx = RpcContext.unmarshal(ctx)
elif data[2] == 'impl_zmq_v2':
packenv = data[4:]
msg = unflatten_envelope(packenv)
request = rpc_common.deserialize_msg(msg)
# Unmarshal only after verifying the message.
ctx = RpcContext.unmarshal(data[3])
else:
LOG.error(_LE("ZMQ Envelope version unsupported or unknown."))
return
self.pool.spawn_n(self.process, proxy, ctx, request)
class Connection(rpc_common.Connection):
"""Manages connections and threads."""
def __init__(self, conf):
self.topics = []
self.reactor = ZmqReactor(conf)
def create_consumer(self, topic, proxy, fanout=False):
# Register with matchmaker.
_get_matchmaker().register(topic, CONF.rpc_zmq_host)
# Subscription scenarios
if fanout:
sock_type = zmq.SUB
subscribe = ('', fanout)[type(fanout) == str]
topic = 'fanout~' + topic.split('.', 1)[0]
else:
sock_type = zmq.PULL
subscribe = None
topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
if topic in self.topics:
LOG.info(_LI("Skipping topic registration. Already registered."))
return
# Receive messages from (local) proxy
inaddr = "ipc://%s/zmq_topic_%s" % \
(CONF.rpc_zmq_ipc_dir, topic)
LOG.debug("Consumer is a zmq.%s",
['PULL', 'SUB'][sock_type == zmq.SUB])
self.reactor.register(proxy, inaddr, sock_type,
subscribe=subscribe, in_bind=False)
self.topics.append(topic)
def close(self):
_get_matchmaker().stop_heartbeat()
for topic in self.topics:
_get_matchmaker().unregister(topic, CONF.rpc_zmq_host)
self.reactor.close()
self.topics = []
def wait(self):
self.reactor.wait()
def consume_in_thread(self):
_get_matchmaker().start_heartbeat()
self.reactor.consume_in_thread()
def _cast(addr, context, topic, msg, timeout=None, envelope=False,
_msg_id=None):
timeout_cast = timeout or CONF.rpc_cast_timeout
payload = [RpcContext.marshal(context), msg]
with Timeout(timeout_cast, exception=rpc_common.Timeout):
try:
conn = ZmqClient(addr)
# assumes cast can't return an exception
conn.cast(_msg_id, topic, payload, envelope)
except zmq.ZMQError:
raise RPCException("Cast failed. ZMQ Socket Exception")
finally:
if 'conn' in vars():
conn.close()
def _call(addr, context, topic, msg, timeout=None,
envelope=False):
# timeout_response is how long we wait for a response
timeout = timeout or CONF.rpc_response_timeout
# The msg_id is used to track replies.
msg_id = uuid.uuid4().hex
# Replies always come into the reply service.
reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
LOG.debug("Creating payload")
# Curry the original request into a reply method.
mcontext = RpcContext.marshal(context)
payload = {
'method': '-reply',
'args': {
'msg_id': msg_id,
'topic': reply_topic,
# TODO(ewindisch): safe to remove mcontext in I.
'msg': [mcontext, msg]
}
}
LOG.debug("Creating queue socket for reply waiter")
# Messages arriving async.
# TODO(ewindisch): have reply consumer with dynamic subscription mgmt
with Timeout(timeout, exception=rpc_common.Timeout):
try:
msg_waiter = ZmqSocket(
"ipc://%s/zmq_topic_zmq_replies.%s" %
(CONF.rpc_zmq_ipc_dir,
CONF.rpc_zmq_host),
zmq.SUB, subscribe=msg_id, bind=False
)
LOG.debug("Sending cast")
_cast(addr, context, topic, payload, envelope)
LOG.debug("Cast sent; Waiting reply")
# Blocks until receives reply
msg = msg_waiter.recv()
LOG.debug("Received message: %s", msg)
LOG.debug("Unpacking response")
if msg[2] == 'cast': # Legacy version
raw_msg = _deserialize(msg[-1])[-1]
elif msg[2] == 'impl_zmq_v2':
rpc_envelope = unflatten_envelope(msg[4:])
raw_msg = rpc_common.deserialize_msg(rpc_envelope)
else:
raise rpc_common.UnsupportedRpcEnvelopeVersion(
_("Unsupported or unknown ZMQ envelope returned."))
responses = raw_msg['args']['response']
# ZMQError trumps the Timeout error.
except zmq.ZMQError:
raise RPCException("ZMQ Socket Error")
except (IndexError, KeyError):
raise RPCException(_("RPC Message Invalid."))
finally:
if 'msg_waiter' in vars():
msg_waiter.close()
# It seems we don't need to do all of the following,
# but perhaps it would be useful for multicall?
# One effect of this is that we're checking all
# responses for Exceptions.
for resp in responses:
if isinstance(resp, types.DictType) and 'exc' in resp:
raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])
return responses[-1]
def _multi_send(method, context, topic, msg, timeout=None,
envelope=False, _msg_id=None):
"""Wraps the sending of messages.
Dispatches to the matchmaker and sends message to all relevant hosts.
"""
conf = CONF
LOG.debug("%(msg)s" % {'msg': ' '.join(map(pformat, (topic, msg)))})
queues = _get_matchmaker().queues(topic)
LOG.debug("Sending message(s) to: %s", queues)
# Don't stack if we have no matchmaker results
if not queues:
LOG.warn(_("No matchmaker results. Not casting."))
# While not strictly a timeout, callers know how to handle
# this exception and a timeout isn't too big a lie.
raise rpc_common.Timeout(_("No match from matchmaker."))
# This supports brokerless fanout (addresses > 1)
for queue in queues:
(_topic, ip_addr) = queue
_addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)
if method.__name__ == '_cast':
eventlet.spawn_n(method, _addr, context,
_topic, msg, timeout, envelope,
_msg_id)
return
return method(_addr, context, _topic, msg, timeout,
envelope)
def create_connection(conf, new=True):
return Connection(conf)
def multicall(conf, *args, **kwargs):
"""Multiple calls."""
return _multi_send(_call, *args, **kwargs)
def call(conf, *args, **kwargs):
"""Send a message, expect a response."""
data = _multi_send(_call, *args, **kwargs)
return data[-1]
def cast(conf, *args, **kwargs):
"""Send a message expecting no reply."""
_multi_send(_cast, *args, **kwargs)
def fanout_cast(conf, context, topic, msg, **kwargs):
"""Send a message to all listening and expect no reply."""
# NOTE(ewindisch): fanout~ is used because it avoid splitting on .
# and acts as a non-subtle hint to the matchmaker and ZmqProxy.
_multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
def notify(conf, context, topic, msg, envelope):
"""Send notification event.
Notifications are sent to topic-priority.
This differs from the AMQP drivers which send to topic.priority.
"""
# NOTE(ewindisch): dot-priority in rpc notifier does not
# work with our assumptions.
topic = topic.replace('.', '-')
cast(conf, context, topic, msg, envelope=envelope)
def cleanup():
"""Clean up resources in use by implementation."""
global ZMQ_CTX
if ZMQ_CTX:
ZMQ_CTX.term()
ZMQ_CTX = None
global matchmaker
matchmaker = None
def _get_ctxt():
if not zmq:
raise ImportError("Failed to import eventlet.green.zmq")
global ZMQ_CTX
if not ZMQ_CTX:
ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
return ZMQ_CTX
def _get_matchmaker(*args, **kwargs):
global matchmaker
if not matchmaker:
mm = CONF.rpc_zmq_matchmaker
if mm.endswith('matchmaker.MatchMakerRing'):
            mm = mm.replace('matchmaker', 'matchmaker_ring')
LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
' %(new)s instead') % dict(
orig=CONF.rpc_zmq_matchmaker, new=mm))
matchmaker = importutils.import_object(mm, *args, **kwargs)
return matchmaker
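# Illustrative end-to-end sketch (hedged: assumes a configured CONF, a running
# zmq receiver/proxy and a matchmaker entry for 'mytopic'):
#   conn = create_connection(CONF)
#   conn.create_consumer('mytopic', proxy_object)
#   conn.consume_in_thread()
#   reply = call(CONF, context, 'mytopic', {'method': 'ping', 'args': {}})
#   cast(CONF, context, 'mytopic', {'method': 'notify', 'args': {}})
#   cleanup()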
| citrix-openstack-build/oslo.log | oslo/log/openstack/common/rpc/impl_zmq.py | Python | apache-2.0 | 26,437 |
import sys
import requests
from bs4 import BeautifulSoup
# Google Search Class
class googleSearch():
def __init__(self, query):
self.query = "http://google.com/search?q=" + "+".join(query.split()) + "&num=100&start="
self.page = 1
        # Fetch the first three result pages (100 results per page) and merge them.
        self.websiteList = self.returnWebsiteList(0) + self.returnWebsiteList(100) + self.returnWebsiteList(200)
def getWebsiteList(self):
return self.websiteList
def cleanURL(self, url):
return url.replace("https://", "").replace("http://", "")
    def returnWebsiteList(self, startResult):
        # PRODUCTION CODE
        storeURL = []
        # Fetch the page of results starting at `startResult` (0, 100, 200, ...)
        # and parse the raw HTML into a BeautifulSoup object
        content = requests.get(self.query + str(startResult)).text
        soup = BeautifulSoup(content, 'html.parser')
# Loop over cite tags in HTML
for cite in soup.find_all("cite"):
# Extract text from cite tags
text = self.cleanURL(cite.text)
if "..." in text:
storeURL.append(text.split("/")[0])
else:
storeURL.append(text)
        return storeURL
 | tomarrell/Email-Scraper | lib/googleSearch.py | Python | gpl-3.0 | 1,172 |
# Set default logging handler to avoid "No handler found" warnings.
#import logging
#import logging.config
#
#try: # Python 2.7+
# from logging import NullHandler
#except ImportError:
# class NullHandler(logging.Handler):
# def emit(self, record):
# pass
#
#log = logging.getLogger(__name__)
#try:
# logging.config.fileConfig('settings/logging.conf')
#except:
# pass
#
#log.addHandler(NullHandler())
| SPRACE/python-odl | odl/__init__.py | Python | gpl-3.0 | 431 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FormText'
db.create_table(u'landing_formtext', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=50)),
('text', self.gf('django.db.models.fields.TextField')(max_length=200)),
))
db.send_create_signal(u'landing', ['FormText'])
def backwards(self, orm):
# Deleting model 'FormText'
db.delete_table(u'landing_formtext')
models = {
u'landing.formtext': {
'Meta': {'object_name': 'FormText'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'landing.function': {
'Meta': {'object_name': 'Function'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'landing.landingregister': {
'Meta': {'object_name': 'LandingRegister'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '250'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
u'landing.mainimage': {
'Description': ('django.db.models.fields.TextField', [], {'max_length': '200'}),
'Meta': {'object_name': 'MainImage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'landing.product': {
'Meta': {'object_name': 'Product'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'landing.slogan': {
'Meta': {'object_name': 'Slogan'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slogan': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'landing.testimonial': {
'Meta': {'object_name': 'Testimonial'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
    complete_apps = ['landing']
 | andresfcardenas/marketing-platform | landing/migrations/0003_auto__add_formtext.py | Python | bsd-3-clause | 3,656 |
__author__ = 'oier'
import json
from flask import Flask, make_response
app = Flask(__name__)
import seaborn as sns
import numpy as np
import pandas as pd
import os
from datetime import datetime
import matplotlib.pyplot as plt
import sys
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from io import StringIO
from sklearn import linear_model
from models import InputForm, ValueSelector
from flask import Flask, render_template, request
from compute import compute, load_data, line_plot
@app.route('/')
def index():
return 'Hello World!'
def form_values(request):
data = load_data()
form = ValueSelector(request)
form.value.choices = [(k,i) for k,i in enumerate(data.columns)]
return(form)
@app.route('/blood', methods=['GET', 'POST'])
def blood():
form = form_values(request.form)
if request.method == 'POST':# and form.validate():
result = line_plot(form.value.data)
else:
print("False")
result = None
return render_template('plot.html',
form=form, result=result)
@app.route('/vib1', methods=['GET', 'POST'])
def vib1():
#form = InputForm(request.form)
form = form_values(request.form)
if request.method == 'POST' and form.validate():
result = compute(form.A.data, form.b.data,
form.w.data, form.T.data)
else:
result = None
return render_template('view_plain.html',
form=form, result=result)
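# Illustrative run (assumption: the local `models` and `compute` modules provide the
# helpers imported above):
#   $ python index.py
#   then open http://127.0.0.1:5000/blood to pick a column and render its line plot.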
if __name__ == '__main__':
    app.run()
 | oiertwo/vampyr | flask/index.py | Python | mit | 1,592 |
import os
import sys
from _pydevd_bundle.pydevd_constants import IS_PYCHARM
IS_PY36_OR_GREATER = sys.version_info >= (3, 6)
frame_eval_func = None
stop_frame_eval = None
dummy_trace_dispatch = None
show_frame_eval_warning = False
clear_thread_local_info = None
# "NO" means we should not use frame evaluation, 'YES' we should use it (and fail if not there) and unspecified uses if possible.
use_frame_eval = os.environ.get('PYDEVD_USE_FRAME_EVAL', None)
if use_frame_eval == 'NO':
pass
elif use_frame_eval == 'YES':
# Fail if unable to use
from _pydevd_frame_eval.pydevd_frame_eval_cython_wrapper import frame_eval_func, stop_frame_eval, dummy_trace_dispatch, clear_thread_local_info
elif use_frame_eval is None:
# Try to use if possible
if IS_PY36_OR_GREATER:
try:
from _pydevd_frame_eval.pydevd_frame_eval_cython_wrapper import frame_eval_func, stop_frame_eval, dummy_trace_dispatch, clear_thread_local_info
except ImportError:
from _pydev_bundle.pydev_monkey import log_error_once
dirname = os.path.dirname(os.path.dirname(__file__))
if not IS_PYCHARM:
log_error_once("warning: Debugger speedups using cython not found. Run '\"%s\" \"%s\" build_ext --inplace' to build." % (
sys.executable, os.path.join(dirname, 'setup_cython.py')))
else:
show_frame_eval_warning = True
else:
raise RuntimeError('Unexpected value for PYDEVD_USE_FRAME_EVAL: %s (accepted: YES, NO)' % (use_frame_eval,))
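# Illustrative usage (assumption about how the debugger is launched): the choice above
# is driven entirely by the environment variable, e.g.
#   PYDEVD_USE_FRAME_EVAL=NO python my_debugged_script.py
# forces the plain tracing implementation even on Python 3.6+.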
| Elizaveta239/PyDev.Debugger | _pydevd_frame_eval/pydevd_frame_eval_main.py | Python | epl-1.0 | 1,550 |
from __future__ import (absolute_import, division, print_function)
import unittest
import numpy as np
from mantid.simpleapi import logger
import AbinsModules
class AbinsKpointsDataTest(unittest.TestCase):
_good_data_1 = {"k_vectors": np.asarray([[0.2, 0.1, 0.2], [0.1, 0.0, 0.2], [0.2, 0.2, 0.2]]),
"weights": np.asarray([0.3, 0.2, 0.5]),
"frequencies": np.asarray([[1.0, 2.0, 34.0, 4.9, 1.0, 2.0],
[11.0, 12.0, 134.0, 14.9, 11.0, 12.0],
[1.0, 2.0, 34.0, 4.9, 1.0, 2.0]]), # 6 frequencies for one k-point
"atomic_displacements": np.asarray([[[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0],
[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 111.0], [1.0, 1.0, 1.0],
[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]],
[[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0],
[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 221.0], [1.0, 1.0, 1.0],
[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]],
[[[1.0, 1.0, 1.0], [1.0, 1.0, 41.0], [1.0, 1.0, 1.0],
[1.0, 1.0, 1.0], [1.0, 1.0, 31.0], [1.0, 1.0, 1.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0],
[1.0, 1.0, 1.0], [1.0, 1.0, 41.0], [1.0, 1.0, 1.0]]]
# 12 atomic displacements for each k-point
]).astype(complex),
"unit_cell": np.asarray([[ 7.44, 0. , 0. ],
[ 0. , 9.55, 0. ],
[ 0. , 0. , 6.92]])
}
# data with soft phonons
_good_data_2 = {"k_vectors": np.asarray([[0.2, 0.1, 0.2], [0.1, 0.0, 0.2], [0.2, 0.2, 0.2]]),
"weights": np.asarray([0.3, 0.2, 0.5]),
"frequencies": np.asarray([[-10.0, -2.0, -3.0, 4.9, 1.0, 2.0],
[11.0, 12.0, 134.0, 14.9, 11.0, 12.0],
[1.0, 2.0, 34.0, 4.9, 1.0, 2.0]]), # 6 frequencies for one k-point
"atomic_displacements": np.asarray([[[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0],
[1.0, 1.0, 1.0], [1.0, 121.0, 1.0], [1.0, 1.0, 1.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0],
[1.0, 1.0, 1.0], [1.0, 1.0, 131.0], [1.0, 1.0, 1.0]]],
[[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0],
[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 221.0], [1.0, 1.0, 1.0],
[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]],
[[[1.0, 1.0, 1.0], [1.0, 1.0, 41.0], [1.0, 1.0, 1.0],
[1.0, 1.0, 1.0], [1.0, 1.0, 31.0], [1.0, 1.0, 1.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0],
[1.0, 1.0, 1.0], [1.0, 1.0, 41.0], [1.0, 1.0, 1.0]]]
# 12 atomic displacements for each k-point
]).astype(complex),
"unit_cell": np.asarray([[7.44, 0., 0.],
[0., 9.55, 0.],
[0., 0., 6.92]])
}
def setUp(self):
self.tester = AbinsModules.KpointsData(num_k=3, num_atoms=2)
# tests for append method
def test_no_dict(self):
# Case no dict to append
with self.assertRaises(ValueError):
wrong_dict = ["k_vectors", 2, "freq"]
self.tester.set(items=wrong_dict)
def test_missing_key(self):
# missing atomic_displacements
items = {"k_vectors": self._good_data_1["k_vectors"],
"weights": self._good_data_1["weights"],
"frequencies": self._good_data_1["frequencies"]}
with self.assertRaises(ValueError):
self.tester.set(items=items)
def test_wrong_value(self):
# value should be a numpy array with real numbers
items = {"k_vectors": "wrong_value",
"weights": self._good_data_1["weights"],
"frequencies": self._good_data_1["frequencies"],
"atomic_displacements": self._good_data_1["atomic_displacements"]}
with self.assertRaises(ValueError):
self.tester.set(items=items)
def test_wrong_weight(self):
# negative weight (weight should be represented as a positive real number)
items = {"k_vectors": self._good_data_1["k_vectors"],
"weights": np.asarray([-0.1, 0.3, 0.2]),
"frequencies": self._good_data_1["frequencies"],
"atomic_displacements": self._good_data_1["atomic_displacements"]}
with self.assertRaises(ValueError):
self.tester.set(items=items)
def test_wrong_freq(self):
# frequencies as a string
wrong_items = {"k_vectors": self._good_data_1["k_vectors"],
"weights": self._good_data_1["weights"],
"frequencies": "Wrong_freq",
"atomic_displacements": self._good_data_1["atomic_displacements"]}
with self.assertRaises(ValueError):
self.tester.set(items=wrong_items)
# complex frequencies
wrong_items = {"k_vectors": self._good_data_1["k_vectors"],
"weights": self._good_data_1["weights"],
"frequencies": self._good_data_1["frequencies"].astype(complex),
"atomic_displacements": self._good_data_1["atomic_displacements"]}
with self.assertRaises(ValueError):
self.tester.set(items=wrong_items)
# frequencies as 2D arrays but with a bad shape
wrong_items = {"k_vectors": self._good_data_1["k_vectors"],
"weights": self._good_data_1["weights"],
"frequencies": np.asarray([[1.0, 2.0, 34.0], [4.9, 1.0, 1.0]]),
"atomic_displacements": self._good_data_1["atomic_displacements"]}
with self.assertRaises(ValueError):
self.tester.set(items=wrong_items)
def test_wrong_displacements(self):
# displacements as a number
wrong_items = {"k_vectors": self._good_data_1["k_vectors"],
"weights": self._good_data_1["weights"],
"frequencies": self._good_data_1["frequencies"],
"atomic_displacements": 1}
with self.assertRaises(ValueError):
self.tester.set(items=wrong_items)
# wrong size of the second dimension
wrong_items = {"k_vectors": self._good_data_1["k_vectors"],
"weights": self._good_data_1["weights"],
"frequencies": self._good_data_1["frequencies"],
"atomic_displacements": np.asarray([[[[1., 1., 11.], [1., 1., 1., 1.0], [1.0, 1.0, 1.0],
[1., 1.0, 1.0], [1., 1., 11.], [1., 1., 11.]],
[[1., 1.0, 1.0], [1., 1., 11.], [1., 1., 11.],
[1., 1.0, 1.0], [1., 1., 11.], [1., 1., 11.]]],
self._good_data_1["atomic_displacements"][0, 0],
self._good_data_1["atomic_displacements"][0, 1]]
)}
with self.assertRaises(ValueError):
self.tester.set(items=wrong_items)
# displacements as numpy arrays with integers
wrong_items = {"k_vectors": self._good_data_1["k_vectors"],
"weights": self._good_data_1["weights"],
"frequencies": self._good_data_1["frequencies"],
"atomic_displacements": self._good_data_1["atomic_displacements"].astype(int)}
with self.assertRaises(ValueError):
self.tester.set(items=wrong_items)
# displacements as a 1D array
wrong_items = {"k_vectors": self._good_data_1["k_vectors"],
"weights": self._good_data_1["weights"],
"frequencies": self._good_data_1["frequencies"],
"atomic_displacements": np.ravel(self._good_data_1["atomic_displacements"])}
with self.assertRaises(ValueError):
self.tester.set(items=wrong_items)
def test_set_good_case(self):
self._set_good_case_core(data=self._good_data_1)
self._set_good_case_core(data=self._good_data_2)
def _set_good_case_core(self, data):
self.tester.set(items=data)
collected_data = self.tester.extract()
for k in range(data["frequencies"].shape[0]):
indices = data["frequencies"][k] > AbinsModules.AbinsConstants.ACOUSTIC_PHONON_THRESHOLD
temp_f = data["frequencies"][k]
self.assertEqual(True, np.allclose(temp_f[indices],
collected_data["frequencies"][str(k)]))
temp_a = data["atomic_displacements"][k]
self.assertEqual(True, np.allclose(temp_a[:, indices],
collected_data["atomic_displacements"][str(k)]))
self.assertEqual(True, np.allclose(data["k_vectors"][k], collected_data["k_vectors"][str(k)]))
self.assertEqual(data["weights"][k], collected_data["weights"][str(k)])
# tests for set method
def test_set_wrong_dict(self):
with self.assertRaises(ValueError):
self.tester.set([1, 2234, 8])
# tests for constructor
def test_constructor_assertions(self):
with self.assertRaises(ValueError):
# noinspection PyUnusedLocal
poor_tester = AbinsModules.KpointsData(num_k=0.1, num_atoms=2)
with self.assertRaises(ValueError):
# noinspection PyUnusedLocal
poor_tester = AbinsModules.KpointsData(num_k=1, num_atoms=-2)
if __name__ == "__main__":
unittest.main()
| dymkowsk/mantid | scripts/test/AbinsKpointsDataTest.py | Python | gpl-3.0 | 11,416 |
import six
class AbstractIter(six.Iterator):
def __init__(self, node, filter_=None, stop=None, maxlevel=None):
"""
Iterate over tree starting at `node`.
Base class for all iterators.
Keyword Args:
filter_: function called with every `node` as argument, `node` is returned if `True`.
stop: stop iteration at `node` if `stop` function returns `True` for `node`.
maxlevel (int): maximum descending in the node hierarchy.
"""
self.node = node
self.filter_ = filter_
self.stop = stop
self.maxlevel = maxlevel
self.__iter = None
def __init(self):
node = self.node
maxlevel = self.maxlevel
filter_ = self.filter_ or AbstractIter.__default_filter
stop = self.stop or AbstractIter.__default_stop
children = [] if AbstractIter._abort_at_level(1, maxlevel) else AbstractIter._get_children([node], stop)
return self._iter(children, filter_, stop, maxlevel)
@staticmethod
def __default_filter(node):
return True
@staticmethod
def __default_stop(node):
return False
def __iter__(self):
return self
def __next__(self):
if self.__iter is None:
self.__iter = self.__init()
return next(self.__iter)
@staticmethod
def _iter(children, filter_, stop, maxlevel):
raise NotImplementedError() # pragma: no cover
@staticmethod
def _abort_at_level(level, maxlevel):
return maxlevel is not None and level > maxlevel
@staticmethod
def _get_children(children, stop):
return [child for child in children if not stop(child)]
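# Illustrative usage (hedged: assumes a concrete subclass such as anytree's PreOrderIter):
#   from anytree import Node, PreOrderIter
#   root = Node("root"); child = Node("child", parent=root)
#   [node.name for node in PreOrderIter(root, maxlevel=2)]   # -> ['root', 'child']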
| c0fec0de/anytree | anytree/iterators/abstractiter.py | Python | apache-2.0 | 1,704 |
# -*- coding: utf-8 -*-
'''
Created on 10 Jul 2015
@author: Kimon Tsitsikas
Copyright © 2015 Kimon Tsitsikas, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with Odemis. If not, see http://www.gnu.org/licenses/.
'''
from __future__ import division, print_function
import logging
import numpy
from odemis.driver import blinkstick
import os
import unittest
from unittest.case import skip
logging.getLogger().setLevel(logging.DEBUG)
# Test using the hardware
# Export TEST_NOHW=1 to force using only the simulator and skipping test cases
# needing real hardware
TEST_NOHW = (os.environ.get("TEST_NOHW", "0") != "0") # Default to Hw testing
CLASS = blinkstick.WhiteLed
KWARGS = dict(name="test", role="light", max_power=1.0, inversed=True)
class TestStatic(unittest.TestCase):
"""
Tests which don't need a component ready
"""
def test_creation(self):
"""
Doesn't even try to do anything, just create and delete components
"""
if TEST_NOHW:
self.skipTest("Cannot test without hardware present")
dev = CLASS(**KWARGS)
dev.terminate()
def test_scan(self):
"""
Test scanning for the device
"""
devices = CLASS.scan()
if not TEST_NOHW:
self.assertGreater(len(devices), 0)
for name, kwargs in devices:
print("opening", name)
d = CLASS(name, "test", **kwargs)
d.terminate()
class TestWhiteLed(unittest.TestCase):
"""
Tests which need a component ready
"""
def setUp(self):
if TEST_NOHW:
self.skipTest("Cannot test without hardware present")
self.dev = CLASS(**KWARGS)
def tearDown(self):
self.dev.terminate()
def test_power_va(self):
# Set power value min and max and mean
self.dev.power.value = self.dev.power.range[0]
self.assertEqual(self.dev.power.value, list(self.dev.power.range[0]))
self.dev.power.value = self.dev.power.range[1]
self.assertEqual(self.dev.power.value, list(self.dev.power.range[1]))
h = numpy.mean(self.dev.power.range)
self.dev.power.value = [h]
self.assertAlmostEqual(self.dev.power.value[0], h, delta=1 / 256)
if __name__ == "__main__":
unittest.main()
| delmic/odemis | src/odemis/driver/test/blinkstick_test.py | Python | gpl-2.0 | 2,773 |
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from itertools import chain
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.common._collections_compat import Mapping, MutableMapping
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars
display = Display()
def to_safe_group_name(name, replacer="_", force=False, silent=False):
# Converts 'bad' characters in a string to underscores (or provided replacer) so they can be used as Ansible hosts or groups
warn = ''
if name: # when deserializing we might not have name yet
invalid_chars = C.INVALID_VARIABLE_NAMES.findall(name)
if invalid_chars:
msg = 'invalid character(s) "%s" in group name (%s)' % (to_text(set(invalid_chars)), to_text(name))
if C.TRANSFORM_INVALID_GROUP_CHARS not in ('never', 'ignore') or force:
name = C.INVALID_VARIABLE_NAMES.sub(replacer, name)
if not (silent or C.TRANSFORM_INVALID_GROUP_CHARS == 'silently'):
display.vvvv('Replacing ' + msg)
warn = 'Invalid characters were found in group names and automatically replaced, use -vvvv to see details'
else:
if C.TRANSFORM_INVALID_GROUP_CHARS == 'never':
display.vvvv('Not replacing %s' % msg)
                warn = 'Invalid characters were found in group names but not replaced, use -vvvv to see details'
if warn:
display.warning(warn)
return name
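# Illustrative behaviour (hedged: the exact outcome depends on the configured
# TRANSFORM_INVALID_GROUP_CHARS policy):
#   to_safe_group_name('my-group', force=True)   # -> 'my_group'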
class Group:
''' a group of ansible hosts '''
# __slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
def __init__(self, name=None):
self.depth = 0
self.name = to_safe_group_name(name)
self.hosts = []
self._hosts = None
self.vars = {}
self.child_groups = []
self.parent_groups = []
self._hosts_cache = None
self.priority = 1
def __repr__(self):
return self.get_name()
def __str__(self):
return self.get_name()
def __getstate__(self):
return self.serialize()
def __setstate__(self, data):
return self.deserialize(data)
def serialize(self):
parent_groups = []
for parent in self.parent_groups:
parent_groups.append(parent.serialize())
self._hosts = None
result = dict(
name=self.name,
vars=self.vars.copy(),
parent_groups=parent_groups,
depth=self.depth,
hosts=self.hosts,
)
return result
def deserialize(self, data):
self.__init__()
self.name = data.get('name')
self.vars = data.get('vars', dict())
self.depth = data.get('depth', 0)
self.hosts = data.get('hosts', [])
self._hosts = None
parent_groups = data.get('parent_groups', [])
for parent_data in parent_groups:
g = Group()
g.deserialize(parent_data)
self.parent_groups.append(g)
def _walk_relationship(self, rel, include_self=False, preserve_ordering=False):
'''
Given `rel` that is an iterable property of Group,
        constituting a directed acyclic graph among all groups,
        returns a set of all groups in the full tree.
A B C
| / | /
| / | /
D -> E
| / vertical connections
| / are directed upward
F
Called on F, returns set of (A, B, C, D, E)
'''
seen = set([])
unprocessed = set(getattr(self, rel))
if include_self:
unprocessed.add(self)
if preserve_ordering:
ordered = [self] if include_self else []
ordered.extend(getattr(self, rel))
while unprocessed:
seen.update(unprocessed)
new_unprocessed = set([])
for new_item in chain.from_iterable(getattr(g, rel) for g in unprocessed):
new_unprocessed.add(new_item)
if preserve_ordering:
if new_item not in seen:
ordered.append(new_item)
new_unprocessed.difference_update(seen)
unprocessed = new_unprocessed
if preserve_ordering:
return ordered
return seen
def get_ancestors(self):
return self._walk_relationship('parent_groups')
def get_descendants(self, **kwargs):
return self._walk_relationship('child_groups', **kwargs)
@property
def host_names(self):
if self._hosts is None:
self._hosts = set(self.hosts)
return self._hosts
def get_name(self):
return self.name
def add_child_group(self, group):
added = False
if self == group:
raise Exception("can't add group to itself")
# don't add if it's already there
if group not in self.child_groups:
# prepare list of group's new ancestors this edge creates
start_ancestors = group.get_ancestors()
new_ancestors = self.get_ancestors()
if group in new_ancestors:
raise AnsibleError("Adding group '%s' as child to '%s' creates a recursive dependency loop." % (to_native(group.name), to_native(self.name)))
new_ancestors.add(self)
new_ancestors.difference_update(start_ancestors)
added = True
self.child_groups.append(group)
# update the depth of the child
group.depth = max([self.depth + 1, group.depth])
# update the depth of the grandchildren
group._check_children_depth()
# now add self to child's parent_groups list, but only if there
# isn't already a group with the same name
if self.name not in [g.name for g in group.parent_groups]:
group.parent_groups.append(self)
for h in group.get_hosts():
h.populate_ancestors(additions=new_ancestors)
self.clear_hosts_cache()
return added
def _check_children_depth(self):
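        # Walk all descendants breadth-first, bumping their depth as needed; if we
        # descend through more levels than there are distinct groups seen, the
        # hierarchy must contain a cycle.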
depth = self.depth
start_depth = self.depth # self.depth could change over loop
seen = set([])
unprocessed = set(self.child_groups)
while unprocessed:
seen.update(unprocessed)
depth += 1
to_process = unprocessed.copy()
unprocessed = set([])
for g in to_process:
if g.depth < depth:
g.depth = depth
unprocessed.update(g.child_groups)
if depth - start_depth > len(seen):
raise AnsibleError("The group named '%s' has a recursive dependency loop." % to_native(self.name))
def add_host(self, host):
added = False
if host.name not in self.host_names:
self.hosts.append(host)
self._hosts.add(host.name)
host.add_group(self)
self.clear_hosts_cache()
added = True
return added
def remove_host(self, host):
removed = False
if host.name in self.host_names:
self.hosts.remove(host)
self._hosts.remove(host.name)
host.remove_group(self)
self.clear_hosts_cache()
removed = True
return removed
def set_variable(self, key, value):
if key == 'ansible_group_priority':
self.set_priority(int(value))
else:
if key in self.vars and isinstance(self.vars[key], MutableMapping) and isinstance(value, Mapping):
self.vars = combine_vars(self.vars, {key: value})
else:
self.vars[key] = value
def clear_hosts_cache(self):
self._hosts_cache = None
for g in self.get_ancestors():
g._hosts_cache = None
def get_hosts(self):
if self._hosts_cache is None:
self._hosts_cache = self._get_hosts()
return self._hosts_cache
def _get_hosts(self):
hosts = []
seen = {}
for kid in self.get_descendants(include_self=True, preserve_ordering=True):
kid_hosts = kid.hosts
for kk in kid_hosts:
if kk not in seen:
seen[kk] = 1
if self.name == 'all' and kk.implicit:
continue
hosts.append(kk)
return hosts
def get_vars(self):
return self.vars.copy()
def set_priority(self, priority):
try:
self.priority = int(priority)
except TypeError:
# FIXME: warn about invalid priority
pass
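# Illustrative usage (hedged sketch; Host lives in ansible.inventory.host):
#   from ansible.inventory.host import Host
#   prod = Group('production')
#   web = Group('webservers')
#   prod.add_child_group(web)
#   web.add_host(Host('web01.example.com'))
#   [h.name for h in prod.get_hosts()]   # -> ['web01.example.com']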
| 2ndQuadrant/ansible | lib/ansible/inventory/group.py | Python | gpl-3.0 | 9,635 |
''' ResourceManagementHandler
Module that allows users to access the ResourceManagementDB remotely.
'''
from DIRAC import gConfig, S_OK, gLogger
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.ResourceStatusSystem.Utilities import Synchronizer
from DIRAC.ResourceStatusSystem.Service.ResourceStatusHandler import convert
from DIRAC.ResourceStatusSystem.DB.ResourceManagementDB import ResourceManagementDB
__RCSID__ = '$Id: $'
def initializeResourceManagementHandler(_serviceInfo):
'''
Handler initialization, where we set the ResourceManagementDB as global db.
'''
global db
db = ResourceManagementDB()
syncObject = Synchronizer.Synchronizer()
gConfig.addListenerToNewVersionEvent(syncObject.sync)
return S_OK()
################################################################################
class ResourceManagementHandler(RequestHandler):
'''
The ResourceManagementHandler exposes the DB front-end functions through a
XML-RPC server, functionalities inherited from :class:`DIRAC.Core.DISET.Reques\
tHandler.RequestHandler`
According to the ResourceManagementDB philosophy, only functions of the type:
- insert
- select
- delete
- addOrModify
are exposed. If you need anything more complicated, either look for it on the
:class:`ResourceManagementClient`, or code it yourself. This way the DB and the
  Service are kept clean and tidy.
  You can use this service directly in this way, but you MUST NOT DO IT. Use it
  through the :class:`ResourceManagementClient`. It offers, in the worst case,
  performance as good as the :class:`ResourceManagementHandler`, if not better.
  >>> from DIRAC.Core.DISET.RPCClient import RPCClient
  >>> server = RPCClient("ResourceStatus/ResourceManagement")
'''
def __init__(self, *args, **kwargs):
super(ResourceManagementHandler, self).__init__(*args, **kwargs)
@staticmethod
def __logResult(methodName, result):
'''
Method that writes to log error messages
'''
if not result['OK']:
gLogger.error('%s : %s' % (methodName, result['Message']))
@staticmethod
def setDatabase(database):
'''
This method let us inherit from this class and overwrite the database object
without having problems with the global variables.
:Parameters:
**database** - `MySQL`
database used by this handler
:return: None
'''
global db
db = database
types_insert = [basestring, dict]
def export_insert(self, table, params):
'''
    This method is a bridge to access :class:`ResourceManagementDB` remotely. It
    adds neither processing nor validation. If you need to know more
    about this method, keep reading the database documentation.
:Parameters:
**table** - `string` or `dict`
should contain the table from which querying
if it's a `dict` the query comes from a client prior to v6r18
**params** - `dict`
arguments for the mysql query. Currently it is being used only for column selection.
For example: meta = { 'columns' : [ 'Name' ] } will return only the 'Name' column.
:return: S_OK() || S_ERROR()
'''
if isinstance(table, dict): # for backward compatibility: conversion is needed
params, table = convert(table, params)
gLogger.info('insert: %s %s' % (table, params))
# remove unnecessary key generated by locals()
del params['self']
res = db.insert(table, params)
self.__logResult('insert', res)
return res
types_select = [[basestring, dict], dict]
def export_select(self, table, params):
'''
    This method is a bridge to access :class:`ResourceManagementDB` remotely.
    It adds neither processing nor validation. If you need to know more
    about this method, keep reading the database documentation.
:Parameters:
**table** - `string` or `dict`
should contain the table from which querying
if it's a `dict` the query comes from a client prior to v6r18
**params** - `dict`
arguments for the mysql query. Currently it is being used only for column selection.
For example: meta = { 'columns' : [ 'Name' ] } will return only the 'Name' column.
:return: S_OK() || S_ERROR()
'''
if isinstance(table, dict): # for backward compatibility: conversion is needed
params, table = convert(table, params)
gLogger.info('select: %s %s' % (table, params))
res = db.select(table, params)
self.__logResult('select', res)
return res
types_delete = [[basestring, dict], dict]
def export_delete(self, table, params):
'''
    This method is a bridge to access :class:`ResourceManagementDB` remotely.
    It adds neither processing nor validation. If you need to know more
    about this method, keep reading the database documentation.
:Parameters:
**table** - `string` or `dict`
should contain the table from which querying
if it's a `dict` the query comes from a client prior to v6r18
**params** - `dict`
arguments for the mysql query. Currently it is being used only for column selection.
For example: meta = { 'columns' : [ 'Name' ] } will return only the 'Name' column.
:return: S_OK() || S_ERROR()
'''
if isinstance(table, dict): # for backward compatibility: conversion is needed
params, table = convert(table, params)
gLogger.info('delete: %s %s' % (table, params))
res = db.delete(table, params)
self.__logResult('delete', res)
return res
types_addOrModify = [[basestring, dict], dict]
def export_addOrModify(self, table, params):
'''
    This method is a bridge to access :class:`ResourceManagementDB` remotely. It
    adds neither processing nor validation. If you need to know more about
    this method, keep reading the database documentation.
:Parameters:
**table** - `string` or `dict`
should contain the table from which querying
if it's a `dict` the query comes from a client prior to v6r18
**params** - `dict`
arguments for the mysql query. Currently it is being used only for column selection.
For example: meta = { 'columns' : [ 'Name' ] } will return only the 'Name' column.
:return: S_OK() || S_ERROR()
'''
if isinstance(table, dict): # for backward compatibility: conversion is needed
params, table = convert(table, params)
gLogger.info('addOrModify: %s %s' % (table, params))
res = db.addOrModify(table, params)
self.__logResult('addOrModify', res)
return res
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| andresailer/DIRAC | ResourceStatusSystem/Service/ResourceManagementHandler.py | Python | gpl-3.0 | 6,815 |
from django.conf.urls.defaults import *
from django.conf import settings
from ella import newman
# make sure to import ella error handlers
from ella.core.urls import handler404, handler500
# register ella's admin
newman.autodiscover()
urlpatterns = patterns('',)
if settings.DEBUG:
# only use these urls in DEBUG mode, otherwise they should be handled by your web server
from os.path import dirname, join, normpath
import django, ella
# static files from both admin apps
ADMIN_ROOTS = (
normpath(join(dirname(ella.__file__), 'newman', 'media')),
normpath(join(dirname(django.__file__), 'contrib', 'admin', 'media')),
)
# serve static files
urlpatterns += patterns('',
# newman specific files first
(r'^%s/(?P<path>.*)$' % settings.NEWMAN_MEDIA_PREFIX.strip('/'), 'ella.utils.views.fallback_serve', {'document_roots': ADMIN_ROOTS}),
# rest of the static files
(r'^%s/(?P<path>.*)$' % settings.MEDIA_URL.strip('/'), 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
)
# actual URL mappings
urlpatterns += patterns('',
(r'^newman/', include(newman.site.urls)),
(r'^', include('ella.core.urls')),
)
| ella/ellablog | ellablog/urls.py | Python | bsd-3-clause | 1,254 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import test_util
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_addition
from tensorflow.python.platform import test
linalg = linalg_lib
rng = np.random.RandomState(0)
add_operators = linear_operator_addition.add_operators
# pylint: disable=unused-argument
class _BadAdder(linear_operator_addition._Adder):
"""Adder that will fail if used."""
def can_add(self, op1, op2):
raise AssertionError("BadAdder.can_add called!")
def _add(self, op1, op2, operator_name, hints):
raise AssertionError("This line should not be reached")
# pylint: enable=unused-argument
class LinearOperatorAdditionCorrectnessTest(test.TestCase):
"""Tests correctness of addition with combinations of a few Adders.
Tests here are done with the _DEFAULT_ADDITION_TIERS, which means
add_operators should reduce all operators resulting in one single operator.
This shows that we are able to correctly combine adders using the tiered
system. All Adders should be tested separately, and there is no need to test
every Adder within this class.
"""
def test_one_operator_is_returned_unchanged(self):
op_a = linalg.LinearOperatorDiag([1., 1.])
op_sum = add_operators([op_a])
self.assertEqual(1, len(op_sum))
self.assertIs(op_sum[0], op_a)
def test_at_least_one_operators_required(self):
with self.assertRaisesRegex(ValueError, "must contain at least one"):
add_operators([])
def test_attempting_to_add_numbers_raises(self):
with self.assertRaisesRegex(TypeError, "contain only LinearOperator"):
add_operators([1, 2])
@test_util.run_deprecated_v1
def test_two_diag_operators(self):
op_a = linalg.LinearOperatorDiag(
[1., 1.], is_positive_definite=True, name="A")
op_b = linalg.LinearOperatorDiag(
[2., 2.], is_positive_definite=True, name="B")
with self.cached_session():
op_sum = add_operators([op_a, op_b])
self.assertEqual(1, len(op_sum))
op = op_sum[0]
self.assertIsInstance(op, linalg_lib.LinearOperatorDiag)
self.assertAllClose([[3., 0.], [0., 3.]], op.to_dense())
# Adding positive definite operators produces positive def.
self.assertTrue(op.is_positive_definite)
# Real diagonal ==> self-adjoint.
self.assertTrue(op.is_self_adjoint)
# Positive definite ==> non-singular
self.assertTrue(op.is_non_singular)
# Enforce particular name for this simple case
self.assertEqual("Add/B__A/", op.name)
@test_util.run_deprecated_v1
def test_three_diag_operators(self):
op1 = linalg.LinearOperatorDiag(
[1., 1.], is_positive_definite=True, name="op1")
op2 = linalg.LinearOperatorDiag(
[2., 2.], is_positive_definite=True, name="op2")
op3 = linalg.LinearOperatorDiag(
[3., 3.], is_positive_definite=True, name="op3")
with self.cached_session():
op_sum = add_operators([op1, op2, op3])
self.assertEqual(1, len(op_sum))
op = op_sum[0]
self.assertTrue(isinstance(op, linalg_lib.LinearOperatorDiag))
self.assertAllClose([[6., 0.], [0., 6.]], op.to_dense())
# Adding positive definite operators produces positive def.
self.assertTrue(op.is_positive_definite)
# Real diagonal ==> self-adjoint.
self.assertTrue(op.is_self_adjoint)
# Positive definite ==> non-singular
self.assertTrue(op.is_non_singular)
@test_util.run_deprecated_v1
def test_diag_tril_diag(self):
op1 = linalg.LinearOperatorDiag(
[1., 1.], is_non_singular=True, name="diag_a")
op2 = linalg.LinearOperatorLowerTriangular(
[[2., 0.], [0., 2.]],
is_self_adjoint=True,
is_non_singular=True,
name="tril")
op3 = linalg.LinearOperatorDiag(
[3., 3.], is_non_singular=True, name="diag_b")
with self.cached_session():
op_sum = add_operators([op1, op2, op3])
self.assertEqual(1, len(op_sum))
op = op_sum[0]
self.assertIsInstance(op, linalg_lib.LinearOperatorLowerTriangular)
self.assertAllClose([[6., 0.], [0., 6.]], op.to_dense())
# The diag operators will be self-adjoint (because real and diagonal).
# The TriL operator has the self-adjoint hint set.
self.assertTrue(op.is_self_adjoint)
# Even though op1/2/3 are non-singular, this does not imply op is.
# Since no custom hint was provided, we default to None (unknown).
self.assertEqual(None, op.is_non_singular)
@test_util.run_deprecated_v1
def test_matrix_diag_tril_diag_uses_custom_name(self):
op0 = linalg.LinearOperatorFullMatrix(
[[-1., -1.], [-1., -1.]], name="matrix")
op1 = linalg.LinearOperatorDiag([1., 1.], name="diag_a")
op2 = linalg.LinearOperatorLowerTriangular(
[[2., 0.], [1.5, 2.]], name="tril")
op3 = linalg.LinearOperatorDiag([3., 3.], name="diag_b")
with self.cached_session():
op_sum = add_operators([op0, op1, op2, op3], operator_name="my_operator")
self.assertEqual(1, len(op_sum))
op = op_sum[0]
self.assertIsInstance(op, linalg_lib.LinearOperatorFullMatrix)
self.assertAllClose([[5., -1.], [0.5, 5.]], op.to_dense())
self.assertEqual("my_operator", op.name)
def test_incompatible_domain_dimensions_raises(self):
op1 = linalg.LinearOperatorFullMatrix(rng.rand(2, 3))
op2 = linalg.LinearOperatorDiag(rng.rand(2, 4))
with self.assertRaisesRegex(ValueError, "must.*same domain dimension"):
add_operators([op1, op2])
def test_incompatible_range_dimensions_raises(self):
op1 = linalg.LinearOperatorFullMatrix(rng.rand(2, 3))
op2 = linalg.LinearOperatorDiag(rng.rand(3, 3))
with self.assertRaisesRegex(ValueError, "must.*same range dimension"):
add_operators([op1, op2])
def test_non_broadcastable_batch_shape_raises(self):
op1 = linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3))
op2 = linalg.LinearOperatorDiag(rng.rand(4, 3, 3))
with self.assertRaisesRegex(ValueError, "Incompatible shapes"):
add_operators([op1, op2])
class LinearOperatorOrderOfAdditionTest(test.TestCase):
"""Test that the order of addition is done as specified by tiers."""
def test_tier_0_additions_done_in_tier_0(self):
diag1 = linalg.LinearOperatorDiag([1.])
diag2 = linalg.LinearOperatorDiag([1.])
diag3 = linalg.LinearOperatorDiag([1.])
addition_tiers = [
[linear_operator_addition._AddAndReturnDiag()],
[_BadAdder()],
]
# Should not raise since all were added in tier 0, and tier 1 (with the
# _BadAdder) was never reached.
op_sum = add_operators([diag1, diag2, diag3], addition_tiers=addition_tiers)
self.assertEqual(1, len(op_sum))
self.assertIsInstance(op_sum[0], linalg.LinearOperatorDiag)
def test_tier_1_additions_done_by_tier_1(self):
diag1 = linalg.LinearOperatorDiag([1.])
diag2 = linalg.LinearOperatorDiag([1.])
tril = linalg.LinearOperatorLowerTriangular([[1.]])
addition_tiers = [
[linear_operator_addition._AddAndReturnDiag()],
[linear_operator_addition._AddAndReturnTriL()],
[_BadAdder()],
]
# Should not raise since all were added by tier 1, and the
# _BadAdder) was never reached.
op_sum = add_operators([diag1, diag2, tril], addition_tiers=addition_tiers)
self.assertEqual(1, len(op_sum))
self.assertIsInstance(op_sum[0], linalg.LinearOperatorLowerTriangular)
def test_tier_1_additions_done_by_tier_1_with_order_flipped(self):
diag1 = linalg.LinearOperatorDiag([1.])
diag2 = linalg.LinearOperatorDiag([1.])
tril = linalg.LinearOperatorLowerTriangular([[1.]])
addition_tiers = [
[linear_operator_addition._AddAndReturnTriL()],
[linear_operator_addition._AddAndReturnDiag()],
[_BadAdder()],
]
# Tier 0 could convert to TriL, and this converted everything to TriL,
# including the Diags.
# Tier 1 was never used.
# Tier 2 was never used (therefore, _BadAdder didn't raise).
op_sum = add_operators([diag1, diag2, tril], addition_tiers=addition_tiers)
self.assertEqual(1, len(op_sum))
self.assertIsInstance(op_sum[0], linalg.LinearOperatorLowerTriangular)
@test_util.run_deprecated_v1
def test_cannot_add_everything_so_return_more_than_one_operator(self):
diag1 = linalg.LinearOperatorDiag([1.])
diag2 = linalg.LinearOperatorDiag([2.])
tril5 = linalg.LinearOperatorLowerTriangular([[5.]])
addition_tiers = [
[linear_operator_addition._AddAndReturnDiag()],
]
# Tier 0 (the only tier) can only convert to Diag, so it combines the two
# diags, but the TriL is unchanged.
# Result should contain two operators, one Diag, one TriL.
op_sum = add_operators([diag1, diag2, tril5], addition_tiers=addition_tiers)
self.assertEqual(2, len(op_sum))
found_diag = False
found_tril = False
with self.cached_session():
for op in op_sum:
if isinstance(op, linalg.LinearOperatorDiag):
found_diag = True
self.assertAllClose([[3.]], op.to_dense())
if isinstance(op, linalg.LinearOperatorLowerTriangular):
found_tril = True
self.assertAllClose([[5.]], op.to_dense())
self.assertTrue(found_diag and found_tril)
def test_intermediate_tier_is_not_skipped(self):
diag1 = linalg.LinearOperatorDiag([1.])
diag2 = linalg.LinearOperatorDiag([1.])
tril = linalg.LinearOperatorLowerTriangular([[1.]])
addition_tiers = [
[linear_operator_addition._AddAndReturnDiag()],
[_BadAdder()],
[linear_operator_addition._AddAndReturnTriL()],
]
# tril cannot be added in tier 0, and the intermediate tier 1 with the
# BadAdder will catch it and raise.
with self.assertRaisesRegex(AssertionError, "BadAdder.can_add called"):
add_operators([diag1, diag2, tril], addition_tiers=addition_tiers)
class AddAndReturnScaledIdentityTest(test.TestCase):
def setUp(self):
self._adder = linear_operator_addition._AddAndReturnScaledIdentity()
@test_util.run_deprecated_v1
def test_identity_plus_identity(self):
id1 = linalg.LinearOperatorIdentity(num_rows=2)
id2 = linalg.LinearOperatorIdentity(num_rows=2, batch_shape=[3])
hints = linear_operator_addition._Hints(
is_positive_definite=True, is_non_singular=True)
self.assertTrue(self._adder.can_add(id1, id2))
operator = self._adder.add(id1, id2, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorScaledIdentity)
with self.cached_session():
self.assertAllClose(2 * linalg_ops.eye(num_rows=2, batch_shape=[3]),
operator.to_dense())
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
@test_util.run_deprecated_v1
def test_identity_plus_scaled_identity(self):
id1 = linalg.LinearOperatorIdentity(num_rows=2, batch_shape=[3])
id2 = linalg.LinearOperatorScaledIdentity(num_rows=2, multiplier=2.2)
hints = linear_operator_addition._Hints(
is_positive_definite=True, is_non_singular=True)
self.assertTrue(self._adder.can_add(id1, id2))
operator = self._adder.add(id1, id2, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorScaledIdentity)
with self.cached_session():
self.assertAllClose(3.2 * linalg_ops.eye(num_rows=2, batch_shape=[3]),
operator.to_dense())
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
@test_util.run_deprecated_v1
def test_scaled_identity_plus_scaled_identity(self):
id1 = linalg.LinearOperatorScaledIdentity(
num_rows=2, multiplier=[2.2, 2.2, 2.2])
id2 = linalg.LinearOperatorScaledIdentity(num_rows=2, multiplier=-1.0)
hints = linear_operator_addition._Hints(
is_positive_definite=True, is_non_singular=True)
self.assertTrue(self._adder.can_add(id1, id2))
operator = self._adder.add(id1, id2, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorScaledIdentity)
with self.cached_session():
self.assertAllClose(1.2 * linalg_ops.eye(num_rows=2, batch_shape=[3]),
operator.to_dense())
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
class AddAndReturnDiagTest(test.TestCase):
def setUp(self):
self._adder = linear_operator_addition._AddAndReturnDiag()
@test_util.run_deprecated_v1
def test_identity_plus_identity_returns_diag(self):
id1 = linalg.LinearOperatorIdentity(num_rows=2)
id2 = linalg.LinearOperatorIdentity(num_rows=2, batch_shape=[3])
hints = linear_operator_addition._Hints(
is_positive_definite=True, is_non_singular=True)
self.assertTrue(self._adder.can_add(id1, id2))
operator = self._adder.add(id1, id2, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorDiag)
with self.cached_session():
self.assertAllClose(2 * linalg_ops.eye(num_rows=2, batch_shape=[3]),
operator.to_dense())
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
@test_util.run_deprecated_v1
def test_diag_plus_diag(self):
diag1 = rng.rand(2, 3, 4)
diag2 = rng.rand(4)
op1 = linalg.LinearOperatorDiag(diag1)
op2 = linalg.LinearOperatorDiag(diag2)
hints = linear_operator_addition._Hints(
is_positive_definite=True, is_non_singular=True)
self.assertTrue(self._adder.can_add(op1, op2))
operator = self._adder.add(op1, op2, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorDiag)
with self.cached_session():
self.assertAllClose(
linalg.LinearOperatorDiag(diag1 + diag2).to_dense(),
operator.to_dense())
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
class AddAndReturnTriLTest(test.TestCase):
def setUp(self):
self._adder = linear_operator_addition._AddAndReturnTriL()
@test_util.run_deprecated_v1
def test_diag_plus_tril(self):
diag = linalg.LinearOperatorDiag([1., 2.])
tril = linalg.LinearOperatorLowerTriangular([[10., 0.], [30., 0.]])
hints = linear_operator_addition._Hints(
is_positive_definite=True, is_non_singular=True)
self.assertTrue(self._adder.can_add(diag, diag))
self.assertTrue(self._adder.can_add(diag, tril))
operator = self._adder.add(diag, tril, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorLowerTriangular)
with self.cached_session():
self.assertAllClose([[11., 0.], [30., 2.]], operator.to_dense())
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
class AddAndReturnMatrixTest(test.TestCase):
def setUp(self):
self._adder = linear_operator_addition._AddAndReturnMatrix()
@test_util.run_deprecated_v1
def test_diag_plus_diag(self):
diag1 = linalg.LinearOperatorDiag([1., 2.])
diag2 = linalg.LinearOperatorDiag([-1., 3.])
hints = linear_operator_addition._Hints(
is_positive_definite=False, is_non_singular=False)
self.assertTrue(self._adder.can_add(diag1, diag2))
operator = self._adder.add(diag1, diag2, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorFullMatrix)
with self.cached_session():
self.assertAllClose([[0., 0.], [0., 5.]], operator.to_dense())
self.assertFalse(operator.is_positive_definite)
self.assertFalse(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
if __name__ == "__main__":
test.main()
| frreiss/tensorflow-fred | tensorflow/python/kernel_tests/linalg/linear_operator_addition_test.py | Python | apache-2.0 | 16,884 |
def main(request, response):
cookie = request.cookies.first("COOKIE_NAME", None)
response_headers = [("Content-Type", "text/javascript"),
("Access-Control-Allow-Credentials", "true")]
origin = request.headers.get("Origin", None)
if origin:
response_headers.append(("Access-Control-Allow-Origin", origin))
    cookie_value = ''
    if cookie:
        cookie_value = cookie.value
return (200, response_headers, "postMessage('"+cookie_value+"');")
| saneyuki/servo | tests/wpt/web-platform-tests/workers/modules/resources/credentials.py | Python | mpl-2.0 | 502 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import since, SparkContext
from pyspark.ml.common import _java2py, _py2java
from pyspark.ml.wrapper import _jvm
class ChiSquareTest(object):
"""
.. note:: Experimental
Conduct Pearson's independence test for every feature against the label. For each feature,
the (feature, label) pairs are converted into a contingency matrix for which the Chi-squared
statistic is computed. All label and feature values must be categorical.
The null hypothesis is that the occurrence of the outcomes is statistically independent.
:param dataset:
DataFrame of categorical labels and categorical features.
Real-valued features will be treated as categorical for each distinct value.
:param featuresCol:
Name of features column in dataset, of type `Vector` (`VectorUDT`).
:param labelCol:
Name of label column in dataset, of any numerical type.
:return:
DataFrame containing the test result for every feature against the label.
This DataFrame will contain a single Row with the following fields:
- `pValues: Vector`
- `degreesOfFreedom: Array[Int]`
- `statistics: Vector`
Each of these fields has one value per feature.
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.stat import ChiSquareTest
>>> dataset = [[0, Vectors.dense([0, 0, 1])],
... [0, Vectors.dense([1, 0, 1])],
... [1, Vectors.dense([2, 1, 1])],
... [1, Vectors.dense([3, 1, 1])]]
>>> dataset = spark.createDataFrame(dataset, ["label", "features"])
>>> chiSqResult = ChiSquareTest.test(dataset, 'features', 'label')
>>> chiSqResult.select("degreesOfFreedom").collect()[0]
Row(degreesOfFreedom=[3, 1, 0])
.. versionadded:: 2.2.0
"""
@staticmethod
@since("2.2.0")
def test(dataset, featuresCol, labelCol):
"""
Perform a Pearson's independence test using dataset.
"""
sc = SparkContext._active_spark_context
javaTestObj = _jvm().org.apache.spark.ml.stat.ChiSquareTest
args = [_py2java(sc, arg) for arg in (dataset, featuresCol, labelCol)]
return _java2py(sc, javaTestObj.test(*args))
class Correlation(object):
"""
.. note:: Experimental
Compute the correlation matrix for the input dataset of Vectors using the specified method.
Methods currently supported: `pearson` (default), `spearman`.
.. note:: For Spearman, a rank correlation, we need to create an RDD[Double] for each column
and sort it in order to retrieve the ranks and then join the columns back into an RDD[Vector],
which is fairly costly. Cache the input Dataset before calling corr with `method = 'spearman'`
to avoid recomputing the common lineage.
:param dataset:
A dataset or a dataframe.
:param column:
The name of the column of vectors for which the correlation coefficient needs
to be computed. This must be a column of the dataset, and it must contain
Vector objects.
:param method:
String specifying the method to use for computing correlation.
Supported: `pearson` (default), `spearman`.
:return:
A dataframe that contains the correlation matrix of the column of vectors. This
dataframe contains a single row and a single column of name
'$METHODNAME($COLUMN)'.
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.stat import Correlation
>>> dataset = [[Vectors.dense([1, 0, 0, -2])],
... [Vectors.dense([4, 5, 0, 3])],
... [Vectors.dense([6, 7, 0, 8])],
... [Vectors.dense([9, 0, 0, 1])]]
>>> dataset = spark.createDataFrame(dataset, ['features'])
>>> pearsonCorr = Correlation.corr(dataset, 'features', 'pearson').collect()[0][0]
>>> print(str(pearsonCorr).replace('nan', 'NaN'))
DenseMatrix([[ 1. , 0.0556..., NaN, 0.4004...],
[ 0.0556..., 1. , NaN, 0.9135...],
[ NaN, NaN, 1. , NaN],
[ 0.4004..., 0.9135..., NaN, 1. ]])
>>> spearmanCorr = Correlation.corr(dataset, 'features', method='spearman').collect()[0][0]
>>> print(str(spearmanCorr).replace('nan', 'NaN'))
DenseMatrix([[ 1. , 0.1054..., NaN, 0.4 ],
[ 0.1054..., 1. , NaN, 0.9486... ],
[ NaN, NaN, 1. , NaN],
[ 0.4 , 0.9486... , NaN, 1. ]])
.. versionadded:: 2.2.0
"""
@staticmethod
@since("2.2.0")
def corr(dataset, column, method="pearson"):
"""
Compute the correlation matrix with specified method using dataset.
"""
sc = SparkContext._active_spark_context
javaCorrObj = _jvm().org.apache.spark.ml.stat.Correlation
args = [_py2java(sc, arg) for arg in (dataset, column, method)]
return _java2py(sc, javaCorrObj.corr(*args))
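# Illustrative usage note (added; not part of the original API): as the class
# docstring recommends, cache the input DataFrame before computing a Spearman
# correlation so the per-column rank computation does not recompute the
# lineage, e.g.:
#
#   dataset.cache()
#   Correlation.corr(dataset, 'features', method='spearman')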
if __name__ == "__main__":
import doctest
import pyspark.ml.stat
from pyspark.sql import SparkSession
globs = pyspark.ml.stat.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder \
.master("local[2]") \
.appName("ml.stat tests") \
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
failure_count, test_count = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
exit(-1)
| wangyixiaohuihui/spark2-annotation | python/pyspark/ml/stat.py | Python | apache-2.0 | 6,611 |
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for TF Agents ppo_agent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl.testing import parameterized
from absl.testing.absltest import mock
import numpy as np
from six.moves import range
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow_probability as tfp
from tf_agents.agents.ppo import ppo_agent
from tf_agents.drivers import dynamic_episode_driver
from tf_agents.environments import random_tf_environment
from tf_agents.networks import actor_distribution_network
from tf_agents.networks import actor_distribution_rnn_network
from tf_agents.networks import network
from tf_agents.networks import sequential
from tf_agents.networks import utils as network_utils
from tf_agents.networks import value_network
from tf_agents.networks import value_rnn_network
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.specs import distribution_spec
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step as ts
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
from tf_agents.utils import nest_utils
from tf_agents.utils import test_utils
FLAGS = flags.FLAGS
class DummyActorNet(network.DistributionNetwork):
def __init__(self,
input_spec,
action_spec,
preprocessing_layers=None,
name=None):
output_spec = self._get_normal_distribution_spec(action_spec)
super(DummyActorNet, self).__init__(
input_spec, (), output_spec=output_spec, name='DummyActorNet')
self._action_spec = action_spec
self._flat_action_spec = tf.nest.flatten(self._action_spec)[0]
self._dummy_layers = (preprocessing_layers or []) + [
tf.keras.layers.Dense(
self._flat_action_spec.shape.num_elements() * 2,
kernel_initializer=tf.constant_initializer([[2.0, 1.0], [1.0, 1.0]
]),
bias_initializer=tf.constant_initializer([5.0, 5.0]),
activation=None,
)
]
def _get_normal_distribution_spec(self, sample_spec):
is_multivariate = sample_spec.shape.ndims > 0
param_properties = tfp.distributions.Normal.parameter_properties()
input_param_spec = { # pylint: disable=g-complex-comprehension
name: tensor_spec.TensorSpec(
shape=properties.shape_fn(sample_spec.shape),
dtype=sample_spec.dtype)
for name, properties in param_properties.items()
}
def distribution_builder(*args, **kwargs):
if is_multivariate:
# For backwards compatibility, and because MVNDiag does not support
# `param_static_shapes`, even when using MVNDiag the spec
# continues to use the terms 'loc' and 'scale'. Here we have to massage
# the construction to use 'scale' for kwarg 'scale_diag'. Since they
        # have the same shape and dtype expectations, this is okay.
kwargs = kwargs.copy()
kwargs['scale_diag'] = kwargs['scale']
del kwargs['scale']
return tfp.distributions.MultivariateNormalDiag(*args, **kwargs)
else:
return tfp.distributions.Normal(*args, **kwargs)
return distribution_spec.DistributionSpec(
distribution_builder, input_param_spec, sample_spec=sample_spec)
def call(self, inputs, step_type=None, network_state=()):
del step_type
hidden_state = tf.cast(tf.nest.flatten(inputs), tf.float32)[0]
# Calls coming from agent.train() have a time dimension. Direct loss calls
# may not have a time dimension. In order to make BatchSquash work, we need
# to specify the outer dimension properly.
has_time_dim = nest_utils.get_outer_rank(inputs,
self.input_tensor_spec) == 2
outer_rank = 2 if has_time_dim else 1
batch_squash = network_utils.BatchSquash(outer_rank)
hidden_state = batch_squash.flatten(hidden_state)
for layer in self._dummy_layers:
hidden_state = layer(hidden_state)
actions, stdevs = tf.split(hidden_state, 2, axis=1)
actions = batch_squash.unflatten(actions)
stdevs = batch_squash.unflatten(stdevs)
actions = tf.nest.pack_sequence_as(self._action_spec, [actions])
stdevs = tf.nest.pack_sequence_as(self._action_spec, [stdevs])
return self.output_spec.build_distribution(
loc=actions, scale=stdevs), network_state
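# Note added for clarity (not in the original test): with the constant
# initializers above, DummyActorNet maps an observation [a, b] to a normal
# distribution with loc = 2*a + b + 5 and scale = a + b + 5 (e.g. [1, 2] ->
# loc 9, scale 8; [3, 4] -> loc 15, scale 12). The hand-computed expected
# losses and entropies in the tests below follow from these values.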
def create_sequential_actor_net(ndims: int):
def create_dist(loc_and_scale):
return {
'my_action': tfp.bijectors.Tanh()(
tfp.distributions.MultivariateNormalDiag(
loc=loc_and_scale[..., :ndims],
scale_diag=0.01 + tf.math.softplus(loc_and_scale[..., ndims:]),
validate_args=True,
name='my_action_normal',
))
}
return sequential.Sequential([
tf.keras.layers.Dense(4),
tf.keras.layers.Dense(ndims * 2),
tf.keras.layers.Lambda(create_dist)
])
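# Minimal usage sketch (added; not part of the original test). Assuming eager
# mode, the sequential actor net maps a batch of observations to a dict of
# tanh-squashed normal distributions keyed by 'my_action':
#
#   net = create_sequential_actor_net(ndims=1)
#   dist, _ = net(tf.ones([4, 2]), network_state=())
#   sample = dist['my_action'].sample()  # shape [4, 1], values in (-1, 1)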
class DummyValueNet(network.Network):
def __init__(self,
observation_spec,
preprocessing_layers=None,
name=None,
outer_rank=1):
super(DummyValueNet, self).__init__(observation_spec, (), 'DummyValueNet')
self._outer_rank = outer_rank
self._dummy_layers = (preprocessing_layers or []) + [
tf.keras.layers.Dense(
1,
kernel_initializer=tf.constant_initializer([2, 1]),
bias_initializer=tf.constant_initializer([5]))
]
def call(self, inputs, step_type=None, network_state=()):
del step_type
hidden_state = tf.cast(tf.nest.flatten(inputs), tf.float32)[0]
batch_squash = network_utils.BatchSquash(self._outer_rank)
hidden_state = batch_squash.flatten(hidden_state)
for layer in self._dummy_layers:
hidden_state = layer(hidden_state)
value_pred = tf.squeeze(batch_squash.unflatten(hidden_state), axis=-1)
return value_pred, network_state
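# Note added for clarity (not in the original test): with the constant
# initializers above, DummyValueNet maps an observation [a, b] to the scalar
# 2*a + b + 5, which is where the value predictions [9., 15., 21.] used
# throughout these tests come from ([1, 2] -> 9, [3, 4] -> 15, [5, 6] -> 21).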
def _compute_returns_fn(rewards, discounts, next_state_return=0.0):
"""Python implementation of computing discounted returns."""
returns = np.zeros_like(rewards)
for t in range(len(returns) - 1, -1, -1):
returns[t] = rewards[t] + discounts[t] * next_state_return
next_state_return = returns[t]
return returns
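# Worked example (illustrative; not from the original file): with
# rewards = [1., 1., 1.] and discounts = [0.9, 0.9, 0.0], the backward loop
# yields returns[2] = 1.0, returns[1] = 1 + 0.9 * 1.0 = 1.9, and
# returns[0] = 1 + 0.9 * 1.9 = 2.71.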
def _create_joint_actor_value_networks(observation_spec, action_spec):
shared_layers = [
tf.keras.layers.Dense(
tf.nest.flatten(observation_spec)[0].shape.num_elements(),
kernel_initializer=tf.constant_initializer([[3.0, 1.0], [1.0, 1.0]]),
bias_initializer=tf.constant_initializer([5.0, 5.0]),
activation=None,
)
]
actor_net = DummyActorNet(observation_spec, action_spec, shared_layers)
value_net = DummyValueNet(observation_spec, shared_layers)
return actor_net, value_net
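# Note added for clarity (not in the original test): both networks receive the
# same `shared_layers` list, so they share that Dense layer's variables. The
# shared_vars_l2_reg test below relies on this to distinguish shared weights
# from the per-network actor and value weights.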
def _default():
return tf.distribute.get_strategy()
def _one_device():
return tf.distribute.OneDeviceStrategy('/cpu:0')
def _mirrored():
return tf.distribute.MirroredStrategy()
class PPOAgentTest(parameterized.TestCase, test_utils.TestCase):
def setUp(self):
super(PPOAgentTest, self).setUp()
tf.compat.v1.enable_resource_variables()
self._obs_spec = tensor_spec.TensorSpec([2], tf.float32)
self._time_step_spec = ts.time_step_spec(self._obs_spec)
self._action_spec = tensor_spec.BoundedTensorSpec([1], tf.float32, -1, 1)
# Ensure that there are 4 CPU devices available for the mirrored strategy.
physical_devices = tf.config.list_physical_devices('CPU')
try:
tf.config.set_logical_device_configuration(physical_devices[0], [
tf.config.LogicalDeviceConfiguration(),
tf.config.LogicalDeviceConfiguration(),
tf.config.LogicalDeviceConfiguration(),
tf.config.LogicalDeviceConfiguration(),
])
logical_devices = tf.config.list_logical_devices('CPU')
assert len(logical_devices) == 4
except RuntimeError:
# Cannot modify logical devices once initialized.
pass
def testCreateAgent(self):
agent = ppo_agent.PPOAgent(
self._time_step_spec,
self._action_spec,
tf.compat.v1.train.AdamOptimizer(),
actor_net=DummyActorNet(self._obs_spec, self._action_spec),
value_net=DummyValueNet(self._obs_spec),
check_numerics=True)
agent.initialize()
@parameterized.named_parameters(('Default', _default),
('OneDevice', _one_device),
('Mirrored', _mirrored))
def testComputeAdvantagesNoGae(self, strategy_fn):
with strategy_fn().scope():
agent = ppo_agent.PPOAgent(
self._time_step_spec,
self._action_spec,
tf.compat.v1.train.AdamOptimizer(),
actor_net=DummyActorNet(self._obs_spec, self._action_spec),
value_net=DummyValueNet(self._obs_spec),
normalize_observations=False,
use_gae=False)
agent.initialize()
rewards = tf.constant([[1.0] * 9, [1.0] * 9])
discounts = tf.constant([[1.0, 1.0, 1.0, 1.0, 0.0, 0.9, 0.9, 0.9, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0, 0.9, 0.9, 0.9, 0.0]])
returns = tf.constant([[5.0, 4.0, 3.0, 2.0, 1.0, 3.439, 2.71, 1.9, 1.0],
[3.0, 4.0, 7.0, 2.0, -1.0, 5.439, 2.71, -2.9, 1.0]])
value_preds = tf.constant([
[3.0] * 10,
[3.0] * 10,
]) # One extra for final time_step.
expected_advantages = returns - value_preds[:, :-1]
advantages = agent.compute_advantages(rewards, returns, discounts,
value_preds)
self.assertAllClose(expected_advantages, advantages)
@parameterized.named_parameters(('Default', _default),
('OneDevice', _one_device),
('Mirrored', _mirrored))
def testComputeAdvantagesWithGae(self, strategy_fn):
gae_lambda = 0.95
with strategy_fn().scope():
agent = ppo_agent.PPOAgent(
self._time_step_spec,
self._action_spec,
tf.compat.v1.train.AdamOptimizer(),
actor_net=DummyActorNet(
self._obs_spec,
self._action_spec,
),
value_net=DummyValueNet(self._obs_spec),
normalize_observations=False,
use_gae=True,
lambda_value=gae_lambda)
agent.initialize()
rewards = tf.constant([[1.0] * 9, [1.0] * 9])
discounts = tf.constant([[1.0, 1.0, 1.0, 1.0, 0.0, 0.9, 0.9, 0.9, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0, 0.9, 0.9, 0.9, 0.0]])
returns = tf.constant([[5.0, 4.0, 3.0, 2.0, 1.0, 3.439, 2.71, 1.9, 1.0],
[5.0, 4.0, 3.0, 2.0, 1.0, 3.439, 2.71, 1.9, 1.0]])
value_preds = tf.constant([[3.0] * 10,
[3.0] * 10]) # One extra for final time_step.
gae_vals = tf.constant([[
2.0808625, 1.13775, 0.145, -0.9, -2.0, 0.56016475, -0.16355, -1.01, -2.0
], [
2.0808625, 1.13775, 0.145, -0.9, -2.0, 0.56016475, -0.16355, -1.01, -2.0
]])
advantages = agent.compute_advantages(rewards, returns, discounts,
value_preds)
self.assertAllClose(gae_vals, advantages)
@parameterized.named_parameters(('Default', _default),
('OneDevice', _one_device),
('Mirrored', _mirrored))
def testSequencePreprocess(self, strategy_fn):
with strategy_fn().scope():
counter = common.create_variable('test_train_counter')
batch_size = 2
n_time_steps = 3
agent = ppo_agent.PPOAgent(
self._time_step_spec,
self._action_spec,
tf.compat.v1.train.AdamOptimizer(),
actor_net=DummyActorNet(
self._obs_spec,
self._action_spec,
),
value_net=DummyValueNet(self._obs_spec),
normalize_observations=False,
num_epochs=1,
use_gae=False,
use_td_lambda_return=False,
compute_value_and_advantage_in_train=False,
train_step_counter=counter)
agent.initialize()
observations = tf.constant(
[
[[1, 2], [3, 4], [5, 6]],
[[1, 2], [3, 4], [5, 6]],
],
dtype=tf.float32)
mid_time_step_val = ts.StepType.MID.tolist()
time_steps = ts.TimeStep(
step_type=tf.constant(
[[mid_time_step_val] * n_time_steps] * batch_size, dtype=tf.int32),
reward=tf.constant([[1] * n_time_steps] * batch_size, dtype=tf.float32),
discount=tf.constant(
[[1] * n_time_steps] * batch_size, dtype=tf.float32),
observation=observations)
actions = tf.constant([[[0], [1], [1]], [[0], [1], [1]]], dtype=tf.float32)
old_action_distribution_parameters = {
'loc':
tf.constant(
[[[0.0]] * n_time_steps] * batch_size, dtype=tf.float32),
'scale':
tf.constant(
[[[1.0]] * n_time_steps] * batch_size, dtype=tf.float32),
}
value_preds = tf.constant([[9., 15., 21.], [9., 15., 21.]],
dtype=tf.float32)
policy_info = {
'dist_params': old_action_distribution_parameters,
'value_prediction': value_preds,
}
experience = trajectory.Trajectory(time_steps.step_type, observations,
actions, policy_info,
time_steps.step_type, time_steps.reward,
time_steps.discount)
returned_experience = agent.preprocess_sequence(experience)
self.evaluate(tf.compat.v1.initialize_all_variables())
self.assertAllClose(observations, returned_experience.observation)
self.assertAllClose(actions, returned_experience.action)
expected_value_preds = tf.constant([[9., 15., 21.], [9., 15., 21.]],
dtype=tf.float32)
(_, _, next_time_steps) = trajectory.to_transition(experience)
expected_returns, expected_advantages = agent.compute_return_and_advantage(
next_time_steps, expected_value_preds)
self.assertAllClose(old_action_distribution_parameters,
returned_experience.policy_info['dist_params'])
self.assertEqual((batch_size, n_time_steps),
returned_experience.policy_info['return'].shape)
self.assertAllClose(expected_returns,
returned_experience.policy_info['return'][:, :-1])
self.assertEqual((batch_size, n_time_steps),
returned_experience.policy_info['advantage'].shape)
self.assertAllClose(expected_advantages,
returned_experience.policy_info['advantage'][:, :-1])
@parameterized.named_parameters(('Default', _default),
('OneDevice', _one_device),
('Mirrored', _mirrored))
def testSequencePreprocessNotBatched(self, strategy_fn):
with strategy_fn().scope():
counter = common.create_variable('test_train_counter')
n_time_steps = 3
agent = ppo_agent.PPOAgent(
self._time_step_spec,
self._action_spec,
tf.compat.v1.train.AdamOptimizer(),
actor_net=DummyActorNet(
self._obs_spec,
self._action_spec,
),
value_net=DummyValueNet(self._obs_spec),
normalize_observations=False,
num_epochs=1,
use_gae=False,
use_td_lambda_return=False,
compute_value_and_advantage_in_train=False,
train_step_counter=counter)
agent.initialize()
observations = tf.constant([[1, 2], [3, 4], [5, 6]], dtype=tf.float32)
mid_time_step_val = ts.StepType.MID.tolist()
time_steps = ts.TimeStep(
step_type=tf.constant(
[mid_time_step_val] * n_time_steps, dtype=tf.int32),
reward=tf.constant([1] * n_time_steps, dtype=tf.float32),
discount=tf.constant([1] * n_time_steps, dtype=tf.float32),
observation=observations)
actions = tf.constant([[0], [1], [1]], dtype=tf.float32)
old_action_distribution_parameters = {
'loc': tf.constant([[0.0]] * n_time_steps, dtype=tf.float32),
'scale': tf.constant([[1.0]] * n_time_steps, dtype=tf.float32),
}
value_preds = tf.constant([9., 15., 21.], dtype=tf.float32)
policy_info = {
'dist_params': old_action_distribution_parameters,
'value_prediction': value_preds,
}
experience = trajectory.Trajectory(time_steps.step_type, observations,
actions, policy_info,
time_steps.step_type, time_steps.reward,
time_steps.discount)
returned_experience = agent.preprocess_sequence(experience)
self.evaluate(tf.compat.v1.initialize_all_variables())
self.assertAllClose(observations, returned_experience.observation)
self.assertAllClose(actions, returned_experience.action)
self.assertAllClose(old_action_distribution_parameters,
returned_experience.policy_info['dist_params'])
self.assertEqual(n_time_steps,
returned_experience.policy_info['return'].shape)
self.assertAllClose([40.4821, 30.79],
returned_experience.policy_info['return'][:-1])
self.assertEqual(n_time_steps,
returned_experience.policy_info['advantage'].shape)
self.assertAllClose([31.482101, 15.790001],
returned_experience.policy_info['advantage'][:-1])
@parameterized.named_parameters(
('DefaultOneEpochValueInTrain', _default, 1, True, True),
('DefaultFiveEpochsValueInCollect', _default, 5, False, False),
('DefaultIncompEpisodesReturnNonZeroLoss', _default, 1, False, True),
('OneDeviceOneEpochValueInTrain', _one_device, 1, True, True),
('OneDeviceFiveEpochsValueInCollect', _one_device, 5, False, False),
('OneDeviceIncompEpisodesReturnNonZeroLoss', _one_device, 1, False, True),
('MirroredOneEpochValueInTrain', _mirrored, 1, True, True),
('MirroredFiveEpochsValueInCollect', _mirrored, 5, False, False),
('MirroredIncompEpisodesReturnNonZeroLoss', _mirrored, 1, False, True))
def testTrain(self, strategy_fn, num_epochs, use_td_lambda_return,
compute_value_and_advantage_in_train):
    # The train_step_counter below is incremented once per epoch; the test
    # uses it to verify how many training epochs actually ran.
with strategy_fn().scope():
counter = common.create_variable('test_train_counter')
agent = ppo_agent.PPOAgent(
self._time_step_spec,
self._action_spec,
tf.compat.v1.train.AdamOptimizer(),
actor_net=DummyActorNet(
self._obs_spec,
self._action_spec,
),
value_net=DummyValueNet(self._obs_spec),
normalize_observations=False,
num_epochs=num_epochs,
use_gae=use_td_lambda_return,
use_td_lambda_return=use_td_lambda_return,
compute_value_and_advantage_in_train=(
compute_value_and_advantage_in_train),
train_step_counter=counter)
agent.initialize()
observations = tf.constant(
[
[[1, 2], [3, 4], [5, 6]],
[[1, 2], [3, 4], [5, 6]],
],
dtype=tf.float32)
mid_time_step_val = ts.StepType.MID.tolist()
time_steps = ts.TimeStep(
step_type=tf.constant([[mid_time_step_val] * 3] * 2, dtype=tf.int32),
reward=tf.constant([[1] * 3] * 2, dtype=tf.float32),
discount=tf.constant([[1] * 3] * 2, dtype=tf.float32),
observation=observations)
actions = tf.constant([[[0], [1], [1]], [[0], [1], [1]]], dtype=tf.float32)
action_distribution_parameters = {
'loc': tf.constant([[[0.0]] * 3] * 2, dtype=tf.float32),
'scale': tf.constant([[[1.0]] * 3] * 2, dtype=tf.float32),
}
value_preds = tf.constant([[9., 15., 21.], [9., 15., 21.]],
dtype=tf.float32)
policy_info = {
'dist_params': action_distribution_parameters,
}
if not compute_value_and_advantage_in_train:
policy_info['value_prediction'] = value_preds
experience = trajectory.Trajectory(time_steps.step_type, observations,
actions, policy_info,
time_steps.step_type, time_steps.reward,
time_steps.discount)
if not compute_value_and_advantage_in_train:
experience = agent._preprocess(experience)
if tf.executing_eagerly():
loss = lambda: agent.train(experience)
else:
loss = agent.train(experience)
# Assert that counter starts out at zero.
self.evaluate(tf.compat.v1.initialize_all_variables())
self.assertEqual(0, self.evaluate(counter))
loss_type = self.evaluate(loss)
loss_numpy = loss_type.loss
# Assert that loss is not zero as we are training in a non-episodic env.
self.assertNotEqual(
loss_numpy,
0.0,
msg=('Loss is exactly zero, looks like no training '
'was performed due to incomplete episodes.'))
# Assert that train_op ran increment_counter num_epochs times.
self.assertEqual(num_epochs, self.evaluate(counter))
@parameterized.named_parameters(('Default', _default),
('OneDevice', _one_device),
('Mirrored', _mirrored))
def testGetEpochLoss(self, strategy_fn):
with strategy_fn().scope():
agent = ppo_agent.PPOAgent(
self._time_step_spec,
self._action_spec,
tf.compat.v1.train.AdamOptimizer(),
actor_net=DummyActorNet(self._obs_spec, self._action_spec),
value_net=DummyValueNet(self._obs_spec),
normalize_observations=False,
normalize_rewards=False,
value_pred_loss_coef=1.0,
policy_l2_reg=1e-4,
value_function_l2_reg=1e-4,
entropy_regularization=0.1,
importance_ratio_clipping=10,
)
agent.initialize()
observations = tf.constant([[1, 2], [3, 4], [1, 2], [3, 4]],
dtype=tf.float32)
time_steps = ts.restart(observations, batch_size=2)
actions = tf.constant([[0], [1], [0], [1]], dtype=tf.float32)
returns = tf.constant([1.9, 1.0, 1.9, 1.0], dtype=tf.float32)
sample_action_log_probs = tf.constant([0.9, 0.3, 0.9, 0.3],
dtype=tf.float32)
advantages = tf.constant([1.9, 1.0, 1.9, 1.0], dtype=tf.float32)
weights = tf.constant([1.0, 1.0, 0.0, 0.0], dtype=tf.float32)
sample_action_distribution_parameters = {
'loc': tf.constant([[9.0], [15.0], [9.0], [15.0]], dtype=tf.float32),
'scale': tf.constant([[8.0], [12.0], [8.0], [12.0]], dtype=tf.float32),
}
train_step = tf.compat.v1.train.get_or_create_global_step()
loss_info = agent.get_loss(
time_steps,
actions,
sample_action_log_probs,
returns,
advantages,
sample_action_distribution_parameters,
weights,
train_step,
debug_summaries=False)
self.evaluate(tf.compat.v1.global_variables_initializer())
total_loss, extra_loss_info = self.evaluate(loss_info)
(policy_gradient_loss, value_estimation_loss, l2_regularization_loss,
entropy_reg_loss, kl_penalty_loss) = extra_loss_info
# Check loss values are as expected. Factor of 2/4 is because four timesteps
# were included in the data, but two were masked out. Reduce_means in losses
# will divide by 4, but computed loss values are for first 2 timesteps.
expected_pg_loss = -0.0164646133 * 2 / 4
expected_ve_loss = 123.205 * 2 / 4
expected_l2_loss = 1e-4 * 12 * 2 / 4
expected_ent_loss = -0.370111 * 2 / 4
expected_kl_penalty_loss = 0.0
self.assertAllClose(
expected_pg_loss + expected_ve_loss + expected_l2_loss +
expected_ent_loss + expected_kl_penalty_loss,
total_loss,
atol=0.001,
rtol=0.001)
self.assertAllClose(expected_pg_loss, policy_gradient_loss)
self.assertAllClose(expected_ve_loss, value_estimation_loss)
self.assertAllClose(
expected_l2_loss, l2_regularization_loss, atol=0.001, rtol=0.001)
self.assertAllClose(expected_ent_loss, entropy_reg_loss)
self.assertAllClose(expected_kl_penalty_loss, kl_penalty_loss)
@parameterized.named_parameters(
('DefaultIsZero', _default, 0), ('DefaultNotZero', _default, 1),
('OneDeviceIsZero', _one_device, 0), ('OneDeviceNotZero', _one_device, 1),
('MirroredIsZero', _mirrored, 0), ('MirroredNotZero', _mirrored, 1))
def testL2RegularizationLoss(self, strategy_fn, not_zero):
l2_reg = 1e-4 * not_zero
with strategy_fn().scope():
agent = ppo_agent.PPOAgent(
self._time_step_spec,
self._action_spec,
tf.compat.v1.train.AdamOptimizer(),
actor_net=DummyActorNet(self._obs_spec, self._action_spec),
value_net=DummyValueNet(self._obs_spec),
normalize_observations=False,
policy_l2_reg=l2_reg,
value_function_l2_reg=l2_reg,
)
agent.initialize()
# Call other loss functions to make sure trainable variables are
# constructed.
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_steps = ts.restart(observations, batch_size=2)
actions = tf.constant([[0], [1]], dtype=tf.float32)
returns = tf.constant([1.9, 1.0], dtype=tf.float32)
sample_action_log_probs = tf.constant([[0.9], [0.3]], dtype=tf.float32)
advantages = tf.constant([1.9, 1.0], dtype=tf.float32)
current_policy_distribution, unused_network_state = DummyActorNet(
self._obs_spec, self._action_spec)(time_steps.observation,
time_steps.step_type, ())
weights = tf.ones_like(advantages)
agent.policy_gradient_loss(time_steps, actions, sample_action_log_probs,
advantages, current_policy_distribution, weights)
agent.value_estimation_loss(time_steps, returns, weights)
# Now request L2 regularization loss.
# Value function weights are [2, 1], actor net weights are [2, 1, 1, 1].
expected_loss = l2_reg * ((2**2 + 1) + (2**2 + 1 + 1 + 1))
# Make sure the network is built before we try to get variables.
agent.policy.action(
tensor_spec.sample_spec_nest(self._time_step_spec, outer_dims=(2,)))
loss = agent.l2_regularization_loss()
self.evaluate(tf.compat.v1.global_variables_initializer())
loss_ = self.evaluate(loss)
self.assertAllClose(loss_, expected_loss)
@parameterized.named_parameters(
('DefaultIsZero', _default, 0), ('DefaultNotZero', _default, 1),
('OneDeviceIsZero', _one_device, 0), ('OneDeviceNotZero', _one_device, 1),
('MirroredIsZero', _mirrored, 0), ('MirroredNotZero', _mirrored, 1))
def testL2RegularizationLossWithSharedVariables(self, strategy_fn, not_zero):
policy_l2_reg = 4e-4 * not_zero
value_function_l2_reg = 2e-4 * not_zero
shared_vars_l2_reg = 1e-4 * not_zero
with strategy_fn().scope():
actor_net, value_net = _create_joint_actor_value_networks(
self._obs_spec, self._action_spec)
agent = ppo_agent.PPOAgent(
self._time_step_spec,
self._action_spec,
tf.compat.v1.train.AdamOptimizer(),
actor_net=actor_net,
value_net=value_net,
normalize_observations=False,
policy_l2_reg=policy_l2_reg,
value_function_l2_reg=value_function_l2_reg,
shared_vars_l2_reg=shared_vars_l2_reg,
)
agent.initialize()
# Call other loss functions to make sure trainable variables are
# constructed.
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_steps = ts.restart(observations, batch_size=2)
actions = tf.constant([[0], [1]], dtype=tf.float32)
returns = tf.constant([1.9, 1.0], dtype=tf.float32)
sample_action_log_probs = tf.constant([[0.9], [0.3]], dtype=tf.float32)
advantages = tf.constant([1.9, 1.0], dtype=tf.float32)
current_policy_distribution, unused_network_state = DummyActorNet(
self._obs_spec, self._action_spec)(time_steps.observation,
time_steps.step_type, ())
weights = tf.ones_like(advantages)
agent.policy_gradient_loss(time_steps, actions, sample_action_log_probs,
advantages, current_policy_distribution, weights)
agent.value_estimation_loss(time_steps, returns, weights)
# Now request L2 regularization loss.
# Value function weights are [2, 1], actor net weights are [2, 1, 1, 1],
# shared weights are [3, 1, 1, 1].
expected_loss = value_function_l2_reg * (2**2 + 1) + policy_l2_reg * (
2**2 + 1 + 1 + 1) + shared_vars_l2_reg * (3**2 + 1 + 1 + 1)
# Make sure the network is built before we try to get variables.
agent.policy.action(
tensor_spec.sample_spec_nest(self._time_step_spec, outer_dims=(2,)))
loss = agent.l2_regularization_loss()
self.evaluate(tf.compat.v1.global_variables_initializer())
loss_ = self.evaluate(loss)
self.assertAllClose(loss_, expected_loss)
@parameterized.named_parameters(
('DefaultIsZero', _default, 0), ('DefaultNotZero', _default, 1),
('OneDeviceIsZero', _one_device, 0), ('OneDeviceNotZero', _one_device, 1),
('MirroredIsZero', _mirrored, 0), ('MirroredNotZero', _mirrored, 1))
def testEntropyRegularizationLoss(self, strategy_fn, not_zero):
ent_reg = 0.1 * not_zero
with strategy_fn().scope():
agent = ppo_agent.PPOAgent(
self._time_step_spec,
self._action_spec,
tf.compat.v1.train.AdamOptimizer(),
actor_net=DummyActorNet(self._obs_spec, self._action_spec),
value_net=DummyValueNet(self._obs_spec),
normalize_observations=False,
entropy_regularization=ent_reg,
)
agent.initialize()
# Call other loss functions to make sure trainable variables are
# constructed.
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_steps = ts.restart(observations, batch_size=2)
actions = tf.constant([[0], [1]], dtype=tf.float32)
returns = tf.constant([1.9, 1.0], dtype=tf.float32)
sample_action_log_probs = tf.constant([[0.9], [0.3]], dtype=tf.float32)
advantages = tf.constant([1.9, 1.0], dtype=tf.float32)
weights = tf.ones_like(advantages)
current_policy_distribution, unused_network_state = DummyActorNet(
self._obs_spec, self._action_spec)(time_steps.observation,
time_steps.step_type, ())
agent.policy_gradient_loss(time_steps, actions, sample_action_log_probs,
advantages, current_policy_distribution, weights)
agent.value_estimation_loss(time_steps, returns, weights)
# Now request entropy regularization loss.
    # With the dummy networks above the action stdevs are 8 and 12, giving a
    # mean entropy of ~3.70111.
expected_loss = -3.70111 * ent_reg
loss = agent.entropy_regularization_loss(time_steps,
current_policy_distribution,
weights)
self.evaluate(tf.compat.v1.global_variables_initializer())
loss_ = self.evaluate(loss)
self.assertAllClose(loss_, expected_loss)
@parameterized.named_parameters(('Default', _default),
('OneDevice', _one_device),
('Mirrored', _mirrored))
def testValueEstimationLoss(self, strategy_fn):
with strategy_fn().scope():
agent = ppo_agent.PPOAgent(
self._time_step_spec,
self._action_spec,
tf.compat.v1.train.AdamOptimizer(),
actor_net=DummyActorNet(self._obs_spec, self._action_spec),
value_net=DummyValueNet(self._obs_spec),
value_pred_loss_coef=1.0,
normalize_observations=False,
)
agent.initialize()
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_steps = ts.restart(observations, batch_size=2)
returns = tf.constant([1.9, 1.0], dtype=tf.float32)
weights = tf.ones_like(returns)
expected_loss = 123.205
loss = agent.value_estimation_loss(time_steps, returns, weights)
self.evaluate(tf.compat.v1.global_variables_initializer())
loss_ = self.evaluate(loss)
self.assertAllClose(loss_, expected_loss)
@parameterized.named_parameters(('Default', _default),
('OneDevice', _one_device),
('Mirrored', _mirrored))
def testPolicyGradientLoss(self, strategy_fn):
with strategy_fn().scope():
actor_net = DummyActorNet(self._obs_spec, self._action_spec)
agent = ppo_agent.PPOAgent(
self._time_step_spec,
self._action_spec,
tf.compat.v1.train.AdamOptimizer(),
normalize_observations=False,
normalize_rewards=False,
actor_net=actor_net,
value_net=DummyValueNet(self._obs_spec),
importance_ratio_clipping=10.0,
)
agent.initialize()
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_steps = ts.restart(observations, batch_size=2)
actions = tf.constant([[0], [1]], dtype=tf.float32)
sample_action_log_probs = tf.constant([0.9, 0.3], dtype=tf.float32)
advantages = tf.constant([1.9, 1.0], dtype=tf.float32)
weights = tf.ones_like(advantages)
current_policy_distribution, unused_network_state = actor_net(
time_steps.observation, time_steps.step_type, ())
expected_loss = -0.0164646133
loss = agent.policy_gradient_loss(time_steps, actions,
sample_action_log_probs, advantages,
current_policy_distribution, weights)
self.evaluate(tf.compat.v1.global_variables_initializer())
loss_ = self.evaluate(loss)
self.assertAllClose(loss_, expected_loss)
def testKlPenaltyLoss(self):
actor_net = actor_distribution_network.ActorDistributionNetwork(
self._time_step_spec.observation,
self._action_spec,
fc_layer_params=None)
value_net = value_network.ValueNetwork(
self._time_step_spec.observation, fc_layer_params=None)
agent = ppo_agent.PPOAgent(
self._time_step_spec,
self._action_spec,
tf.compat.v1.train.AdamOptimizer(),
actor_net=actor_net,
value_net=value_net,
kl_cutoff_factor=5.0,
adaptive_kl_target=0.1,
kl_cutoff_coef=100,
)
agent.kl_cutoff_loss = mock.MagicMock(
return_value=tf.constant(3.0, dtype=tf.float32))
agent.adaptive_kl_loss = mock.MagicMock(
return_value=tf.constant(4.0, dtype=tf.float32))
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_steps = ts.restart(observations, batch_size=2)
action_distribution_parameters = {
'loc': tf.constant([[1.0], [1.0]], dtype=tf.float32),
'scale': tf.constant([[1.0], [1.0]], dtype=tf.float32),
}
current_policy_distribution, unused_network_state = DummyActorNet(
self._obs_spec, self._action_spec)(time_steps.observation,
time_steps.step_type, ())
weights = tf.ones_like(time_steps.discount)
expected_kl_penalty_loss = 7.0
kl_penalty_loss = agent.kl_penalty_loss(time_steps,
action_distribution_parameters,
current_policy_distribution,
weights)
self.evaluate(tf.compat.v1.global_variables_initializer())
kl_penalty_loss_ = self.evaluate(kl_penalty_loss)
self.assertEqual(expected_kl_penalty_loss, kl_penalty_loss_)
@parameterized.named_parameters(
('DefaultIsZero', _default, 0), ('DefaultNotZero', _default, 1),
('OneDeviceIsZero', _one_device, 0), ('OneDeviceNotZero', _one_device, 1),
('MirroredIsZero', _mirrored, 0), ('MirroredNotZero', _mirrored, 1))
def testKlCutoffLoss(self, strategy_fn, not_zero):
kl_cutoff_coef = 30.0 * not_zero
with strategy_fn().scope():
actor_net = actor_distribution_network.ActorDistributionNetwork(
self._time_step_spec.observation,
self._action_spec,
fc_layer_params=None)
value_net = value_network.ValueNetwork(
self._time_step_spec.observation, fc_layer_params=None)
agent = ppo_agent.PPOAgent(
self._time_step_spec,
self._action_spec,
tf.compat.v1.train.AdamOptimizer(),
actor_net=actor_net,
value_net=value_net,
kl_cutoff_factor=5.0,
adaptive_kl_target=0.1,
kl_cutoff_coef=kl_cutoff_coef,
)
agent.initialize()
kl_divergence = tf.constant([[1.5, -0.5, 6.5, -1.5, -2.3]],
dtype=tf.float32)
expected_kl_cutoff_loss = kl_cutoff_coef * (.24**2) # (0.74 - 0.5) ^ 2
loss = agent.kl_cutoff_loss(kl_divergence)
self.evaluate(tf.compat.v1.global_variables_initializer())
loss_ = self.evaluate(loss)
self.assertAllClose([loss_], [expected_kl_cutoff_loss])
@parameterized.named_parameters(('Default', _default),
('OneDevice', _one_device),
('Mirrored', _mirrored))
def testAdaptiveKlLoss(self, strategy_fn):
with strategy_fn().scope():
actor_net = actor_distribution_network.ActorDistributionNetwork(
self._time_step_spec.observation,
self._action_spec,
fc_layer_params=None)
value_net = value_network.ValueNetwork(
self._time_step_spec.observation, fc_layer_params=None)
agent = ppo_agent.PPOAgent(
self._time_step_spec,
self._action_spec,
tf.compat.v1.train.AdamOptimizer(),
actor_net=actor_net,
value_net=value_net,
initial_adaptive_kl_beta=1.0,
adaptive_kl_target=10.0,
adaptive_kl_tolerance=0.5,
)
agent.initialize()
# Initialize variables
self.evaluate(tf.compat.v1.global_variables_initializer())
# Loss should not change if data kl is target kl.
loss_1 = agent.adaptive_kl_loss([10.0])
loss_2 = agent.adaptive_kl_loss([10.0])
self.assertEqual(self.evaluate(loss_1), self.evaluate(loss_2))
# If data kl is low, kl penalty should decrease between calls.
loss_1 = self.evaluate(agent.adaptive_kl_loss([1.0]))
adaptive_kl_beta_update_fn = common.function(agent.update_adaptive_kl_beta)
self.evaluate(adaptive_kl_beta_update_fn([1.0]))
loss_2 = self.evaluate(agent.adaptive_kl_loss([1.0]))
self.assertGreater(loss_1, loss_2)
    # If data kl is high, kl penalty should increase between calls.
loss_1 = self.evaluate(agent.adaptive_kl_loss([100.0]))
self.evaluate(adaptive_kl_beta_update_fn([100.0]))
loss_2 = self.evaluate(agent.adaptive_kl_loss([100.0]))
self.assertLess(loss_1, loss_2)
@parameterized.named_parameters(('Default', _default),
('OneDevice', _one_device),
('Mirrored', _mirrored))
def testUpdateAdaptiveKlBeta(self, strategy_fn):
with strategy_fn().scope():
actor_net = actor_distribution_network.ActorDistributionNetwork(
self._time_step_spec.observation,
self._action_spec,
fc_layer_params=None)
value_net = value_network.ValueNetwork(
self._time_step_spec.observation, fc_layer_params=None)
agent = ppo_agent.PPOAgent(
self._time_step_spec,
self._action_spec,
tf.compat.v1.train.AdamOptimizer(),
actor_net=actor_net,
value_net=value_net,
initial_adaptive_kl_beta=1.0,
adaptive_kl_target=10.0,
adaptive_kl_tolerance=0.5,
)
agent.initialize()
self.evaluate(tf.compat.v1.global_variables_initializer())
# When KL is target kl, beta should not change.
update_adaptive_kl_beta_fn = common.function(agent.update_adaptive_kl_beta)
beta_0 = update_adaptive_kl_beta_fn([10.0])
expected_beta_0 = 1.0
self.assertEqual(expected_beta_0, self.evaluate(beta_0))
# When KL is large, beta should increase.
beta_1 = update_adaptive_kl_beta_fn([100.0])
expected_beta_1 = 1.5
self.assertEqual(expected_beta_1, self.evaluate(beta_1))
# When KL is small, beta should decrease.
beta_2 = update_adaptive_kl_beta_fn([1.0])
expected_beta_2 = 1.0
self.assertEqual(expected_beta_2, self.evaluate(beta_2))
def testPolicy(self):
value_net = value_network.ValueNetwork(
self._time_step_spec.observation, fc_layer_params=None)
agent = ppo_agent.PPOAgent(
self._time_step_spec,
self._action_spec,
tf.compat.v1.train.AdamOptimizer(),
actor_net=DummyActorNet(self._obs_spec, self._action_spec),
value_net=value_net)
observations = tf.constant([[1, 2]], dtype=tf.float32)
time_steps = ts.restart(observations, batch_size=1)
action_step = agent.policy.action(time_steps)
actions = action_step.action
self.assertEqual(actions.shape.as_list(), [1, 1])
self.evaluate(tf.compat.v1.global_variables_initializer())
_ = self.evaluate(actions)
def testRNNTrain(self):
actor_net = actor_distribution_rnn_network.ActorDistributionRnnNetwork(
self._time_step_spec.observation,
self._action_spec,
input_fc_layer_params=None,
output_fc_layer_params=None,
lstm_size=(20,))
value_net = value_rnn_network.ValueRnnNetwork(
self._time_step_spec.observation,
input_fc_layer_params=None,
output_fc_layer_params=None,
lstm_size=(10,))
global_step = tf.compat.v1.train.get_or_create_global_step()
agent = ppo_agent.PPOAgent(
self._time_step_spec,
self._action_spec,
optimizer=tf.compat.v1.train.AdamOptimizer(),
actor_net=actor_net,
value_net=value_net,
num_epochs=1,
train_step_counter=global_step,
)
# Use a random env, policy, and replay buffer to collect training data.
random_env = random_tf_environment.RandomTFEnvironment(
self._time_step_spec, self._action_spec, batch_size=1)
collection_policy = random_tf_policy.RandomTFPolicy(
self._time_step_spec,
self._action_spec,
info_spec=agent.collect_policy.info_spec)
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
collection_policy.trajectory_spec, batch_size=1, max_length=7)
collect_driver = dynamic_episode_driver.DynamicEpisodeDriver(
random_env,
collection_policy,
observers=[replay_buffer.add_batch],
num_episodes=1)
# In graph mode: finish building the graph so the optimizer
# variables are created.
if not tf.executing_eagerly():
_, _ = agent.train(experience=replay_buffer.gather_all())
# Initialize.
self.evaluate(agent.initialize())
self.evaluate(tf.compat.v1.global_variables_initializer())
# Train one step.
self.assertEqual(0, self.evaluate(global_step))
self.evaluate(collect_driver.run())
self.evaluate(agent.train(experience=replay_buffer.gather_all()))
self.assertEqual(1, self.evaluate(global_step))
@parameterized.named_parameters([
('ValueCalculationInTrain', True),
('ValueCalculationInCollect', False),
])
def testStatelessValueNetTrain(self, compute_value_and_advantage_in_train):
counter = common.create_variable('test_train_counter')
actor_net = actor_distribution_rnn_network.ActorDistributionRnnNetwork(
self._time_step_spec.observation,
self._action_spec,
input_fc_layer_params=None,
output_fc_layer_params=None,
lstm_size=(20,))
value_net = value_network.ValueNetwork(
self._time_step_spec.observation, fc_layer_params=None)
agent = ppo_agent.PPOAgent(
self._time_step_spec,
self._action_spec,
optimizer=tf.compat.v1.train.AdamOptimizer(),
actor_net=actor_net,
value_net=value_net,
num_epochs=1,
train_step_counter=counter,
compute_value_and_advantage_in_train=compute_value_and_advantage_in_train
)
observations = tf.constant(
[
[[1, 2], [3, 4], [5, 6]],
[[1, 2], [3, 4], [5, 6]],
],
dtype=tf.float32)
mid_time_step_val = ts.StepType.MID.tolist()
time_steps = ts.TimeStep(
step_type=tf.constant([[mid_time_step_val] * 3] * 2, dtype=tf.int32),
reward=tf.constant([[1] * 3] * 2, dtype=tf.float32),
discount=tf.constant([[1] * 3] * 2, dtype=tf.float32),
observation=observations)
actions = tf.constant([[[0], [1], [1]], [[0], [1], [1]]], dtype=tf.float32)
action_distribution_parameters = {
'loc': tf.constant([[[0.0]] * 3] * 2, dtype=tf.float32),
'scale': tf.constant([[[1.0]] * 3] * 2, dtype=tf.float32),
}
value_preds = tf.constant([[9., 15., 21.], [9., 15., 21.]],
dtype=tf.float32)
policy_info = {
'dist_params': action_distribution_parameters,
}
if not compute_value_and_advantage_in_train:
policy_info['value_prediction'] = value_preds
experience = trajectory.Trajectory(time_steps.step_type, observations,
actions, policy_info,
time_steps.step_type, time_steps.reward,
time_steps.discount)
if not compute_value_and_advantage_in_train:
experience = agent._preprocess(experience)
if tf.executing_eagerly():
loss = lambda: agent.train(experience)
else:
loss = agent.train(experience)
self.evaluate(tf.compat.v1.initialize_all_variables())
loss_type = self.evaluate(loss)
loss_numpy = loss_type.loss
# Assert that loss is not zero as we are training in a non-episodic env.
self.assertNotEqual(
loss_numpy,
0.0,
msg=('Loss is exactly zero, looks like no training '
'was performed due to incomplete episodes.'))
def testAgentDoesNotFailWhenNestedObservationActionAndDebugSummaries(self):
summary_writer = tf.compat.v2.summary.create_file_writer(
FLAGS.test_tmpdir, flush_millis=10000)
summary_writer.set_as_default()
nested_obs_spec = (self._obs_spec, self._obs_spec, {
'a': self._obs_spec,
'b': self._obs_spec,
})
nested_time_spec = ts.time_step_spec(nested_obs_spec)
nested_act_spec = (self._action_spec, {
'c': self._action_spec,
'd': self._action_spec
})
class NestedActorNet(network.DistributionNetwork):
def __init__(self, dummy_model):
output_spec = (dummy_model.output_spec, {
'c': dummy_model.output_spec,
'd': dummy_model.output_spec,
})
super(NestedActorNet, self).__init__(
dummy_model.input_tensor_spec, (),
output_spec=output_spec,
name='NestedActorNet')
self.dummy_model = dummy_model
def call(self, inputs, network_state, *args, **kwargs):
dummy_ans, _ = self.dummy_model(
inputs, network_state=network_state, *args, **kwargs)
return (dummy_ans, {'c': dummy_ans, 'd': dummy_ans}), ()
dummy_model = DummyActorNet(nested_obs_spec, self._action_spec)
agent = ppo_agent.PPOAgent(
nested_time_spec,
nested_act_spec,
tf.compat.v1.train.AdamOptimizer(),
actor_net=NestedActorNet(dummy_model),
value_net=DummyValueNet(nested_obs_spec),
compute_value_and_advantage_in_train=False,
debug_summaries=True)
observations = tf.constant(
[
[[1, 2], [3, 4], [5, 6]],
[[1, 2], [3, 4], [5, 6]],
],
dtype=tf.float32)
observations = (observations, observations, {
'a': observations,
'b': observations,
})
time_steps = ts.TimeStep(
step_type=tf.constant([[1] * 3] * 2, dtype=tf.int32),
reward=tf.constant([[1] * 3] * 2, dtype=tf.float32),
discount=tf.constant([[1] * 3] * 2, dtype=tf.float32),
observation=observations)
actions = tf.constant([[[0], [1], [1]], [[0], [1], [1]]], dtype=tf.float32)
actions = (actions, {
'c': actions,
'd': actions,
})
action_distribution_parameters = {
'loc': tf.constant([[[0.0]] * 3] * 2, dtype=tf.float32),
'scale': tf.constant([[[1.0]] * 3] * 2, dtype=tf.float32),
}
action_distribution_parameters = (action_distribution_parameters, {
'c': action_distribution_parameters,
'd': action_distribution_parameters,
})
value_preds = tf.constant([[9., 15., 21.], [9., 15., 21.]],
dtype=tf.float32)
policy_info = {
'dist_params': action_distribution_parameters,
'value_prediction': value_preds,
}
experience = trajectory.Trajectory(time_steps.step_type, observations,
actions, policy_info,
time_steps.step_type, time_steps.reward,
time_steps.discount)
experience = agent._preprocess(experience)
agent.train(experience)
@parameterized.named_parameters(('FeedTrajectory', False),
('FeedTransition', True))
def testTrainWithNonLegacyActorNetwork(self, feed_transition):
if not tf.executing_eagerly():
self.skipTest('Skipping test: sequential networks not supported in TF1')
num_epochs = 5
counter = common.create_variable('test_train_counter')
action_spec = {
'my_action': tensor_spec.BoundedTensorSpec([1], tf.float32, -1, 1)
}
agent = ppo_agent.PPOAgent(
self._time_step_spec,
action_spec,
tf.compat.v1.train.AdamOptimizer(),
# action_spec == TensorSpec([1], tf.float32)
actor_net=create_sequential_actor_net(ndims=1),
value_net=DummyValueNet(self._obs_spec),
normalize_observations=False,
num_epochs=num_epochs,
initial_adaptive_kl_beta=1.0,
adaptive_kl_target=10.0,
adaptive_kl_tolerance=0.5,
check_numerics=True,
compute_value_and_advantage_in_train=False,
train_step_counter=counter)
agent.initialize()
experience = _create_experience_trajectory_my_action()
experience = agent._preprocess(experience)
if feed_transition:
experience = trajectory.to_transition(experience)
loss = lambda: agent.train(experience)
# Assert that counter starts out at zero.
self.evaluate(tf.compat.v1.initialize_all_variables())
self.assertEqual(0, self.evaluate(counter))
loss_type = self.evaluate(loss)
loss_numpy = loss_type.loss
# Assert that loss is not zero as we are training in a non-episodic env.
self.assertNotEqual(
loss_numpy,
0.0,
msg=('Loss is exactly zero, looks like no training '
'was performed due to incomplete episodes.'))
# Assert that train_op ran increment_counter num_epochs times.
self.assertEqual(num_epochs, self.evaluate(counter))
def _create_experience_trajectory_my_action() -> trajectory.Trajectory:
observations = tf.constant(
[
[[1, 2], [3, 4], [5, 6]],
[[1, 2], [3, 4], [5, 6]],
],
dtype=tf.float32)
mid_time_step_val = ts.StepType.MID.tolist()
time_steps = ts.TimeStep(
step_type=tf.constant([[mid_time_step_val] * 3] * 2, dtype=tf.int32),
reward=tf.constant([[1] * 3] * 2, dtype=tf.float32),
discount=tf.constant([[1] * 3] * 2, dtype=tf.float32),
observation=observations)
actions = {
'my_action':
tf.constant([[[0.1], [0.9], [0.1]], [[0.9], [0.1], [0.9]]],
dtype=tf.float32)
}
action_distribution_parameters = {
'my_action': {
'bijector': {},
'distribution': {
'loc': tf.constant([[[0.0]] * 3] * 2, dtype=tf.float32),
'scale_diag': tf.constant([[[1.0]] * 3] * 2, dtype=tf.float32)
}
}
}
value_preds = tf.constant([[0.9, 1.5, 2.1], [0.9, 1.5, 2.1]],
dtype=tf.float32)
policy_info = {
'dist_params': action_distribution_parameters,
}
policy_info['value_prediction'] = value_preds
experience = trajectory.Trajectory(time_steps.step_type, observations,
actions, policy_info,
time_steps.step_type, time_steps.reward,
time_steps.discount)
return experience
if __name__ == '__main__':
tf.test.main()
| tensorflow/agents | tf_agents/agents/ppo/ppo_agent_test.py | Python | apache-2.0 | 53,941 |
#!/usr/bin/env python
"""
jsonxs uses a path expression string to get and set values in JSON and Python
datastructures.
For example:
>>> d = {
... 'feed': {
... 'id': 'my_feed',
... 'url': 'http://example.com/feed.rss',
... 'tags': ['devel', 'example', 'python'],
... 'short.desc': 'A feed',
... 'list': [
... {
... 'uuid': 'e9b48a2'
... }
... ]
... }
... }
# Get the value for a path expression
>>> jsonxs(d, 'feed.tags[-1]')
'python'
# Access paths with special chars in them
>>> jsonxs(d, 'feed.short\.desc')
'A feed'
# Return default value if path not found
>>> jsonxs(d, 'feed.long\.desc', default='N/A')
'N/A'
# Set the value for a path expression
>>> jsonxs(d, 'feed.id', ACTION_SET, 'your_feed')
>>> d['feed']['id']
'your_feed'
# Replace a value in a list
>>> jsonxs(d, 'feed.tags[-1]', ACTION_SET, 'javascript')
>>> d['feed']['tags']
['devel', 'example', 'javascript']
# Create a new key in a dict
>>> jsonxs(d, 'feed.author', ACTION_SET, 'Ferry Boender')
>>> d['feed']['author']
'Ferry Boender'
# Delete a value from a list
>>> jsonxs(d, 'feed.tags[0]', ACTION_DEL)
>>> d['feed']['tags']
['example', 'javascript']
# Delete a key/value pair from a dictionary
>>> jsonxs(d, 'feed.url', ACTION_DEL)
>>> 'url' in d['feed']
False
# Append a value to a list
>>> jsonxs(d, 'feed.tags', ACTION_APPEND, 'programming')
>>> d['feed']['tags']
['example', 'javascript', 'programming']
# Insert a value to a list
>>> jsonxs(d, 'feed.tags[1]', ACTION_INSERT, 'tech')
>>> d['feed']['tags']
['example', 'tech', 'javascript', 'programming']
# Create a dict value
>>> jsonxs(d, 'feed.details', ACTION_MKDICT)
>>> d['feed']['details'] == {}
True
# Add a key / value to newly created dict
>>> jsonxs(d, 'feed.list[0].uuid', ACTION_SET, 'aeaeae')
# Create a list value
>>> jsonxs(d, 'feed.details.users', ACTION_MKLIST)
>>> d['feed']['details']['users'] == []
True
# Fill the newly created list
>>> jsonxs(d, 'feed.details.users', ACTION_APPEND, 'fboender')
>>> jsonxs(d, 'feed.details.users', ACTION_APPEND, 'ppeterson')
>>> d['feed']['details']['users']
['fboender', 'ppeterson']
"""
ACTION_GET = 'get'
ACTION_SET = 'set'
ACTION_DEL = 'del'
ACTION_APPEND = 'append'
ACTION_INSERT = 'insert'
ACTION_MKDICT = 'mkdict'
ACTION_MKLIST = 'mklist'
def tokenize(expr):
"""
Parse a string expression into a set of tokens that can be used as a path
into a Python datastructure.
"""
tokens = []
escape = False
cur_token = ''
for c in expr:
        if escape:
cur_token += c
escape = False
else:
if c == '\\':
# Next char will be escaped
escape = True
continue
elif c == '[':
# Next token is of type index (list)
if len(cur_token) > 0:
tokens.append(cur_token)
cur_token = ''
elif c == ']':
# End of index token. Next token defaults to a key (dict)
if len(cur_token) > 0:
tokens.append(int(cur_token))
cur_token = ''
elif c == '.':
# End of key token. Next token defaults to a key (dict)
if len(cur_token) > 0:
tokens.append(cur_token)
cur_token = ''
else:
# Append char to token name
cur_token += c
if len(cur_token) > 0:
tokens.append(cur_token)
return tokens
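# Example (added for illustration): tokenize() splits a path expression into
# dict keys and integer list indices, honouring backslash escapes:
#
#   tokenize('feed.tags[0]')       -> ['feed', 'tags', 0]
#   tokenize(r'feed.short\.desc')  -> ['feed', 'short.desc']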
def jsonxs(data, expr, action=ACTION_GET, value=None, default=None):
"""
Get, set, delete values in a JSON structure. `expr` is a JSONpath-like
expression pointing to the desired value. `action` determines the action to
perform. See the module-level `ACTION_*` constants. `value` should be given
if action is `ACTION_SET`. If `default` is set and `expr` isn't found,
    return `default` instead; any exception raised while resolving the path is
    suppressed when `default` is given.
"""
tokens = tokenize(expr)
# Walk through the list of tokens to reach the correct path in the data
# structure.
try:
prev_path = None
cur_path = data
for token in tokens:
prev_path = cur_path
if type(cur_path) is not list:
                if token not in cur_path:
if action in [ACTION_SET, ACTION_MKDICT, ACTION_MKLIST]:
# When setting values or creating dicts/lists, the key can be
                        # missing from the data structure
continue
cur_path = cur_path[token]
except Exception:
if default is not None:
return default
else:
raise
# Perform action the user requested.
if action == ACTION_GET:
return cur_path
elif action == ACTION_DEL:
del prev_path[token]
elif action == ACTION_SET:
prev_path[token] = value
elif action == ACTION_APPEND:
prev_path[token].append(value)
elif action == ACTION_INSERT:
prev_path.insert(token, value)
elif action == ACTION_MKDICT:
prev_path[token] = {}
elif action == ACTION_MKLIST:
prev_path[token] = []
else:
raise ValueError("Invalid action: {}".format(action))
if __name__ == "__main__":
import doctest
doctest.testmod()
| fboender/jsonxs | jsonxs/jsonxs.py | Python | mit | 5,644 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 David Arroyo Menéndez
# Author: David Arroyo Menéndez <[email protected]>
# Maintainer: David Arroyo Menéndez <[email protected]>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
from __future__ import division # want 3 / 2 == 1.5
import re, math, random # regexes, math functions, random numbers
import matplotlib.pyplot as plt # pyplot
from collections import defaultdict, Counter
from functools import partial, reduce  # reduce is needed for vector_sum on Python 3
#
# functions for working with vectors
#
def vector_add(v, w):
"""adds two vectors componentwise"""
return [v_i + w_i for v_i, w_i in zip(v,w)]
def vector_subtract(v, w):
"""subtracts two vectors componentwise"""
return [v_i - w_i for v_i, w_i in zip(v,w)]
def vector_sum(vectors):
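    """sums a list of vectors componentwise"""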
return reduce(vector_add, vectors)
def scalar_multiply(c, v):
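    """multiplies every element of v by the scalar c"""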
return [c * v_i for v_i in v]
# this isn't right if you don't from __future__ import division
def vector_mean(vectors):
"""compute the vector whose i-th element is the mean of the
i-th elements of the input vectors"""
n = len(vectors)
return scalar_multiply(1/n, vector_sum(vectors))
def dot(v, w):
"""v_1 * w_1 + ... + v_n * w_n"""
return sum(v_i * w_i for v_i, w_i in zip(v, w))
def sum_of_squares(v):
"""v_1 * v_1 + ... + v_n * v_n"""
return dot(v, v)
def magnitude(v):
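    """the magnitude (Euclidean length) of v"""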
return math.sqrt(sum_of_squares(v))
def squared_distance(v, w):
return sum_of_squares(vector_subtract(v, w))
def distance(v, w):
return math.sqrt(squared_distance(v, w))
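# Quick illustration of the vector helpers above (values are made up for the
# example):
#
#   vector_add([1, 2], [3, 4])   # -> [4, 6]
#   dot([1, 2, 3], [4, 5, 6])    # -> 32
#   distance([0, 0], [3, 4])     # -> 5.0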
#
# functions for working with matrices
#
def shape(A):
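    """returns (number of rows, number of columns) of A"""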
num_rows = len(A)
num_cols = len(A[0]) if A else 0
return num_rows, num_cols
def get_row(A, i):
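    """returns the i-th row of A (as a vector)"""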
return A[i]
def get_column(A, j):
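    """returns the j-th column of A (as a vector)"""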
return [A_i[j] for A_i in A]
def make_matrix(num_rows, num_cols, entry_fn):
"""returns a num_rows x num_cols matrix
whose (i,j)-th entry is entry_fn(i, j)"""
return [[entry_fn(i, j) for j in range(num_cols)]
for i in range(num_rows)]
def is_diagonal(i, j):
"""1's on the 'diagonal', 0's everywhere else"""
return 1 if i == j else 0
identity_matrix = make_matrix(5, 5, is_diagonal)
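# identity_matrix is the 5 x 5 identity produced by make_matrix:
#   [[1, 0, 0, 0, 0],
#    [0, 1, 0, 0, 0],
#    [0, 0, 1, 0, 0],
#    [0, 0, 0, 1, 0],
#    [0, 0, 0, 0, 1]]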
# user 0 1 2 3 4 5 6 7 8 9
#
friendships = [[0, 1, 1, 0, 0, 0, 0, 0, 0, 0], # user 0
[1, 0, 1, 1, 0, 0, 0, 0, 0, 0], # user 1
[1, 1, 0, 1, 0, 0, 0, 0, 0, 0], # user 2
[0, 1, 1, 0, 1, 0, 0, 0, 0, 0], # user 3
[0, 0, 0, 1, 0, 1, 0, 0, 0, 0], # user 4
[0, 0, 0, 0, 1, 0, 1, 1, 0, 0], # user 5
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0], # user 6
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0], # user 7
[0, 0, 0, 0, 0, 0, 1, 1, 0, 1], # user 8
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]] # user 9
#####
# DELETE DOWN
#
def matrix_add(A, B):
if shape(A) != shape(B):
raise ArithmeticError("cannot add matrices with different shapes")
num_rows, num_cols = shape(A)
def entry_fn(i, j): return A[i][j] + B[i][j]
return make_matrix(num_rows, num_cols, entry_fn)
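# For example (illustrative values):
#   matrix_add([[1, 2], [3, 4]], [[10, 20], [30, 40]])  # -> [[11, 22], [33, 44]]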
def make_graph_dot_product_as_vector_projection(plt):
v = [2, 1]
w = [math.sqrt(.25), math.sqrt(.75)]
c = dot(v, w)
vonw = scalar_multiply(c, w)
o = [0,0]
plt.arrow(0, 0, v[0], v[1],
width=0.002, head_width=.1, length_includes_head=True)
plt.annotate("v", v, xytext=[v[0] + 0.1, v[1]])
plt.arrow(0 ,0, w[0], w[1],
width=0.002, head_width=.1, length_includes_head=True)
plt.annotate("w", w, xytext=[w[0] - 0.1, w[1]])
plt.arrow(0, 0, vonw[0], vonw[1], length_includes_head=True)
plt.annotate(u"(v•w)w", vonw, xytext=[vonw[0] - 0.1, vonw[1] + 0.1])
plt.arrow(v[0], v[1], vonw[0] - v[0], vonw[1] - v[1],
linestyle='dotted', length_includes_head=True)
plt.scatter(*zip(v,w,o),marker='.')
plt.axis('equal')
plt.show()
| davidam/python-examples | ai/linear_algebra.py | Python | gpl-3.0 | 4,600 |
import json
import xbmc
import time
import urllib2
from quasar.addon import ADDON
from quasar.config import QUASARD_HOST
from quasar.provider import closing
def library_thread():
trakt_sync = int(ADDON.getSetting("trakt_sync"))
if trakt_sync > 0:
limit = trakt_sync * 3600
count = limit - int(ADDON.getSetting("library_update_delay"))
while not xbmc.abortRequested:
# trakt_sync hours passed - Update Library
if count >= limit:
count = 0
try:
urllib2.urlopen(QUASARD_HOST + "/library/movie/watchlist/add?updating=true")
urllib2.urlopen(QUASARD_HOST + "/library/movie/collection/add?updating=true")
urllib2.urlopen(QUASARD_HOST + "/library/show/watchlist/add?updating=true")
urllib2.urlopen(QUASARD_HOST + "/library/show/collection/add?updating=true")
with closing(urllib2.urlopen(QUASARD_HOST + "/library/userlists")) as response:
data = json.loads(response.read())
for userlist in data:
urllib2.urlopen(QUASARD_HOST + "/library/movie/list/add/%d?updating=true" % userlist['IDs']['trakt'])
urllib2.urlopen(QUASARD_HOST + "/library/show/list/add/%d?updating=true" % userlist['IDs']['trakt'])
urllib2.urlopen(QUASARD_HOST + "/library/update")
                except Exception:
pass
time.sleep(5)
count += 5
else:
limit = int(ADDON.getSetting("library_update_frequency")) * 3600
count = limit - int(ADDON.getSetting("library_update_delay"))
while not xbmc.abortRequested:
# library_update_frequency hours passed - Update Library
if count >= limit:
count = 0
try:
urllib2.urlopen(QUASARD_HOST + "/library/update")
                except Exception:
pass
time.sleep(5)
count += 5
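# A minimal sketch of how this function might be launched from the add-on's
# service entry point (the exact wiring in Quasar's service module may differ):
#
#   import threading
#   t = threading.Thread(target=library_thread)
#   t.daemon = True
#   t.start()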
| felipenaselva/felipe.repository | plugin.video.quasar/resources/site-packages/quasar/library.py | Python | gpl-2.0 | 2,059 |
# -*- coding: utf-8 -*-
"""
jinja.constants
~~~~~~~~~~~~~~~
Various constants.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
#: list of lorem ipsum words used by the lipsum() helper function
LOREM_IPSUM_WORDS = u'''\
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
auctor augue bibendum blandit class commodo condimentum congue consectetuer
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
penatibus per pharetra phasellus placerat platea porta porttitor posuere
potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
viverra volutpat vulputate'''
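# This word list backs the lipsum() template global (implemented by
# jinja2.utils.generate_lorem_ipsum), e.g. in a template: {{ lipsum(n=2, html=False) }}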
| wildchildyn/autism-website | yanni_env/lib/python3.6/site-packages/jinja2/constants.py | Python | gpl-3.0 | 1,626 |
#!/usr/bin/python
'''
Copyright(c)2009 Internet Archive. Software license AGPL version 3.
This file is part of IA Bookserver.
IA Bookserver is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
IA Bookserver is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with IA Bookserver. If not, see <http://www.gnu.org/licenses/>.
'''
'''
This script downloads, installs, and configures the OPDS crawler.
'''
import commands
import os
crawler_dir = '/crawler'
warc_dir = crawler_dir + '/data'
def cmd(description, command):
print description
(ret, out) = commands.getstatusoutput(command)
print out
assert 0 == ret
print 'installing build-essential, swig, and svn'
(ret, out) = commands.getstatusoutput("""DEBIAN_FRONTEND=noninteractive apt-get --force-yes -qq install build-essential subversion swig1.3""")
print out
assert 0 == ret
print 'installing warc-tools'
if not os.path.exists('/tmp/warc-tools'):
(ret, out) = commands.getstatusoutput('svn checkout http://warc-tools.googlecode.com/svn/trunk/ /tmp/warc-tools')
print out
assert 0==ret
print 'patching warc-tools makefile'
(ret, out) = commands.getstatusoutput("""wget -q -O /tmp/warc-tools/makefile 'http://home.us.archive.org/~rkumar/git/gitweb.cgi?p=bookserver/.git;a=blob_plain;f=aggregator/install/warc-tools-makefile-64bit'""");
print out
assert 0==ret
print 'building and installing warc-tools'
(ret, out) = commands.getstatusoutput("""make -C /tmp/warc-tools install""");
print out
assert 0==ret
print 'installing python-feedparser, python-lxml, python-simplejson, and curl'
(ret, out) = commands.getstatusoutput("""DEBIAN_FRONTEND=noninteractive apt-get --force-yes -qq install python-feedparser python-lxml python-simplejson curl""")
print out
assert 0==ret
cmd('installing greenlet', """DEBIAN_FRONTEND=noninteractive apt-get --force-yes -qq install python-codespeak-lib""")
cmd('installing eventlet', 'easy_install eventlet')
cmd('installing python-xml', 'DEBIAN_FRONTEND=noninteractive apt-get --force-yes -qq install python-xml')
cmd('installing opensearch.py', 'easy_install opensearch')
if not os.path.exists(warc_dir):
print 'making warc_dir ' + warc_dir
os.makedirs(warc_dir)
| internetarchive/bookserver | aggregator/install/install_crawler.py | Python | agpl-3.0 | 2,701 |