Dataset schema (per-row fields and value ranges):

    code        stringlengths   3 .. 1.05M
    repo_name   stringlengths   5 .. 104
    path        stringlengths   4 .. 251
    language    stringclasses   1 value
    license     stringclasses   15 values
    size        int64           3 .. 1.05M
# -*- coding: utf-8 -*-


def uri(num=100):
    for i in range(1, int(num) + 1):
        if i % 2 == 0:
            print(i)


if __name__ == '__main__':
    uri()
gustavolcorreia/uri
iniciante/exerc1059.py
Python
apache-2.0
171
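As a quick sanity check of the exercise above (an illustrative call, not part of the dataset row): the function walks 1..num and prints only the even numbers.

uri(6)
# prints:
# 2
# 4
# 6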
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# |             ____ _               _        __  __ _  __           |
# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
# |                                                                  |
# | Copyright Mathias Kettner 2013        [email protected]           |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.

# Example for creating real Nagios checks from BI aggregations.

# Installation:
# 1. Put this file in /usr/lib/check_mk_agent/local
# 2. Make the file executable
# 3. Add a correct url_prefix (OMD site and slash),
#    user and password with read access to Multisite.

url_prefix = ""           # non-OMD installations
# url_prefix = "mysite/"  # with OMD site name

# HTTP Basic Auth
user = "omdadmin"
password = "omd"

cookie = None
# Alternatively: Multisite Cookie authentication:
# If you are using Cookie based authentication, then
# login with your browser and get the cookie content
# of auth_... from your browser settings and put
# it here:
# cookie = "omdadmin:1329218457.69:16b1d572fe059e00a89b7f24592733f2"

# Do not change anything below

import os, sys

if cookie:
    logininfo = ""
    opts = "-b 'auth_=%s'" % cookie
else:
    logininfo = "%s:%s@" % (user, password)
    opts = ""

url = 'http://%slocalhost/%scheck_mk/view.py?view_name=aggr_summary&output_format=python' % \
    (logininfo, url_prefix)

try:
    command = "curl --silent %s '%s'" % (opts, url)
    output = os.popen(command).read()
    data = eval(output)
except:
    sys.stderr.write("Invalid output from URL %s:\n" % url)
    sys.stderr.write(output)
    sys.stderr.write("Command was: %s\n" % command)
    sys.exit(1)

states = {
    "OK"      : 0,
    "WARN"    : 1,
    "CRIT"    : 2,
    "UNKNOWN" : 3,
}

for name, state, output in data[1:]:
    state_nr = states.get(state, -1)
    descr = "BI_Aggr_" + name.replace(" ", "_")
    if state_nr != -1:
        text = "%d %s - %s" % (state_nr, descr, state)
        if output:
            text += " - " + output
        print text
opinkerfi/check_mk
doc/treasures/check_bi_local.py
Python
gpl-2.0
3,129
from typing import Any, Callable, Dict, Optional

import tensorflow as tf
from tensorflow.keras.losses import Loss

from tensorflow_similarity.types import FloatTensor


@tf.keras.utils.register_keras_serializable(package="Similarity")
class Barlow(Loss):
    """Barlow Loss"""

    def __init__(self,
                 lambda_: float = 5e-3,
                 margin: float = 1e-12,
                 reduction: Callable = tf.keras.losses.Reduction.AUTO,
                 name: Optional[str] = None,
                 **kwargs):
        super().__init__(reduction=reduction, name=name, **kwargs)
        self.lambda_ = lambda_
        self.margin = margin

    @tf.function
    def call(self, za: FloatTensor, zb: FloatTensor) -> FloatTensor:
        """Compute the loss.

        Args:
            za: Embedding A
            zb: Embedding B

        Returns:
            loss
        """
        # compute the diagonal
        batch_size = tf.shape(za)[0]

        za = self.standardize_columns(za)
        zb = self.standardize_columns(zb)

        # compute pairwise
        c = tf.matmul(za, zb, transpose_a=True)
        c = c / tf.cast(batch_size, dtype="float32")

        on_diag = 1.0 - tf.linalg.diag_part(c)
        on_diag = tf.math.pow(on_diag, 2)
        on_diag = tf.math.reduce_sum(on_diag)

        off_diag = self.off_diagonal(c)
        off_diag = tf.math.pow(off_diag, 2)
        off_diag = tf.math.reduce_sum(off_diag)

        # 1D Tensor
        loss: FloatTensor = off_diag * self.lambda_ + on_diag + self.margin

        return loss

    def get_config(self) -> Dict[str, Any]:
        config = {
            "lambda_": self.lambda_,
            "margin": self.margin,
        }
        base_config = super().get_config()
        return {**base_config, **config}

    def off_diagonal(self, x: FloatTensor) -> FloatTensor:
        n = tf.shape(x)[0]
        flattened = tf.reshape(x, [-1])[:-1]
        off_diagonals = tf.reshape(flattened, (n - 1, n + 1))[:, 1:]
        off_diag: FloatTensor = tf.reshape(off_diagonals, [-1])
        return off_diag

    def standardize_columns(self, x: FloatTensor) -> FloatTensor:
        col_mean = tf.math.reduce_mean(x, axis=0)
        col_std = tf.math.reduce_std(x, axis=0)

        norm_col: FloatTensor = tf.math.divide_no_nan((x - col_mean), col_std)
        return norm_col
tensorflow/similarity
tensorflow_similarity/losses/barlow.py
Python
apache-2.0
2,344
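A minimal usage sketch for the Barlow loss above (the batch and embedding sizes are arbitrary illustrative values; za and zb would normally be embeddings of two augmented views of the same batch):

import tensorflow as tf

za = tf.random.normal((32, 128))  # embeddings of view A
zb = tf.random.normal((32, 128))  # embeddings of view B

loss_fn = Barlow(lambda_=5e-3)
loss = loss_fn(za, zb)  # Keras Loss.__call__ applies the reduction
print(float(loss))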
def resolve(args, frame):
    if len(args) == 1:
        angular_speed = args[0]
        if type(angular_speed) not in (int, float):
            raise TypeError("Wrong type argument : angular_speed")
    else:
        raise Exception("Wrong number of arguments")
    result = '_y_spt_afterframes {} "_y_spt_pitchspeed {}"'.format(frame, angular_speed)
    return result, frame
Dicatoro/YACLWiN
modules/setyanglespeed.py
Python
gpl-3.0
337
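For context, a quick illustrative call of the resolve helper above, requesting pitch speed 50 at frame 100:

command, frame = resolve([50], 100)
print(command)  # _y_spt_afterframes 100 "_y_spt_pitchspeed 50"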
import unittest
import pickle

from urllib3.exceptions import (HTTPError, MaxRetryError, LocationParseError,
                                ClosedPoolError, EmptyPoolError,
                                HostChangedError, ReadTimeoutError,
                                ConnectTimeoutError, HeaderParsingError)
from urllib3.connectionpool import HTTPConnectionPool


class TestPickle(unittest.TestCase):

    def verify_pickling(self, item):
        return pickle.loads(pickle.dumps(item))

    def test_exceptions(self):
        assert self.verify_pickling(HTTPError(None))
        assert self.verify_pickling(MaxRetryError(None, None, None))
        assert self.verify_pickling(LocationParseError(None))
        assert self.verify_pickling(ConnectTimeoutError(None))

    def test_exceptions_with_objects(self):
        assert self.verify_pickling(
            HTTPError('foo'))

        assert self.verify_pickling(
            HTTPError('foo', IOError('foo')))

        assert self.verify_pickling(
            MaxRetryError(HTTPConnectionPool('localhost'), '/', None))

        assert self.verify_pickling(
            LocationParseError('fake location'))

        assert self.verify_pickling(
            ClosedPoolError(HTTPConnectionPool('localhost'), None))

        assert self.verify_pickling(
            EmptyPoolError(HTTPConnectionPool('localhost'), None))

        assert self.verify_pickling(
            HostChangedError(HTTPConnectionPool('localhost'), '/', None))

        assert self.verify_pickling(
            ReadTimeoutError(HTTPConnectionPool('localhost'), '/', None))


class TestFormat(unittest.TestCase):
    def test_header_parsing_errors(self):
        hpe = HeaderParsingError('defects', 'unparsed_data')

        self.assertTrue('defects' in str(hpe))
        self.assertTrue('unparsed_data' in str(hpe))
Lukasa/urllib3
test/test_exceptions.py
Python
mit
1,843
from PyQt4.QtGui import QListWidget, QListWidgetItem

__author__ = 'mouton'

from PyQt4.QtCore import Qt
from PyQt4.QtGui import QAbstractItemView, QMessageBox

from gui.EditorItem import ViewWidget


class ViewsManagerWidget(QListWidget):

    def __init__(self, parent=None, propertiesEditor=None, mainWindow=None,
                 nodesIdsGenerator=None, modeController=None):
        super(ViewsManagerWidget, self).__init__(parent)
        self.setMaximumWidth(200)
        self.setMinimumWidth(200)

        self.reinit()

        self.mainWindow = mainWindow
        self.nodesIdsGenerator = nodesIdsGenerator
        self.modeController = modeController
        self.propertiesEditor = propertiesEditor

        self.setDragDropMode(QAbstractItemView.InternalMove)
        self.setDropIndicatorShown(True)
        self.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self.setEditTriggers(QAbstractItemView.EditKeyPressed)

        self.itemDoubleClicked.connect(self._viewDoubleClicked)
        self.itemChanged.connect(self._itemEdited)
        # self..connect(self._test)

    def reinit(self):
        self._counter = 0
        self.clear()

    def addView(self):
        self._counter += 1
        return self.addNamedView('unnamed' + str(self._counter))

    def addNamedView(self, name):
        view = ViewWidget(mainWindow=self.mainWindow,
                          nodesIdsGenerator=self.nodesIdsGenerator,
                          modeController=self.modeController)
        view.setPropertiesEditor(self.propertiesEditor)
        item = ViewsManagerItem(view)
        item.setText(name)
        self.addItem(item)
        item.reloadViewName()
        self.mainWindow.setModified()
        return view

    def scenes(self):
        return (self.item(index).view.scene() for index in xrange(self.count()))

    def _viewDoubleClicked(self, item):
        view = item.view
        tabItem = self.mainWindow.centralWidget().tabItem()
        tabItem.showTabbedView(view)

    def _itemEdited(self, item):
        item.reloadViewName()
        self.mainWindow.setModified()

    def keyPressEvent(self, event):
        if event.key() == Qt.Key_Delete:
            def row(index):
                return index.row()
            selected = sorted(self.selectedIndexes(), key=row, reverse=True)
            if len(selected) > 0:
                reply = QMessageBox.warning(self, 'Delete one or more scenes',
                                            'Are you sure you want to delete this or these scenes? \
You will not be able to get access to them in the future.',
                                            QMessageBox.Yes | QMessageBox.No,
                                            QMessageBox.No)
                if reply == QMessageBox.Yes:
                    for index in selected:
                        self.takeItem(index.row())
                    self.mainWindow.setModified()
        else:
            super(ViewsManagerWidget, self).keyPressEvent(event)

    def dropEvent(self, event):
        self.mainWindow.setModified()
        super(ViewsManagerWidget, self).dropEvent(event)


class ViewsManagerItem(QListWidgetItem):

    def __init__(self, view, parent=None):
        super(ViewsManagerItem, self).__init__(parent)
        self.view = view
        self.setFlags(Qt.ItemIsSelectable | Qt.ItemIsDragEnabled |
                      Qt.ItemIsEnabled | Qt.ItemIsEditable)
        self.reloadViewName()

    def reloadViewName(self):
        name = self.text()
        self.view.scene().setName(name)
mouton5000/DiscreteEventApplicationEditor
gui/ScenesManagerItems.py
Python
mit
3,506
""" This is the boilerplate default configuration file. Changes and additions to settings should be done in the config module located in the application root rather than this config. """ config = { # webapp2 sessions 'webapp2_extras.sessions' : {'secret_key': 'Force_be_with'}, # webapp2 authentication 'webapp2_extras.auth' : {'user_model': 'boilerplate.models.User', 'cookie_name': 'session_name'}, # jinja2 templates 'webapp2_extras.jinja2' : {'template_path': ['templates','boilerplate/templates', 'admin/templates'], 'environment_args': {'extensions': ['jinja2.ext.i18n']}}, # application name 'app_name' : "reviewringer", # the default language code for the application. # should match whatever language the site uses when i18n is disabled 'app_lang' : 'en', # Locale code = <language>_<territory> (ie 'en_US') # to pick locale codes see http://cldr.unicode.org/index/cldr-spec/picking-the-right-language-code # also see http://www.sil.org/iso639-3/codes.asp # Language codes defined under iso 639-1 http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes # Territory codes defined under iso 3166-1 alpha-2 http://en.wikipedia.org/wiki/ISO_3166-1 # disable i18n if locales array is empty or None 'locales' : ['en_US', 'es_ES', 'it_IT', 'zh_CN', 'id_ID', 'fr_FR', 'de_DE', 'ru_RU', 'pt_BR', 'cs_CZ'], # contact page email settings 'contact_sender' : "PUT_SENDER_EMAIL_HERE", 'contact_recipient' : "PUT_RECIPIENT_EMAIL_HERE", # Password AES Encryption Parameters 'aes_key' : "12_24_32_BYTES_KEY_FOR_PASSWORDS", 'salt' : "_PUT_SALT_HERE_TO_SHA512_PASSWORDS_", # get your own consumer key and consumer secret by registering at https://dev.twitter.com/apps # callback url must be: http://[YOUR DOMAIN]/login/twitter/complete 'twitter_consumer_key' : 'PUT_YOUR_TWITTER_CONSUMER_KEY_HERE', 'twitter_consumer_secret' : 'PUT_YOUR_TWITTER_CONSUMER_SECRET_HERE', #Facebook Login # get your own consumer key and consumer secret by registering at https://developers.facebook.com/apps #Very Important: set the site_url= your domain in the application settings in the facebook app settings page # callback url must be: http://[YOUR DOMAIN]/login/facebook/complete 'fb_api_key' : 'PUT_YOUR_FACEBOOK_PUBLIC_KEY_HERE', 'fb_secret' : 'PUT_YOUR_FACEBOOK_PUBLIC_KEY_HERE', #Linkedin Login #Get you own api key and secret from https://www.linkedin.com/secure/developer 'linkedin_api' : 'PUT_YOUR_LINKEDIN_PUBLIC_KEY_HERE', 'linkedin_secret' : 'PUT_YOUR_LINKEDIN_PUBLIC_KEY_HERE', # Github login # Register apps here: https://github.com/settings/applications/new 'github_server' : 'github.com', 'github_redirect_uri' : 'http://www.example.com/social_login/github/complete', 'github_client_id' : 'PUT_YOUR_GITHUB_CLIENT_ID_HERE', 'github_client_secret' : 'PUT_YOUR_GITHUB_CLIENT_SECRET_HERE', # get your own recaptcha keys by registering at http://www.google.com/recaptcha/ 'captcha_public_key' : "PUT_YOUR_RECAPCHA_PUBLIC_KEY_HERE", 'captcha_private_key' : "PUT_YOUR_RECAPCHA_PRIVATE_KEY_HERE", # Leave blank "google_analytics_domain" if you only want Analytics code 'google_analytics_domain' : "YOUR_PRIMARY_DOMAIN (e.g. 
google.com)", 'google_analytics_code' : "UA-XXXXX-X", # add status codes and templates used to catch and display errors # if a status code is not listed here it will use the default app engine # stacktrace error page or browser error page 'error_templates' : { 403: 'errors/default_error.html', 404: 'errors/default_error.html', 500: 'errors/default_error.html', }, # Enable Federated login (OpenID and OAuth) # Google App Engine Settings must be set to Authentication Options: Federated Login 'enable_federated_login' : True, # jinja2 base layout template 'base_layout' : 'base.html', # send error emails to developers 'send_mail_developer' : False, # fellas' list 'developers' : ( ('Santa Klauss', '[email protected]'), ), # If true, it will write in datastore a log of every email sent 'log_email' : True, # If true, it will write in datastore a log of every visit 'log_visit' : True, # ----> ADD MORE CONFIGURATION OPTIONS HERE <---- } # end config
LuckDragon82/demo
config/localhost.py
Python
lgpl-3.0
4,151
##############################################################################################
# Copyright 2014-2015 Cloud Media Sdn. Bhd.
#
# This file is part of Xuan Application Development SDK.
#
# Xuan Application Development SDK is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Xuan Application Development SDK is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Xuan Application Development SDK. If not, see <http://www.gnu.org/licenses/>.
##############################################################################################
from threading import Lock

from com.cloudMedia.theKuroBox.sdk.app.appinfo import AppInfo
from com.cloudMedia.theKuroBox.sdk.app.deviceController import DeviceController
from com.cloudMedia.theKuroBox.sdk.app.event import Event
from com.cloudMedia.theKuroBox.sdk.app.kbxLang import KBXLang
from com.cloudMedia.theKuroBox.sdk.app.sharedMethod import SharedMethod
from com.cloudMedia.theKuroBox.sdk.dto.deviceDTO import DeviceDTO
from com.cloudMedia.theKuroBox.sdk.ex.systemException import SystemException
from com.cloudMedia.theKuroBox.sdk.paramComponents.kbxHidden import KBXHidden
from com.cloudMedia.theKuroBox.sdk.paramComponents.kbxOption import KBXOption
from com.cloudMedia.theKuroBox.sdk.util.logger import Logger


class SensorDeviceController(DeviceController):
    '''
    Switch Controller Class
    '''

    def __init__(self, kbxModuleName, parentPath, name="", protocolId="",
                 uniqueId="", icon="", isCtrlPanelExist=False,
                 isAdvCtrlPanelExist=False, uniqueType=None,
                 advDeviceControllerDTO=None):
        pass

    def on_device_initial(self, pairedDeviceId, deviceDTO, groupName, groupId, methodInitialStatus):
        '''
        child class should override this function to implement their body
        '''
        pass

    def on_device_remove(self, pairedDeviceId, deviceDTO, groupName, groupId):
        '''
        child class should override this function to implement their body
        '''
        pass

    def get_device_status(self, pairedDeviceId):
        pass

    def on_device_enable(self, pairedDeviceId, deviceDTO, groupName, groupId):
        '''
        child class should override this function to implement their body
        '''
        pass

    def on_device_disable(self, pairedDeviceId, deviceDTO, groupName, groupId):
        '''
        child class should override this function to implement their body
        '''
        pass

    def on_request_device_status_update(self, pairedDeviceId, deviceDTO):
        '''
        child class should override this function to implement their body
        '''
        pass
TheStackBox/xuansdk
SDKLibrary/com/cloudMedia/theKuroBox/sdk/deviceController/sensorDeviceController.py
Python
gpl-3.0
3,123
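The stubbed hooks above are meant to be overridden; a minimal hypothetical subclass (the class name and bodies are placeholders) might look like this:

class MySensorController(SensorDeviceController):

    def on_device_initial(self, pairedDeviceId, deviceDTO, groupName,
                          groupId, methodInitialStatus):
        # initialize the newly paired device here
        pass

    def get_device_status(self, pairedDeviceId):
        # return the current status of the paired device
        pass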
""" Support for Xiaomi Yeelight Wifi color bulb. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/light.yeelight/ """ import logging import colorsys from typing import Tuple import voluptuous as vol from homeassistant.util.color import ( color_temperature_mired_to_kelvin as mired_to_kelvin, color_temperature_kelvin_to_mired as kelvin_to_mired, color_temperature_to_rgb) from homeassistant.const import CONF_DEVICES, CONF_NAME from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_RGB_COLOR, ATTR_TRANSITION, ATTR_COLOR_TEMP, ATTR_FLASH, FLASH_SHORT, FLASH_LONG, ATTR_EFFECT, SUPPORT_BRIGHTNESS, SUPPORT_RGB_COLOR, SUPPORT_TRANSITION, SUPPORT_COLOR_TEMP, SUPPORT_FLASH, SUPPORT_EFFECT, Light, PLATFORM_SCHEMA) import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['yeelight==0.3.2'] _LOGGER = logging.getLogger(__name__) CONF_TRANSITION = 'transition' DEFAULT_TRANSITION = 350 CONF_SAVE_ON_CHANGE = 'save_on_change' CONF_MODE_MUSIC = 'use_music_mode' DOMAIN = 'yeelight' DEVICE_SCHEMA = vol.Schema({ vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_TRANSITION, default=DEFAULT_TRANSITION): cv.positive_int, vol.Optional(CONF_MODE_MUSIC, default=False): cv.boolean, vol.Optional(CONF_SAVE_ON_CHANGE, default=True): cv.boolean, }) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Optional(CONF_DEVICES, default={}): {cv.string: DEVICE_SCHEMA}, }) SUPPORT_YEELIGHT = (SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION | SUPPORT_FLASH) SUPPORT_YEELIGHT_RGB = (SUPPORT_YEELIGHT | SUPPORT_RGB_COLOR | SUPPORT_EFFECT | SUPPORT_COLOR_TEMP) EFFECT_DISCO = "Disco" EFFECT_TEMP = "Slow Temp" EFFECT_STROBE = "Strobe epilepsy!" EFFECT_STROBE_COLOR = "Strobe color" EFFECT_ALARM = "Alarm" EFFECT_POLICE = "Police" EFFECT_POLICE2 = "Police2" EFFECT_CHRISTMAS = "Christmas" EFFECT_RGB = "RGB" EFFECT_RANDOM_LOOP = "Random Loop" EFFECT_FAST_RANDOM_LOOP = "Fast Random Loop" EFFECT_SLOWDOWN = "Slowdown" EFFECT_WHATSAPP = "WhatsApp" EFFECT_FACEBOOK = "Facebook" EFFECT_TWITTER = "Twitter" EFFECT_STOP = "Stop" YEELIGHT_EFFECT_LIST = [ EFFECT_DISCO, EFFECT_TEMP, EFFECT_STROBE, EFFECT_STROBE_COLOR, EFFECT_ALARM, EFFECT_POLICE, EFFECT_POLICE2, EFFECT_CHRISTMAS, EFFECT_RGB, EFFECT_RANDOM_LOOP, EFFECT_FAST_RANDOM_LOOP, EFFECT_SLOWDOWN, EFFECT_WHATSAPP, EFFECT_FACEBOOK, EFFECT_TWITTER, EFFECT_STOP] # Travis-CI runs too old astroid https://github.com/PyCQA/pylint/issues/1212 # pylint: disable=invalid-sequence-index def hsv_to_rgb(hsv: Tuple[float, float, float]) -> Tuple[int, int, int]: """Convert HSV tuple (degrees, %, %) to RGB (values 0-255).""" red, green, blue = colorsys.hsv_to_rgb(hsv[0]/360, hsv[1]/100, hsv[2]/100) return int(red * 255), int(green * 255), int(blue * 255) def _cmd(func): """Define a wrapper to catch exceptions from the bulb.""" def _wrap(self, *args, **kwargs): import yeelight try: _LOGGER.debug("Calling %s with %s %s", func, args, kwargs) return func(self, *args, **kwargs) except yeelight.BulbException as ex: _LOGGER.error("Error when calling %s: %s", func, ex) return _wrap def setup_platform(hass, config, add_devices, discovery_info=None): """Set up the Yeelight bulbs.""" lights = [] if discovery_info is not None: _LOGGER.debug("Adding autodetected %s", discovery_info['hostname']) # Not using hostname, as it seems to vary. 
name = "yeelight_%s_%s" % (discovery_info['device_type'], discovery_info['properties']['mac']) device = {'name': name, 'ipaddr': discovery_info['host']} lights.append(YeelightLight(device, DEVICE_SCHEMA({}))) else: for ipaddr, device_config in config[CONF_DEVICES].items(): _LOGGER.debug("Adding configured %s", device_config[CONF_NAME]) device = {'name': device_config[CONF_NAME], 'ipaddr': ipaddr} lights.append(YeelightLight(device, device_config)) add_devices(lights, True) class YeelightLight(Light): """Representation of a Yeelight light.""" def __init__(self, device, config): """Initialize the Yeelight light.""" self.config = config self._name = device['name'] self._ipaddr = device['ipaddr'] self._supported_features = SUPPORT_YEELIGHT self._available = False self._bulb_device = None self._brightness = None self._color_temp = None self._is_on = None self._rgb = None @property def available(self) -> bool: """Return if bulb is available.""" return self._available @property def supported_features(self) -> int: """Flag supported features.""" return self._supported_features @property def effect_list(self): """Return the list of supported effects.""" return YEELIGHT_EFFECT_LIST @property def unique_id(self) -> str: """Return the ID of this light.""" return "{}.{}".format(self.__class__, self._ipaddr) @property def color_temp(self) -> int: """Return the color temperature.""" return self._color_temp @property def name(self) -> str: """Return the name of the device if any.""" return self._name @property def is_on(self) -> bool: """Return true if device is on.""" return self._is_on @property def brightness(self) -> int: """Return the brightness of this light between 1..255.""" return self._brightness def _get_rgb_from_properties(self): rgb = self._properties.get('rgb', None) color_mode = self._properties.get('color_mode', None) if not rgb or not color_mode: return rgb color_mode = int(color_mode) if color_mode == 2: # color temperature return color_temperature_to_rgb(self.color_temp) if color_mode == 3: # hsv hue = int(self._properties.get('hue')) sat = int(self._properties.get('sat')) val = int(self._properties.get('bright')) return hsv_to_rgb((hue, sat, val)) rgb = int(rgb) blue = rgb & 0xff green = (rgb >> 8) & 0xff red = (rgb >> 16) & 0xff return red, green, blue @property def rgb_color(self) -> tuple: """Return the color property.""" return self._rgb @property def _properties(self) -> dict: return self._bulb.last_properties @property def _bulb(self) -> 'yeelight.Bulb': import yeelight if self._bulb_device is None: try: self._bulb_device = yeelight.Bulb(self._ipaddr) self._bulb_device.get_properties() # force init for type self._available = True except yeelight.BulbException as ex: self._available = False _LOGGER.error("Failed to connect to bulb %s, %s: %s", self._ipaddr, self._name, ex) return self._bulb_device def set_music_mode(self, mode) -> None: """Set the music mode on or off.""" if mode: self._bulb.start_music() else: self._bulb.stop_music() def update(self) -> None: """Update properties from the bulb.""" import yeelight try: self._bulb.get_properties() if self._bulb_device.bulb_type == yeelight.BulbType.Color: self._supported_features = SUPPORT_YEELIGHT_RGB self._is_on = self._properties.get('power') == 'on' bright = self._properties.get('bright', None) if bright: self._brightness = 255 * (int(bright) / 100) temp_in_k = self._properties.get('ct', None) if temp_in_k: self._color_temp = kelvin_to_mired(int(temp_in_k)) self._rgb = self._get_rgb_from_properties() self._available = True 
except yeelight.BulbException as ex: if self._available: # just inform once _LOGGER.error("Unable to update bulb status: %s", ex) self._available = False @_cmd def set_brightness(self, brightness, duration) -> None: """Set bulb brightness.""" if brightness: _LOGGER.debug("Setting brightness: %s", brightness) self._bulb.set_brightness(brightness / 255 * 100, duration=duration) @_cmd def set_rgb(self, rgb, duration) -> None: """Set bulb's color.""" if rgb and self.supported_features & SUPPORT_RGB_COLOR: _LOGGER.debug("Setting RGB: %s", rgb) self._bulb.set_rgb(rgb[0], rgb[1], rgb[2], duration=duration) @_cmd def set_colortemp(self, colortemp, duration) -> None: """Set bulb's color temperature.""" if colortemp and self.supported_features & SUPPORT_COLOR_TEMP: temp_in_k = mired_to_kelvin(colortemp) _LOGGER.debug("Setting color temp: %s K", temp_in_k) self._bulb.set_color_temp(temp_in_k, duration=duration) @_cmd def set_default(self) -> None: """Set current options as default.""" self._bulb.set_default() @_cmd def set_flash(self, flash) -> None: """Activate flash.""" if flash: from yeelight import (RGBTransition, SleepTransition, Flow, BulbException) if self._bulb.last_properties["color_mode"] != 1: _LOGGER.error("Flash supported currently only in RGB mode.") return transition = int(self.config[CONF_TRANSITION]) if flash == FLASH_LONG: count = 1 duration = transition * 5 if flash == FLASH_SHORT: count = 1 duration = transition * 2 red, green, blue = self.rgb_color transitions = list() transitions.append( RGBTransition(255, 0, 0, brightness=10, duration=duration)) transitions.append(SleepTransition( duration=transition)) transitions.append( RGBTransition(red, green, blue, brightness=self.brightness, duration=duration)) flow = Flow(count=count, transitions=transitions) try: self._bulb.start_flow(flow) except BulbException as ex: _LOGGER.error("Unable to set flash: %s", ex) @_cmd def set_effect(self, effect) -> None: """Activate effect.""" if effect: from yeelight import (Flow, BulbException) from yeelight.transitions import (disco, temp, strobe, pulse, strobe_color, alarm, police, police2, christmas, rgb, randomloop, slowdown) if effect == EFFECT_STOP: self._bulb.stop_flow() return if effect == EFFECT_DISCO: flow = Flow(count=0, transitions=disco()) if effect == EFFECT_TEMP: flow = Flow(count=0, transitions=temp()) if effect == EFFECT_STROBE: flow = Flow(count=0, transitions=strobe()) if effect == EFFECT_STROBE_COLOR: flow = Flow(count=0, transitions=strobe_color()) if effect == EFFECT_ALARM: flow = Flow(count=0, transitions=alarm()) if effect == EFFECT_POLICE: flow = Flow(count=0, transitions=police()) if effect == EFFECT_POLICE2: flow = Flow(count=0, transitions=police2()) if effect == EFFECT_CHRISTMAS: flow = Flow(count=0, transitions=christmas()) if effect == EFFECT_RGB: flow = Flow(count=0, transitions=rgb()) if effect == EFFECT_RANDOM_LOOP: flow = Flow(count=0, transitions=randomloop()) if effect == EFFECT_FAST_RANDOM_LOOP: flow = Flow(count=0, transitions=randomloop(duration=250)) if effect == EFFECT_SLOWDOWN: flow = Flow(count=0, transitions=slowdown()) if effect == EFFECT_WHATSAPP: flow = Flow(count=2, transitions=pulse(37, 211, 102)) if effect == EFFECT_FACEBOOK: flow = Flow(count=2, transitions=pulse(59, 89, 152)) if effect == EFFECT_TWITTER: flow = Flow(count=2, transitions=pulse(0, 172, 237)) try: self._bulb.start_flow(flow) except BulbException as ex: _LOGGER.error("Unable to set effect: %s", ex) def turn_on(self, **kwargs) -> None: """Turn the bulb on.""" import yeelight brightness 
= kwargs.get(ATTR_BRIGHTNESS) colortemp = kwargs.get(ATTR_COLOR_TEMP) rgb = kwargs.get(ATTR_RGB_COLOR) flash = kwargs.get(ATTR_FLASH) effect = kwargs.get(ATTR_EFFECT) duration = int(self.config[CONF_TRANSITION]) # in ms if ATTR_TRANSITION in kwargs: # passed kwarg overrides config duration = int(kwargs.get(ATTR_TRANSITION) * 1000) # kwarg in s try: self._bulb.turn_on(duration=duration) except yeelight.BulbException as ex: _LOGGER.error("Unable to turn the bulb on: %s", ex) return if self.config[CONF_MODE_MUSIC] and not self._bulb.music_mode: try: self.set_music_mode(self.config[CONF_MODE_MUSIC]) except yeelight.BulbException as ex: _LOGGER.error("Unable to turn on music mode," "consider disabling it: %s", ex) try: # values checked for none in methods self.set_rgb(rgb, duration) self.set_colortemp(colortemp, duration) self.set_brightness(brightness, duration) self.set_flash(flash) self.set_effect(effect) except yeelight.BulbException as ex: _LOGGER.error("Unable to set bulb properties: %s", ex) return # save the current state if we had a manual change. if self.config[CONF_SAVE_ON_CHANGE] and (brightness or colortemp or rgb): try: self.set_default() except yeelight.BulbException as ex: _LOGGER.error("Unable to set the defaults: %s", ex) return def turn_off(self, **kwargs) -> None: """Turn off.""" import yeelight try: self._bulb.turn_off() except yeelight.BulbException as ex: _LOGGER.error("Unable to turn the bulb off: %s", ex)
ct-23/home-assistant
homeassistant/components/light/yeelight.py
Python
apache-2.0
15,296
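A quick sanity check of the hsv_to_rgb helper defined in the row above (an illustrative call): pure red in HSV, i.e. 0 degrees hue at 100% saturation and value, maps to (255, 0, 0).

print(hsv_to_rgb((0.0, 100.0, 100.0)))  # -> (255, 0, 0)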
from setuptools import setup

APP = ['gui.py']
DATA_FILES = []
OPTIONS = {'argv_emulation': True,
           'includes': ['sip', 'PyQt4', 'gpg', 'gui']}

setup(
    app=APP,
    data_files=DATA_FILES,
    options={'py2app': OPTIONS},
    setup_requires=['py2app'],
)
Lightjohn/gpgProtector
setup.py
Python
mit
239
# coding: utf-8
import io

from django.utils.xmlutils import SimplerXMLGenerator
from django.utils.encoding import smart_text
from rest_framework.negotiation import DefaultContentNegotiation
from rest_framework.renderers import BaseRenderer
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.renderers import StaticHTMLRenderer
from rest_framework_xml.renderers import XMLRenderer


class XLSRenderer(BaseRenderer):
    media_type = 'application/vnd.openxmlformats'
    format = 'xls'
    charset = None

    def render(self, data, accepted_media_type=None, renderer_context=None):
        return data


class XLSXRenderer(XLSRenderer):
    format = 'xlsx'


class CSVRenderer(BaseRenderer):
    media_type = 'text/csv'
    format = 'csv'
    charset = 'utf-8'


# TODO add KML, ZIP(attachments) support
class RawXMLRenderer(BaseRenderer):
    media_type = 'application/xml'
    format = 'xml'
    charset = 'utf-8'

    def render(self, data, accepted_media_type=None, renderer_context=None):
        return data


class MediaFileContentNegotiation(DefaultContentNegotiation):
    def filter_renderers(self, renderers, format):
        """
        If there is a '.json' style format suffix, filter the renderers
        so that we only negotiate against those that accept that format.
        If there is no renderer available, we use MediaFileRenderer.
        """
        renderers = [renderer for renderer in renderers
                     if renderer.format == format]
        if not renderers:
            renderers = [MediaFileRenderer()]

        return renderers


class MediaFileRenderer(BaseRenderer):
    media_type = '*/*'
    format = None
    charset = None
    render_style = 'binary'

    def render(self, data, accepted_media_type=None, renderer_context=None):
        return data


class XFormListRenderer(BaseRenderer):
    """
    Renderer which serializes to XML.
    """
    media_type = 'text/xml'
    format = 'xml'
    charset = 'utf-8'
    root_node = 'xforms'
    element_node = 'xform'
    xmlns = "http://openrosa.org/xforms/xformsList"

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """
        Renders *obj* into serialized XML.
        """
        if data is None:
            return ''
        elif isinstance(data, str):
            return data

        stream = io.StringIO()

        xml = SimplerXMLGenerator(stream, self.charset)
        xml.startDocument()
        xml.startElement(self.root_node, {'xmlns': self.xmlns})

        self._to_xml(xml, data)

        xml.endElement(self.root_node)
        xml.endDocument()

        return stream.getvalue()

    def _to_xml(self, xml, data):
        if isinstance(data, (list, tuple)):
            for item in data:
                xml.startElement(self.element_node, {})
                self._to_xml(xml, item)
                xml.endElement(self.element_node)
        elif isinstance(data, dict):
            for key, value in data.items():
                xml.startElement(key, {})
                self._to_xml(xml, value)
                xml.endElement(key)
        elif data is None:
            # Don't output any value
            pass
        else:
            xml.characters(smart_text(data))


class XFormManifestRenderer(XFormListRenderer):
    root_node = "manifest"
    element_node = "mediaFile"
    xmlns = "http://openrosa.org/xforms/xformsManifest"


class TemplateXMLRenderer(TemplateHTMLRenderer):
    format = 'xml'
    media_type = 'text/xml'

    def render(self, data, accepted_media_type=None, renderer_context=None):
        renderer_context = renderer_context or {}
        response = renderer_context['response']

        if response and response.exception:
            return XMLRenderer().render(
                data, accepted_media_type, renderer_context)

        return super().render(
            data, accepted_media_type, renderer_context)


class StaticXMLRenderer(StaticHTMLRenderer):
    format = 'xml'
    media_type = 'text/xml'


class InstanceContentNegotiation(DefaultContentNegotiation):
    def filter_renderers(self, renderers, format):
        """
        Removes `rest_framework_xml.renderers.XMLRenderer` from the
        renderers list to prioritize `RawXMLRenderer`.

        Useful to display xml of Instance without any parsing.

        :param renderers: list
        :param format: str
        :return: list
        """
        renderers = [renderer for renderer in renderers
                     if renderer.format == format
                     and isinstance(renderer, XMLRenderer) is False]
        return renderers
kobotoolbox/kobocat
onadata/libs/renderers/renderers.py
Python
bsd-2-clause
4,636
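A small sketch of the XFormListRenderer above on hand-built data (the dict keys here are illustrative, not a fixed schema): each list item becomes an <xform> element under the <xforms> root, and dict keys become child elements.

renderer = XFormListRenderer()
xml = renderer.render([{'formID': 'demo', 'name': 'Demo form'}])
print(xml)
# -> '<?xml version="1.0" encoding="utf-8"?>\n<xforms xmlns="http://openrosa.org/xforms/xformsList">
#     <xform><formID>demo</formID><name>Demo form</name></xform></xforms>' (unindented, single line)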
import nltk
import pickle
import argparse
from collections import Counter
from pycocotools.coco import COCO


class Vocabulary(object):
    """Simple vocabulary wrapper."""
    def __init__(self):
        self.word2idx = {}
        self.idx2word = {}
        self.idx = 0

    def add_word(self, word):
        if not word in self.word2idx:
            self.word2idx[word] = self.idx
            self.idx2word[self.idx] = word
            self.idx += 1

    def __call__(self, word):
        if not word in self.word2idx:
            return self.word2idx['<unk>']
        return self.word2idx[word]

    def __len__(self):
        return len(self.word2idx)


def build_vocab(json, threshold):
    """Build a simple vocabulary wrapper."""
    coco = COCO(json)
    counter = Counter()
    ids = coco.anns.keys()
    for i, id in enumerate(ids):
        caption = str(coco.anns[id]['caption'])
        tokens = nltk.tokenize.word_tokenize(caption.lower())
        counter.update(tokens)

        if (i+1) % 1000 == 0:
            print("[{}/{}] Tokenized the captions.".format(i+1, len(ids)))

    # If the word frequency is less than 'threshold', then the word is discarded.
    words = [word for word, cnt in counter.items() if cnt >= threshold]

    # Create a vocab wrapper and add some special tokens.
    vocab = Vocabulary()
    vocab.add_word('<pad>')
    vocab.add_word('<start>')
    vocab.add_word('<end>')
    vocab.add_word('<unk>')

    # Add the words to the vocabulary.
    for i, word in enumerate(words):
        vocab.add_word(word)
    return vocab


def main(args):
    vocab = build_vocab(json=args.caption_path, threshold=args.threshold)
    vocab_path = args.vocab_path
    with open(vocab_path, 'wb') as f:
        pickle.dump(vocab, f)
    print("Total vocabulary size: {}".format(len(vocab)))
    print("Saved the vocabulary wrapper to '{}'".format(vocab_path))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--caption_path', type=str,
                        default='data/annotations/captions_train2014.json',
                        help='path for train annotation file')
    parser.add_argument('--vocab_path', type=str, default='./data/vocab.pkl',
                        help='path for saving vocabulary wrapper')
    parser.add_argument('--threshold', type=int, default=4,
                        help='minimum word count threshold')
    args = parser.parse_args()
    main(args)
yunjey/pytorch-tutorial
tutorials/03-advanced/image_captioning/build_vocab.py
Python
mit
2,459
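A sketch of how the pickled wrapper above is typically loaded back (the path matches the script's default; unpickling assumes the Vocabulary class is defined or importable in the loading process):

import pickle

with open('./data/vocab.pkl', 'rb') as f:
    vocab = pickle.load(f)

print(len(vocab))      # total vocabulary size
print(vocab('<unk>'))  # index of the unknown token
print(vocab('zebra'))  # unseen words fall back to the '<unk>' index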
#!/usr/bin/env python3

"""
Pipe informations to lemonboy's bar
"""

from os import path
from setuptools import setup

here = path.abspath(path.dirname(__file__))

setup(
    name="barython",
    version="0.0.1",

    description="Pipe informations to lemonboy's bar",

    url="https://github.com/Anthony25/barython",
    author="Anthony25 <Anthony Ruhier>",
    author_email="[email protected]",

    license="Simplified BSD",

    classifiers=[
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 3 :: Only",
        "License :: OSI Approved :: BSD License",
    ],

    keywords=["bar", "desktop"],
    packages=["barython", ],
    install_requires=["python-mpd2", "xcffib"],
    setup_requires=['pytest-runner', ],
    tests_require=['pytest', 'pytest-cov', "pytest-mock", "pytest-xdist"],
)
Anthony25/barython
setup.py
Python
bsd-3-clause
841
#!/usr/bin/env python
#coding=gb2312

from downloader import *

logger.addHandler(console)
console.setLevel(logging.DEBUG)


#################### Get query screen ####################
def getqscr(tn):
    read_all(tn, TIMEOUT_0)
    pu = re.compile(".+\\x1b\[2;\d+H", re.S)  # user query page
    pb = re.compile(".+\\x1b\[\d+;2H")        # board page
    redrawcurscreen(tn)
    res = tn.expect([pu, pb], TIMEOUT_4)
    if res[0] < 0:
        logger.warn("undefined status of page status")
        print res[2]
    return res[2]


def enterqscr(tn):
    logger.debug("Enter query menu")
    buf = getqscr(tn)
    res = re.search("查询谁:\s?(\w*)\\x1b\[K", buf)
    if res:
        logger.info("Already in query menu status")
        l = len(res.group(1))
        if l > 0:
            tn.write("\b" * l)
            logger.info("Removed existing string " + res.group(1))
        return True
    tn.write("u")
    res = tn.expect(["查询谁:"], TIMEOUT_4)
    if res < 0:
        redrawcurscreen(tn)
        buf = read_all(tn, 1)
        logger.error("Can't check user now\n%s" % buf)
        print "Can't check user now\n%s" % buf
        return False
    return True


def quitqscr(tn):
    buf = getqscr(tn)
    res = re.search("查询谁:(\w*)", buf)
    if not res:
        logger.warn("Not in query status\n" + buf)
        return False
    l = len(res.group(1))
    tn.write(l * '\b' + '\n')
    buf = getqscr(tn)
    res = re.search("查询谁: (\w+)", buf)
    if not res:
        logger.info("Quit successfully from query status")
    else:
        logger.error("Failed quit from query status")


def getusers(tn, prefix):
    logger.info("Start query users starts with " + prefix)
    if len(prefix) >= 13:
        prefix = prefix[:13]
        logger.warn("Prefix error, length should be in 13, cut it")
    if not enterqscr(tn):
        logger.error("Failed to enter user query screen")
        return False
    logger.info("Input query prefix " + prefix)
    tn.write(prefix)
    cursor = "\x1b[2;%sH" % (9 + len(prefix))
    userstr = ""
    while True:
        tn.write("\t")
        res = tn.read_until(cursor, TIMEOUT_4)
        buf = getqscr(tn)
        poss = buf.find("所有使用者")
        pose = buf.find("还有使用者")
        if poss < 0:
            logger.error("Unknow status when check users\n%s" % buf)
            print "Unknow status when check users\n%s" % buf
            return userstr
        if pose > 0:
##            print buf[poss:pose]
            logger.debug("Found page")
            userstr += buf[poss:pose]
            continue
        elif pose <= 0:
            logger.debug("Scrolled to end page")
            userstr += buf[poss:]
            break
        else:
            logger.error("Unknow status when check users\n%s" % buf)
            return userstr
    tn.write("\b" * len(prefix))
    userstr = clearstr(userstr)
    quitqscr(tn)
    userlist = re.findall("\w+", userstr)
    logger.info("Found %s users" % len(userlist))
    return userlist


def out(msg):
    f = open(filename, "a")
    f.write(msg + "\n")
    f.close()


def queryuser(tn, userid):
    logger.info("Start to query user " + userid)
    if not enterqscr(tn):
        logger.error("Failed to enter user query screen")
        return False
    tn.write(userid[:13] + "\n")
    buf = tn.read_until("其它键继续", TIMEOUT_4)
    res = re.findall("(\w+) \((.*)\) 上站 .*\n\r上次在 \[(.*)\] 从 \[(.*)\] 访问本站", buf)
    if len(res) == 1:
        tn.write(" ")  # quit from user information
        return res[0][3]
    else:
        logger.error("Failed to match the user information\n" + buf)
        return buf


def getuserlist(tn):
    prefixs = []
    for c in string.lowercase:
        for c2 in (string.lowercase + string.digits):
            logger.info("Query " + (c + c2))
            users = getusers(tn, c + c2)
            for u in users:
                uinfo = queryuser(tn, u)
                out(u + " " + uinfo)


def filluserinfo(tn):
    with open("userlist.txt") as f:
        data = f.next().strip()
        print data
    return


if __name__ == "__main__":
    filename = "userlist_%s.txt" % time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
    tn = login()
    if not tn:
        logger.error("Failed to login")
        exit(1)
    if not gotoboard(tn, "test"):
        logger.error("Failed to go to board test")
        exit(1)
    getuserlist(tn)
##    filluserinfo(tn)
    logout(tn)
newsun/newsmth
query_user.py
Python
apache-2.0
4,348
#
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest

from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder

from pyasn1_modules import pem
from pyasn1_modules import rfc5280
from pyasn1_modules import rfc5958
from pyasn1_modules import rfc7914
from pyasn1_modules import rfc8018


# From RFC 7914, Section 13

class MultiprimeRSAPrivateKeyTestCase(unittest.TestCase):
    pem_text = """\
MIHiME0GCSqGSIb3DQEFDTBAMB8GCSsGAQQB2kcECzASBAVNb3VzZQIDEAAAAgEI
AgEBMB0GCWCGSAFlAwQBKgQQyYmguHMsOwzGMPoyObk/JgSBkJb47EWd5iAqJlyy
+ni5ftd6gZgOPaLQClL7mEZc2KQay0VhjZm/7MbBUNbqOAXNM6OGebXxVp6sHUAL
iBGY/Dls7B1TsWeGObE0sS1MXEpuREuloZjcsNVcNXWPlLdZtkSH6uwWzR0PyG/Z
+ZXfNodZtd/voKlvLOw5B3opGIFaLkbtLZQwMiGtl42AS89lZg==
"""

    def setUp(self):
        self.asn1Spec = rfc5958.EncryptedPrivateKeyInfo()

    def testDerCodec(self):
        substrate = pem.readBase64fromText(self.pem_text)
        asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)

        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder(asn1Object))

        ea = asn1Object['encryptionAlgorithm']

        self.assertEqual(rfc8018.id_PBES2, ea['algorithm'])
        self.assertIn(ea['algorithm'], rfc5280.algorithmIdentifierMap)

        params, rest = der_decoder(
            ea['parameters'],
            asn1Spec=rfc5280.algorithmIdentifierMap[ea['algorithm']])

        self.assertFalse(rest)
        self.assertTrue(params.prettyPrint())
        self.assertEqual(ea['parameters'], der_encoder(params))

        kdf = params['keyDerivationFunc']

        self.assertEqual(rfc7914.id_scrypt, kdf['algorithm'])
        self.assertIn(kdf['algorithm'], rfc5280.algorithmIdentifierMap)

        kdfp, rest = der_decoder(
            kdf['parameters'],
            asn1Spec=rfc5280.algorithmIdentifierMap[kdf['algorithm']])

        self.assertFalse(rest)
        self.assertTrue(kdfp.prettyPrint())
        self.assertEqual(kdf['parameters'], der_encoder(kdfp))

        self.assertEqual(1048576, kdfp['costParameter'])

    def testOpenTypes(self):
        substrate = pem.readBase64fromText(self.pem_text)
        asn1Object, rest = der_decoder(
            substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)

        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder(asn1Object))

        ea = asn1Object['encryptionAlgorithm']
        self.assertEqual(rfc8018.id_PBES2, ea['algorithm'])

        params = asn1Object['encryptionAlgorithm']['parameters']
        self.assertEqual(
            rfc7914.id_scrypt, params['keyDerivationFunc']['algorithm'])

        kdfp = params['keyDerivationFunc']['parameters']
        self.assertEqual(1048576, kdfp['costParameter'])


suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    sys.exit(not result.wasSuccessful())
etingof/pyasn1-modules
tests/test_rfc7914.py
Python
bsd-2-clause
3,206
# Copyright 2004-2015 Tom Rothamel <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

# NOTE:
# Transitions need to be able to work even when old_widget and new_widget
# are None, at least to the point of making it through __init__. This is
# so that prediction of images works.

import renpy.display


# Utility function used by MoveTransition et al.
def position(d):

    xpos, ypos, xanchor, yanchor, _xoffset, _yoffset, _subpixel = d.get_placement()

    if xpos is None:
        xpos = 0
    if ypos is None:
        ypos = 0
    if xanchor is None:
        xanchor = 0
    if yanchor is None:
        yanchor = 0

    return xpos, ypos, xanchor, yanchor

def offsets(d):

    _xpos, _ypos, _xanchor, _yanchor, xoffset, yoffset, _subpixel = d.get_placement()

    if renpy.config.movetransition_respects_offsets:
        return { 'xoffset' : xoffset, 'yoffset' : yoffset }
    else:
        return { }


# These are used by MoveTransition.
def MoveFactory(pos1, pos2, delay, d, **kwargs):
    if pos1 == pos2:
        return d

    return renpy.display.motion.Move(pos1, pos2, delay, d, **kwargs)

def default_enter_factory(pos, delay, d, **kwargs):
    return d

def default_leave_factory(pos, delay, d, **kwargs):
    return None

# These can be used to move things in and out of the screen.
def MoveIn(pos, pos1, delay, d, **kwargs):

    def aorb(a, b):
        if a is None:
            return b
        return a

    pos = tuple([aorb(a, b) for a, b in zip(pos, pos1)])
    return renpy.display.motion.Move(pos, pos1, delay, d, **kwargs)

def MoveOut(pos, pos1, delay, d, **kwargs):

    def aorb(a, b):
        if a is None:
            return b
        return a

    pos = tuple([aorb(a, b) for a, b in zip(pos, pos1)])
    return renpy.display.motion.Move(pos1, pos, delay, d, **kwargs)

def ZoomInOut(start, end, pos, delay, d, **kwargs):

    xpos, ypos, xanchor, yanchor = pos

    FactorZoom = renpy.display.motion.FactorZoom

    if end == 1.0:
        return FactorZoom(start, end, delay, d, after_child=d, opaque=False,
                          xpos=xpos, ypos=ypos, xanchor=xanchor, yanchor=yanchor, **kwargs)
    else:
        return FactorZoom(start, end, delay, d, opaque=False,
                          xpos=xpos, ypos=ypos, xanchor=xanchor, yanchor=yanchor, **kwargs)

def RevolveInOut(start, end, pos, delay, d, **kwargs):
    return renpy.display.motion.Revolve(start, end, delay, d, pos=pos, **kwargs)


def OldMoveTransition(delay, old_widget=None, new_widget=None, factory=None,
                      enter_factory=None, leave_factory=None, old=False,
                      layers=[ 'master' ]):
    """
    Returns a transition that attempts to find images that have changed
    position, and moves them from the old position to the new position,
    taking delay seconds to complete the move.

    If `factory` is given, it is expected to be a function that takes as
    arguments: an old position, a new position, the delay, and a
    displayable, and to return a displayable as an argument. If not
    given, the default behavior is to move the displayable from the
    starting to the ending positions. Positions are always given as
    (xpos, ypos, xanchor, yanchor) tuples.

    If `enter_factory` or `leave_factory` are given, they are expected
    to be functions that take as arguments a position, a delay, and a
    displayable, and return a displayable. They are applied to
    displayables that are entering or leaving the scene, respectively.
    The default is to show in place displayables that are entering, and
    not to show those that are leaving.

    If `old` is True, then factory moves the old displayable with the
    given tag. Otherwise, it moves the new displayable with that tag.

    `layers` is a list of layers that the transition will be applied to.

    Images are considered to be the same if they have the same tag, in
    the same way that the tag is used to determine which image to
    replace or to hide. They are also considered to be the same if they
    have no tag, but use the same displayable.

    Computing the order in which images are displayed is a three-step
    process. The first step is to create a list of images that preserves
    the relative ordering of entering and moving images. The second step
    is to insert the leaving images such that each leaving image is at
    the lowest position that is still above all images that were below
    it in the original scene. Finally, the list is sorted by zorder, to
    ensure no zorder violations occur.

    If you use this transition to slide an image off the side of the
    screen, remember to hide it when you are done. (Or just use
    a leave_factory.)
    """

    if factory is None:
        factory = MoveFactory

    if enter_factory is None:
        enter_factory = default_enter_factory

    if leave_factory is None:
        leave_factory = default_leave_factory

    use_old = old

    def merge_slide(old, new):

        # If new does not have .layers or .scene_list, then we simply
        # insert a move from the old position to the new position, if
        # a move occured.

        if (not isinstance(new, renpy.display.layout.MultiBox)
            or (new.layers is None and new.layer_name is None)):

            if use_old:
                child = old
            else:
                child = new

            old_pos = position(old)
            new_pos = position(new)

            if old_pos != new_pos:
                return factory(old_pos, new_pos, delay, child, **offsets(child))
            else:
                return child

        # If we're in the layers_root widget, merge the child widgets
        # for each layer.

        if new.layers:

            rv = renpy.display.layout.MultiBox(layout='fixed')
            rv.layers = { }

            for layer in renpy.config.layers:

                f = new.layers[layer]

                if (isinstance(f, renpy.display.layout.MultiBox)
                    and layer in layers
                    and f.scene_list is not None):

                    f = merge_slide(old.layers[layer], new.layers[layer])

                rv.layers[layer] = f
                rv.add(f)

            return rv

        # Otherwise, we recompute the scene list for the two widgets, merging
        # as appropriate.

        # Wraps the displayable found in SLE so that the various timebases
        # are maintained.
        def wrap(sle):
            return renpy.display.layout.AdjustTimes(sle.displayable, sle.show_time, sle.animation_time)

        def tag(sle):
            return sle.tag or sle.displayable

        def merge(sle, d):
            rv = sle.copy()
            rv.show_time = 0
            rv.displayable = d
            return rv

        def entering(sle):
            new_d = wrap(new_sle)
            move = enter_factory(position(new_d), delay, new_d, **offsets(new_d))

            if move is None:
                return

            rv_sl.append(merge(new_sle, move))

        def leaving(sle):
            old_d = wrap(sle)
            move = leave_factory(position(old_d), delay, old_d, **offsets(old_d))

            if move is None:
                return

            move = renpy.display.layout.IgnoresEvents(move)
            rv_sl.append(merge(old_sle, move))

        def moving(old_sle, new_sle):
            old_d = wrap(old_sle)
            new_d = wrap(new_sle)

            if use_old:
                child = old_d
            else:
                child = new_d

            move = factory(position(old_d), position(new_d), delay, child, **offsets(child))

            if move is None:
                return

            rv_sl.append(merge(new_sle, move))

        # The old, new, and merged scene_lists.
        old_sl = old.scene_list[:]
        new_sl = new.scene_list[:]
        rv_sl = [ ]

        # A list of tags in old_sl, new_sl, and rv_sl.
        old_map = dict((tag(i), i) for i in old_sl if i is not None)
        new_tags = set(tag(i) for i in new_sl if i is not None)
        rv_tags = set()

        while old_sl or new_sl:

            # If we have something in old_sl, then
            if old_sl:

                old_sle = old_sl[0]
                old_tag = tag(old_sle)

                # If the old thing has already moved, then remove it.
                if old_tag in rv_tags:
                    old_sl.pop(0)
                    continue

                # If the old thing does not match anything in new_tags,
                # have it enter.
                if old_tag not in new_tags:
                    leaving(old_sle)
                    rv_tags.add(old_tag)
                    old_sl.pop(0)
                    continue

            # Otherwise, we must have something in new_sl. We want to
            # either move it or have it enter.

            new_sle = new_sl.pop(0)
            new_tag = tag(new_sle)

            # If it exists in both, move.
            if new_tag in old_map:
                old_sle = old_map[new_tag]

                moving(old_sle, new_sle)
                rv_tags.add(new_tag)
                continue

            else:
                entering(new_sle)
                rv_tags.add(new_tag)
                continue

        # Sort everything by zorder, to ensure that there are no zorder
        # violations in the result.
        rv_sl.sort(key=lambda a : a.zorder)

        layer = new.layer_name
        rv = renpy.display.layout.MultiBox(layout='fixed', focus=layer, **renpy.game.interface.layer_properties[layer])
        rv.append_scene_list(rv_sl)
        rv.layer_name = layer

        return rv

    # This calls merge_slide to actually do the merging.

    rv = merge_slide(old_widget, new_widget)
    rv.delay = delay # W0201

    return rv


##############################################################################
# New Move Transition (since 6.14)

class MoveInterpolate(renpy.display.core.Displayable):
    """
    This displayable has two children. It interpolates between the
    positions of its two children to place them on the screen.
    """

    def __init__(self, delay, old, new, use_old, time_warp):
        super(MoveInterpolate, self).__init__()

        # The old and new displayables.
        self.old = old
        self.new = new

        # Should we display the old displayable?
        self.use_old = False

        # Time warp function or None.
        self.time_warp = time_warp

        # The width of the screen.
        self.screen_width = 0
        self.screen_height = 0

        # The width of the selected child.
        self.child_width = 0
        self.child_height = 0

        # The delay and st.
        self.delay = delay
        self.st = 0

    def render(self, width, height, st, at):
        self.screen_width = width
        self.screen_height = height

        old_r = renpy.display.render.render(self.old, width, height, st, at)
        new_r = renpy.display.render.render(self.new, width, height, st, at)

        if self.use_old:
            cr = old_r
        else:
            cr = new_r

        self.child_width, self.child_height = cr.get_size()
        self.st = st

        if self.st < self.delay:
            renpy.display.render.redraw(self, 0)

        return cr

    def child_placement(self, child):

        def based(v, base):
            if v is None:
                return 0
            elif isinstance(v, int):
                return v
            elif isinstance(v, renpy.display.core.absolute):
                return v
            else:
                return v * base

        xpos, ypos, xanchor, yanchor, xoffset, yoffset, subpixel = child.get_placement()

        xpos = based(xpos, self.screen_width)
        ypos = based(ypos, self.screen_height)
        xanchor = based(xanchor, self.child_width)
        yanchor = based(yanchor, self.child_height)

        return xpos, ypos, xanchor, yanchor, xoffset, yoffset, subpixel

    def get_placement(self):

        if self.st > self.delay:
            done = 1.0
        else:
            done = self.st / self.delay

        if self.time_warp is not None:
            done = self.time_warp(done)

        absolute = renpy.display.core.absolute

        def I(a, b):
            return absolute(a + done * (b - a))

        old_xpos, old_ypos, old_xanchor, old_yanchor, old_xoffset, old_yoffset, old_subpixel = self.child_placement(self.old)
        new_xpos, new_ypos, new_xanchor, new_yanchor, new_xoffset, new_yoffset, new_subpixel = self.child_placement(self.new)

        xpos = I(old_xpos, new_xpos)
        ypos = I(old_ypos, new_ypos)
        xanchor = I(old_xanchor, new_xanchor)
        yanchor = I(old_yanchor, new_yanchor)
        xoffset = I(old_xoffset, new_xoffset)
        yoffset = I(old_yoffset, new_yoffset)
        subpixel = old_subpixel or new_subpixel

        return xpos, ypos, xanchor, yanchor, xoffset, yoffset, subpixel


def MoveTransition(delay, old_widget=None, new_widget=None, enter=None,
                   leave=None, old=False, layers=[ 'master' ], time_warp=None,
                   enter_time_warp=None, leave_time_warp=None):
    """
    :doc: transition function
    :args: (delay, enter=None, leave=None, old=False, layers=['master'], time_warp=None, enter_time_warp=None, leave_time_warp=None)
    :name: MoveTransition

    Returns a transition that interpolates the position of images (with the
    same tag) in the old and new scenes.

    `delay`
        The time it takes for the interpolation to finish.

    `enter`
        If not None, images entering the scene will also be moved. The value
        of `enter` should be a transform that is applied to the image to
        get its starting position.

    `leave`
        If not None, images leaving the scene will also be moved. The value
        of `leave` should be a transform that is applied to the image to
        get its ending position.

    `old`
        If true, the old image will be used in preference to the new one.

    `layers`
        A list of layers that moves are applied to.

    `time_warp`
        A time warp function that's applied to the interpolation. This
        takes a number between 0.0 and 1.0, and should return a number in
        the same range.

    `enter_time_warp`
        A time warp function that's applied to images entering the scene.

    `leave_time_warp`
        A time warp function that's applied to images leaving the scene.
    """

    use_old = old

    def merge_slide(old, new):

        # If new does not have .layers or .scene_list, then we simply
        # insert a move from the old position to the new position, if
        # a move occured.

        if (not isinstance(new, renpy.display.layout.MultiBox)
            or (new.layers is None and new.layer_name is None)):

            if old is new:
                return new
            else:
                return MoveInterpolate(delay, old, new, use_old, time_warp)

        # If we're in the layers_root widget, merge the child widgets
        # for each layer.

        if new.layers:

            rv = renpy.display.layout.MultiBox(layout='fixed')

            for layer in renpy.config.layers:

                f = new.layers[layer]

                if (isinstance(f, renpy.display.layout.MultiBox)
                    and layer in layers
                    and f.scene_list is not None):

                    f = merge_slide(old.layers[layer], new.layers[layer])

                rv.add(f)

            return rv

        # Otherwise, we recompute the scene list for the two widgets, merging
        # as appropriate.

        # Wraps the displayable found in SLE so that the various timebases
        # are maintained.
        def wrap(sle):
            return renpy.display.layout.AdjustTimes(sle.displayable, sle.show_time, sle.animation_time)

        def tag(sle):
            return sle.tag or sle.displayable

        def merge(sle, d):
            rv = sle.copy()
            rv.show_time = 0
            rv.displayable = d
            return rv

        def entering(sle):

            if not enter:
                return

            new_d = wrap(new_sle)
            move = MoveInterpolate(delay, enter(new_d), new_d, False, enter_time_warp)
            rv_sl.append(merge(new_sle, move))

        def leaving(sle):

            if not leave:
                return

            old_d = wrap(sle)
            move = MoveInterpolate(delay, old_d, leave(old_d), True, leave_time_warp)
            move = renpy.display.layout.IgnoresEvents(move)
            rv_sl.append(merge(old_sle, move))

        def moving(old_sle, new_sle):

            if old_sle.displayable is new_sle.displayable:
                rv_sl.append(new_sle)
                return

            old_d = wrap(old_sle)
            new_d = wrap(new_sle)

            move = MoveInterpolate(delay, old_d, new_d, use_old, time_warp)

            rv_sl.append(merge(new_sle, move))

        # The old, new, and merged scene_lists.
        old_sl = old.scene_list[:]
        new_sl = new.scene_list[:]
        rv_sl = [ ]

        # A list of tags in old_sl, new_sl, and rv_sl.
        old_map = dict((tag(i), i) for i in old_sl if i is not None)
        new_tags = set(tag(i) for i in new_sl if i is not None)
        rv_tags = set()

        while old_sl or new_sl:

            # If we have something in old_sl, then
            if old_sl:

                old_sle = old_sl[0]
                old_tag = tag(old_sle)

                # If the old thing has already moved, then remove it.
                if old_tag in rv_tags:
                    old_sl.pop(0)
                    continue

                # If the old thing does not match anything in new_tags,
                # have it enter.
                if old_tag not in new_tags:
                    leaving(old_sle)
                    rv_tags.add(old_tag)
                    old_sl.pop(0)
                    continue

            # Otherwise, we must have something in new_sl. We want to
            # either move it or have it enter.

            new_sle = new_sl.pop(0)
            new_tag = tag(new_sle)

            # If it exists in both, move.
            if new_tag in old_map:
                old_sle = old_map[new_tag]

                moving(old_sle, new_sle)
                rv_tags.add(new_tag)
                continue

            else:
                entering(new_sle)
                rv_tags.add(new_tag)
                continue

        # Sort everything by zorder, to ensure that there are no zorder
        # violations in the result.
        rv_sl.sort(key=lambda a : a.zorder)

        layer = new.layer_name
        rv = renpy.display.layout.MultiBox(layout='fixed', focus=layer, **renpy.game.interface.layer_properties[layer])
        rv.append_scene_list(rv_sl)

        return rv

    # Call merge_slide to actually do the merging.
    rv = merge_slide(old_widget, new_widget)
    rv.delay = delay

    return rv
joxer/Baka-No-Voltron
tmp/android.dist/private/renpy/display/movetransition.py
Python
gpl-2.0
20,082
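A minimal usage sketch for the MoveTransition defined above (editor's sketch, not part of the source file): in a Ren'Py script these assignments would normally be `define` statements, and `offscreenleft`/`offscreenright` are assumed to be Ren'Py's built-in transforms.

# Plain slide of tagged images over half a second.
move = MoveTransition(0.5)

# Ease the motion with a smoothstep time warp (maps 0.0..1.0 to 0.0..1.0).
ease_move = MoveTransition(0.5, time_warp=lambda t: t * t * (3.0 - 2.0 * t))

# Also animate images that enter or leave the scene, from/to offscreen.
moveinout = MoveTransition(0.5, enter=offscreenright, leave=offscreenleft)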
import os
import sys

if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

from avocado.utils import process

basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)


class StandaloneTests(unittest.TestCase):

    def setUp(self):
        self.original_pypath = os.environ.get('PYTHONPATH')
        if self.original_pypath is not None:
            os.environ['PYTHONPATH'] = '%s:%s' % (basedir, self.original_pypath)
        else:
            os.environ['PYTHONPATH'] = '%s' % basedir

    def run_and_check(self, cmd_line, expected_rc, tstname):
        os.chdir(basedir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, expected_rc,
                         "Stand alone %s did not return rc "
                         "%d:\n%s" % (tstname, expected_rc, result))
        return result

    def test_passtest(self):
        cmd_line = './examples/tests/passtest.py -r'
        expected_rc = 0
        self.run_and_check(cmd_line, expected_rc, 'passtest')

    def test_warntest(self):
        cmd_line = './examples/tests/warntest.py -r'
        expected_rc = 0
        self.run_and_check(cmd_line, expected_rc, 'warntest')

    def test_failtest(self):
        cmd_line = './examples/tests/failtest.py -r'
        expected_rc = 1
        self.run_and_check(cmd_line, expected_rc, 'failtest')

    def test_failtest_nasty(self):
        cmd_line = './examples/tests/failtest_nasty.py -r'
        expected_rc = 1
        result = self.run_and_check(cmd_line, expected_rc, 'failtest_nasty')
        exc = "NastyException: Nasty-string-like-exception"
        count = result.stdout.count("\n%s" % exc)
        self.assertEqual(count, 2, "Exception \\n%s should be present twice in "
                         "the log (once from the log, second time when parsing "
                         "exception details)." % (exc))

    def test_failtest_nasty2(self):
        cmd_line = './examples/tests/failtest_nasty2.py -r'
        expected_rc = 1
        result = self.run_and_check(cmd_line, expected_rc, 'failtest_nasty2')
        self.assertIn("Exception: Unable to get exception, check the traceback"
                      " for details.", result.stdout)

    def test_errortest(self):
        cmd_line = './examples/tests/errortest.py -r'
        expected_rc = 1
        self.run_and_check(cmd_line, expected_rc, 'errortest')


if __name__ == '__main__':
    unittest.main()
Hao-Liu/avocado
selftests/functional/test_standalone.py
Python
gpl-2.0
2,525
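For context, a standalone test of the kind invoked above might look like the sketch below. This is an assumption based on the avocado Test API of the same era; the actual examples/tests/passtest.py may differ.

# Sketch of a standalone avocado test (assumed era API: avocado.Test/main).
from avocado import Test
from avocado import main


class PassTest(Test):

    def test(self):
        """A test that does nothing, and therefore passes (rc 0)."""
        pass


if __name__ == "__main__":
    main()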
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import abc

from oslo_config import cfg
from six.moves.urllib import parse

from rally.cli import envutils
from rally.common.i18n import _
from rally.common import logging
from rally.common import objects
from rally.common.plugin import plugin
from rally import consts
from rally import exceptions

CONF = cfg.CONF

OSCLIENTS_OPTS = [
    cfg.FloatOpt("openstack_client_http_timeout", default=180.0,
                 help="HTTP timeout for any OpenStack service, in seconds")
]
CONF.register_opts(OSCLIENTS_OPTS)

_NAMESPACE = "openstack"


def configure(name, default_version=None, default_service_type=None,
              supported_versions=None):
    """OpenStack client class wrapper.

    Each client class has to be wrapped by the configure() wrapper. It sets
    the essential configuration of client classes.

    :param name: Name of the client
    :param default_version: Default version for the client
    :param default_service_type: Default service type of the endpoint (if
        this variable is not specified, validation will assume that your
        client doesn't allow specifying a service type)
    :param supported_versions: List of supported versions (if this variable
        is not specified, the `OSClients.validate_version` method will raise
        an exception that the client doesn't support setting any versions.
        If this logic is wrong for your client, you should override
        `validate_version` in the client object)
    """
    def wrapper(cls):
        cls = plugin.configure(name=name, namespace=_NAMESPACE)(cls)
        cls._meta_set("default_version", default_version)
        cls._meta_set("default_service_type", default_service_type)
        cls._meta_set("supported_versions", supported_versions or [])
        return cls

    return wrapper


@plugin.base()
class OSClient(plugin.Plugin):
    def __init__(self, credential, api_info, cache_obj):
        self.credential = credential
        self.api_info = api_info
        self.cache = cache_obj

    def choose_version(self, version=None):
        """Return version string.

        Choose version between transmitted (preferable value if present),
        version from api_info (configured from a context) and default.
        """
        # NOTE(andreykurilin): The result of choose is converted to string,
        # since most of clients contain map for versioned modules, where a key
        # is a string value of version. Example of map and its usage:
        #
        #     from oslo_utils import importutils
        #     ...
        #     version_map = {"1": "someclient.v1.client.Client",
        #                    "2": "someclient.v2.client.Client"}
        #
        #     def Client(version, *args, **kwargs):
        #         cls = importutils.import_class(version_map[version])
        #         return cls(*args, **kwargs)
        #
        # That is why the type of version is so important and we should
        # ensure that version is a string object.
        # For those clients which don't accept string values (for example
        # zaqarclient), this method should be overridden.
version = (version or self.api_info.get(self.get_name(), {}).get("version") or self._meta_get("default_version")) if version is not None: version = str(version) return version @classmethod def get_supported_versions(cls): return cls._meta_get("supported_versions") @classmethod def validate_version(cls, version): supported_versions = cls.get_supported_versions() if supported_versions: if str(version) not in supported_versions: raise exceptions.ValidationError(_( "'%(vers)s' is not supported. Should be one of " "'%(supported)s'") % {"vers": version, "supported": supported_versions}) else: raise exceptions.RallyException( _("Setting version is not supported.")) try: float(version) except ValueError: raise exceptions.ValidationError(_( "'%s' is invalid. Should be numeric value.") % version) def choose_service_type(self, service_type=None): """Return service_type string. Choose service type between transmitted(preferable value if present), service type from api_info(configured from a context) and default. """ return (service_type or self.api_info.get(self.get_name(), {}).get("service_type") or self._meta_get("default_service_type")) @classmethod def is_service_type_configurable(cls): """Just checks that client supports setting service type.""" if cls._meta_get("default_service_type") is None: raise exceptions.RallyException(_( "Setting service type is not supported.")) def keystone(self, *args, **kwargs): """Make a call to keystone client.""" keystone = OSClient.get("keystone")(self.credential, self.api_info, self.cache) return keystone(*args, **kwargs) def _get_session(self, auth_url=None, version=None): from keystoneauth1 import discover from keystoneauth1 import session from keystoneclient.auth import identity password_args = { "auth_url": auth_url or self.credential.auth_url, "username": self.credential.username, "password": self.credential.password, "tenant_name": self.credential.tenant_name } version = OSClient.get("keystone")( self.credential, self.api_info, self.cache).choose_version(version) if version is None: # NOTE(rvasilets): If version not specified than we discover # available version with the smallest number. 
To be able to # discover versions we need session temp_session = session.Session( verify=( self.credential.cacert or not self.credential.insecure), timeout=CONF.openstack_client_http_timeout) version = str(discover.Discover( temp_session, password_args["auth_url"]).version_data()[0]["version"][0]) if "v2.0" not in password_args["auth_url"] and ( version != "2"): password_args.update({ "user_domain_name": self.credential.user_domain_name, "domain_name": self.credential.domain_name, "project_domain_name": self.credential.project_domain_name, }) identity_plugin = identity.Password(**password_args) sess = session.Session( auth=identity_plugin, verify=( self.credential.cacert or not self.credential.insecure), timeout=CONF.openstack_client_http_timeout) return sess, identity_plugin def _get_endpoint(self, service_type=None): kc = self.keystone() kw = {"service_type": self.choose_service_type(service_type), "region_name": self.credential.region_name} if self.credential.endpoint_type: kw["endpoint_type"] = self.credential.endpoint_type api_url = kc.service_catalog.url_for(**kw) return api_url def _get_auth_info(self, user_key="username", password_key="password", auth_url_key="auth_url", project_name_key="project_id", domain_name_key="domain_name", user_domain_name_key="user_domain_name", project_domain_name_key="project_domain_name", cacert_key="cacert", endpoint_type="endpoint_type", ): kw = { user_key: self.credential.username, password_key: self.credential.password, auth_url_key: self.credential.auth_url, cacert_key: self.credential.cacert, } if project_name_key: kw.update({project_name_key: self.credential.tenant_name}) if "v2.0" not in self.credential.auth_url: kw.update({ domain_name_key: self.credential.domain_name}) kw.update({ user_domain_name_key: self.credential.user_domain_name or "Default"}) kw.update({ project_domain_name_key: self.credential.project_domain_name or "Default"}) if self.credential.endpoint_type: kw[endpoint_type] = self.credential.endpoint_type return kw @abc.abstractmethod def create_client(self, *args, **kwargs): """Create new instance of client.""" def __call__(self, *args, **kwargs): """Return initialized client instance.""" key = "{0}{1}{2}".format(self.get_name(), str(args) if args else "", str(kwargs) if kwargs else "") if key not in self.cache: self.cache[key] = self.create_client(*args, **kwargs) return self.cache[key] @classmethod def get(cls, name, namespace=_NAMESPACE): return super(OSClient, cls).get(name, namespace) @configure("keystone", supported_versions=("2", "3")) class Keystone(OSClient): def keystone(self, *args, **kwargs): raise exceptions.RallyException(_("Method 'keystone' is restricted " "for keystoneclient. :)")) def _remove_url_version(self): """Remove any version from the auth_url. The keystone Client code requires that auth_url be the root url if a version override is used. """ url = parse.urlparse(self.credential.auth_url) # NOTE(bigjools): This assumes that non-versioned URLs have no # path component at all. parts = (url.scheme, url.netloc, "/", url.params, url.query, url.fragment) return parse.urlunparse(parts) def create_client(self, version=None): """Return a keystone client. :param version: Keystone API version, can be one of: ("2", "3") If this object was constructed with a version in the api_info then that will be used unless the version parameter is passed. 
""" import keystoneclient from keystoneclient import client # Use the version in the api_info if provided, otherwise fall # back to the passed version (which may be None, in which case # keystoneclient chooses). version = self.choose_version(version) auth_url = self.credential.auth_url if version is not None: auth_url = self._remove_url_version() sess, plugin = self._get_session(auth_url=auth_url, version=version) # NOTE(bigjools): When using sessions, keystoneclient no longer # does any pre-auth and calling client.authenticate() with # sessions is deprecated (it's still possible to call it but if # endpoint is defined it'll crash). We're forcing that pre-auth # here because the use of the service_catalog depends on doing # this. Also note that while the API has got the # endpoints.list() equivalent, there is no service_type in that # list which is why we need to ensure service_catalog is still # present. auth_ref = plugin.get_access(sess) kw = {"version": version, "session": sess, "timeout": CONF.openstack_client_http_timeout} if keystoneclient.__version__[0] == "1": # NOTE(andreykurilin): let's leave this hack for envs which uses # old(<2.0.0) keystoneclient version. Upstream fix: # https://github.com/openstack/python-keystoneclient/commit/d9031c252848d89270a543b67109a46f9c505c86 from keystoneclient import base kw["auth_url"] = sess.get_endpoint(interface=base.AUTH_INTERFACE) if self.credential.endpoint_type: kw["endpoint_type"] = self.credential.endpoint_type ks = client.Client(**kw) ks.auth_ref = auth_ref return ks @configure("nova", default_version="2", default_service_type="compute") class Nova(OSClient): @classmethod def validate_version(cls, version): from novaclient import api_versions from novaclient import exceptions as nova_exc try: api_versions.get_api_version(version) except nova_exc.UnsupportedVersion: raise exceptions.RallyException( "Version string '%s' is unsupported." 
% version) def create_client(self, version=None, service_type=None): """Return nova client.""" from novaclient import client as nova kc = self.keystone() client = nova.Client(self.choose_version(version), auth_token=kc.auth_token, http_log_debug=logging.is_debug(), timeout=CONF.openstack_client_http_timeout, insecure=self.credential.insecure, **self._get_auth_info(password_key="api_key")) client.set_management_url(self._get_endpoint(service_type)) return client @configure("neutron", default_version="2.0", default_service_type="network", supported_versions=["2.0"]) class Neutron(OSClient): def create_client(self, version=None, service_type=None): """Return neutron client.""" from neutronclient.neutron import client as neutron kc = self.keystone() client = neutron.Client(self.choose_version(version), token=kc.auth_token, endpoint_url=self._get_endpoint(service_type), timeout=CONF.openstack_client_http_timeout, insecure=self.credential.insecure, **self._get_auth_info( project_name_key="tenant_name", cacert_key="ca_cert")) return client @configure("glance", default_version="1", default_service_type="image", supported_versions=["1", "2"]) class Glance(OSClient): def create_client(self, version=None, service_type=None): """Return glance client.""" import glanceclient as glance kc = self.keystone() client = glance.Client(self.choose_version(version), endpoint=self._get_endpoint(service_type), token=kc.auth_token, timeout=CONF.openstack_client_http_timeout, insecure=self.credential.insecure, cacert=self.credential.cacert) return client @configure("heat", default_version="1", default_service_type="orchestration", supported_versions=["1"]) class Heat(OSClient): def create_client(self, version=None, service_type=None): """Return heat client.""" from heatclient import client as heat kc = self.keystone() client = heat.Client(self.choose_version(version), endpoint=self._get_endpoint(service_type), token=kc.auth_token, timeout=CONF.openstack_client_http_timeout, insecure=self.credential.insecure, **self._get_auth_info(project_name_key=None, cacert_key="ca_file")) return client @configure("cinder", default_version="2", default_service_type="volumev2", supported_versions=["1", "2"]) class Cinder(OSClient): def create_client(self, version=None, service_type=None): """Return cinder client.""" from cinderclient import client as cinder client = cinder.Client(self.choose_version(version), http_log_debug=logging.is_debug(), timeout=CONF.openstack_client_http_timeout, insecure=self.credential.insecure, **self._get_auth_info(password_key="api_key")) kc = self.keystone() client.client.management_url = self._get_endpoint(service_type) client.client.auth_token = kc.auth_token return client @configure("manila", default_version="1", default_service_type="share", supported_versions=["1", "2"]) class Manila(OSClient): def create_client(self, version=None, service_type=None): """Return manila client.""" from manilaclient import client as manila manila_client = manila.Client( self.choose_version(version), region_name=self.credential.region_name, http_log_debug=logging.is_debug(), timeout=CONF.openstack_client_http_timeout, insecure=self.credential.insecure, **self._get_auth_info(password_key="api_key", project_name_key="project_name")) kc = self.keystone() manila_client.client.management_url = self._get_endpoint(service_type) manila_client.client.auth_token = kc.auth_token return manila_client @configure("ceilometer", default_version="2", default_service_type="metering", supported_versions=["1", "2"]) class 
Ceilometer(OSClient): def create_client(self, version=None, service_type=None): """Return ceilometer client.""" from ceilometerclient import client as ceilometer kc = self.keystone() auth_token = kc.auth_token if not hasattr(auth_token, "__call__"): # python-ceilometerclient requires auth_token to be a callable auth_token = lambda: kc.auth_token client = ceilometer.get_client( self.choose_version(version), os_endpoint=self._get_endpoint(service_type), token=auth_token, timeout=CONF.openstack_client_http_timeout, insecure=self.credential.insecure, **self._get_auth_info(project_name_key="tenant_name", endpoint_type="interface")) return client @configure("gnocchi", default_service_type="metric", default_version="1", supported_versions=["1"]) class Gnocchi(OSClient): def create_client(self, version=None, service_type=None): """Return gnocchi client.""" # NOTE(sumantmurke): gnocchiclient requires keystoneauth1 for # authenticating and creating a session. from gnocchiclient import client as gnocchi service_type = self.choose_service_type(service_type) sess = self._get_session()[0] gclient = gnocchi.Client(version=self.choose_version( version), session=sess, service_type=service_type) return gclient @configure("ironic", default_version="1", default_service_type="baremetal", supported_versions=["1"]) class Ironic(OSClient): def create_client(self, version=None, service_type=None): """Return Ironic client.""" from ironicclient import client as ironic kc = self.keystone() client = ironic.get_client(self.choose_version(version), os_auth_token=kc.auth_token, ironic_url=self._get_endpoint(service_type), timeout=CONF.openstack_client_http_timeout, insecure=self.credential.insecure, cacert=self.credential.cacert, interface=self._get_auth_info().get( "endpoint_type") ) return client @configure("sahara", default_version="1.1", supported_versions=["1.0", "1.1"], default_service_type="data-processing") class Sahara(OSClient): # NOTE(andreykurilin): saharaclient supports "1.0" version and doesn't # support "1". `choose_version` and `validate_version` methods are written # as a hack to covert 1 -> 1.0, which can simplify setting saharaclient # for end-users. 
def choose_version(self, version=None): return float(super(Sahara, self).choose_version(version)) @classmethod def validate_version(cls, version): super(Sahara, cls).validate_version(float(version)) def create_client(self, version=None, service_type=None): """Return Sahara client.""" from saharaclient import client as sahara client = sahara.Client( self.choose_version(version), service_type=self.choose_service_type(service_type), insecure=self.credential.insecure, **self._get_auth_info(password_key="api_key", project_name_key="project_name")) return client @configure("zaqar", default_version="1.1", default_service_type="messaging", supported_versions=["1", "1.1"]) class Zaqar(OSClient): def choose_version(self, version=None): # zaqarclient accepts only int or float obj as version return float(super(Zaqar, self).choose_version(version)) def create_client(self, version=None, service_type=None): """Return Zaqar client.""" from zaqarclient.queues import client as zaqar kc = self.keystone() conf = {"auth_opts": {"backend": "keystone", "options": { "os_username": self.credential.username, "os_password": self.credential.password, "os_project_name": self.credential.tenant_name, "os_project_id": kc.auth_ref.get("token").get("tenant").get("id"), "os_auth_url": self.credential.auth_url, "insecure": self.credential.insecure, }}} client = zaqar.Client(url=self._get_endpoint(), version=self.choose_version(version), conf=conf) return client @configure("murano", default_version="1", default_service_type="application-catalog", supported_versions=["1"]) class Murano(OSClient): def create_client(self, version=None, service_type=None): """Return Murano client.""" from muranoclient import client as murano kc = self.keystone() client = murano.Client(self.choose_version(version), endpoint=self._get_endpoint(service_type), token=kc.auth_token) return client @configure("designate", default_version="1", default_service_type="dns", supported_versions=["1", "2"]) class Designate(OSClient): def create_client(self, version=None, service_type=None): """Return designate client.""" from designateclient import client version = self.choose_version(version) api_url = self._get_endpoint(service_type) api_url += "/v%s" % version session = self._get_session()[0] if version == "2": return client.Client(version, session=session, endpoint_override=api_url) return client.Client(version, session=session, endpoint=api_url) @configure("trove", default_version="1.0", supported_versions=["1.0"]) class Trove(OSClient): def create_client(self, version=None): """Returns trove client.""" from troveclient import client as trove client = trove.Client(self.choose_version(version), region_name=self.credential.region_name, timeout=CONF.openstack_client_http_timeout, insecure=self.credential.insecure, **self._get_auth_info(password_key="api_key") ) return client @configure("mistral", default_service_type="workflowv2") class Mistral(OSClient): def create_client(self, service_type=None): """Return Mistral client.""" from mistralclient.api import client as mistral kc = self.keystone() client = mistral.client( mistral_url=self._get_endpoint(service_type), service_type=self.choose_service_type(service_type), auth_token=kc.auth_token) return client @configure("swift", default_service_type="object-store") class Swift(OSClient): def create_client(self, service_type=None): """Return swift client.""" from swiftclient import client as swift kc = self.keystone() client = swift.Connection(retries=1, preauthurl=self._get_endpoint(service_type), 
preauthtoken=kc.auth_token, insecure=self.credential.insecure, cacert=self.credential.cacert, user=self.credential.username, tenant_name=self.credential.tenant_name, ) return client @configure("ec2") class EC2(OSClient): def create_client(self): """Return ec2 client.""" import boto kc = self.keystone() if kc.version != "v2.0": raise exceptions.RallyException( _("Rally EC2 benchmark currently supports only" "Keystone version 2")) ec2_credential = kc.ec2.create(user_id=kc.auth_user_id, tenant_id=kc.auth_tenant_id) client = boto.connect_ec2_endpoint( url=self._get_endpoint(), aws_access_key_id=ec2_credential.access, aws_secret_access_key=ec2_credential.secret, is_secure=self.credential.insecure) return client @configure("monasca", default_version="2_0", default_service_type="monitoring", supported_versions=["2_0"]) class Monasca(OSClient): def create_client(self, version=None, service_type=None): """Return monasca client.""" from monascaclient import client as monasca kc = self.keystone() auth_token = kc.auth_token client = monasca.Client( self.choose_version(version), self._get_endpoint(service_type), token=auth_token, timeout=CONF.openstack_client_http_timeout, insecure=self.credential.insecure, **self._get_auth_info(project_name_key="tenant_name")) return client @configure("cue", default_version="1", default_service_type="message-broker") class Cue(OSClient): def create_client(self, service_type=None): """Return cue client.""" from cueclient.v1 import client as cue version = self.choose_version() api_url = self._get_endpoint(service_type) api_url += "v%s" % version session = self._get_session(auth_url=api_url)[0] endpoint_type = self.credential.endpoint_type return cue.Client(session=session, interface=endpoint_type) @configure("senlin", default_version="1", default_service_type="clustering", supported_versions=["1"]) class Senlin(OSClient): def create_client(self, version=None, service_type=None): """Return senlin client.""" from senlinclient import client as senlin return senlin.Client( self.choose_version(version), **self._get_auth_info(project_name_key="project_name", cacert_key="cert", endpoint_type="interface")) @configure("magnum", default_version="1", supported_versions=["1"], default_service_type="container-infra",) class Magnum(OSClient): def create_client(self, version=None, service_type=None): """Return magnum client.""" from magnumclient import client as magnum api_url = self._get_endpoint(service_type) session = self._get_session()[0] return magnum.Client( session=session, interface=self.credential.endpoint_type, magnum_url=api_url) @configure("watcher", default_version="1", default_service_type="infra-optim", supported_versions=["1"]) class Watcher(OSClient): def create_client(self, version=None, service_type=None): """Return watcher client.""" from watcherclient import client as watcher_client kc = self.keystone() watcher_api_url = self._get_endpoint( self.choose_service_type(service_type)) client = watcher_client.Client( self.choose_version(version), watcher_api_url, token=kc.auth_token, timeout=CONF.openstack_client_http_timeout, insecure=self.credential.insecure, ca_file=self.credential.cacert, endpoint_type=self.credential.endpoint_type) return client class Clients(object): """This class simplify and unify work with OpenStack python clients.""" def __init__(self, credential, api_info=None): self.credential = credential self.api_info = api_info or {} self.cache = {} def __getattr__(self, client_name): """Lazy load of clients.""" return 
OSClient.get(client_name)(self.credential, self.api_info, self.cache) @classmethod def create_from_env(cls): creds = envutils.get_creds_from_env_vars() return cls( objects.Credential( creds["auth_url"], creds["admin"]["username"], creds["admin"]["password"], creds["admin"]["tenant_name"], endpoint_type=creds["endpoint_type"], user_domain_name=creds["admin"].get("user_domain_name"), project_domain_name=creds["admin"].get("project_domain_name"), endpoint=creds["endpoint"], region_name=creds["region_name"], https_cacert=creds["https_cacert"], https_insecure=creds["https_insecure"] )) def clear(self): """Remove all cached client handles.""" self.cache = {} def verified_keystone(self): """Ensure keystone endpoints are valid and then authenticate :returns: Keystone Client """ from keystoneclient import exceptions as keystone_exceptions try: # Ensure that user is admin client = self.keystone() if "admin" not in [role.lower() for role in client.auth_ref.role_names]: raise exceptions.InvalidAdminException( username=self.credential.username) except keystone_exceptions.Unauthorized: raise exceptions.InvalidEndpointsException() except keystone_exceptions.AuthorizationFailure: raise exceptions.HostUnreachableException( url=self.credential.auth_url) return client def services(self): """Return available services names and types. :returns: dict, {"service_type": "service_name", ...} """ if "services_data" not in self.cache: services_data = {} ks = self.keystone() available_services = ks.service_catalog.get_endpoints() for stype in available_services.keys(): if stype in consts.ServiceType: services_data[stype] = consts.ServiceType[stype] else: services_data[stype] = "__unknown__" self.cache["services_data"] = services_data return self.cache["services_data"]
gluke77/rally
rally/osclients.py
Python
apache-2.0
32,680
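A short usage sketch for the Clients facade above; the attribute names are resolved lazily through the @configure() plugin names, and repeated calls reuse the per-key cache.

# Usage sketch: build clients from OS_* environment variables and call a few
# of the lazily resolved OpenStack clients defined above.
clients = Clients.create_from_env()
nova = clients.nova("2")        # resolved via OSClient.get("nova"), version "2"
neutron = clients.neutron()     # cached under a (name, args, kwargs) key
glance = clients.glance()       # later calls with the same key hit the cache
clients.clear()                 # drop all cached client handles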
""" Module for the integration of the TUM structural clustering algorithm author: Tobias Girschick; [email protected] TUM - I12 (wwwkramer.in.tum.de/girschic) dependencies: gSpan, java Please cite the following article if you use the structural clustering procedure or results produced with it in any publication: @InProceedings{ seeland2010, title = "Online Structural Graph Clustering Using Frequent Subgraph Mining", booktitle = "Proceedings of the ECML/PKDD'10", series = "Lecture Notes in Computer Science", author = "M. Seeland and T. Girschick and F. Buchwald and S. Kramer", editor = "J.L. Balc{\'a}zar and F. Bonchi and A. Gionis and M. Sebag", pages = "213--228", volume = "6323", year = "2010", booktitle1 = "Machine Learning and Knowledge Discovery in Databases, European Conference, {ECML} {PKDD} 2010, Barcelona, Spain, September 20-24, 2010, Proceedings, Part {III}" } """ import os import sys import time import shutil import random import subprocess from subprocess import Popen, PIPE import tempfile from AZutilities import dataUtilities import AZOrangeConfig as AZOC def getStructuralClusters(data, threshold, minClusterSize, minClusterSaveSize = 0, minMolSize = 3, minSaveSDFsize = 0, numThreads=1, timeout=20): """ just the clustering returns a list (of clusters) of lists (which contain the smiles string of the cluster members) """ clusters = [] isSuccess = False tries = 0 while (not isSuccess and tries < 10): tries += 1 sdf_tempName = dataUtilities.makeTempSDF(data, smilesAsName=1) # create tempdir for usage as 6) outputpath temp_dir = tempfile.mkdtemp(prefix="AZorangeTMP_") # call clustering routine # Example command line call; gspan files are available in the same folder as the jar executable #java -jar structuralClustering.jar /home/girschic/proj/AZ/SAR/631.sdf 0.5 3 0 5 /home/girschic/proj/test/ . 2 20 jarpath = os.path.join(AZOC.STRUCTCLUSTDIR,'structuralClustering.jar') opt = '-jar ' + jarpath + ' ' + sdf_tempName + ' ' + str(threshold) + ' ' + str(minMolSize) + ' ' + str(minClusterSaveSize) + ' ' + str(minClusterSize) + ' ' + temp_dir + '/ ' + str(AZOC.STRUCTCLUSTDIR) + ' ' + str(numThreads) + ' ' + str(timeout) cmd = 'java ' + opt p = Popen(cmd, shell=True, close_fds=True, stdout=PIPE) stdout = p.communicate() # parse output outfile = os.path.join(temp_dir,'output_clusters.txt') try: if os.path.isfile(outfile): output = open(outfile, 'r') # 1,CCC(C)NC(=O)CSC1=NC2=C(C=CC(=C2)OCC)C=C1C#N COC1=CC2=C(C=C1)N=C(C(=C2)C#N)SCC(=O)NC3=CC=C(C=C3)S(=O)(=O)N4CCCC4 CCC1=C(N=C2C=C3C(=CC2=C1)OCO3)SCC(=O)NC4=NOC(=C4)C for line in output: tmp = line.strip() split = tmp.partition(',') smilesList = split[2].split('\t') clusters.append(smilesList) else: print str(outfile) + " does not exist!" 
                continue
        except IOError as (errno, strerror):
            print "I/O error({0}): {1}".format(errno, strerror)
            continue

        shutil.rmtree(temp_dir)
        isSuccess = True

    return clusters


def getReferenceStructures(data, threshold, minClusterSize, minClusterSaveSize = 0, minMolSize = 3, minSaveSDFsize = 0, numThreads=1, timeout=20, representativeMethod="random"):
    """
    Calls the clustering with the given parameters, then one of the
    (currently one) methods for picking cluster representatives.
    Returns a list of representative SMILES strings (one per cluster),
    or None if representativeMethod is unknown.
    """
    # get clusters
    clusters = getStructuralClusters(data, threshold, minClusterSize, minClusterSaveSize, minMolSize, minSaveSDFsize, numThreads, timeout)

    # get representatives
    representatives = []
    if (representativeMethod == 'random'):
        representatives = getRandomRepresentatives(clusters)
    else:
        print "Method for finding cluster representatives unknown!"
        return None

    return representatives


def getRandomRepresentatives(clusters):
    """
    Gets a random representative for each cluster.
    Returns a list of SMILES strings for usage in descriptor calculation.
    """
    reps = []
    for cluster in clusters:
        # generate a random index into the cluster at hand
        end = len(cluster) - 1
        idx = random.randint(0, end)
        reps.append(cluster[idx])

    return reps
JonnaStalring/AZOrange
azorange/AZutilities/structuralClustering.py
Python
lgpl-3.0
4,151
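A usage sketch for the two entry points above, assuming `data` is an Orange ExampleTable with SMILES and that AZOrangeConfig points at a working structuralClustering.jar:

# Cluster at similarity threshold 0.5, keeping clusters of at least 5 members,
# then pick one random representative per cluster.
clusters = getStructuralClusters(data, threshold=0.5, minClusterSize=5)
print "%d clusters found" % len(clusters)
reps = getReferenceStructures(data, threshold=0.5, minClusterSize=5,
                              representativeMethod="random")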
# Unit tests for the boundary shape class and the boundary sphere class.

import unittest
import numpy as N

from tracer.spatial_geometry import generate_transform, rotx
from tracer.boundary_shape import *


class TestInBounds(unittest.TestCase):
    def setUp(self):
        self.points = N.array([
            [0., 0., 0.],
            [1., 1., 1.],
            [2., 2., 2.]])

    def test_sphere(self):
        """Sphere bounding volume"""
        sphere = BoundarySphere(radius=2.)
        N.testing.assert_array_equal(sphere.in_bounds(self.points),
                                     [True, True, False])

    def test_cylinder(self):
        """Cylinder bounding volume"""
        cyl = BoundaryCylinder(diameter=3.)
        N.testing.assert_array_equal(cyl.in_bounds(self.points),
                                     [True, True, False])

    def test_plane(self):
        """Translated plane section of a volume"""
        plane = BoundaryPlane(rotation=rotx(-N.pi/6)[:3,:3], location=N.r_[0., 1., 0.])
        N.testing.assert_array_equal(plane.in_bounds(self.points),
                                     [False, True, True])


class TestSphereBoundingRect(unittest.TestCase):
    def setUp(self):
        # Create some surfaces to intersect with spheres.
        self.at_origin_xy = N.eye(4)
        self.at_origin_yz = generate_transform(N.r_[0, 1, 0], N.pi/2, \
            N.c_[[0,0,0]])
        self.at_origin_slant = generate_transform(N.r_[0, 1, 0], N.pi/4, \
            N.c_[[0,0,0]])

        self.parallel_xy = generate_transform(N.r_[1, 0, 0], 0, N.c_[[0, 0, 1]])
        self.parallel_yz = self.at_origin_yz.copy()
        self.parallel_yz[0,3] += 1
        self.parallel_slanted = self.at_origin_slant.copy()
        self.parallel_slanted[[0,2],3] += N.sqrt(0.5)

    def test_sphere_at_origin(self):
        """For a bounding sphere at the origin, the right bounding rects are returned"""
        sphere = BoundarySphere(radius=2.)

        extents = sphere.bounding_rect_for_plane(self.at_origin_xy)
        self.failUnlessEqual(extents, (-2., 2., -2., 2.))

        extents = sphere.bounding_rect_for_plane(self.at_origin_yz)
        self.failUnlessEqual(extents, (-2., 2., -2., 2.))

        extents = sphere.bounding_rect_for_plane(self.at_origin_slant)
        self.failUnlessEqual(extents, (-2., 2., -2., 2.))

        sqrt_3 = N.sqrt(3)
        extents = sphere.bounding_rect_for_plane(self.parallel_xy)
        self.failUnlessEqual(extents, (-sqrt_3, sqrt_3, -sqrt_3, sqrt_3))

        extents = sphere.bounding_rect_for_plane(self.parallel_yz)
        self.failUnlessEqual(extents, (-sqrt_3, sqrt_3, -sqrt_3, sqrt_3))

        extents = sphere.bounding_rect_for_plane(self.parallel_slanted)
        N.testing.assert_array_almost_equal(extents, \
            (-sqrt_3, sqrt_3, -sqrt_3, sqrt_3))

    def test_sphere_moved(self):
        """For a bounding sphere at 1,0,0 the right bounding rects are returned"""
        sphere = BoundarySphere(radius=2., location=N.r_[1,0,0])

        extents = sphere.bounding_rect_for_plane(self.at_origin_xy)
        self.failUnlessEqual(extents, (-1., 3., -2., 2.))

        sqrt_3 = N.sqrt(3)
        extents = sphere.bounding_rect_for_plane(self.at_origin_yz)
        self.failUnlessEqual(extents, (-sqrt_3, sqrt_3, -sqrt_3, sqrt_3))

        sqrt_h = N.sqrt(0.5)
        sqrt_35 = N.sqrt(3.5)
        extents = sphere.bounding_rect_for_plane(self.at_origin_slant)
        self.failUnlessEqual(extents, \
            (sqrt_h - sqrt_35, sqrt_h + sqrt_35, -sqrt_35, sqrt_35))

        extents = sphere.bounding_rect_for_plane(self.parallel_xy)
        self.failUnlessEqual(extents, (1 - sqrt_3, 1 + sqrt_3, -sqrt_3, sqrt_3))

        extents = sphere.bounding_rect_for_plane(self.parallel_yz)
        self.failUnlessEqual(extents, (-2., 2., -2., 2.))

        Reff = N.sqrt(4 - (1 - N.sqrt(0.5))**2)
        extents = sphere.bounding_rect_for_plane(self.parallel_slanted)
        N.testing.assert_array_almost_equal(extents, \
            (sqrt_h - Reff, sqrt_h + Reff, -Reff, Reff))
yosefm/tracer
tests/test_boundary_surface.py
Python
gpl-3.0
4,024
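A usage sketch mirroring the tests above: the bounding volumes answer vectorized membership queries over an (n, 3) array of points.

# Sketch: query a bounding sphere the same way the tests do.
import numpy as N
from tracer.boundary_shape import BoundarySphere

sphere = BoundarySphere(radius=2.)
points = N.array([[0., 0., 0.], [1., 1., 1.], [2., 2., 2.]])
print sphere.in_bounds(points)   # -> [ True  True False]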
# -*- coding: UTF-8 -*- # ============================================================================= # Copyright (C) 2012 Brad Hards <[email protected]> # # Based on wms.py, which has the following copyright statement: # Copyright (c) 2004, 2006 Sean C. Gillies # Copyright (c) 2005 Nuxeo SARL <http://nuxeo.com> # # Authors : Sean Gillies <[email protected]> # Julien Anguenot <[email protected]> # # Contact email: [email protected] # ============================================================================= """ Abstract -------- The wmts module of the OWSlib package provides client-side functionality for fetching tiles from an OGC Web Map Tile Service (WMTS) Disclaimer ---------- PLEASE NOTE: the owslib wmts module should be considered in early-beta state: it has been tested against only one WMTS server (NASA EODSIS). More extensive testing is needed and feedback (to [email protected]) would be appreciated. """ from __future__ import (absolute_import, division, print_function) from random import randint import warnings import six from six.moves import filter try: # Python 3 from urllib.parse import (urlencode, urlparse, urlunparse, parse_qs, ParseResult) except ImportError: # Python 2 from urllib import urlencode from urlparse import urlparse, urlunparse, parse_qs, ParseResult from .etree import etree from .util import openURL, testXMLValue, getXMLInteger from .fgdc import Metadata from .iso import MD_Metadata from .ows import ServiceProvider, ServiceIdentification, OperationsMetadata _OWS_NS = '{http://www.opengis.net/ows/1.1}' _WMTS_NS = '{http://www.opengis.net/wmts/1.0}' _XLINK_NS = '{http://www.w3.org/1999/xlink}' _ABSTRACT_TAG = _OWS_NS + 'Abstract' _IDENTIFIER_TAG = _OWS_NS + 'Identifier' _LOWER_CORNER_TAG = _OWS_NS + 'LowerCorner' _OPERATIONS_METADATA_TAG = _OWS_NS + 'OperationsMetadata' _SERVICE_IDENTIFICATION_TAG = _OWS_NS + 'ServiceIdentification' _SERVICE_PROVIDER_TAG = _OWS_NS + 'ServiceProvider' _SUPPORTED_CRS_TAG = _OWS_NS + 'SupportedCRS' _TITLE_TAG = _OWS_NS + 'Title' _UPPER_CORNER_TAG = _OWS_NS + 'UpperCorner' _WGS84_BOUNDING_BOX_TAG = _OWS_NS + 'WGS84BoundingBox' _CONTENTS_TAG = _WMTS_NS + 'Contents' _FORMAT_TAG = _WMTS_NS + 'Format' _INFO_FORMAT_TAG = _WMTS_NS + 'InfoFormat' _LAYER_TAG = _WMTS_NS + 'Layer' _LAYER_REF_TAG = _WMTS_NS + 'LayerRef' _MATRIX_HEIGHT_TAG = _WMTS_NS + 'MatrixHeight' _MATRIX_WIDTH_TAG = _WMTS_NS + 'MatrixWidth' _MAX_TILE_COL_TAG = _WMTS_NS + 'MaxTileCol' _MAX_TILE_ROW_TAG = _WMTS_NS + 'MaxTileRow' _MIN_TILE_COL_TAG = _WMTS_NS + 'MinTileCol' _MIN_TILE_ROW_TAG = _WMTS_NS + 'MinTileRow' _RESOURCE_URL_TAG = _WMTS_NS + 'ResourceURL' _SCALE_DENOMINATOR_TAG = _WMTS_NS + 'ScaleDenominator' _SERVICE_METADATA_URL_TAG = _WMTS_NS + 'ServiceMetadataURL' _STYLE_TAG = _WMTS_NS + 'Style' _THEME_TAG = _WMTS_NS + 'Theme' _THEMES_TAG = _WMTS_NS + 'Themes' _TILE_HEIGHT_TAG = _WMTS_NS + 'TileHeight' _TILE_MATRIX_SET_LINK_TAG = _WMTS_NS + 'TileMatrixSetLink' _TILE_MATRIX_SET_TAG = _WMTS_NS + 'TileMatrixSet' _TILE_MATRIX_SET_LIMITS_TAG = _WMTS_NS + 'TileMatrixSetLimits' _TILE_MATRIX_LIMITS_TAG = _WMTS_NS + 'TileMatrixLimits' _TILE_MATRIX_TAG = _WMTS_NS + 'TileMatrix' _TILE_WIDTH_TAG = _WMTS_NS + 'TileWidth' _TOP_LEFT_CORNER_TAG = _WMTS_NS + 'TopLeftCorner' _HREF_TAG = _XLINK_NS + 'href' class ServiceException(Exception): """WMTS ServiceException Attributes: message -- short error message xml -- full xml error message from server """ def __init__(self, message, xml): self.message = message self.xml = xml def __str__(self): return repr(self.message) class 
CapabilitiesError(Exception): pass class WebMapTileService(object): """Abstraction for OGC Web Map Tile Service (WMTS). Implements IWebMapService. """ def __getitem__(self, name): '''Check contents dictionary to allow dict like access to service layers''' if name in self.__getattribute__('contents'): return self.__getattribute__('contents')[name] else: raise KeyError("No content named %s" % name) def __init__(self, url, version='1.0.0', xml=None, username=None, password=None, parse_remote_metadata=False, vendor_kwargs=None): """Initialize. Parameters ---------- url : string Base URL for the WMTS service. version : string Optional WMTS version. Defaults to '1.0.0'. xml : string Optional XML content to use as the content for the initial GetCapabilities request. Typically only used for testing. username : string Optional user name for authentication. password : string Optional password for authentication. parse_remote_metadata: string Currently unused. vendor_kwargs : dict Optional vendor-specific parameters to be included in all requests. """ self.url = url self.username = username self.password = password self.version = version self.vendor_kwargs = vendor_kwargs self._capabilities = None # Authentication handled by Reader reader = WMTSCapabilitiesReader(self.version, url=self.url, un=self.username, pw=self.password) if xml: # read from stored xml self._capabilities = reader.readString(xml) else: # read from server self._capabilities = reader.read(self.url, self.vendor_kwargs) # Avoid building capabilities metadata if the response is a # ServiceExceptionReport. # TODO: check if this needs a namespace se = self._capabilities.find('ServiceException') if se is not None: err_message = str(se.text).strip() raise ServiceException(err_message, xml) # build metadata objects self._buildMetadata(parse_remote_metadata) def _getcapproperty(self): if not self._capabilities: reader = WMTSCapabilitiesReader( self.version, url=self.url, un=self.username, pw=self.password ) xml = reader.read(self.url, self.vendor_kwargs) self._capabilities = ServiceMetadata(xml) return self._capabilities def _buildMetadata(self, parse_remote_metadata=False): ''' set up capabilities metadata objects ''' # serviceIdentification metadata serviceident = self._capabilities.find(_SERVICE_IDENTIFICATION_TAG) self.identification = ServiceIdentification(serviceident) # serviceProvider metadata serviceprov = self._capabilities.find(_SERVICE_PROVIDER_TAG) self.provider = ServiceProvider(serviceprov) # serviceOperations metadata self.operations = [] serviceop = self._capabilities.find(_OPERATIONS_METADATA_TAG) # REST only WMTS does not have any Operations if serviceop is not None: for elem in serviceop[:]: self.operations.append(OperationsMetadata(elem)) # serviceContents metadata: our assumption is that services use # a top-level layer as a metadata organizer, nothing more. 
self.contents = {} caps = self._capabilities.find(_CONTENTS_TAG) def gather_layers(parent_elem, parent_metadata): for index, elem in enumerate(parent_elem.findall(_LAYER_TAG)): cm = ContentMetadata( elem, parent=parent_metadata, index=index+1, parse_remote_metadata=parse_remote_metadata) if cm.id: if cm.id in self.contents: raise KeyError('Content metadata for layer "%s" ' 'already exists' % cm.id) self.contents[cm.id] = cm gather_layers(elem, cm) gather_layers(caps, None) self.tilematrixsets = {} for elem in caps.findall(_TILE_MATRIX_SET_TAG): tms = TileMatrixSet(elem) if tms.identifier: if tms.identifier in self.tilematrixsets: raise KeyError('TileMatrixSet with identifier "%s" ' 'already exists' % tms.identifier) self.tilematrixsets[tms.identifier] = tms self.themes = {} for elem in self._capabilities.findall(_THEMES_TAG + '/' + _THEME_TAG): theme = Theme(elem) if theme.identifier: if theme.identifier in self.themes: raise KeyError('Theme with identifier "%s" already exists' % theme.identifier) self.themes[theme.identifier] = theme serviceMetadataURL = self._capabilities.find(_SERVICE_METADATA_URL_TAG) if serviceMetadataURL is not None: self.serviceMetadataURL = serviceMetadataURL.attrib[_HREF_TAG] else: self.serviceMetadataURL = None def items(self): '''supports dict-like items() access''' items = [] for item in self.contents: items.append((item, self.contents[item])) return items def buildTileRequest(self, layer=None, style=None, format=None, tilematrixset=None, tilematrix=None, row=None, column=None, **kwargs): """Return the URL-encoded parameters for a GetTile request. Parameters ---------- layer : string Content layer name. style : string Optional style name. Defaults to the first style defined for the relevant layer in the GetCapabilities response. format : string Optional output image format, such as 'image/jpeg'. Defaults to the first format defined for the relevant layer in the GetCapabilities response. tilematrixset : string Optional name of tile matrix set to use. Defaults to the first tile matrix set defined for the relevant layer in the GetCapabilities response. tilematrix : string Name of the tile matrix to use. row : integer Row index of tile to request. column : integer Column index of tile to request. **kwargs : extra arguments anything else e.g. vendor specific parameters Example ------- >>> url = 'http://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi' >>> wmts = WebMapTileService(url) >>> wmts.buildTileRequest(layer='VIIRS_CityLights_2012', ... tilematrixset='EPSG4326_500m', ... tilematrix='6', ... 
row=4, column=4) 'SERVICE=WMTS&REQUEST=GetTile&VERSION=1.0.0&\ LAYER=VIIRS_CityLights_2012&STYLE=default&TILEMATRIXSET=EPSG4326_500m&\ TILEMATRIX=6&TILEROW=4&TILECOL=4&FORMAT=image%2Fjpeg' """ if (layer is None): raise ValueError("layer is mandatory (cannot be None)") if style is None: style = list(self[layer].styles.keys())[0] if format is None: format = self[layer].formats[0] if tilematrixset is None: tilematrixset = sorted(self[layer].tilematrixsetlinks.keys())[0] if tilematrix is None: msg = 'tilematrix (zoom level) is mandatory (cannot be None)' raise ValueError(msg) if row is None: raise ValueError("row is mandatory (cannot be None)") if column is None: raise ValueError("column is mandatory (cannot be None)") request = list() request.append(('SERVICE', 'WMTS')) request.append(('REQUEST', 'GetTile')) request.append(('VERSION', '1.0.0')) request.append(('LAYER', layer)) request.append(('STYLE', style)) request.append(('TILEMATRIXSET', tilematrixset)) request.append(('TILEMATRIX', tilematrix)) request.append(('TILEROW', str(row))) request.append(('TILECOL', str(column))) request.append(('FORMAT', format)) for key, value in six.iteritems(kwargs): request.append((key, value)) data = urlencode(request, True) return data def buildTileResource(self, layer=None, style=None, format=None, tilematrixset=None, tilematrix=None, row=None, column=None, **kwargs): tileresourceurls = [] for resourceURL in self[layer].resourceURLs: if resourceURL['resourceType'] == 'tile': tileresourceurls.append(resourceURL) numres = len(tileresourceurls) if numres > 0: # choose random ResourceURL if more than one available resindex = randint(0, numres - 1) resurl = tileresourceurls[resindex]['template'] if tilematrixset: resurl = resurl.replace('{TileMatrixSet}', tilematrixset) resurl = resurl.replace('{TileMatrix}', tilematrix) resurl = resurl.replace('{TileRow}', row) resurl = resurl.replace('{TileCol}', column) if style: resurl = resurl.replace('{Style}', style) return resurl return None @property def restonly(self): # if OperationsMetadata is missing completely --> use REST if len(self.operations) == 0: return True # check if KVP or RESTful are available restenc = False kvpenc = False for operation in self.operations: if operation.name == 'GetTile': for method in operation.methods: if 'kvp' in str(method['constraints']).lower(): kvpenc = True if 'rest' in str(method['constraints']).lower(): restenc = True # if KVP is available --> use KVP if kvpenc: return False # if the operation has no constraint --> use KVP if not kvpenc and not restenc: return False return restenc def gettile(self, base_url=None, layer=None, style=None, format=None, tilematrixset=None, tilematrix=None, row=None, column=None, **kwargs): """Return a tile from the WMTS. Returns the tile image as a file-like object. Parameters ---------- base_url : string Optional URL for request submission. Defaults to the URL of the GetTile operation as declared in the GetCapabilities response. layer : string Content layer name. style : string Optional style name. Defaults to the first style defined for the relevant layer in the GetCapabilities response. format : string Optional output image format, such as 'image/jpeg'. Defaults to the first format defined for the relevant layer in the GetCapabilities response. tilematrixset : string Optional name of tile matrix set to use. Defaults to the first tile matrix set defined for the relevant layer in the GetCapabilities response. tilematrix : string Name of the tile matrix to use. 
row : integer Row index of tile to request. column : integer Column index of tile to request. **kwargs : extra arguments anything else e.g. vendor specific parameters Example ------- >>> url = 'http://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi' >>> wmts = WebMapTileService(url) >>> img = wmts.gettile(layer='VIIRS_CityLights_2012',\ tilematrixset='EPSG4326_500m',\ tilematrix='6',\ row=4, column=4) >>> out = open('tile.jpg', 'wb') >>> bytes_written = out.write(img.read()) >>> out.close() """ vendor_kwargs = self.vendor_kwargs or {} vendor_kwargs.update(kwargs) # REST only WMTS if self.restonly: resurl = self.buildTileResource( layer, style, format, tilematrixset, tilematrix, row, column, **vendor_kwargs) u = openURL(resurl, username=self.username, password=self.password) return u # KVP implemetation data = self.buildTileRequest(layer, style, format, tilematrixset, tilematrix, row, column, **vendor_kwargs) if base_url is None: base_url = self.url try: methods = self.getOperationByName('GetTile').methods get_verbs = [x for x in methods if x.get('type').lower() == 'get'] if len(get_verbs) > 1: # Filter by constraints base_url = next( x for x in filter( list, ([pv.get('url') for const in pv.get('constraints') if 'kvp' in [x.lower() for x in const.values]] for pv in get_verbs if pv.get('constraints'))))[0] elif len(get_verbs) == 1: base_url = get_verbs[0].get('url') except StopIteration: pass u = openURL(base_url, data, username=self.username, password=self.password) # check for service exceptions, and return if u.info()['Content-Type'] == 'application/vnd.ogc.se_xml': se_xml = u.read() se_tree = etree.fromstring(se_xml) err_message = six.text_type(se_tree.find('ServiceException').text) raise ServiceException(err_message.strip(), se_xml) return u def getServiceXML(self): xml = None if self._capabilities is not None: xml = etree.tostring(self._capabilities) return xml def getfeatureinfo(self): raise NotImplementedError def getOperationByName(self, name): """Return a named content item.""" for item in self.operations: if item.name == name: return item raise KeyError("No operation named %s" % name) class TileMatrixSet(object): '''Holds one TileMatrixSet''' def __init__(self, elem): if elem.tag != _TILE_MATRIX_SET_TAG: raise ValueError('%s should be a TileMatrixSet' % (elem,)) self.identifier = testXMLValue(elem.find(_IDENTIFIER_TAG)).strip() self.crs = testXMLValue(elem.find(_SUPPORTED_CRS_TAG)).strip() if self.crs is None or self.identifier is None: raise ValueError('%s incomplete TileMatrixSet' % (elem,)) self.tilematrix = {} for tilematrix in elem.findall(_TILE_MATRIX_TAG): tm = TileMatrix(tilematrix) if tm.identifier: if tm.identifier in self.tilematrix: raise KeyError('TileMatrix with identifier "%s" ' 'already exists' % tm.identifier) self.tilematrix[tm.identifier] = tm class TileMatrix(object): '''Holds one TileMatrix''' def __init__(self, elem): if elem.tag != _TILE_MATRIX_TAG: raise ValueError('%s should be a TileMatrix' % (elem,)) self.identifier = testXMLValue(elem.find(_IDENTIFIER_TAG)).strip() sd = testXMLValue(elem.find(_SCALE_DENOMINATOR_TAG)) if sd is None: raise ValueError('%s is missing ScaleDenominator' % (elem,)) self.scaledenominator = float(sd) tl = testXMLValue(elem.find(_TOP_LEFT_CORNER_TAG)) if tl is None: raise ValueError('%s is missing TopLeftCorner' % (elem,)) (lon, lat) = tl.split(" ") self.topleftcorner = (float(lon), float(lat)) width = testXMLValue(elem.find(_TILE_WIDTH_TAG)) height = testXMLValue(elem.find(_TILE_HEIGHT_TAG)) if (width is None) or (height is 
None): msg = '%s is missing TileWidth and/or TileHeight' % (elem,) raise ValueError(msg) self.tilewidth = int(width) self.tileheight = int(height) mw = testXMLValue(elem.find(_MATRIX_WIDTH_TAG)) mh = testXMLValue(elem.find(_MATRIX_HEIGHT_TAG)) if (mw is None) or (mh is None): msg = '%s is missing MatrixWidth and/or MatrixHeight' % (elem,) raise ValueError(msg) self.matrixwidth = int(mw) self.matrixheight = int(mh) class Theme: """ Abstraction for a WMTS theme """ def __init__(self, elem): if elem.tag != _THEME_TAG: raise ValueError('%s should be a Theme' % (elem,)) self.identifier = testXMLValue(elem.find(_IDENTIFIER_TAG)).strip() title = testXMLValue(elem.find(_TITLE_TAG)) if title is not None: self.title = title.strip() else: self.title = None abstract = testXMLValue(elem.find(_ABSTRACT_TAG)) if abstract is not None: self.abstract = abstract.strip() else: self.abstract = None self.layerRefs = [] layerRefs = elem.findall(_LAYER_REF_TAG) for layerRef in layerRefs: if layerRef.text is not None: self.layerRefs.append(layerRef.text) class TileMatrixLimits(object): """ Represents a WMTS TileMatrixLimits element. """ def __init__(self, elem): if elem.tag != _TILE_MATRIX_LIMITS_TAG: raise ValueError('%s should be a TileMatrixLimits' % elem) tm = elem.find(_TILE_MATRIX_TAG) if tm is None: raise ValueError('Missing TileMatrix in %s' % elem) self.tilematrix = tm.text.strip() self.mintilerow = getXMLInteger(elem, _MIN_TILE_ROW_TAG) self.maxtilerow = getXMLInteger(elem, _MAX_TILE_ROW_TAG) self.mintilecol = getXMLInteger(elem, _MIN_TILE_COL_TAG) self.maxtilecol = getXMLInteger(elem, _MAX_TILE_COL_TAG) def __repr__(self): fmt = ('<TileMatrixLimits: {self.tilematrix}' ', minRow={self.mintilerow}, maxRow={self.maxtilerow}' ', minCol={self.mintilecol}, maxCol={self.maxtilecol}>') return fmt.format(self=self) class TileMatrixSetLink(object): """ Represents a WMTS TileMatrixSetLink element. """ @staticmethod def from_elements(link_elements): """ Return a list of TileMatrixSetLink instances derived from the given list of <TileMatrixSetLink> XML elements. """ # NB. The WMTS spec is contradictory re. the multiplicity # relationships between Layer and TileMatrixSetLink, and # TileMatrixSetLink and tileMatrixSet (URI). # Try to figure out which model has been used by the server. 
links = [] for link_element in link_elements: matrix_set_elements = link_element.findall(_TILE_MATRIX_SET_TAG) if len(matrix_set_elements) == 0: raise ValueError('Missing TileMatrixSet in %s' % link_element) elif len(matrix_set_elements) > 1: set_limits_elements = link_element.findall( _TILE_MATRIX_SET_LIMITS_TAG) if set_limits_elements: raise ValueError('Multiple instances of TileMatrixSet' ' plus TileMatrixSetLimits in %s' % link_element) for matrix_set_element in matrix_set_elements: uri = matrix_set_element.text.strip() links.append(TileMatrixSetLink(uri)) else: uri = matrix_set_elements[0].text.strip() tilematrixlimits = {} path = '%s/%s' % (_TILE_MATRIX_SET_LIMITS_TAG, _TILE_MATRIX_LIMITS_TAG) for limits_element in link_element.findall(path): tml = TileMatrixLimits(limits_element) if tml.tilematrix: if tml.tilematrix in tilematrixlimits: msg = ('TileMatrixLimits with tileMatrix "%s" ' 'already exists' % tml.tilematrix) raise KeyError(msg) tilematrixlimits[tml.tilematrix] = tml links.append(TileMatrixSetLink(uri, tilematrixlimits)) return links def __init__(self, tilematrixset, tilematrixlimits=None): self.tilematrixset = tilematrixset if tilematrixlimits is None: self.tilematrixlimits = {} else: self.tilematrixlimits = tilematrixlimits def __repr__(self): fmt = ('<TileMatrixSetLink: {self.tilematrixset}' ', tilematrixlimits={{...}}>') return fmt.format(self=self) class ContentMetadata: """ Abstraction for WMTS layer metadata. Implements IContentMetadata. """ def __init__(self, elem, parent=None, index=0, parse_remote_metadata=False): if elem.tag != _LAYER_TAG: raise ValueError('%s should be a Layer' % (elem,)) self.parent = parent if parent: self.index = "%s.%d" % (parent.index, index) else: self.index = str(index) self.id = self.name = testXMLValue(elem.find(_IDENTIFIER_TAG)) # title is mandatory property self.title = None title = testXMLValue(elem.find(_TITLE_TAG)) if title is not None: self.title = title.strip() self.abstract = testXMLValue(elem.find(_ABSTRACT_TAG)) # bboxes b = elem.find(_WGS84_BOUNDING_BOX_TAG) self.boundingBox = None if b is not None: lc = b.find(_LOWER_CORNER_TAG) uc = b.find(_UPPER_CORNER_TAG) ll = [float(s) for s in lc.text.split()] ur = [float(s) for s in uc.text.split()] self.boundingBoxWGS84 = (ll[0], ll[1], ur[0], ur[1]) # TODO: there is probably some more logic here, and it should # probably be shared code self._tilematrixsets = [f.text.strip() for f in elem.findall(_TILE_MATRIX_SET_LINK_TAG + '/' + _TILE_MATRIX_SET_TAG)] link_elements = elem.findall(_TILE_MATRIX_SET_LINK_TAG) tile_matrix_set_links = TileMatrixSetLink.from_elements(link_elements) self.tilematrixsetlinks = {} for tmsl in tile_matrix_set_links: if tmsl.tilematrixset: if tmsl.tilematrixset in self.tilematrixsetlinks: raise KeyError('TileMatrixSetLink with tilematrixset "%s"' ' already exists' % tmsl.tilematrixset) self.tilematrixsetlinks[tmsl.tilematrixset] = tmsl self.resourceURLs = [] for resourceURL in elem.findall(_RESOURCE_URL_TAG): resource = {} for attrib in ['format', 'resourceType', 'template']: resource[attrib] = resourceURL.attrib[attrib] self.resourceURLs.append(resource) # Styles self.styles = {} for s in elem.findall(_STYLE_TAG): style = {} isdefaulttext = s.attrib.get('isDefault') style['isDefault'] = (isdefaulttext == "true") identifier = s.find(_IDENTIFIER_TAG) if identifier is None: raise ValueError('%s missing identifier' % (s,)) title = s.find(_TITLE_TAG) if title is not None: style['title'] = title.text self.styles[identifier.text] = style self.formats = [f.text for f 
                        in elem.findall(_FORMAT_TAG)]
        self.infoformats = [f.text for f in elem.findall(_INFO_FORMAT_TAG)]
        self.layers = []
        for child in elem.findall(_LAYER_TAG):
            self.layers.append(ContentMetadata(child, self))

    @property
    def tilematrixsets(self):
        # NB. This attribute has been superseded by the
        # `tilematrixsetlinks` attribute defined below, but is included
        # for now to provide continuity.
        warnings.warn("The 'tilematrixsets' attribute has been deprecated"
                      " and will be removed in a future version of OWSLib."
                      " Please use 'tilematrixsetlinks' instead.")
        return self._tilematrixsets

    def __str__(self):
        return 'Layer Name: %s Title: %s' % (self.name, self.title)


class WMTSCapabilitiesReader:
    """Read and parse capabilities document into a lxml.etree infoset
    """

    def __init__(self, version='1.0.0', url=None, un=None, pw=None):
        """Initialize"""
        self.version = version
        self._infoset = None
        self.url = url
        self.username = un
        self.password = pw

    def capabilities_url(self, service_url, vendor_kwargs=None):
        """Return a capabilities url
        """
        # Ensure the 'service', 'request', and 'version' parameters,
        # and any vendor-specific parameters are included in the URL.
        pieces = urlparse(service_url)
        args = parse_qs(pieces.query)
        if 'service' not in args:
            args['service'] = 'WMTS'
        if 'request' not in args:
            args['request'] = 'GetCapabilities'
        if 'version' not in args:
            args['version'] = self.version
        if vendor_kwargs:
            args.update(vendor_kwargs)
        query = urlencode(args, doseq=True)
        pieces = ParseResult(pieces.scheme, pieces.netloc,
                             pieces.path, pieces.params,
                             query, pieces.fragment)
        return urlunparse(pieces)

    def read(self, service_url, vendor_kwargs=None):
        """Get and parse a WMTS capabilities document, returning an
        elementtree instance

        service_url is the base url, to which is appended the service,
        version, and request parameters. Optional vendor-specific
        parameters can also be supplied as a dict.
        """
        getcaprequest = self.capabilities_url(service_url, vendor_kwargs)

        # now split it up again to use the generic openURL function...
        spliturl = getcaprequest.split('?')
        u = openURL(spliturl[0], spliturl[1], method='Get',
                    username=self.username, password=self.password)
        return etree.fromstring(u.read())

    def readString(self, st):
        """Parse a WMTS capabilities document, returning an
        elementtree instance

        string should be an XML capabilities document
        """
        if not isinstance(st, str) and not isinstance(st, bytes):
            msg = 'String must be of type string or bytes, not %s' % type(st)
            raise ValueError(msg)
        return etree.fromstring(st)
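
# A minimal usage sketch, not part of the original module: the service URL
# below is a placeholder, and capabilities_url() only builds the request
# URL, so no network access is needed for this call.
if __name__ == '__main__':
    reader = WMTSCapabilitiesReader(version='1.0.0')
    url = reader.capabilities_url('http://example.com/wmts')
    # The query now carries service=WMTS, request=GetCapabilities and
    # version=1.0.0 (parameter order may vary).
    print(url)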
Jenselme/OWSLib
owslib/wmts.py
Python
bsd-3-clause
31,462
# Opus/UrbanSim urban simulation software. # Copyright (C) 2005-2009 University of Washington # See opus_core/LICENSE from opus_core.variables.variable import Variable from variable_functions import my_attribute_label from opus_core.simulation_state import SimulationState from numpy import maximum, ma, logical_not class building_age_masked(Variable): """The age of a building in this gridcell, computed by subtracting the year built from the current simulation year. Entries that have invalid year_built are masked.""" year_built = "year_built" def dependencies(self): return [my_attribute_label(self.year_built), my_attribute_label("has_valid_year_built")] def compute(self, dataset_pool): current_year = SimulationState().get_current_time() if current_year == None: raise StandardError, "'SimulationState().get_current_time()' returns None." urbansim_constant = dataset_pool.get_dataset('urbansim_constant') is_year_built = self.get_dataset().get_attribute("has_valid_year_built") year_built_values = self.get_dataset().get_attribute(self.year_built) building_age = maximum(0, current_year - year_built_values) building_age = ma.masked_where( logical_not(is_year_built), building_age) return building_age def post_check(self, values, dataset_pool): self.do_check("x >= 0", values) from opus_core.tests import opus_unittest from opus_core.tests.utils.variable_tester import VariableTester from numpy import array class Tests(opus_unittest.OpusTestCase): def test_my_inputs(self): tester = VariableTester( __file__, package_order=['urbansim'], test_data={ 'gridcell':{ 'grid_id': array([1,2,3,4]), 'year_built': array([1995, 2000, 2006, 200]) }, 'urbansim_constant':{ 'absolute_min_year': array([1800]) } } ) SimulationState().set_current_time(2005) should_be = array([10, 5, 0, -999]) tester.test_is_equal_for_variable_defined_by_this_module(self, should_be) if __name__=='__main__': opus_unittest.main()
christianurich/VIBe2UrbanSim
3rdparty/opus/src/urbansim/gridcell/building_age_masked.py
Python
gpl-2.0
2,316
# Copyright (c) 2006-2009 The Trustees of Indiana University. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # - Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # - Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # - Neither the Indiana University nor the names of its contributors may be used # to endorse or promote products derived from this software without specific # prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import corepy.spre.spe as spe import corepy.arch.spu.isa as spu import corepy.lib.extarray as extarray def load_word(code, r_target, word, clear = False, zero = True): """If r0 is not set to 0, the zero parameter should be set to False""" #if zero and (-512 < word < 511): # code.add(spu.ai(r_target, code.r_zero, word)) #elif (word & 0x7FFF) == word: # code.add(spu.il(r_target, word)) if (word & 0x3FFFF) == word: code.add(spu.ila(r_target, word)) else: code.add(spu.ilhu(r_target, (word & 0xFFFF0000) >> 16)) if word & 0xFFFF != 0: code.add(spu.iohl(r_target, (word & 0xFFFF))) if clear: code.add(spu.shlqbyi(r_target, r_target, 12)) return def load_float(code, reg, val): data = extarray.extarray('f', (val,)) data.change_type('I') return load_word(code, reg, data[0]) def vector_from_array(code, r_target, a): """ Generate the instructions to fill a vector register with the values from an array. """ prgm = code.prgm r0 = r_target r1 = prgm.acquire_register() r2 = prgm.acquire_register() r3 = prgm.acquire_register() load_word(code, r0, a[0], True) load_word(code, r1, a[1], True) code.add(spu.rotqbyi(r1, r1, 12)) # rotate qw by bytes load_word(code, r2, a[2], True) code.add(spu.rotqbyi(r2, r2, 8)) load_word(code, r3, a[3], True) code.add(spu.rotqbyi(r3, r3, 4)) code.add(spu.a(r0, r0, r1)) code.add(spu.a(r0, r0, r2)) code.add(spu.a(r0, r0, r3)) prgm.release_register(r1) prgm.release_register(r2) prgm.release_register(r3) return def set_slot_value(code, reg, slot, value): """ Set the value in reg[slot] with value. If value is a register, use the value from the preferred slot (value[0]). If value is a constant, load it into reg[slot], preserving the values in the other slots. 
""" prgm = code.prgm if slot not in [0,1,2,3]: raise Exception("Invalid SIMD slot: " + slot) mask = prgm.acquire_register() vector_from_array(code, mask, [0xFFFFFFFF, 0, 0, 0]) if not issubclass(type(value), (spe.Register, spe.Variable)): r_value = prgm.acquire_register() load_word(code, r_value, value) else: r_value = value code.add(spu.rotqbyi(reg, reg, slot * 4)) code.add(spu.selb(reg, reg, r_value, mask)) code.add(spu.rotqbyi(reg, reg, (4 - slot) * 4)) prgm.release_register(mask) if not issubclass(type(value), (spe.Register, spe.Variable)): prgm.release_register(r_value) return def get_param_reg(code, param, dict, copy = True): """ Take a parameter given to a function, which may be a value or a register containing that value, and return a register containing the value. If copy is True, a new register is always returned. Otherwise if a register was passed in, that register is returned unchanged. dict is a dictionary used internally between get_param_reg() and put_param_reg() to keep track of whether registers have been allocated for parameters. A function should use one (initially empty) dictionary for all of its parameters. """ reg = None if isinstance(param, (spe.Register, spe.Variable)): if copy == True: # TODO - behave differently if at an even/odd spot reg = code.prgm.acquire_register() code.add(spu.ori(reg, param, 0)) dict[reg] = True else: reg = param dict[reg] = False else: # TODO - check types? reg = code.prgm.acquire_register() load_word(code, reg, param) dict[reg] = True return reg def put_param_reg(code, reg, dict): """Check a register containing a parameter, release the register if the provided dictionary indicates it was acquired by get_param_reg()/ """ if dict[reg] == True: code.prgm.release_register(reg) # ------------------------------------------------------------ # Unit Test Code # ------------------------------------------------------------ def TestSetSlotValue(): import corepy.arch.spu.platform as synspu import corepy.arch.spu.types.spu_types as var import corepy.arch.spu.lib.dma as dma prgm = synspu.Program() code = prgm.get_stream() proc = synspu.Processor() spu.set_active_code(code) a = var.SignedWord(0x11) b = var.SignedWord(0x13) r = var.SignedWord(0xFFFFFFFF) set_slot_value(code, r, 0, 0x10) set_slot_value(code, r, 1, a) set_slot_value(code, r, 2, 0x12) set_slot_value(code, r, 3, b) for i in range(4): spu.wrch(r, dma.SPU_WrOutMbox) spu.rotqbyi(r, r, 4) prgm.add(code) spe_id = proc.execute(prgm, async = True) for i in range(4): while synspu.spu_exec.stat_out_mbox(spe_id) == 0: pass result = synspu.spu_exec.read_out_mbox(spe_id) assert(result == (i + 0x10)) proc.join(spe_id) return if __name__=='__main__': TestSetSlotValue()
matthiaskramm/corepy
corepy/arch/spu/lib/util.py
Python
bsd-3-clause
6,974
"""Tests of monomial orderings. """ from sympy.polys.orderings import ( monomial_key, lex, grlex, grevlex, ilex, igrlex, igrevlex, LexOrder, InverseOrder, ProductOrder, build_product_order, ) from sympy.abc import x, y, z, t from sympy.core import S from sympy.utilities.pytest import raises def test_lex_order(): assert lex((1, 2, 3)) == (1, 2, 3) assert str(lex) == 'lex' assert lex((1, 2, 3)) == lex((1, 2, 3)) assert lex((2, 2, 3)) > lex((1, 2, 3)) assert lex((1, 3, 3)) > lex((1, 2, 3)) assert lex((1, 2, 4)) > lex((1, 2, 3)) assert lex((0, 2, 3)) < lex((1, 2, 3)) assert lex((1, 1, 3)) < lex((1, 2, 3)) assert lex((1, 2, 2)) < lex((1, 2, 3)) assert lex.is_global is True assert lex == LexOrder() assert lex != grlex def test_grlex_order(): assert grlex((1, 2, 3)) == (6, (1, 2, 3)) assert str(grlex) == 'grlex' assert grlex((1, 2, 3)) == grlex((1, 2, 3)) assert grlex((2, 2, 3)) > grlex((1, 2, 3)) assert grlex((1, 3, 3)) > grlex((1, 2, 3)) assert grlex((1, 2, 4)) > grlex((1, 2, 3)) assert grlex((0, 2, 3)) < grlex((1, 2, 3)) assert grlex((1, 1, 3)) < grlex((1, 2, 3)) assert grlex((1, 2, 2)) < grlex((1, 2, 3)) assert grlex((2, 2, 3)) > grlex((1, 2, 4)) assert grlex((1, 3, 3)) > grlex((1, 2, 4)) assert grlex((0, 2, 3)) < grlex((1, 2, 2)) assert grlex((1, 1, 3)) < grlex((1, 2, 2)) assert grlex((0, 1, 1)) > grlex((0, 0, 2)) assert grlex((0, 3, 1)) < grlex((2, 2, 1)) assert grlex.is_global is True def test_grevlex_order(): assert grevlex((1, 2, 3)) == (6, (-3, -2, -1)) assert str(grevlex) == 'grevlex' assert grevlex((1, 2, 3)) == grevlex((1, 2, 3)) assert grevlex((2, 2, 3)) > grevlex((1, 2, 3)) assert grevlex((1, 3, 3)) > grevlex((1, 2, 3)) assert grevlex((1, 2, 4)) > grevlex((1, 2, 3)) assert grevlex((0, 2, 3)) < grevlex((1, 2, 3)) assert grevlex((1, 1, 3)) < grevlex((1, 2, 3)) assert grevlex((1, 2, 2)) < grevlex((1, 2, 3)) assert grevlex((2, 2, 3)) > grevlex((1, 2, 4)) assert grevlex((1, 3, 3)) > grevlex((1, 2, 4)) assert grevlex((0, 2, 3)) < grevlex((1, 2, 2)) assert grevlex((1, 1, 3)) < grevlex((1, 2, 2)) assert grevlex((0, 1, 1)) > grevlex((0, 0, 2)) assert grevlex((0, 3, 1)) < grevlex((2, 2, 1)) assert grevlex.is_global is True def test_InverseOrder(): ilex = InverseOrder(lex) igrlex = InverseOrder(grlex) assert ilex((1, 2, 3)) > ilex((2, 0, 3)) assert igrlex((1, 2, 3)) < igrlex((0, 2, 3)) assert str(ilex) == "ilex" assert str(igrlex) == "igrlex" assert ilex.is_global is False assert igrlex.is_global is False assert ilex != igrlex assert ilex == InverseOrder(LexOrder()) def test_ProductOrder(): P = ProductOrder((grlex, lambda m: m[:2]), (grlex, lambda m: m[2:])) assert P((1, 3, 3, 4, 5)) > P((2, 1, 5, 5, 5)) assert str(P) == "ProductOrder(grlex, grlex)" assert P.is_global is True assert ProductOrder((grlex, None), (ilex, None)).is_global is None assert ProductOrder((igrlex, None), (ilex, None)).is_global is False def test_monomial_key(): assert monomial_key() == lex assert monomial_key('lex') == lex assert monomial_key('grlex') == grlex assert monomial_key('grevlex') == grevlex raises(ValueError, lambda: monomial_key('foo')) raises(ValueError, lambda: monomial_key(1)) M = [x, x**2*z**2, x*y, x**2, S(1), y**2, x**3, y, z, x*y**2*z, x**2*y**2] assert sorted(M, key=monomial_key('lex', [z, y, x])) == \ [S(1), x, x**2, x**3, y, x*y, y**2, x**2*y**2, z, x*y**2*z, x**2*z**2] assert sorted(M, key=monomial_key('grlex', [z, y, x])) == \ [S(1), x, y, z, x**2, x*y, y**2, x**3, x**2*y**2, x*y**2*z, x**2*z**2] assert sorted(M, key=monomial_key('grevlex', [z, y, x])) == \ [S(1), x, y, z, x**2, x*y, y**2, x**3, 
x**2*y**2, x**2*z**2, x*y**2*z] def test_build_product_order(): assert build_product_order((("grlex", x, y), ("grlex", z, t)), [x, y, z, t])((4, 5, 6, 7)) == \ ((9, (4, 5)), (13, (6, 7))) assert build_product_order((("grlex", x, y), ("grlex", z, t)), [x, y, z, t]) == \ build_product_order((("grlex", x, y), ("grlex", z, t)), [x, y, z, t])
wolfram74/numerical_methods_iserles_notes
venv/lib/python2.7/site-packages/sympy/polys/tests/test_orderings.py
Python
mit
4,262
'''
CORE WINDOW
================

Core window module, used wherever a window is needed globally. This module
provides the standard variables, objects, and anything else the window
needs.

:package : core stigma
'''
from kivy.config import Config


class _CWindows(object):
    '''
    Base window class, meant to be inherited by concrete Window
    implementations.
    '''
    config = Config


class Windows(_CWindows):
    def __init__(self, **kwargs):
        '''
        Window class used to manage the window interface for the
        application.
        '''
        super(Windows, self).__init__()
        self.app = kwargs['app']

    @property
    def setWindow(self):
        '''
        Apply the application configuration to the window and return the
        resulting config object.
        '''
        for k, v in self.app.items():
            if isinstance(v, dict):
                for a, z in v.items():
                    self.config.set(k, a, z)
        return self.config

    def getWindow(self, info):
        '''
        Return the window configuration entry for the given key.
        '''
        return self.app[info]
Kzulfazriawan/stigma-game-demo
core/window.py
Python
mit
1,062
""" [2015-04-27] Challenge #212 [Easy] Rövarspråket https://www.reddit.com/r/dailyprogrammer/comments/341c03/20150427_challenge_212_easy_rövarspråket/ # Description When we Swedes are young, we are taught a SUPER-SECRET language that only kids know, so we can hide secrets from our confused parents. This language is known as "Rövarspråket" (which means "Robber's language", more or less), and is surprisingly easy to become fluent in, at least when you're a kid. Recently, the cheeky residents of /r/Sweden decided to play a trick on the rest on reddit, and get a thread entirely in Rövarspråket to /r/all. [The results were hilarious](http://np.reddit.com/r/sweden/comments/301sqr/dodetot_%C3%A4ror_fof%C3%B6ror_lolitote/). Rövarspråket is not very complicated: you take an ordinary word and replace the consonants with the consonant doubled and with an "o" in between. So the consonant "b" is replaced by "bob", "r" is replaced with "ror", "s" is replaced with "sos", and so on. Vowels are left intact. It's made for Swedish, but it works just as well in English. Your task today is to write a program that can encode a string of text into Rövarspråket. (note: this is a higly guarded Swedish state secret, so I trust that none of you will share this very privileged information with anyone! If you do, you will be extradited to Sweden and be forced to eat [surströmming](http://en.wikipedia.org/wiki/Surstr%C3%B6mming) as penance.) (note 2: surströmming is actually not that bad, it's much tastier than its reputation would suggest! I'd go so far as to say that it's the tastiest half-rotten fish in the world!) # Formal inputs &amp; outputs ## Input You will recieve one line of input, which is a text string that should be encoded into Rövarspråket. ## Output The output will be the encoded string. A few notes: your program should be able to handle case properly, which means that "Hello" should be encoded to "Hohelollolo", and *not* as "HoHelollolo" (note the second capital "H"). Also, since Rövarspråket is a Swedish invention, your program should follow Swedish rules regarding what is a vowel and what is a consonant. The Swedish alphabet is the same as the English alphabet except that there are three extra characters at the end (Å, Ä and Ö) which are all vowels. In addition, Y is always a vowel in Swedish, so the full list of vowels in Swedish is A, E, I, O, U, Y, Å, Ä and Ö. The rest are consonants. Lastly, any character that is not a vowel or a consonant (i.e. things like punctuation) should be left intact in the output. # Example inputs ## Input 1 Jag talar Rövarspråket! ## Output 1 Jojagog totalolaror Rorövovarorsospoproråkoketot! ## Input 2 I'm speaking Robber's language! ## Output 2 I'mom sospopeakokinongog Rorobobboberor'sos lolanongoguagoge! # Challenge inputs ## Input 1 Tre Kronor är världens bästa ishockeylag. ## Input 2 Vår kung är coolare än er kung. # Bonus Make your program able to decode a Rövarspråket-encoded sentence as well as encode it. # Notes This excellent problem (which filled my crusty old Swedish heart with glee) was suggested by /u/pogotc. Thanks so much for the suggestion! If you have an idea for a problem, head on over to /r/dailyprogrammer_ideas and post your suggestion! If it's good idea, we might use it, and you'll be as cool as /u/pogotc. """ def main(): pass if __name__ == "__main__": main()
DayGitH/Python-Challenges
DailyProgrammer/DP20150427A.py
Python
mit
3,445
from django.conf import settings from samaritan import DEFAULT_USER_MODEL USER_MODEL_NAME = getattr(settings, 'USER_MODEL', DEFAULT_USER_MODEL)
benslavin/django-samaritan
samaritan/utils.py
Python
bsd-3-clause
145
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have purchased from # Numenta, Inc. a separate commercial license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- ## This file defines parameters for a prediction experiment. import os from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription # the sub-experiment configuration config = \ { 'modelParams': { 'clParams': { 'clVerbosity': 0}, 'sensorParams': { 'encoders': { 'consumption': { 'clipInput': True, 'fieldname': u'consumption', 'n': 28, 'name': u'consumption', 'type': 'AdaptiveScalarEncoder', 'w': 21}, 'timestamp_dayOfWeek': { 'dayOfWeek': ( 21, 3), 'fieldname': u'timestamp', 'name': u'timestamp_dayOfWeek', 'type': 'DateEncoder'}, 'timestamp_timeOfDay': { 'fieldname': u'timestamp', 'name': u'timestamp_timeOfDay', 'timeOfDay': ( 21, 1), 'type': 'DateEncoder'}, 'timestamp_weekend': None}, 'verbosity': 0}, 'spParams': { }, 'tpParams': { 'activationThreshold': 13, 'minThreshold': 9, 'verbosity': 0}}} mod = importBaseDescription('../hotgym/description.py', config) locals().update(mod.__dict__)
tkaitchuck/nupic
examples/opf/experiments/multistep/hotgym_best_tp_5step/description.py
Python
gpl-3.0
3,192
import re


def isPalindrome(file):
    # Read the file inside a context manager so it is closed properly.
    with open(file, 'r') as infile:
        lines = infile.readlines()
    num_lines = int(lines[0])
    total_string = ""
    for line in lines[1:(num_lines + 1)]:
        total_string += line
    # Strip everything except letters and digits, then compare the
    # lowercased string with its reverse.
    total_string = re.sub('[^a-zA-Z0-9]', '', total_string)
    total_string = total_string.lower()
    if total_string == total_string[::-1]:
        print('Palindrome')
    else:
        print('Not a palindrome')

isPalindrome('../resources/long_palindrome.txt')
JonShepChen/DailyProgrammerChallenges
challenges/232-Palindromes.py
Python
mit
488
import string import unittest from yyproto.dict import Dict from yyproto.list import List from yyproto.packer import Packer from yyproto.set import Set class TestPacker(unittest.TestCase): def test_integer(self): buf = bytearray(1024) packer = Packer(buf) packer.pack_integer('b', 42) self.assertEqual(1, packer.offset) packer.pack_integer('B', 42) self.assertEqual(1 + 1, packer.offset) packer.pack_integer('h', 42) self.assertEqual(1 + 1 + 2, packer.offset) packer.pack_integer('H', 42) self.assertEqual(1 + 1 + 2 + 2, packer.offset) packer.pack_integer('i', 42) self.assertEqual(1 + 1 + 2 + 2 + 4, packer.offset) packer.pack_integer('I', 42) self.assertEqual(1 + 1 + 2 + 2 + 8, packer.offset) packer.pack_integer('q', 42) self.assertEqual(1 + 1 + 2 + 2 + 8 + 8, packer.offset) packer.pack_integer('Q', 42) self.assertEqual(1 + 1 + 2 + 2 + 8 + 16, packer.offset) def test_int8(self): buf = bytearray(1024) packer = Packer(buf) value = 42 packer.pack_int8(value) self.assertEqual(1, packer.offset) def test_int16(self): buf = bytearray(1024) packer = Packer(buf) value = 42 packer.pack_int16(value) self.assertEqual(2, packer.offset) def test_int32(self): buf = bytearray(1024) packer = Packer(buf) value = 42 packer.pack_int32(value) self.assertEqual(4, packer.offset) def test_int64(self): buf = bytearray(1024) packer = Packer(buf) value = 42 packer.pack_int64(value) self.assertEqual(8, packer.offset) def test_uint8(self): buf = bytearray(1024) packer = Packer(buf) value = 42 packer.pack_uint8(value) self.assertEqual(1, packer.offset) def test_uint16(self): buf = bytearray(1024) packer = Packer(buf) value = 42 packer.pack_uint16(value) self.assertEqual(2, packer.offset) def test_uint32(self): buf = bytearray(1024) packer = Packer(buf) value = 42 packer.pack_uint32(value) self.assertEqual(4, packer.offset) def test_uint64(self): buf = bytearray(1024) packer = Packer(buf) value = 42 packer.pack_uint64(value) self.assertEqual(8, packer.offset) def test_float(self): buf = bytearray(1024) packer = Packer(buf) value = 3.14 packer.pack_float(value) self.assertEqual(2 + 4, packer.offset) def test_double(self): buf = bytearray(1024) packer = Packer(buf) value = 3.14 packer.pack_double(value) self.assertEqual(2 + 4, packer.offset) def test_string(self): buf = bytearray(1024) packer = Packer(buf) packer.pack_string('42') self.assertEqual(2 + 2, packer.offset) def test_binary(self): buf = bytearray(1024) packer = Packer(buf) packer.pack_binary('42') self.assertEqual(4 + 2, packer.offset) def test_list_int8(self): buf = bytearray(4096) packer = Packer(buf) l = List('b') l.extend([ord(c) for c in string.ascii_letters]) packer.pack_list(l) self.assertEqual(4 + len(string.ascii_letters), packer.offset) packer.pack_list([ord(c) for c in string.ascii_letters], 'b') self.assertEqual(2 * (4 + len(string.ascii_letters)), packer.offset) def test_list_string(self): buf = bytearray(4096) packer = Packer(buf) l = List('s') l.extend(['aa', 'bb', 'cc']) packer.pack_list(l) self.assertEqual(4 + 3 * (2 + 2), packer.offset) def test_set_int8(self): buf = bytearray(4096) packer = Packer(buf) s = Set('b') for c in string.ascii_letters: s.add(ord(c)) packer.pack_set(s) self.assertEqual(4 + len(string.ascii_letters), packer.offset) packer.pack_set(set([ord(c) for c in string.ascii_letters]), 'b') self.assertEqual(2 * (4 + len(string.ascii_letters)), packer.offset) def test_set_string(self): buf = bytearray(4096) packer = Packer(buf) s = Set('s') s.add('aa') s.add('bb') s.add('cc') s.add('cc') packer.pack_set(s) self.assertEqual(4 + 3 * (2 + 
                         2), packer.offset)

    def test_message(self):
        from yyproto.header import Header
        buf = bytearray(1024)
        packer = Packer(buf)
        header = Header()
        packer.pack_message(header)
        self.assertEqual(10, packer.offset)

    def test_dict_int8_int8(self):
        buf = bytearray(1024)
        packer = Packer(buf)
        d = Dict('b', 'b')
        for i in range(0, 64):
            d[i] = i
        packer.pack_dict(d)
        self.assertEqual(4 + 64 * 2, packer.offset)

    def test_dict_str_str(self):
        buf = bytearray(1024)
        packer = Packer(buf)
        d = Dict('s', 's')
        for c in string.ascii_letters:
            d[c] = c
        packer.pack_dict(d)
        self.assertEqual(4 + len(string.ascii_letters) * 2 * (2 + 1),
                         packer.offset)


if __name__ == '__main__':
    unittest.main()
decimalbell/yyproto
python/yyproto/tests/test_packer.py
Python
bsd-3-clause
5,419
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.mgmt.core.exceptions import ARMErrorFormat from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class SecureScoreControlDefinitionsOperations: """SecureScoreControlDefinitionsOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.security.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def list( self, **kwargs: Any ) -> AsyncIterable["_models.SecureScoreControlDefinitionList"]: """List the available security controls, their assessments, and the max score. 
:keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either SecureScoreControlDefinitionList or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.security.models.SecureScoreControlDefinitionList] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.SecureScoreControlDefinitionList"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-01-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('SecureScoreControlDefinitionList', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/providers/Microsoft.Security/secureScoreControlDefinitions'} # type: ignore def list_by_subscription( self, **kwargs: Any ) -> AsyncIterable["_models.SecureScoreControlDefinitionList"]: """For a specified subscription, list the available security controls, their assessments, and the max score. 
:keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either SecureScoreControlDefinitionList or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.security.models.SecureScoreControlDefinitionList] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.SecureScoreControlDefinitionList"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-01-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_by_subscription.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('SecureScoreControlDefinitionList', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/secureScoreControlDefinitions'} # type: ignore
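
# A hedged usage sketch, not part of the generated module: `client` is
# assumed to be an instantiated azure.mgmt.security.aio SecurityCenter
# client. Both operations return async pagers that are consumed with
# `async for`.
#
#     async for definition in client.secure_score_control_definitions.list():
#         print(definition)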
Azure/azure-sdk-for-python
sdk/security/azure-mgmt-security/azure/mgmt/security/aio/operations/_secure_score_control_definitions_operations.py
Python
mit
8,156
import time

import smbus
import Adafruit_DHT

# Change to 0 if using pi with 256MB
i2c_bus = smbus.SMBus(1)


class AbstractSensor():
    def __init__(self):
        self.last_failure = 0
        self.last_success = 0

    def read(self):
        # Subclasses must implement read(); raising a plain string is
        # invalid, so raise NotImplementedError instead.
        raise NotImplementedError('Abstract')


class DHTSensor(AbstractSensor):
    def __init__(self, bcm_pin, location):
        # Initialize last_failure/last_success via the base class.
        super(DHTSensor, self).__init__()
        self.bcm_pin = bcm_pin
        self.location = location

    def read(self):
        rel_humidity, temp = Adafruit_DHT.read_retry(
            self.dht_type, self.bcm_pin)

        if not temp or not rel_humidity:
            self.last_failure = time.time()
            return

        self.last_success = time.time()
        return {
            'temperature': temp,
            'rel_humidity': rel_humidity
        }


class ADCSensor(AbstractSensor):
    def __init__(self, i2c_addr, location):
        super(ADCSensor, self).__init__()
        self.i2c_addr = i2c_addr
        self.location = location

    def read(self):
        # Read the four ADC channels of the chip at the configured
        # address (previously hardcoded to 0x48), printing the values
        # on one line.
        for x in range(0, 4):
            i2c_bus.write_byte_data(self.i2c_addr, 0x40 | ((x + 1) & 0x03), 0)
            v = i2c_bus.read_byte(self.i2c_addr)
            print(v, end=' ')
            time.sleep(0.1)
        print()


class DHT22Sensor(DHTSensor):
    def __init__(self, bcm_pin, location):
        self.dht_type = Adafruit_DHT.DHT22
        super(DHT22Sensor, self).__init__(bcm_pin, location)


class DHT11Sensor(DHTSensor):
    def __init__(self, bcm_pin, location):
        self.dht_type = Adafruit_DHT.DHT11
        super(DHT11Sensor, self).__init__(bcm_pin, location)
jdupl/iot-greenhouse-ctrl
sensors.py
Python
gpl-3.0
1,502
from sys import maxint


class BellmanFord( object ):

    def __init__( self ):
        '''
        Constructor
        '''

    def singleSourceShortestPath( self, weight, source ) :
        # auxiliary constants
        SIZE = len( weight )
        EVE = -1       # to indicate no predecessor
        INFINITY = maxint

        # declare and initialize pred to EVE and minDist to INFINITY
        pred = [EVE] * SIZE
        minDist = [INFINITY] * SIZE

        # set minDist[source] = 0 because source is 0 distance from itself.
        minDist[source] = 0

        # relax the edge set V-1 times to find all shortest paths
        # (range(SIZE - 1) performs exactly V-1 passes; the original
        # range(1, SIZE - 1) was one pass short)
        for i in range( SIZE - 1 ):
            for v in range( SIZE ):
                for x in self.adjacency( weight, v ):
                    if minDist[x] > minDist[v] + weight[v][x]:
                        minDist[x] = minDist[v] + weight[v][x]
                        pred[x] = v

        # detect cycles if any
        for v in range( SIZE ):
            for x in self.adjacency( weight, v ):
                if minDist[x] > minDist[v] + weight[v][x]:
                    raise Exception( "Negative cycle found" )

        return [pred, minDist]

    #=====================================================================
    # Retrieve all the neighbors of vertex v.
    #=====================================================================
    def adjacency( self, G, v ) :
        result = []
        for x in range( len( G ) ):
            if G[v][x] is not None:
                result.append( x )
        return result
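
# Hedged demo, not part of the original file: the 4-vertex weight matrix
# below is an assumption, with None marking a missing edge to match
# adjacency() above.
if __name__ == '__main__':
    W = [
        [None, 4,    None, 5   ],
        [None, None, -3,   None],
        [None, None, None, 1   ],
        [None, None, None, None],
    ]
    pred, dist = BellmanFord().singleSourceShortestPath( W, 0 )
    print pred, dist   # expected: [-1, 0, 1, 2] [0, 4, 1, 2]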
salman-bhai/DS-Algo-Handbook
Algorithms/Graph_Algorithms/Bellman_Ford/bellman_ford.py
Python
mit
1,400
#!/usr/bin/python
#-*-coding:utf-8-*-
import redis


class DupFilter:
    """Redis-based request duplication filter"""

    dup_key_taobao = 'dupefilter_t'
    REDIS_HOST = 'youhost'
    REDIS_PORT = 6379

    def __init__(self, server=None, key=None):
        """Initialize duplication filter

        Parameters
        ----------
        server : Redis instance (created from the class defaults when
            not supplied)
        key : str
            Where to store fingerprints (defaults to dup_key_taobao)
        """
        if server is None:
            server = redis.Redis(self.REDIS_HOST, self.REDIS_PORT)
        self.server = server
        self.key = key or self.dup_key_taobao

    @classmethod
    def from_settings(cls, settings):
        host = settings.get('REDIS_HOST', 'localhost')
        port = settings.get('REDIS_PORT', 6379)
        server = redis.Redis(host, port)
        # create one-time key. needed to support to use this
        # class as standalone dupefilter with scrapy's default scheduler
        # if scrapy passes spider on open() method this wouldn't be needed
        # key = "dupefilter:%s" % int(time.time())
        return cls(server, cls.dup_key_taobao)

    @classmethod
    def from_crawler(cls, crawler):
        return cls.from_settings(crawler.settings)

    def check_dup(self, value):
        """Return True if value was seen before, otherwise record it."""
        if self.server.sismember(self.key, value):
            return True
        self.server.sadd(self.key, value)
        return False
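
# Hedged usage sketch (assumes a reachable Redis at REDIS_HOST:REDIS_PORT):
#
#     f = DupFilter()
#     f.check_dup('http://example.com/item/1')   # False: first sighting
#     f.check_dup('http://example.com/item/1')   # True: duplicate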
muzixinly/matrix
matrix/store/redis.py
Python
gpl-2.0
1,425
# Copyright 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslotest import base as test_base import time from fuel_agent import errors from fuel_agent.utils import partition_utils as pu from fuel_agent.utils import utils class TestPartitionUtils(test_base.BaseTestCase): @mock.patch.object(pu, 'make_label') def test_wipe(self, mock_label): # should run call make_label method # in order to create new empty table which we think # is equivalent to wiping the old one pu.wipe('/dev/fake') mock_label.assert_called_once_with('/dev/fake') @mock.patch.object(pu, 'reread_partitions') @mock.patch.object(utils, 'execute') def test_make_label(self, mock_exec, mock_rerd): # should run parted OS command # in order to create label on a device mock_exec.return_value = ('out', '') # gpt by default pu.make_label('/dev/fake') mock_exec.assert_called_once_with( 'parted', '-s', '/dev/fake', 'mklabel', 'gpt', check_exit_code=[0, 1]) mock_rerd.assert_called_once_with('/dev/fake', out='out') mock_exec.reset_mock() mock_rerd.reset_mock() # label is set explicitly pu.make_label('/dev/fake', label='msdos') mock_exec.assert_called_once_with( 'parted', '-s', '/dev/fake', 'mklabel', 'msdos', check_exit_code=[0, 1]) mock_rerd.assert_called_once_with('/dev/fake', out='out') def test_make_label_wrong_label(self): # should check if label is valid # should raise exception if it is not self.assertRaises(errors.WrongPartitionLabelError, pu.make_label, '/dev/fake', 'wrong') @mock.patch.object(pu, 'reread_partitions') @mock.patch.object(utils, 'execute') def test_set_partition_flag(self, mock_exec, mock_rerd): # should run parted OS command # in order to set flag on a partition mock_exec.return_value = ('out', '') # default state is 'on' pu.set_partition_flag('/dev/fake', 1, 'boot') mock_exec.assert_called_once_with( 'parted', '-s', '/dev/fake', 'set', '1', 'boot', 'on', check_exit_code=[0, 1]) mock_rerd.assert_called_once_with('/dev/fake', out='out') mock_exec.reset_mock() mock_rerd.reset_mock() # if state argument is given use it pu.set_partition_flag('/dev/fake', 1, 'boot', state='off') mock_exec.assert_called_once_with( 'parted', '-s', '/dev/fake', 'set', '1', 'boot', 'off', check_exit_code=[0, 1]) mock_rerd.assert_called_once_with('/dev/fake', out='out') @mock.patch.object(utils, 'execute') def test_set_partition_flag_wrong_flag(self, mock_exec): # should check if flag is valid # should raise exception if it is not self.assertRaises(errors.WrongPartitionSchemeError, pu.set_partition_flag, '/dev/fake', 1, 'wrong') @mock.patch.object(utils, 'execute') def test_set_partition_flag_wrong_state(self, mock_exec): # should check if flag is valid # should raise exception if it is not self.assertRaises(errors.WrongPartitionSchemeError, pu.set_partition_flag, '/dev/fake', 1, 'boot', state='wrong') @mock.patch.object(pu, 'reread_partitions') @mock.patch.object(pu, 'info') @mock.patch.object(utils, 'execute') def test_make_partition(self, mock_exec, mock_info, mock_rerd): # should run parted OS command # in order to 
create new partition mock_exec.return_value = ('out', '') mock_info.return_value = { 'parts': [ {'begin': 0, 'end': 1000, 'fstype': 'free'}, ] } pu.make_partition('/dev/fake', 100, 200, 'primary') mock_exec.assert_called_once_with( 'parted', '-a', 'optimal', '-s', '/dev/fake', 'unit', 'MiB', 'mkpart', 'primary', '100', '200', check_exit_code=[0, 1]) mock_rerd.assert_called_once_with('/dev/fake', out='out') @mock.patch.object(utils, 'execute') def test_make_partition_wrong_ptype(self, mock_exec): # should check if partition type is one of # 'primary' or 'logical' # should raise exception if it is not self.assertRaises(errors.WrongPartitionSchemeError, pu.make_partition, '/dev/fake', 200, 100, 'wrong') @mock.patch.object(utils, 'execute') def test_make_partition_begin_overlaps_end(self, mock_exec): # should check if begin is less than end # should raise exception if it isn't self.assertRaises(errors.WrongPartitionSchemeError, pu.make_partition, '/dev/fake', 200, 100, 'primary') @mock.patch.object(pu, 'info') @mock.patch.object(utils, 'execute') def test_make_partition_overlaps_other_parts(self, mock_exec, mock_info): # should check if begin or end overlap other partitions # should raise exception if it does mock_info.return_value = { 'parts': [ {'begin': 0, 'end': 100, 'fstype': 'free'}, {'begin': 100, 'end': 200, 'fstype': 'notfree'}, {'begin': 200, 'end': 300, 'fstype': 'free'} ] } self.assertRaises(errors.WrongPartitionSchemeError, pu.make_partition, '/dev/fake', 99, 101, 'primary') self.assertRaises(errors.WrongPartitionSchemeError, pu.make_partition, '/dev/fake', 100, 200, 'primary') self.assertRaises(errors.WrongPartitionSchemeError, pu.make_partition, '/dev/fake', 200, 301, 'primary') self.assertEqual(mock_info.call_args_list, [mock.call('/dev/fake')] * 3) @mock.patch.object(pu, 'reread_partitions') @mock.patch.object(pu, 'info') @mock.patch.object(utils, 'execute') def test_remove_partition(self, mock_exec, mock_info, mock_rerd): # should run parted OS command # in order to remove partition mock_exec.return_value = ('out', '') mock_info.return_value = { 'parts': [ { 'begin': 1, 'end': 100, 'size': 100, 'num': 1, 'fstype': 'ext2' }, { 'begin': 100, 'end': 200, 'size': 100, 'num': 2, 'fstype': 'ext2' } ] } pu.remove_partition('/dev/fake', 1) mock_exec.assert_called_once_with( 'parted', '-s', '/dev/fake', 'rm', '1', check_exit_code=[0]) mock_rerd.assert_called_once_with('/dev/fake', out='out') @mock.patch.object(pu, 'info') @mock.patch.object(utils, 'execute') def test_remove_partition_notexists(self, mock_exec, mock_info): # should check if partition does exist # should raise exception if it doesn't mock_info.return_value = { 'parts': [ { 'begin': 1, 'end': 100, 'size': 100, 'num': 1, 'fstype': 'ext2' }, { 'begin': 100, 'end': 200, 'size': 100, 'num': 2, 'fstype': 'ext2' } ] } self.assertRaises(errors.PartitionNotFoundError, pu.remove_partition, '/dev/fake', 3) @mock.patch.object(utils, 'execute') def test_set_gpt_type(self, mock_exec): pu.set_gpt_type('dev', 'num', 'type') mock_exec.assert_called_once_with('sgdisk', '--typecode=%s:%s' % ('num', 'type'), 'dev', check_exit_code=[0]) @mock.patch.object(utils, 'execute') def test_info(self, mock_exec): mock_exec.return_value = [ 'BYT;\n' '/dev/fake:476940MiB:scsi:512:4096:msdos:ATA 1BD14;\n' '1:0.03MiB:1.00MiB:0.97MiB:free;\n' '1:1.00MiB:191MiB:190MiB:ext3::boot;\n' '2:191MiB:476939MiB:476748MiB:::lvm;\n' '1:476939MiB:476940MiB:1.02MiB:free;\n' ] expected = {'generic': {'dev': '/dev/fake', 'logical_block': 512, 'model': 'ATA 1BD14', 
'physical_block': 4096, 'size': 476940, 'table': 'msdos'}, 'parts': [{'begin': 1, 'end': 1, 'fstype': 'free', 'num': 1, 'size': 1}, {'begin': 1, 'end': 191, 'fstype': 'ext3', 'num': 1, 'size': 190}, {'begin': 191, 'end': 476939, 'fstype': None, 'num': 2, 'size': 476748}, {'begin': 476939, 'end': 476940, 'fstype': 'free', 'num': 1, 'size': 2}]} actual = pu.info('/dev/fake') self.assertEqual(expected, actual) mock_exec.assert_called_once_with('parted', '-s', '/dev/fake', '-m', 'unit', 'MiB', 'print', 'free', check_exit_code=[0, 1]) @mock.patch.object(utils, 'execute') def test_reread_partitions_ok(self, mock_exec): pu.reread_partitions('/dev/fake', out='') self.assertEqual(mock_exec.call_args_list, []) @mock.patch.object(time, 'sleep') @mock.patch.object(utils, 'execute') def test_reread_partitions_device_busy(self, mock_exec, mock_sleep): mock_exec.return_value = ('', '') pu.reread_partitions('/dev/fake', out='_Device or resource busy_') mock_exec_expected = [ mock.call('partprobe', '/dev/fake', check_exit_code=[0, 1]), mock.call('partx', '-a', '/dev/fake', check_exit_code=[0, 1]) ] self.assertEqual(mock_exec.call_args_list, mock_exec_expected) mock_sleep.assert_called_once_with(1) @mock.patch.object(utils, 'execute') def test_reread_partitions_timeout(self, mock_exec): self.assertRaises(errors.BaseError, pu.reread_partitions, '/dev/fake', out='Device or resource busy', timeout=-40)
andrei4ka/fuel-web-redhat
fuel_agent/fuel_agent/tests/test_partition_utils.py
Python
apache-2.0
11,339
# -*- coding: utf-8 -*- ############################################################################### # # DeleteWalk # Deletes a given walk action. # # Python versions 2.6, 2.7, 3.x # # Copyright 2014, Temboo Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. # # ############################################################################### from temboo.core.choreography import Choreography from temboo.core.choreography import InputSet from temboo.core.choreography import ResultSet from temboo.core.choreography import ChoreographyExecution import json class DeleteWalk(Choreography): def __init__(self, temboo_session): """ Create a new instance of the DeleteWalk Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied. """ super(DeleteWalk, self).__init__(temboo_session, '/Library/Facebook/Actions/Fitness/Walks/DeleteWalk') def new_input_set(self): return DeleteWalkInputSet() def _make_result_set(self, result, path): return DeleteWalkResultSet(result, path) def _make_execution(self, session, exec_id, path): return DeleteWalkChoreographyExecution(session, exec_id, path) class DeleteWalkInputSet(InputSet): """ An InputSet with methods appropriate for specifying the inputs to the DeleteWalk Choreo. The InputSet object is used to specify input parameters when executing this Choreo. """ def set_AccessToken(self, value): """ Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved from the final step of the OAuth process.) """ super(DeleteWalkInputSet, self)._set_input('AccessToken', value) def set_ActionID(self, value): """ Set the value of the ActionID input for this Choreo. ((required, string) The id of an action to delete.) """ super(DeleteWalkInputSet, self)._set_input('ActionID', value) class DeleteWalkResultSet(ResultSet): """ A ResultSet with methods tailored to the values returned by the DeleteWalk Choreo. The ResultSet object is used to retrieve the results of a Choreo execution. """ def getJSONFromString(self, str): return json.loads(str) def get_Response(self): """ Retrieve the value for the "Response" output from this Choreo execution. ((boolean) The response from Facebook. Returns "true" on success.) """ return self._output.get('Response', None) class DeleteWalkChoreographyExecution(ChoreographyExecution): def _make_result_set(self, response, path): return DeleteWalkResultSet(response, path)
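
# Hedged usage sketch, not part of the generated file: the credentials
# and IDs below are placeholders.
#
#     from temboo.core.session import TembooSession
#     session = TembooSession('ACCOUNT', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#     choreo = DeleteWalk(session)
#     inputs = choreo.new_input_set()
#     inputs.set_AccessToken('FACEBOOK_ACCESS_TOKEN')
#     inputs.set_ActionID('ACTION_ID')
#     results = choreo.execute_with_results(inputs)
#     print(results.get_Response())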
jordanemedlock/psychtruths
temboo/core/Library/Facebook/Actions/Fitness/Walks/DeleteWalk.py
Python
apache-2.0
3,181
#!/usr/bin/python3
import wiringpi2 as wiringpi
from time import sleep

# Set up GPIO numbering; the original line was missing the call
# parentheses, so setup never actually ran.
wiringpi.wiringPiSetupGpio()
pin = 17
wiringpi.pinMode(pin, 1)

while True:
    wiringpi.digitalWrite(pin, 0)
    sleep(0.5)
    wiringpi.digitalWrite(pin, 1)
    sleep(0.5)
citizensense/csk
libraries/windspeed.py
Python
gpl-3.0
245
import operator
from functools import reduce

from database import db


def generic_aggregator(attribute, flatten=False, is_callable=False):
    def aggregator(self, data_filter=lambda x: x):
        data = data_filter(self.data)
        aggregated = [
            getattr(datum, attribute)(data_filter)
            if is_callable else
            getattr(datum, attribute)
            for datum in data
        ]
        if aggregated and flatten:
            return reduce(operator.add, aggregated)
        else:
            return aggregated
    return aggregator


def association_table_super_factory(bind=None):

    def make_association_table(fk1, fk2, index=False):
        """Create an association table based on the names of two given
        foreign keys.

        From keys: `site.id` and `kinase.id` a table named:
        site_kinase_association will be created and it will contain two
        columns: `site_id` and `kinase_id`.

        The foreign keys can be provided as models properties,
        e.g. Model.id, or as SQL strings, e.g. 'model.id'.
        """
        if not isinstance(fk1, str):
            fk1 = f'{fk1.table}.{fk1.name}'
        if not isinstance(fk2, str):
            fk2 = f'{fk2.table}.{fk2.name}'

        fk1_table = fk1.split('.')[0]
        fk2_table = fk2.split('.')[0]

        table_name = f'{fk1_table}_{fk2_table}_association'

        col1 = fk1.replace('.', '_')
        col2 = fk2.replace('.', '_')

        table_args = [
            table_name,
            db.metadata,
            db.Column(col1, db.Integer, db.ForeignKey(fk1, ondelete='cascade')),
            db.Column(col2, db.Integer, db.ForeignKey(fk2, ondelete='cascade')),
        ]

        if index:
            table_args.append(
                db.Index(f'idx_{col1}_{col2}', col1, col2)
            )

        return db.Table(
            *table_args,
            info={'bind_key': bind}
        )

    return make_association_table
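
# Hedged usage sketch grounded in the docstring above; the `site.id` and
# `kinase.id` keys come from the example there, while the surrounding
# models are assumptions:
#
#     make_association_table = association_table_super_factory()
#     site_kinase = make_association_table('site.id', 'kinase.id', index=True)
#     # -> db.Table('site_kinase_association', ...) with columns
#     #    site_id and kinase_id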
reimandlab/Visualistion-Framework-for-Genome-Mutations
website/helpers/models.py
Python
lgpl-2.1
1,911
"""SCons.Tool.aixcc Tool-specific initialization for IBM xlc / Visual Age C compiler. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # __COPYRIGHT__ # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__" import os.path import SCons.Platform.aix from . import cc packages = ['vac.C', 'ibmcxx.cmp'] def get_xlc(env): xlc = env.get('CC', 'xlc') return SCons.Platform.aix.get_xlc(env, xlc, packages) def generate(env): """Add Builders and construction variables for xlc / Visual Age suite to an Environment.""" path, _cc, version = get_xlc(env) if path and _cc: _cc = os.path.join(path, _cc) if 'CC' not in env: env['CC'] = _cc cc.generate(env) if version: env['CCVERSION'] = version def exists(env): path, _cc, version = get_xlc(env) if path and _cc: xlc = os.path.join(path, _cc) if os.path.exists(xlc): return xlc return None # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
timj/scons
src/engine/SCons/Tool/aixcc.py
Python
mit
2,228
#!/usr/bin/python3

import sys
import os

pwd = os.getcwd()
if not sys.argv[1:]:
    print("Give me at least 1 file")
else:
    # `name` and `filePath` replace the original `dir`/`dirPath`, which
    # shadowed a builtin and mislabeled file paths as directories; open
    # the same path that was just checked.
    for name in sys.argv[1:]:
        filePath = pwd + "/" + name
        if os.path.exists(filePath) and os.path.isfile(filePath):
            with open(filePath, "r") as file:
                print(file.read())
        else:
            print("File", filePath, "doesn't exist or it isn't a file")
lesina/labs2016
Laba09/exercise03.py
Python
gpl-3.0
417
# Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def use_azure_secret(secret_name='azcreds'):
    """An operator that configures the container to use Azure user credentials.

    The azcreds secret is created as part of the kubeflow deployment that
    stores the client ID and secrets for the kubeflow azure service
    principal. With this service principal, the container has access to a
    range of Azure APIs.
    """

    def _use_azure_secret(task):
        from kubernetes import client as k8s_client
        (task.container.add_env_variable(
            k8s_client.V1EnvVar(
                name='AZ_SUBSCRIPTION_ID',
                value_from=k8s_client.V1EnvVarSource(
                    secret_key_ref=k8s_client.V1SecretKeySelector(
                        name=secret_name, key='AZ_SUBSCRIPTION_ID')))
        ).add_env_variable(
            k8s_client.V1EnvVar(
                name='AZ_TENANT_ID',
                value_from=k8s_client.V1EnvVarSource(
                    secret_key_ref=k8s_client.V1SecretKeySelector(
                        name=secret_name, key='AZ_TENANT_ID')))
        ).add_env_variable(
            k8s_client.V1EnvVar(
                name='AZ_CLIENT_ID',
                value_from=k8s_client.V1EnvVarSource(
                    secret_key_ref=k8s_client.V1SecretKeySelector(
                        name=secret_name, key='AZ_CLIENT_ID')))
        ).add_env_variable(
            k8s_client.V1EnvVar(
                name='AZ_CLIENT_SECRET',
                value_from=k8s_client.V1EnvVarSource(
                    secret_key_ref=k8s_client.V1SecretKeySelector(
                        name=secret_name, key='AZ_CLIENT_SECRET')))))
        return task

    return _use_azure_secret
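
# Hedged usage sketch: inside a pipeline definition, apply the modifier to
# a task (the `my_op` component below is an assumption, not part of this
# module).
#
#     task = my_op(...).apply(use_azure_secret('azcreds'))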
kubeflow/pipelines
sdk/python/kfp/deprecated/azure.py
Python
apache-2.0
2,272
from django.conf import settings from .site_parser import SiteParser, SiteSettingsParser class SiteManager(object): def __new__(cls, *args, **kwargs): return super(SiteManager, cls).__new__(cls) def __init__(self, url=settings.MYTARDIS_SITES_URL): self.url = url try: self.username = settings.MYTARDIS_SITES_USERNAME self.password = settings.MYTARDIS_SITES_PASSWORD except AttributeError: self.username = '' self.password = '' self.site_parser = SiteParser(self.url, username=self.username, password=self.password) def sites(self): sites = self.site_parser.get() if sites is None: return for site in sites: ssp = SiteSettingsParser( site['url'], username=site['username'], password=site['password']) site_settings = ssp.get() if site_settings is not None: yield site_settings def get_site_settings(self, url): sites = self.site_parser.get() if sites is None: return None for site in sites: if site['url'] == url: ssp = SiteSettingsParser( site['url'], username=site['username'], password=site['password']) if ssp is not None: return ssp.get() return None return None
iiman/mytardis
tardis/apps/sync/site_manager.py
Python
bsd-3-clause
1,434
#!/usr/bin/env python # coding=utf-8 __author__ = 'Dean'
qicfan/lightfile
file.py
Python
gpl-2.0
57
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Search tools refactor in progress, don't commit to common until done.
svn's pre-commit hook should stop me from committing this file!
"""

import config, connection

from math import ceil
from itertools import imap, islice, chain, repeat


def iterblocks(iterable, size, **kwds):
    """
    http://code.activestate.com/recipes/542194/
    Splits given iterable object into a set of objects, each with size given by the size variable, \n
    also takes blocktype keyword argument for description of type of objects to be returned, \n
    can truncate and pad if number of objects in iterable isn't splitable ideally
    """
    truncate = kwds.get('truncate', False)
    blocktype = kwds.get('blocktype', tuple)
    if truncate and 'pad' in kwds:
        raise ValueError("'truncate' must be false if 'pad' is given")
    iterator = iter(iterable)
    while True:
        block = blocktype(islice(iterator, size))
        if not block:
            break
        if len(block) < size:
            if 'pad' in kwds:
                block = blocktype(chain(block, repeat(kwds['pad'], size - len(block))))
            elif truncate:
                break
        yield block


def granulate(elems, granulation = 10):
    """
    Performs granulation on a given list, splits it into smaller lists, \n
    the granulation param defines the number of returned packages.
    """
    elems_len = float(len(elems))
    # islice needs an integer size, so round the package length up and
    # convert the result of ceil() explicitly.
    package_len = int(ceil(elems_len / granulation))
    return iterblocks(elems, package_len, blocktype = list)


def filter_even(iterable):
    """
    Filters even indexed objects from given iterable, returns iterator
    """
    return imap(lambda i: iterable[i], filter(lambda i: i % 2 == 0, range(len(iterable))))


def filter_odd(iterable):
    """
    Filter odd indexed objects from iterable, returns iterator
    """
    return imap(lambda i: iterable[i], filter(lambda i: i % 2 == 1, range(len(iterable))))


def index(add_iterable, remove_iterable, config_name = None ):
    """
    Takes config_name and data in a form of iterable object, selects appropriate config \n
    and indexes data. If an error occurs, recursively narrows down the error source. \n
    """
    if config_name:
        config.load_config(config_name)
    errors = []

    def recursively_do(operation, iterable, granulation=1):
        iterables = granulate(iterable, granulation)
        for iterable in iterables:
            response = operation(iterable)
            if response.status != 200:
                if len(iterable) == 1:
                    if isinstance(iterable[0], dict) and iterable[0].has_key('id'):
                        errors.append(iterable[0]['id'])
                    else:
                        errors.append(iterable[0])
                else:
                    recursively_do(operation, iterable, granulation * 10)
    # go go go
    recursively_do(connection.add, add_iterable)
    recursively_do(connection.delete_multi, remove_iterable)
    return errors
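
# Hedged demo of the block helpers (plain comments, assuming the imports
# above):
#
#     >>> list(iterblocks(range(7), 3, pad=None))
#     [(0, 1, 2), (3, 4, 5), (6, None, None)]
#     >>> list(granulate(range(10), granulation=3))
#     [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]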
mdomans/insol
insol/tools.py
Python
apache-2.0
2,970
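A short, hedged demonstration of the block helpers above, assuming the insol package is importable; expected values are shown as comments (Python 2, like the module itself).

from insol.tools import iterblocks, granulate

print(list(iterblocks(range(7), 3)))                 # [(0, 1, 2), (3, 4, 5), (6,)]
print(list(iterblocks(range(7), 3, truncate=True)))  # [(0, 1, 2), (3, 4, 5)]
print(list(iterblocks(range(7), 3, pad=None)))       # [(0, 1, 2), (3, 4, 5), (6, None, None)]

# granulate splits into blocks of ceil(len/granulation) items, so a
# 25-element list with granulation=10 yields 9 lists of at most 3 items.
print(list(granulate(range(25), granulation=10)))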
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Author: Thomas Beucher

Module: Experiments

Description: Class used to generate all the trajectories of the experimental setup and also used for CMAES optimization
'''
import numpy as np
#from Utils.ThetaNormalization import normalization, unNormalization
from GlobalVariables import pathDataFolder

from TrajMaker import TrajMaker
from Utils.FileWriting import checkIfFolderExists, findDataFilename, writeArray

from multiprocess.pool import Pool
from functools import partial

#------------------------------------------------------------------------------

class Experiments:
    def __init__(self, rs, sizeOfTarget, saveTraj, foldername, thetafile, popSize, period, estim="Inv"):
        '''
        Initializes the parameters used to run the functions below.

        Inputs:     -rs: setup object holding the experiment parameters
                    -sizeOfTarget: size of the target to reach
                    -saveTraj: whether the generated trajectories are saved
                    -foldername: output folder for the results
                    -thetafile: file containing the controller parameters
                    -popSize: population size (CMAES)
                    -period: logging period of the CMAES statistics
                    -estim: state estimation method, "Inv" by default
        '''
        self.rs = rs
        self.name = "Experiments"
        self.call = 0
        self.dimState = rs.inputDim
        self.dimOutput = rs.outputDim
        self.numberOfRepeat = rs.numberOfRepeatEachTraj
        self.foldername = foldername
        self.tm = TrajMaker(rs, sizeOfTarget, saveTraj, thetafile, estim)
        self.posIni = np.loadtxt(pathDataFolder + rs.experimentFilePosIni)
        if len(self.posIni.shape) == 1:
            self.posIni = self.posIni.reshape((1, self.posIni.shape[0]))
        self.costStore = []
        self.cost12Store = []
        self.CMAESCostStore = []
        self.CMAESTimeStore = []
        self.trajTimeStore = []
        self.bestCost = -10000.0
        self.lastCoord = []
        self.popSize = popSize
        self.period = period

    def printLastCoordInfo(self):
        vec = np.array(self.lastCoord)
        print ("mean : " + str(np.mean(vec)))
        print ("min : " + str(np.min(vec)))
        print ("max : " + str(np.max(vec)))

    def initTheta(self, theta):
        '''
        Input:      -theta: controller, i.e. vector of parameters, numpy array
        '''
        self.theta = theta
        self.tm.setTheta(self.theta)

    def saveCost(self):
        '''
        filename = findDataFilename(self.foldername+"Cost/","traj",".cost")
        filenameTime = findDataFilename(self.foldername+"TrajTime/","traj",".time")
        filenameX = findDataFilename(self.foldername+"finalX/","x",".last")
        np.savetxt(filename, self.costStore)
        np.savetxt(filenameTime, self.trajTimeStore)
        np.savetxt(filenameX, self.lastCoord)
        '''
        writeArray(self.costStore, self.foldername+"Cost/", "traj", ".cost")
        writeArray(self.cost12Store, self.foldername+"CostU12/", "traj", ".cost")
        writeArray(self.trajTimeStore, self.foldername+"TrajTime/", "traj", ".time")
        writeArray(self.lastCoord, self.foldername+"finalX/", "x", ".last")

    def setNoise(self, noise):
        self.tm.setnoise(noise)

    def runOneTrajectory(self, x, y):
        #self.tm.saveTraj = True
        cost, trajTime, lastX = self.tm.runTrajectory(x, y, self.foldername)
        #print "Exp local x y cost : ", x, y, cost
        if lastX != -1000:
            self.lastCoord.append(lastX)
        return cost, trajTime

    def runRichTrajectories(self, repeat):
        globCost = []
        xy = np.loadtxt(pathDataFolder + "PosCircu540")
        #xy = np.loadtxt(pathDataFolder + "PosSquare")
        for el in xy:
            costAll, trajTimeAll = np.zeros(repeat), np.zeros(repeat)
            for i in range(repeat):
                costAll[i], trajTimeAll[i] = self.runOneTrajectory(el[0], el[1])
            meanCost = np.mean(costAll)
            meanTrajTime = np.mean(trajTimeAll)
            self.costStore.append([el[0], el[1], meanCost])
            self.trajTimeStore.append([el[0], el[1], meanTrajTime])
            globCost.append(meanCost)
        return np.mean(globCost)

    def runTrajectoriesForResultsGeneration(self, repeat):
        globMeanCost = 0.
        globTimeCost = 0.
for xy in self.posIni: costAll, trajTimeAll, costU12 = np.zeros(repeat), np.zeros(repeat), np.zeros(repeat) for i in range(repeat): costAll[i], trajTimeAll[i] = self.runOneTrajectory(xy[0], xy[1]) costU12[i] = self.tm.costU12 meanCost = np.mean(costAll) meanTrajTime = np.mean(trajTimeAll) meanCostU12=np.mean(costU12) self.costStore.append([xy[0], xy[1], meanCost]) self.trajTimeStore.append([xy[0], xy[1], meanTrajTime]) self.cost12Store.append([xy[0], xy[1], meanCostU12]) globMeanCost+=meanCost globTimeCost+=meanTrajTime #self.printLastCoordInfo() return globMeanCost/len(self.posIni), globTimeCost/len(self.posIni) def runTrajectoriesForResultsGenerationNController(self, repeat, thetaName): globMeanCost=0. globTimeCost=0. for enum,xy in enumerate(self.posIni): try : costAll, trajTimeAll, costU12 = np.zeros(repeat), np.zeros(repeat), np.zeros(repeat) controllerFileName = thetaName.replace("*",str(enum)) self.tm.controller.load(controllerFileName) for i in range(repeat): costAll[i], trajTimeAll[i] = self.runOneTrajectory(xy[0], xy[1]) costU12[i] = self.tm.costU12 meanCost = np.mean(costAll) meanTrajTime = np.mean(trajTimeAll) meanCostU12=np.mean(costU12) self.costStore.append([xy[0], xy[1], meanCost]) self.trajTimeStore.append([xy[0], xy[1], meanTrajTime]) self.cost12Store.append([xy[0], xy[1], meanCostU12]) globMeanCost+=meanCost globTimeCost+=meanTrajTime except IOError: pass #self.printLastCoordInfo() return globMeanCost/len(self.posIni), globTimeCost/len(self.posIni) def runTrajectoriesForResultsGenerationOnePoint(self, repeat, point): xy = self.posIni[point] costAll, trajTimeAll = np.zeros(repeat), np.zeros(repeat) for i in range(repeat): costAll[i], trajTimeAll[i] = self.runOneTrajectory(xy[0], xy[1]) meanCost = np.mean(costAll) meanTrajTime = np.mean(trajTimeAll) return meanCost, meanTrajTime def runTrajectoriesForResultsGenerationOpti(self, repeat): globMeanCost=0. globTimeCost=0. #pool=Pool() costAll, trajTimeAll = np.zeros(repeat), np.zeros(repeat) for xy in self.posIni: for i in range(repeat): costAll[i], trajTimeAll[i] = self.runOneTrajectoryOpti(xy[0], xy[1]) meanCost = np.mean(costAll) meanTrajTime = np.mean(trajTimeAll) self.costStore.append([xy[0], xy[1], meanCost]) self.trajTimeStore.append([xy[0], xy[1], meanTrajTime]) globMeanCost+=meanCost globTimeCost+=meanTrajTime #self.printLastCoordInfo() size=len(self.posIni) return globMeanCost/size, globTimeCost/size def runTrajectoriesForResultsGenerationEstim(self, repeat): globMeanCost=0. globTimeCost=0. #pool=Pool() costAll, trajTimeAll = np.zeros(repeat), np.zeros(repeat) for xy in self.posIni: for i in range(repeat): costAll[i], trajTimeAll[i] = self.runOneTrajectoryEstim(xy[0], xy[1]) meanCost = np.mean(costAll) meanTrajTime = np.mean(trajTimeAll) self.costStore.append([xy[0], xy[1], meanCost]) self.trajTimeStore.append([xy[0], xy[1], meanTrajTime]) globMeanCost+=meanCost globTimeCost+=meanTrajTime #self.printLastCoordInfo() size=len(self.posIni) return globMeanCost/size, globTimeCost/size def runMultiProcessTrajectories(self, repeat): pool=Pool(processes=len(self.posIni)) result = pool.map(partial(self.runNtrajectory, repeat=repeat) , [(x, y) for x, y in self.posIni]) pool.close() pool.join() meanCost, meanTraj=0., 0. 
        for Cost, traj in result:
            meanCost += Cost
            meanTraj += traj
        size = len(result)
        return meanCost/size, meanTraj/size

    def runNtrajectory(self, (x, y), repeat):
        costAll, trajTimeAll = np.zeros(repeat), np.zeros(repeat)
        for i in range(repeat):
            costAll[i], trajTimeAll[i] = self.runOneTrajectoryOpti(x, y)
        meanCost = np.mean(costAll)
        meanTrajTime = np.mean(trajTimeAll)
        self.costStore.append([x, y, meanCost])
        self.trajTimeStore.append([x, y, meanTrajTime])
        return meanCost, meanTrajTime

    def mappableTrajectoryFunction(self, x, y, useless):
        return self.runOneTrajectory(x, y)

    def runNtrajectoryMulti(self, (x, y), repeat):
        pool = Pool(processes=4)
        result = pool.map(partial(self.mappableTrajectoryFunction, x, y), range(repeat))
        pool.close()
        pool.join()
        meanCost, meanTraj = 0., 0.
        for Cost, traj in result:
            meanCost += Cost
            meanTraj += traj
        size = len(result)
        return meanCost/size, meanTraj/size

    def runOneTrajectoryOpti(self, x, y):
        #self.tm.saveTraj = True
        cost, trajTime, lastX = self.tm.runTrajectoryOpti(x, y)
        #print "Exp local x y cost : ", x, y, cost
        if lastX != -1000:
            self.lastCoord.append(lastX)
        return cost, trajTime

    def runOneTrajectoryEstim(self, x, y):
        #self.tm.saveTraj = True
        cost, trajTime, lastX = self.tm.runTrajectoryEstim(x, y)
        #print "Exp local x y cost : ", x, y, cost
        if lastX != -1000:
            self.lastCoord.append(lastX)
        return cost, trajTime

    def runTrajectories(self, theta, fonction):
        '''
        Generates all the trajectories of the experimental setup and returns the mean cost. This function is used by CMAES to optimize the controller.

        Input:      -theta: vector of parameters, one dimension normalized numpy array

        Output:     -meanAll: the mean of the cost of all trajectories generated, float
        '''
        #c = Chrono()
        self.initTheta(theta)
        #print "theta before call:", theta
        #compute all the trajectories x times each, x = numberOfRepeat
        meanCost, meanTime = fonction(self.numberOfRepeat)
        #cma.plot()
        #opt = cma.CMAOptions()
        #print "CMAES options :", opt
        #c.stop()

        #print("Indiv #: ", self.call, "\n Cost: ", meanCost)

        if (self.call == 0):
            self.localBestCost = meanCost
            self.localWorstCost = meanCost
            self.localBestTime = meanTime
            self.localWorstTime = meanTime
            self.periodMeanCost = 0.0
            self.periodMeanTime = 0.0
        else:
            if meanCost > self.localBestCost:
                self.localBestCost = meanCost
            elif meanCost < self.localWorstCost:
                self.localWorstCost = meanCost
            if meanTime > self.localBestTime:
                self.localBestTime = meanTime
            elif meanTime < self.localWorstTime:
                self.localWorstTime = meanTime

        if meanCost > self.bestCost:
            self.bestCost = meanCost
            if meanCost > 0:
                extension = ".save" + str(meanCost)
                filename = findDataFilename(self.foldername+"Theta/", "theta", extension)
                np.savetxt(filename, self.theta)
                filename2 = self.foldername + "Best.theta"
                np.savetxt(filename2, self.theta)

        self.periodMeanCost += meanCost
        self.periodMeanTime += meanTime

        self.call += 1
        self.call = self.call % self.period

        if (self.call == 0):
            self.periodMeanCost = self.periodMeanCost/self.period
            self.periodMeanTime = self.periodMeanTime/self.period
            self.CMAESCostStore.append((self.localWorstCost, self.periodMeanCost, self.localBestCost))
            self.CMAESTimeStore.append((self.localWorstTime, self.periodMeanTime, self.localBestTime))
            costfoldername = self.foldername+"Cost/"
            checkIfFolderExists(costfoldername)
            costLog = open(costfoldername+"cmaesCost.log", "a")
            timeLog = open(costfoldername+"cmaesTime.log", "a")
            costLog.write(str(self.localWorstCost)+" "+str(self.periodMeanCost)+" "+str(self.localBestCost)+"\n")
            timeLog.write(str(self.localWorstTime)+" "+str(self.periodMeanTime)+" "+str(self.localBestTime)+"\n")
            costLog.close()
            timeLog.close()
            #np.savetxt(costfoldername+"cmaesCost.log",self.CMAESCostStore)    #Note: inefficient, should rather add to the file
            #np.savetxt(costfoldername+"cmaesTime.log",self.CMAESTimeStore)    #Note: inefficient, should rather add to the file

        return 10.0*(self.rs.rhoCF-meanCost)/self.rs.rhoCF

    def runTrajectoriesCMAES(self, theta):
        '''
        Generates all the trajectories of the experimental setup and returns the mean cost. This function is used by CMAES to optimize the controller.

        Input:      -theta: vector of parameters, one dimension normalized numpy array

        Output:     -meanAll: the mean of the cost of all trajectories generated, float
        '''
        return self.runTrajectories(theta, self.runMultiProcessTrajectories)

    def runTrajectoriesCMAESOnePoint(self, x, y, theta):
        '''
        Generates all the trajectories of the experimental setup for one starting point and returns the mean cost. This function is used by CMAES to optimize the controller.

        Input:      -x, y: coordinates of the starting point
                    -theta: vector of parameters, one dimension normalized numpy array

        Output:     -meanAll: the mean of the cost of all trajectories generated, float
        '''
        return self.runTrajectories(theta, partial(self.runNtrajectory, (x, y)))

    def runTrajectoriesCMAESOnePointMulti(self, x, y, theta):
        return self.runTrajectories(theta, partial(self.runNtrajectoryMulti, (x, y)))
osigaud/ArmModelPython
Control/Experiments/Experiments.py
Python
gpl-2.0
14,422
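The fitness value handed back to CMAES at the end of runTrajectories() is a rescaling of the mean cost against the rs.rhoCF reference; a standalone sketch of just that mapping, with a purely hypothetical rhoCF value:

# Sketch of the fitness rescaling at the end of runTrajectories().
# rhoCF is whatever reference cost the setup object provides; the
# value used here is hypothetical.
rhoCF = 5000.0  # hypothetical reference cost

def cmaes_fitness(meanCost, rhoCF=rhoCF):
    # 0 when meanCost equals rhoCF, 10 when meanCost reaches 0
    return 10.0 * (rhoCF - meanCost) / rhoCF

print(cmaes_fitness(5000.0))  # 0.0
print(cmaes_fitness(2500.0))  # 5.0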
# # Copyright (c) 2001 - 2014 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/MSCommon/common.py 2014/09/27 12:51:43 garyo" __doc__ = """ Common helper functions for working with the Microsoft tool chain. """ import copy import os import subprocess import re import SCons.Util logfile = os.environ.get('SCONS_MSCOMMON_DEBUG') if logfile == '-': def debug(x): print x elif logfile: try: import logging except ImportError: debug = lambda x: open(logfile, 'a').write(x + '\n') else: logging.basicConfig(filename=logfile, level=logging.DEBUG) debug = logging.debug else: debug = lambda x: None _is_win64 = None def is_win64(): """Return true if running on windows 64 bits. Works whether python itself runs in 64 bits or 32 bits.""" # Unfortunately, python does not provide a useful way to determine # if the underlying Windows OS is 32-bit or 64-bit. Worse, whether # the Python itself is 32-bit or 64-bit affects what it returns, # so nothing in sys.* or os.* help. # Apparently the best solution is to use env vars that Windows # sets. If PROCESSOR_ARCHITECTURE is not x86, then the python # process is running in 64 bit mode (on a 64-bit OS, 64-bit # hardware, obviously). # If this python is 32-bit but the OS is 64, Windows will set # ProgramW6432 and PROCESSOR_ARCHITEW6432 to non-null. # (Checking for HKLM\Software\Wow6432Node in the registry doesn't # work, because some 32-bit installers create it.) global _is_win64 if _is_win64 is None: # I structured these tests to make it easy to add new ones or # add exceptions in the future, because this is a bit fragile. _is_win64 = False if os.environ.get('PROCESSOR_ARCHITECTURE','x86') != 'x86': _is_win64 = True if os.environ.get('PROCESSOR_ARCHITEW6432'): _is_win64 = True if os.environ.get('ProgramW6432'): _is_win64 = True return _is_win64 def read_reg(value): return SCons.Util.RegGetValue(SCons.Util.HKEY_LOCAL_MACHINE, value)[0] def has_reg(value): """Return True if the given key exists in HKEY_LOCAL_MACHINE, False otherwise.""" try: SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, value) ret = True except WindowsError: ret = False return ret # Functions for fetching environment variable settings from batch files. def normalize_env(env, keys, force=False): """Given a dictionary representing a shell environment, add the variables from os.environ needed for the processing of .bat files; the keys are controlled by the keys argument. It also makes sure the environment values are correctly encoded. 
    If force=True, then all of the key values that exist are copied
    into the returned dictionary.  If force=false, values are only
    copied if the key does not already exist in the copied dictionary.

    Note: the environment is copied."""
    normenv = {}
    if env:
        for k in env.keys():
            normenv[k] = copy.deepcopy(env[k]).encode('mbcs')

        for k in keys:
            if k in os.environ and (force or not k in normenv):
                normenv[k] = os.environ[k].encode('mbcs')

    # This shouldn't be necessary, since the default environment should include system32,
    # but keep this here to be safe, since it's needed to find reg.exe which the MSVC
    # bat scripts use.
    sys32_dir = os.path.join(os.environ.get("SystemRoot", os.environ.get("windir", r"C:\Windows\system32")), "System32")

    if sys32_dir not in normenv['PATH']:
        normenv['PATH'] = normenv['PATH'] + os.pathsep + sys32_dir

    debug("PATH: %s"%normenv['PATH'])

    return normenv

def get_output(vcbat, args = None, env = None):
    """Parse the output of the given bat file, with the given args."""

    if env is None:
        # Create a blank environment, for use in launching the tools
        env = SCons.Environment.Environment(tools=[])

    # TODO:  This is a hard-coded list of the variables that (may) need
    # to be imported from os.environ[] for v[sc]*vars*.bat file
    # execution to work.  This list should really be either directly
    # controlled by vc.py, or else derived from the common_tools_var
    # settings in vs.py.
    vars = [
        'COMSPEC',
        # VS100 and VS110: Still set, but modern MSVC setup scripts will
        # discard these if registry has values.  However Intel compiler setup
        # script still requires these as of 2013/2014.
        'VS110COMNTOOLS',
        'VS100COMNTOOLS',
        'VS90COMNTOOLS',
        'VS80COMNTOOLS',
        'VS71COMNTOOLS',
        'VS70COMNTOOLS',
        'VS60COMNTOOLS',
    ]
    env['ENV'] = normalize_env(env['ENV'], vars, force=False)

    if args:
        debug("Calling '%s %s'" % (vcbat, args))
        popen = SCons.Action._subproc(env,
                                      '"%s" %s & set' % (vcbat, args),
                                      stdin = 'devnull',
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)
    else:
        debug("Calling '%s'" % vcbat)
        popen = SCons.Action._subproc(env,
                                      '"%s" & set' % vcbat,
                                      stdin = 'devnull',
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)

    # Use the .stdout and .stderr attributes directly because the
    # .communicate() method uses the threading module on Windows
    # and won't work under Pythons not built with threading.
    stdout = popen.stdout.read()
    stderr = popen.stderr.read()

    # Extra debug logic, uncomment if necessary
    # debug('get_output():stdout:%s'%stdout)
    # debug('get_output():stderr:%s'%stderr)

    if stderr:
        # TODO: find something better to do with stderr;
        # this at least prevents errors from getting swallowed.
        import sys
        sys.stderr.write(stderr)
    if popen.wait() != 0:
        raise IOError(stderr.decode("mbcs"))

    output = stdout.decode("mbcs")
    return output

def parse_output(output, keep = ("INCLUDE", "LIB", "LIBPATH", "PATH")):
    # dkeep is a dict associating key: path_list, where key is one item from
    # keep, and path_list the associated list of paths
    dkeep = dict([(i, []) for i in keep])

    # rdk will keep the regex to match the .bat file output line starts
    rdk = {}
    for i in keep:
        rdk[i] = re.compile('%s=(.*)' % i, re.I)

    def add_env(rmatch, key, dkeep=dkeep):
        plist = rmatch.group(1).split(os.pathsep)
        for p in plist:
            # Do not add empty paths (when a var ends with ;)
            if p:
                p = p.encode('mbcs')
                # XXX: For some reason, VC98 .bat file adds "" around the PATH
                # values, and it screws up the environment later, so we strip
                # it.
                p = p.strip('"')
                dkeep[key].append(p)

    for line in output.splitlines():
        for k,v in rdk.items():
            m = v.match(line)
            if m:
                add_env(m, k)

    return dkeep

# TODO(sgk): unused
def output_to_dict(output):
    """Given an output string, parse it to find env variables.

    Return a dict where keys are variable names, and values their content."""
    envlinem = re.compile(r'^([a-zA-Z0-9]+)=([\S\s]*)$')
    parsedenv = {}
    for line in output.splitlines():
        m = envlinem.match(line)
        if m:
            parsedenv[m.group(1)] = m.group(2)
    return parsedenv

# TODO(sgk): unused
def get_new(l1, l2):
    """Given two lists l1 and l2, return the items in l2 which are not in l1.
    Order is maintained."""

    # We don't try to be smart: lists are small, and this is not the bottleneck
    # in any case
    new = []
    for i in l2:
        if i not in l1:
            new.append(i)
    return new

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
stonekyx/binary
vendor/scons-local-2.3.4/SCons/Tool/MSCommon/common.py
Python
gpl-3.0
9,169
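A hedged demo of the two pure helpers at the bottom of common.py on a synthetic "set"-style dump; no real vcvars batch file or Windows registry is involved, and the import assumes the scons-local layout shown above.

from SCons.Tool.MSCommon.common import output_to_dict, get_new

# Parse a synthetic environment dump line by line.
fake_output = "PATH=C:\\tools;C:\\Windows\\System32\nINCLUDE=C:\\inc"
print(output_to_dict(fake_output))
# -> {'PATH': 'C:\\tools;C:\\Windows\\System32', 'INCLUDE': 'C:\\inc'}

# get_new() keeps the items of the second list that the first one lacks:
print(get_new(['a', 'b'], ['b', 'c', 'a', 'd']))  # ['c', 'd']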
"""The Airly component.""" import asyncio from datetime import timedelta import logging from math import ceil from aiohttp.client_exceptions import ClientConnectorError from airly import Airly from airly.exceptions import AirlyError import async_timeout from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE from homeassistant.core import Config, HomeAssistant from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed from .const import ( ATTR_API_ADVICE, ATTR_API_CAQI, ATTR_API_CAQI_DESCRIPTION, ATTR_API_CAQI_LEVEL, DOMAIN, MAX_REQUESTS_PER_DAY, NO_AIRLY_SENSORS, ) PLATFORMS = ["air_quality", "sensor"] _LOGGER = logging.getLogger(__name__) def set_update_interval(hass, instances): """Set update_interval to another configured Airly instances.""" # We check how many Airly configured instances are and calculate interval to not # exceed allowed numbers of requests. interval = timedelta(minutes=ceil(24 * 60 / MAX_REQUESTS_PER_DAY) * instances) if hass.data.get(DOMAIN): for instance in hass.data[DOMAIN].values(): instance.update_interval = interval return interval async def async_setup(hass: HomeAssistant, config: Config) -> bool: """Set up configured Airly.""" return True async def async_setup_entry(hass, config_entry): """Set up Airly as config entry.""" api_key = config_entry.data[CONF_API_KEY] latitude = config_entry.data[CONF_LATITUDE] longitude = config_entry.data[CONF_LONGITUDE] # For backwards compat, set unique ID if config_entry.unique_id is None: hass.config_entries.async_update_entry( config_entry, unique_id=f"{latitude}-{longitude}" ) websession = async_get_clientsession(hass) # Change update_interval for other Airly instances update_interval = set_update_interval( hass, len(hass.config_entries.async_entries(DOMAIN)) ) coordinator = AirlyDataUpdateCoordinator( hass, websession, api_key, latitude, longitude, update_interval ) await coordinator.async_refresh() if not coordinator.last_update_success: raise ConfigEntryNotReady hass.data.setdefault(DOMAIN, {}) hass.data[DOMAIN][config_entry.entry_id] = coordinator for component in PLATFORMS: hass.async_create_task( hass.config_entries.async_forward_entry_setup(config_entry, component) ) return True async def async_unload_entry(hass, config_entry): """Unload a config entry.""" unload_ok = all( await asyncio.gather( *[ hass.config_entries.async_forward_entry_unload(config_entry, component) for component in PLATFORMS ] ) ) if unload_ok: hass.data[DOMAIN].pop(config_entry.entry_id) # Change update_interval for other Airly instances set_update_interval(hass, len(hass.data[DOMAIN])) return unload_ok class AirlyDataUpdateCoordinator(DataUpdateCoordinator): """Define an object to hold Airly data.""" def __init__(self, hass, session, api_key, latitude, longitude, update_interval): """Initialize.""" self.latitude = latitude self.longitude = longitude self.airly = Airly(api_key, session) super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=update_interval) async def _async_update_data(self): """Update data via library.""" data = {} with async_timeout.timeout(20): measurements = self.airly.create_measurements_session_point( self.latitude, self.longitude ) try: await measurements.update() except (AirlyError, ClientConnectorError) as error: raise UpdateFailed(error) from error values = measurements.current["values"] index = measurements.current["indexes"][0] 
standards = measurements.current["standards"] if index["description"] == NO_AIRLY_SENSORS: raise UpdateFailed("Can't retrieve data: no Airly sensors in this area") for value in values: data[value["name"]] = value["value"] for standard in standards: data[f"{standard['pollutant']}_LIMIT"] = standard["limit"] data[f"{standard['pollutant']}_PERCENT"] = standard["percent"] data[ATTR_API_CAQI] = index["value"] data[ATTR_API_CAQI_LEVEL] = index["level"].lower().replace("_", " ") data[ATTR_API_CAQI_DESCRIPTION] = index["description"] data[ATTR_API_ADVICE] = index["advice"] return data
tchellomello/home-assistant
homeassistant/components/airly/__init__.py
Python
apache-2.0
4,810
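The interval arithmetic in set_update_interval() is easy to check in isolation; a sketch with a hypothetical quota (the real MAX_REQUESTS_PER_DAY lives in the integration's const.py):

# Sketch of the update-interval arithmetic in set_update_interval();
# the quota value below is hypothetical.
from datetime import timedelta
from math import ceil

MAX_REQUESTS_PER_DAY = 100  # hypothetical quota

def interval_for(instances):
    # spread the daily quota evenly across all configured instances
    return timedelta(minutes=ceil(24 * 60 / MAX_REQUESTS_PER_DAY) * instances)

print(interval_for(1))  # 0:15:00 with the quota above
print(interval_for(3))  # 0:45:00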
# Copyright 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit tests for the Hyper-V Mechanism Driver. """ from unittest import mock from networking_hyperv.neutron import constants from networking_hyperv.neutron.ml2 import mech_hyperv from networking_hyperv.tests import base class TestHypervMechanismDriver(base.BaseTestCase): def setUp(self): super(TestHypervMechanismDriver, self).setUp() self.mech_hyperv = mech_hyperv.HypervMechanismDriver() def test_get_allowed_network_types(self): agent = {'configurations': {'tunnel_types': []}} actual_net_types = self.mech_hyperv.get_allowed_network_types(agent) network_types = [constants.TYPE_LOCAL, constants.TYPE_FLAT, constants.TYPE_VLAN] self.assertEqual(network_types, actual_net_types) def test_get_allowed_network_types_nvgre(self): agent = {'configurations': {'tunnel_types': [constants.TYPE_NVGRE]}} actual_net_types = self.mech_hyperv.get_allowed_network_types(agent) network_types = [constants.TYPE_LOCAL, constants.TYPE_FLAT, constants.TYPE_VLAN, constants.TYPE_NVGRE] self.assertEqual(network_types, actual_net_types) def test_get_mappings(self): agent = {'configurations': { 'vswitch_mappings': [mock.sentinel.mapping]}} mappings = self.mech_hyperv.get_mappings(agent) self.assertEqual([mock.sentinel.mapping], mappings) def test_physnet_in_mappings(self): physnet = 'test_physnet' match_mapping = '.*' different_mapping = 'fake' pattern_matched = self.mech_hyperv.physnet_in_mappings( physnet, [match_mapping]) self.assertTrue(pattern_matched) pattern_matched = self.mech_hyperv.physnet_in_mappings( physnet, [different_mapping]) self.assertFalse(pattern_matched) pattern_matched = self.mech_hyperv.physnet_in_mappings( physnet, [different_mapping, match_mapping]) self.assertTrue(pattern_matched)
openstack/networking-hyperv
networking_hyperv/tests/unit/neutron/test_mech_hyperv.py
Python
apache-2.0
2,652
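The physnet_in_mappings() behaviour exercised by these tests boils down to re.match over a list of patterns; a standalone sketch of that semantics (the real driver method may differ in detail):

import re

def physnet_in_mappings(physnet, mappings):
    # True as soon as one mapping pattern matches the physnet name
    return any(re.match(pattern, physnet) for pattern in mappings)

print(physnet_in_mappings('test_physnet', ['.*']))          # True
print(physnet_in_mappings('test_physnet', ['fake']))        # False
print(physnet_in_mappings('test_physnet', ['fake', '.*']))  # True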
from pymt import * # callback for the buttons def test_button(btn, *largs): print 'button pressed', btn.label # create a grid layout with 2 rows layout = MTGridLayout(rows=2) for x in xrange(22): btn = MTToggleButton(label='label%d' % x) btn.connect('on_press', curry(test_button, btn)) layout.add_widget(btn) # create a list of 400x200 size, and disable scrolling on Y axis lst = MTList(size=(400, 200), do_y=False) lst.add_widget(layout) # center the list on the screen anchor = MTAnchorLayout() anchor.add_widget(lst) runTouchApp(anchor)
nuigroup/pymt-widgets
examples/framework/ui_widgets_list.py
Python
lgpl-3.0
562
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@

from google.appengine.ext import db, deferred

from rogerthat.dal.service import get_service_api_callback_records_query


def run(service_user, cursor=None):
    query = get_service_api_callback_records_query(service_user)
    query.with_cursor(cursor)
    records = query.fetch(100)
    # Flip every timestamp to its negative absolute value.
    put = []
    for rec in records:
        rec.timestamp = -abs(rec.timestamp)
        put.append(rec)
    db.put(put)
    if records:
        return deferred.defer(run, service_user, query.cursor(),
                              _transactional=db.is_in_transaction())
our-city-app/oca-backend
src/rogerthat/bizz/job/unschedule_service_api_callback_records.py
Python
apache-2.0
1,183
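The job above is an instance of App Engine's classic cursor-plus-deferred pagination pattern; a stripped-down sketch of that loop, where MyRecord and its field are hypothetical stand-ins for the real query and model:

# Minimal sketch of the cursor + deferred re-scheduling pattern used by
# run() above; MyRecord is a hypothetical model, not part of rogerthat.
from google.appengine.ext import db, deferred

class MyRecord(db.Model):  # hypothetical model
    timestamp = db.IntegerProperty()

def process_all(cursor=None):
    query = MyRecord.all()
    query.with_cursor(cursor)
    batch = query.fetch(100)  # work on at most 100 entities per task
    for rec in batch:
        rec.timestamp = -abs(rec.timestamp)
    db.put(batch)
    if batch:
        # re-enqueue this function starting where the batch left off
        deferred.defer(process_all, query.cursor())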
#!/usr/bin/env python # # Copyright 2015-2020 Blizzard Entertainment. Subject to the MIT license. # See the included LICENSE file for more information. # from heroprotocol.decoders import * import six # Decoding instructions for each protocol type. typeinfos = [ ('_int',[(0,7)]), #0 ('_int',[(0,4)]), #1 ('_int',[(0,5)]), #2 ('_int',[(0,6)]), #3 ('_int',[(0,14)]), #4 ('_int',[(0,22)]), #5 ('_int',[(0,32)]), #6 ('_choice',[(0,2),{0:('m_uint6',3),1:('m_uint14',4),2:('m_uint22',5),3:('m_uint32',6)}]), #7 ('_struct',[[('m_userId',2,-1)]]), #8 ('_blob',[(0,8)]), #9 ('_int',[(0,8)]), #10 ('_struct',[[('m_flags',10,0),('m_major',10,1),('m_minor',10,2),('m_revision',10,3),('m_build',6,4),('m_baseBuild',6,5)]]), #11 ('_int',[(0,3)]), #12 ('_bool',[]), #13 ('_array',[(16,0),10]), #14 ('_optional',[14]), #15 ('_blob',[(16,0)]), #16 ('_struct',[[('m_dataDeprecated',15,0),('m_data',16,1)]]), #17 ('_struct',[[('m_signature',9,0),('m_version',11,1),('m_type',12,2),('m_elapsedGameLoops',6,3),('m_useScaledTime',13,4),('m_ngdpRootKey',17,5),('m_dataBuildNum',6,6),('m_replayCompatibilityHash',17,7)]]), #18 ('_fourcc',[]), #19 ('_blob',[(0,7)]), #20 ('_int',[(0,64)]), #21 ('_struct',[[('m_region',10,0),('m_programId',19,1),('m_realm',6,2),('m_name',20,3),('m_id',21,4)]]), #22 ('_struct',[[('m_a',10,0),('m_r',10,1),('m_g',10,2),('m_b',10,3)]]), #23 ('_int',[(0,2)]), #24 ('_optional',[10]), #25 ('_struct',[[('m_name',9,0),('m_toon',22,1),('m_race',9,2),('m_color',23,3),('m_control',10,4),('m_teamId',1,5),('m_handicap',0,6),('m_observe',24,7),('m_result',24,8),('m_workingSetSlotId',25,9),('m_hero',9,10)]]), #26 ('_array',[(0,5),26]), #27 ('_optional',[27]), #28 ('_blob',[(0,10)]), #29 ('_blob',[(0,11)]), #30 ('_struct',[[('m_file',30,0)]]), #31 ('_optional',[13]), #32 ('_int',[(-9223372036854775808,64)]), #33 ('_blob',[(0,12)]), #34 ('_blob',[(40,0)]), #35 ('_array',[(0,6),35]), #36 ('_optional',[36]), #37 ('_array',[(0,6),30]), #38 ('_optional',[38]), #39 ('_struct',[[('m_playerList',28,0),('m_title',29,1),('m_difficulty',9,2),('m_thumbnail',31,3),('m_isBlizzardMap',13,4),('m_restartAsTransitionMap',32,16),('m_timeUTC',33,5),('m_timeLocalOffset',33,6),('m_description',34,7),('m_imageFilePath',30,8),('m_campaignIndex',10,15),('m_mapFileName',30,9),('m_cacheHandles',37,10),('m_miniSave',13,11),('m_gameSpeed',12,12),('m_defaultDifficulty',3,13),('m_modPaths',39,14)]]), #40 ('_optional',[9]), #41 ('_optional',[35]), #42 ('_optional',[6]), #43 ('_struct',[[('m_race',25,-1)]]), #44 ('_struct',[[('m_team',25,-1)]]), #45 ('_blob',[(0,9)]), #46 ('_struct',[[('m_name',9,-20),('m_clanTag',41,-19),('m_clanLogo',42,-18),('m_highestLeague',25,-17),('m_combinedRaceLevels',43,-16),('m_randomSeed',6,-15),('m_racePreference',44,-14),('m_teamPreference',45,-13),('m_testMap',13,-12),('m_testAuto',13,-11),('m_examine',13,-10),('m_customInterface',13,-9),('m_testType',6,-8),('m_observe',24,-7),('m_hero',46,-6),('m_skin',46,-5),('m_mount',46,-4),('m_banner',46,-3),('m_spray',46,-2),('m_toonHandle',20,-1)]]), #47 ('_array',[(0,5),47]), #48 ('_struct',[[('m_lockTeams',13,-16),('m_teamsTogether',13,-15),('m_advancedSharedControl',13,-14),('m_randomRaces',13,-13),('m_battleNet',13,-12),('m_amm',13,-11),('m_competitive',13,-10),('m_practice',13,-9),('m_cooperative',13,-8),('m_noVictoryOrDefeat',13,-7),('m_heroDuplicatesAllowed',13,-6),('m_fog',24,-5),('m_observers',24,-4),('m_userDifficulty',24,-3),('m_clientDebugFlags',21,-2),('m_ammId',43,-1)]]), #49 ('_int',[(1,4)]), #50 ('_int',[(1,8)]), #51 ('_bitarray',[(0,6)]), #52 
('_bitarray',[(0,8)]), #53 ('_bitarray',[(0,4)]), #54 ('_bitarray',[(0,2)]), #55 ('_bitarray',[(0,7)]), #56 ('_struct',[[('m_allowedColors',52,-6),('m_allowedRaces',53,-5),('m_allowedDifficulty',52,-4),('m_allowedControls',54,-3),('m_allowedObserveTypes',55,-2),('m_allowedAIBuilds',56,-1)]]), #57 ('_array',[(0,5),57]), #58 ('_struct',[[('m_randomValue',6,-26),('m_gameCacheName',29,-25),('m_gameOptions',49,-24),('m_gameSpeed',12,-23),('m_gameType',12,-22),('m_maxUsers',2,-21),('m_maxObservers',2,-20),('m_maxPlayers',2,-19),('m_maxTeams',50,-18),('m_maxColors',3,-17),('m_maxRaces',51,-16),('m_maxControls',1,-15),('m_mapSizeX',10,-14),('m_mapSizeY',10,-13),('m_mapFileSyncChecksum',6,-12),('m_mapFileName',30,-11),('m_mapAuthorName',9,-10),('m_modFileSyncChecksum',6,-9),('m_slotDescriptions',58,-8),('m_defaultDifficulty',3,-7),('m_defaultAIBuild',0,-6),('m_cacheHandles',36,-5),('m_hasExtensionMod',13,-4),('m_isBlizzardMap',13,-3),('m_isPremadeFFA',13,-2),('m_isCoopMode',13,-1)]]), #59 ('_optional',[1]), #60 ('_optional',[2]), #61 ('_struct',[[('m_color',61,-1)]]), #62 ('_array',[(0,17),6]), #63 ('_struct',[[('m_hero',19,-2),('m_tier',10,-1)]]), #64 ('_array',[(0,10),64]), #65 ('_struct',[[('m_control',10,-25),('m_userId',60,-24),('m_teamId',1,-23),('m_colorPref',62,-22),('m_racePref',44,-21),('m_difficulty',3,-20),('m_aiBuild',0,-19),('m_handicap',0,-18),('m_observe',24,-17),('m_logoIndex',6,-16),('m_hero',46,-15),('m_skin',46,-14),('m_mount',46,-13),('m_workingSetSlotId',25,-12),('m_rewards',63,-11),('m_toonHandle',20,-10),('m_tandemLeaderUserId',60,-9),('m_hasSilencePenalty',13,-8),('m_hasVoiceSilencePenalty',13,-7),('m_isBlizzardStaff',13,-6),('m_banner',46,-5),('m_spray',46,-4),('m_announcerPack',46,-3),('m_voiceLine',46,-2),('m_heroMasteryTiers',65,-1)]]), #66 ('_array',[(0,5),66]), #67 ('_struct',[[('m_phase',12,-11),('m_maxUsers',2,-10),('m_maxObservers',2,-9),('m_slots',67,-8),('m_randomSeed',6,-7),('m_hostUserId',60,-6),('m_isSinglePlayer',13,-5),('m_pickedMapTag',10,-4),('m_gameDuration',6,-3),('m_defaultDifficulty',3,-2),('m_defaultAIBuild',0,-1)]]), #68 ('_struct',[[('m_userInitialData',48,-3),('m_gameDescription',59,-2),('m_lobbyState',68,-1)]]), #69 ('_struct',[[('m_syncLobbyState',69,-1)]]), #70 ('_struct',[[('m_name',20,-1)]]), #71 ('_blob',[(0,6)]), #72 ('_struct',[[('m_name',72,-1)]]), #73 ('_struct',[[('m_name',72,-3),('m_type',6,-2),('m_data',20,-1)]]), #74 ('_struct',[[('m_type',6,-3),('m_name',72,-2),('m_data',34,-1)]]), #75 ('_array',[(0,5),10]), #76 ('_struct',[[('m_signature',76,-2),('m_toonHandle',20,-1)]]), #77 ('_struct',[[('m_gameFullyDownloaded',13,-14),('m_developmentCheatsEnabled',13,-13),('m_testCheatsEnabled',13,-12),('m_multiplayerCheatsEnabled',13,-11),('m_syncChecksummingEnabled',13,-10),('m_isMapToMapTransition',13,-9),('m_debugPauseEnabled',13,-8),('m_useGalaxyAsserts',13,-7),('m_platformMac',13,-6),('m_cameraFollow',13,-5),('m_baseBuildNum',6,-4),('m_buildNum',6,-3),('m_versionFlags',6,-2),('m_hotkeyProfile',46,-1)]]), #78 ('_struct',[[]]), #79 ('_int',[(0,16)]), #80 ('_struct',[[('x',80,-2),('y',80,-1)]]), #81 ('_struct',[[('m_which',12,-2),('m_target',81,-1)]]), #82 ('_struct',[[('m_fileName',30,-5),('m_automatic',13,-4),('m_overwrite',13,-3),('m_name',9,-2),('m_description',29,-1)]]), #83 ('_int',[(1,32)]), #84 ('_struct',[[('m_sequence',84,-1)]]), #85 ('_null',[]), #86 ('_int',[(0,20)]), #87 ('_int',[(-2147483648,32)]), #88 ('_struct',[[('x',87,-3),('y',87,-2),('z',88,-1)]]), #89 
('_struct',[[('m_targetUnitFlags',80,-7),('m_timer',10,-6),('m_tag',6,-5),('m_snapshotUnitLink',80,-4),('m_snapshotControlPlayerId',60,-3),('m_snapshotUpkeepPlayerId',60,-2),('m_snapshotPoint',89,-1)]]), #90 ('_choice',[(0,2),{0:('None',86),1:('TargetPoint',89),2:('TargetUnit',90)}]), #91 ('_struct',[[('m_target',91,-4),('m_time',88,-3),('m_verb',29,-2),('m_arguments',29,-1)]]), #92 ('_struct',[[('m_data',92,-1)]]), #93 ('_int',[(0,26)]), #94 ('_struct',[[('m_abilLink',80,-3),('m_abilCmdIndex',2,-2),('m_abilCmdData',25,-1)]]), #95 ('_optional',[95]), #96 ('_choice',[(0,2),{0:('None',86),1:('TargetPoint',89),2:('TargetUnit',90),3:('Data',6)}]), #97 ('_optional',[89]), #98 ('_struct',[[('m_cmdFlags',94,-7),('m_abil',96,-6),('m_data',97,-5),('m_vector',98,-4),('m_sequence',84,-3),('m_otherUnit',43,-2),('m_unitGroup',43,-1)]]), #99 ('_array',[(0,6),2]), #100 ('_choice',[(0,2),{0:('None',86),1:('Mask',52),2:('OneIndices',100),3:('ZeroIndices',100)}]), #101 ('_struct',[[('m_unitLink',80,-4),('m_subgroupPriority',10,-3),('m_intraSubgroupPriority',10,-2),('m_count',3,-1)]]), #102 ('_array',[(0,6),102]), #103 ('_array',[(0,6),6]), #104 ('_struct',[[('m_subgroupIndex',2,-4),('m_removeMask',101,-3),('m_addSubgroups',103,-2),('m_addUnitTags',104,-1)]]), #105 ('_struct',[[('m_controlGroupId',1,-2),('m_delta',105,-1)]]), #106 ('_struct',[[('m_controlGroupIndex',1,-3),('m_controlGroupUpdate',12,-2),('m_mask',101,-1)]]), #107 ('_struct',[[('m_count',3,-6),('m_subgroupCount',3,-5),('m_activeSubgroupIndex',2,-4),('m_unitTagsChecksum',6,-3),('m_subgroupIndicesChecksum',6,-2),('m_subgroupsChecksum',6,-1)]]), #108 ('_struct',[[('m_controlGroupId',1,-2),('m_selectionSyncData',108,-1)]]), #109 ('_struct',[[('m_chatMessage',29,-1)]]), #110 ('_struct',[[('m_unitTag',6,-3),('m_buttonSlotA',10,-2),('m_buttonSlotB',10,-1)]]), #111 ('_struct',[[('m_speed',12,-1)]]), #112 ('_int',[(-128,8)]), #113 ('_struct',[[('m_delta',113,-1)]]), #114 ('_struct',[[('x',88,-2),('y',88,-1)]]), #115 ('_struct',[[('m_point',115,-4),('m_unit',6,-3),('m_pingedMinimap',13,-2),('m_option',88,-1)]]), #116 ('_struct',[[('m_verb',29,-2),('m_arguments',29,-1)]]), #117 ('_struct',[[('m_alliance',6,-2),('m_control',6,-1)]]), #118 ('_struct',[[('m_unitTag',6,-1)]]), #119 ('_struct',[[('m_unitTag',6,-2),('m_flags',10,-1)]]), #120 ('_struct',[[('m_conversationId',88,-2),('m_replyId',88,-1)]]), #121 ('_optional',[20]), #122 ('_struct',[[('m_gameUserId',1,-6),('m_observe',24,-5),('m_name',9,-4),('m_toonHandle',122,-3),('m_clanTag',41,-2),('m_clanLogo',42,-1)]]), #123 ('_array',[(0,5),123]), #124 ('_int',[(0,1)]), #125 ('_struct',[[('m_userInfos',124,-2),('m_method',125,-1)]]), #126 ('_struct',[[('m_button',80,-2),('m_metaKeyFlags',80,-1)]]), #127 ('_choice',[(0,3),{0:('None',86),1:('Checked',13),2:('ValueChanged',6),3:('SelectionChanged',88),4:('TextChanged',30),5:('MouseEvent',127)}]), #128 ('_struct',[[('m_controlId',88,-3),('m_eventType',88,-2),('m_eventData',128,-1)]]), #129 ('_struct',[[('m_soundHash',6,-2),('m_length',6,-1)]]), #130 ('_array',[(0,7),6]), #131 ('_struct',[[('m_soundHash',131,-2),('m_length',131,-1)]]), #132 ('_struct',[[('m_syncInfo',132,-1)]]), #133 ('_struct',[[('m_queryId',80,-3),('m_lengthMs',6,-2),('m_finishGameLoop',6,-1)]]), #134 ('_struct',[[('m_queryId',80,-2),('m_lengthMs',6,-1)]]), #135 ('_struct',[[('m_animWaitQueryId',80,-1)]]), #136 ('_struct',[[('m_sound',6,-1)]]), #137 ('_struct',[[('m_transmissionId',88,-2),('m_thread',6,-1)]]), #138 ('_struct',[[('m_transmissionId',88,-1)]]), #139 ('_optional',[81]), #140 
('_optional',[80]), #141 ('_optional',[113]), #142 ('_struct',[[('m_target',140,-6),('m_distance',141,-5),('m_pitch',141,-4),('m_yaw',141,-3),('m_reason',142,-2),('m_follow',13,-1)]]), #143 ('_struct',[[('m_skipType',125,-1)]]), #144 ('_int',[(0,11)]), #145 ('_struct',[[('x',145,-2),('y',145,-1)]]), #146 ('_struct',[[('m_button',6,-5),('m_down',13,-4),('m_posUI',146,-3),('m_posWorld',89,-2),('m_flags',113,-1)]]), #147 ('_struct',[[('m_posUI',146,-3),('m_posWorld',89,-2),('m_flags',113,-1)]]), #148 ('_struct',[[('m_achievementLink',80,-1)]]), #149 ('_struct',[[('m_hotkey',6,-2),('m_down',13,-1)]]), #150 ('_struct',[[('m_abilLink',80,-3),('m_abilCmdIndex',2,-2),('m_state',113,-1)]]), #151 ('_struct',[[('m_soundtrack',6,-1)]]), #152 ('_struct',[[('m_key',113,-2),('m_flags',113,-1)]]), #153 ('_struct',[[('m_error',88,-2),('m_abil',96,-1)]]), #154 ('_int',[(0,19)]), #155 ('_struct',[[('m_decrementMs',155,-1)]]), #156 ('_struct',[[('m_portraitId',88,-1)]]), #157 ('_struct',[[('m_functionName',20,-1)]]), #158 ('_struct',[[('m_result',88,-1)]]), #159 ('_struct',[[('m_gameMenuItemIndex',88,-1)]]), #160 ('_int',[(-32768,16)]), #161 ('_struct',[[('m_wheelSpin',161,-2),('m_flags',113,-1)]]), #162 ('_struct',[[('m_button',80,-1)]]), #163 ('_struct',[[('m_cutsceneId',88,-2),('m_bookmarkName',20,-1)]]), #164 ('_struct',[[('m_cutsceneId',88,-1)]]), #165 ('_struct',[[('m_cutsceneId',88,-3),('m_conversationLine',20,-2),('m_altConversationLine',20,-1)]]), #166 ('_struct',[[('m_cutsceneId',88,-2),('m_conversationLine',20,-1)]]), #167 ('_struct',[[('m_leaveReason',2,-1)]]), #168 ('_struct',[[('m_observe',24,-7),('m_name',9,-6),('m_toonHandle',122,-5),('m_clanTag',41,-4),('m_clanLogo',42,-3),('m_hijack',13,-2),('m_hijackCloneGameUserId',60,-1)]]), #169 ('_optional',[84]), #170 ('_struct',[[('m_state',24,-2),('m_sequence',170,-1)]]), #171 ('_struct',[[('m_sequence',170,-2),('m_target',89,-1)]]), #172 ('_struct',[[('m_sequence',170,-2),('m_target',90,-1)]]), #173 ('_struct',[[('m_catalog',10,-4),('m_entry',80,-3),('m_field',9,-2),('m_value',9,-1)]]), #174 ('_struct',[[('m_index',6,-1)]]), #175 ('_struct',[[('m_shown',13,-1)]]), #176 ('_struct',[[('m_recipient',12,-2),('m_string',30,-1)]]), #177 ('_struct',[[('m_recipient',12,-2),('m_point',115,-1)]]), #178 ('_struct',[[('m_progress',88,-1)]]), #179 ('_struct',[[('m_status',24,-1)]]), #180 ('_struct',[[('m_abilLink',80,-3),('m_abilCmdIndex',2,-2),('m_buttonLink',80,-1)]]), #181 ('_struct',[[('m_behaviorLink',80,-2),('m_buttonLink',80,-1)]]), #182 ('_choice',[(0,2),{0:('None',86),1:('Ability',181),2:('Behavior',182),3:('Vitals',161)}]), #183 ('_struct',[[('m_announcement',183,-4),('m_announceLink',80,-3),('m_otherUnitTag',6,-2),('m_unitTag',6,-1)]]), #184 ('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',29,2),('m_controlPlayerId',1,3),('m_upkeepPlayerId',1,4),('m_x',10,5),('m_y',10,6)]]), #185 ('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_x',10,2),('m_y',10,3)]]), #186 ('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_killerPlayerId',60,2),('m_x',10,3),('m_y',10,4),('m_killerUnitTagIndex',43,5),('m_killerUnitTagRecycle',43,6)]]), #187 ('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_controlPlayerId',1,2),('m_upkeepPlayerId',1,3)]]), #188 ('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',29,2)]]), #189 ('_struct',[[('m_playerId',1,0),('m_upgradeTypeName',29,1),('m_count',88,2)]]), #190 ('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1)]]), #191 
('_array',[(0,10),88]), #192 ('_struct',[[('m_firstUnitIndex',6,0),('m_items',192,1)]]), #193 ('_struct',[[('m_playerId',1,0),('m_type',6,1),('m_userId',43,2),('m_slotId',43,3)]]), #194 ('_struct',[[('m_key',29,0)]]), #195 ('_struct',[[('__parent',195,0),('m_value',29,1)]]), #196 ('_array',[(0,6),196]), #197 ('_optional',[197]), #198 ('_struct',[[('__parent',195,0),('m_value',88,1)]]), #199 ('_array',[(0,6),199]), #200 ('_optional',[200]), #201 ('_struct',[[('m_eventName',29,0),('m_stringData',198,1),('m_intData',201,2),('m_fixedData',201,3)]]), #202 ('_struct',[[('m_value',6,0),('m_time',6,1)]]), #203 ('_array',[(0,6),203]), #204 ('_array',[(0,5),204]), #205 ('_struct',[[('m_name',29,0),('m_values',205,1)]]), #206 ('_array',[(0,21),206]), #207 ('_struct',[[('m_instanceList',207,0)]]), #208 ('_struct',[[('m_hero',29,0),('m_controllingTeam',6,1)]]), #209 ('_struct',[[('m_hero',29,0),('m_controllingPlayer',6,1)]]), #210 ('_struct',[[('m_hero',29,0),('m_newControllingPlayer',6,1)]]), #211 ] # Map from protocol NNet.Game.*Event eventid to (typeid, name) game_event_types = { 5: (79, 'NNet.Game.SUserFinishedLoadingSyncEvent'), 7: (78, 'NNet.Game.SUserOptionsEvent'), 9: (71, 'NNet.Game.SBankFileEvent'), 10: (73, 'NNet.Game.SBankSectionEvent'), 11: (74, 'NNet.Game.SBankKeyEvent'), 12: (75, 'NNet.Game.SBankValueEvent'), 13: (77, 'NNet.Game.SBankSignatureEvent'), 14: (82, 'NNet.Game.SCameraSaveEvent'), 21: (83, 'NNet.Game.SSaveGameEvent'), 22: (79, 'NNet.Game.SSaveGameDoneEvent'), 23: (79, 'NNet.Game.SLoadGameDoneEvent'), 25: (85, 'NNet.Game.SCommandManagerResetEvent'), 26: (93, 'NNet.Game.SGameCheatEvent'), 27: (99, 'NNet.Game.SCmdEvent'), 28: (106, 'NNet.Game.SSelectionDeltaEvent'), 29: (107, 'NNet.Game.SControlGroupUpdateEvent'), 30: (109, 'NNet.Game.SSelectionSyncCheckEvent'), 32: (110, 'NNet.Game.STriggerChatMessageEvent'), 33: (111, 'NNet.Game.SDynamicButtonSwapEvent'), 34: (112, 'NNet.Game.SSetAbsoluteGameSpeedEvent'), 35: (114, 'NNet.Game.SAddAbsoluteGameSpeedEvent'), 36: (116, 'NNet.Game.STriggerPingEvent'), 37: (117, 'NNet.Game.SBroadcastCheatEvent'), 38: (118, 'NNet.Game.SAllianceEvent'), 39: (119, 'NNet.Game.SUnitClickEvent'), 40: (120, 'NNet.Game.SUnitHighlightEvent'), 41: (121, 'NNet.Game.STriggerReplySelectedEvent'), 43: (126, 'NNet.Game.SHijackReplayGameEvent'), 44: (79, 'NNet.Game.STriggerSkippedEvent'), 45: (130, 'NNet.Game.STriggerSoundLengthQueryEvent'), 46: (137, 'NNet.Game.STriggerSoundOffsetEvent'), 47: (138, 'NNet.Game.STriggerTransmissionOffsetEvent'), 48: (139, 'NNet.Game.STriggerTransmissionCompleteEvent'), 49: (143, 'NNet.Game.SCameraUpdateEvent'), 50: (79, 'NNet.Game.STriggerAbortMissionEvent'), 55: (129, 'NNet.Game.STriggerDialogControlEvent'), 56: (133, 'NNet.Game.STriggerSoundLengthSyncEvent'), 57: (144, 'NNet.Game.STriggerConversationSkippedEvent'), 58: (147, 'NNet.Game.STriggerMouseClickedEvent'), 59: (148, 'NNet.Game.STriggerMouseMovedEvent'), 60: (149, 'NNet.Game.SAchievementAwardedEvent'), 61: (150, 'NNet.Game.STriggerHotkeyPressedEvent'), 62: (151, 'NNet.Game.STriggerTargetModeUpdateEvent'), 64: (152, 'NNet.Game.STriggerSoundtrackDoneEvent'), 66: (153, 'NNet.Game.STriggerKeyPressedEvent'), 67: (158, 'NNet.Game.STriggerMovieFunctionEvent'), 76: (154, 'NNet.Game.STriggerCommandErrorEvent'), 86: (79, 'NNet.Game.STriggerMovieStartedEvent'), 87: (79, 'NNet.Game.STriggerMovieFinishedEvent'), 88: (156, 'NNet.Game.SDecrementGameTimeRemainingEvent'), 89: (157, 'NNet.Game.STriggerPortraitLoadedEvent'), 90: (159, 'NNet.Game.STriggerCustomDialogDismissedEvent'), 91: (160, 
'NNet.Game.STriggerGameMenuItemSelectedEvent'), 92: (162, 'NNet.Game.STriggerMouseWheelEvent'), 95: (163, 'NNet.Game.STriggerButtonPressedEvent'), 96: (79, 'NNet.Game.STriggerGameCreditsFinishedEvent'), 97: (164, 'NNet.Game.STriggerCutsceneBookmarkFiredEvent'), 98: (165, 'NNet.Game.STriggerCutsceneEndSceneFiredEvent'), 99: (166, 'NNet.Game.STriggerCutsceneConversationLineEvent'), 100: (167, 'NNet.Game.STriggerCutsceneConversationLineMissingEvent'), 101: (168, 'NNet.Game.SGameUserLeaveEvent'), 102: (169, 'NNet.Game.SGameUserJoinEvent'), 103: (171, 'NNet.Game.SCommandManagerStateEvent'), 104: (172, 'NNet.Game.SCmdUpdateTargetPointEvent'), 105: (173, 'NNet.Game.SCmdUpdateTargetUnitEvent'), 106: (134, 'NNet.Game.STriggerAnimLengthQueryByNameEvent'), 107: (135, 'NNet.Game.STriggerAnimLengthQueryByPropsEvent'), 108: (136, 'NNet.Game.STriggerAnimOffsetEvent'), 109: (174, 'NNet.Game.SCatalogModifyEvent'), 110: (175, 'NNet.Game.SHeroTalentTreeSelectedEvent'), 111: (79, 'NNet.Game.STriggerProfilerLoggingFinishedEvent'), 112: (176, 'NNet.Game.SHeroTalentTreeSelectionPanelToggledEvent'), } # The typeid of the NNet.Game.EEventId enum. game_eventid_typeid = 0 # Map from protocol NNet.Game.*Message eventid to (typeid, name) message_event_types = { 0: (177, 'NNet.Game.SChatMessage'), 1: (178, 'NNet.Game.SPingMessage'), 2: (179, 'NNet.Game.SLoadingProgressMessage'), 3: (79, 'NNet.Game.SServerPingMessage'), 4: (180, 'NNet.Game.SReconnectNotifyMessage'), 5: (184, 'NNet.Game.SPlayerAnnounceMessage'), } # The typeid of the NNet.Game.EMessageId enum. message_eventid_typeid = 1 # Map from protocol NNet.Replay.Tracker.*Event eventid to (typeid, name) tracker_event_types = { 1: (185, 'NNet.Replay.Tracker.SUnitBornEvent'), 2: (187, 'NNet.Replay.Tracker.SUnitDiedEvent'), 3: (188, 'NNet.Replay.Tracker.SUnitOwnerChangeEvent'), 4: (189, 'NNet.Replay.Tracker.SUnitTypeChangeEvent'), 5: (190, 'NNet.Replay.Tracker.SUpgradeEvent'), 6: (185, 'NNet.Replay.Tracker.SUnitInitEvent'), 7: (191, 'NNet.Replay.Tracker.SUnitDoneEvent'), 8: (193, 'NNet.Replay.Tracker.SUnitPositionsEvent'), 9: (194, 'NNet.Replay.Tracker.SPlayerSetupEvent'), 10: (202, 'NNet.Replay.Tracker.SStatGameEvent'), 11: (208, 'NNet.Replay.Tracker.SScoreResultEvent'), 12: (186, 'NNet.Replay.Tracker.SUnitRevivedEvent'), 13: (209, 'NNet.Replay.Tracker.SHeroBannedEvent'), 14: (210, 'NNet.Replay.Tracker.SHeroPickedEvent'), 15: (211, 'NNet.Replay.Tracker.SHeroSwappedEvent'), } # The typeid of the NNet.Replay.Tracker.EEventId enum. tracker_eventid_typeid = 2 # The typeid of NNet.SVarUint32 (the type used to encode gameloop deltas). svaruint32_typeid = 7 # The typeid of NNet.Replay.SGameUserId (the type used to encode player ids). replay_userid_typeid = 8 # The typeid of NNet.Replay.SHeader (the type used to store replay game version and length). replay_header_typeid = 18 # The typeid of NNet.Game.SDetails (the type used to store overall replay details). game_details_typeid = 40 # The typeid of NNet.Replay.SInitData (the type used to store the initial lobby). replay_initdata_typeid = 70 def _varuint32_value(value): # Returns the numeric value from a SVarUint32 instance. 
for v in six.itervalues(value): return v return 0 def _decode_event_stream(decoder, eventid_typeid, event_types, decode_user_id): # Decodes events prefixed with a gameloop and possibly userid gameloop = 0 while not decoder.done(): start_bits = decoder.used_bits() # decode the gameloop delta before each event delta = _varuint32_value(decoder.instance(svaruint32_typeid)) gameloop += delta # decode the userid before each event if decode_user_id: userid = decoder.instance(replay_userid_typeid) # decode the event id eventid = decoder.instance(eventid_typeid) typeid, typename = event_types.get(eventid, (None, None)) if typeid is None: raise CorruptedError('eventid(%d) at %s' % (eventid, decoder)) # decode the event struct instance event = decoder.instance(typeid) event['_event'] = typename event['_eventid'] = eventid # insert gameloop and userid event['_gameloop'] = gameloop if decode_user_id: event['_userid'] = userid # the next event is byte aligned decoder.byte_align() # insert bits used in stream event['_bits'] = decoder.used_bits() - start_bits yield event def decode_replay_game_events(contents): """Decodes and yields each game event from the contents byte string.""" decoder = BitPackedDecoder(contents, typeinfos) for event in _decode_event_stream(decoder, game_eventid_typeid, game_event_types, decode_user_id=True): yield event def decode_replay_message_events(contents): """Decodes and yields each message event from the contents byte string.""" decoder = BitPackedDecoder(contents, typeinfos) for event in _decode_event_stream(decoder, message_eventid_typeid, message_event_types, decode_user_id=True): yield event def decode_replay_tracker_events(contents): """Decodes and yields each tracker event from the contents byte string.""" decoder = VersionedDecoder(contents, typeinfos) for event in _decode_event_stream(decoder, tracker_eventid_typeid, tracker_event_types, decode_user_id=False): yield event def decode_replay_header(contents): """Decodes and return the replay header from the contents byte string.""" decoder = VersionedDecoder(contents, typeinfos) return decoder.instance(replay_header_typeid) def decode_replay_details(contents): """Decodes and returns the game details from the contents byte string.""" decoder = VersionedDecoder(contents, typeinfos) return decoder.instance(game_details_typeid) def decode_replay_initdata(contents): """Decodes and return the replay init data from the contents byte string.""" decoder = BitPackedDecoder(contents, typeinfos) return decoder.instance(replay_initdata_typeid) def decode_replay_attributes_events(contents): """Decodes and yields each attribute from the contents byte string.""" buffer = BitPackedBuffer(contents, 'little') attributes = {} if not buffer.done(): attributes['source'] = buffer.read_bits(8) attributes['mapNamespace'] = buffer.read_bits(32) _ = buffer.read_bits(32) attributes['scopes'] = {} while not buffer.done(): value = {} value['namespace'] = buffer.read_bits(32) value['attrid'] = attrid = buffer.read_bits(32) scope = buffer.read_bits(8) value['value'] = buffer.read_aligned_bytes(4)[::-1].strip(b'\x00') if not scope in attributes['scopes']: attributes['scopes'][scope] = {} if not attrid in attributes['scopes'][scope]: attributes['scopes'][scope][attrid] = [] attributes['scopes'][scope][attrid].append(value) return attributes def unit_tag(unitTagIndex, unitTagRecycle): return (unitTagIndex << 18) + unitTagRecycle def unit_tag_index(unitTag): return (unitTag >> 18) & 0x00003fff def unit_tag_recycle(unitTag): return (unitTag) & 0x0003ffff
Blizzard/heroprotocol
heroprotocol/versions/protocol68406.py
Python
mit
26,730
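A quick round-trip check of the unit-tag packing helpers at the bottom of the protocol module; the import path simply mirrors this file's location in the repo.

from heroprotocol.versions.protocol68406 import (
    unit_tag, unit_tag_index, unit_tag_recycle)

# A tag packs the 14-bit index above the 18-bit recycle counter.
tag = unit_tag(5, 7)
print(tag)                    # 1310727 == 5 * 2**18 + 7
print(unit_tag_index(tag))    # 5
print(unit_tag_recycle(tag))  # 7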
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('pppcemr', '0014_auto_20151001_1108'), ] operations = [ migrations.CreateModel( name='EncounterType', fields=[ ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')), ('name', models.CharField(blank=True, null=True, max_length=50)), ], ), migrations.AlterField( model_name='encounter', name='encounter_type', field=models.ForeignKey(blank=True, null=True, to='pppcemr.EncounterType'), ), migrations.AlterField( model_name='encounter', name='id', field=models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID'), ), ]
sstebbins/pppcpro
pppcemr/migrations/0015_auto_20151001_1549.py
Python
agpl-3.0
970
# -*- coding: utf-8 -*-

import os
from pysignfe.nfe.manual_500 import consrecinfe_310
from pysignfe.nfe.manual_600 import ESQUEMA_ATUAL
from pysignfe.xml_sped import *
from pysignfe.nfe.manual_600.nfe_310 import NFe


DIRNAME = os.path.dirname(__file__)


class ConsReciNFe(consrecinfe_310.ConsReciNFe):
    def __init__(self):
        super(ConsReciNFe, self).__init__()
        self.versao = TagDecimal(nome=u'consReciNFe', codigo=u'BP02', propriedade=u'versao', namespace=NAMESPACE_NFE, valor=u'3.10', raiz=u'/')
        self.caminho_esquema = os.path.join(DIRNAME, u'schema/', ESQUEMA_ATUAL + u'/')
        self.arquivo_esquema = u'consReciNFe_v3.10.xsd'


class InfProt(consrecinfe_310.InfProt):
    def __init__(self):
        super(InfProt, self).__init__()

    def get_xml(self):
        if not (self.tpAmb.valor or self.chNFe.valor or self.dhRecbto.valor):
            return ''

        xml = XMLNFe.get_xml(self)
        if self.Id.valor:
            xml += self.Id.xml
        else:
            xml += u'<infProt>'

        xml += self.tpAmb.xml
        xml += self.verAplic.xml
        xml += self.chNFe.xml
        xml += self.dhRecbto.xml
        xml += self.nProt.xml
        xml += self.digVal.xml
        xml += self.cStat.xml
        xml += self.xMotivo.xml
        xml += u'</infProt>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.Id.xml = arquivo
            self.tpAmb.xml = arquivo
            self.verAplic.xml = arquivo
            self.chNFe.xml = arquivo
            self.dhRecbto.xml = arquivo
            self.nProt.xml = arquivo
            self.digVal.xml = arquivo
            self.cStat.xml = arquivo
            self.xMotivo.xml = arquivo

    xml = property(get_xml, set_xml)


class ProtNFe(consrecinfe_310.ProtNFe):
    def __init__(self):
        super(ProtNFe, self).__init__()
        self.versao = TagDecimal(nome=u'protNFe', codigo=u'PR02', propriedade=u'versao', namespace=NAMESPACE_NFE, valor=u'3.10', raiz=u'/')
        self.infProt = InfProt()

    def get_xml(self):
        if not self.infProt.xml:
            return ''

        xml = XMLNFe.get_xml(self)
        xml += self.versao.xml
        xml += self.infProt.xml

        if len(self.Signature.URI) and (self.Signature.URI.strip() != u'#'):
            xml += self.Signature.xml

        xml += u'</protNFe>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.versao.xml = arquivo
            self.infProt.xml = self._le_noh(u'//protNFe/infProt')
            self.Signature.xml = self._le_noh(u'//protNFe/sig:Signature')

    xml = property(get_xml, set_xml)

    def protocolo_formatado_nfce(self):
        if not self.infProt.nProt.valor:
            return u''
        return u'Protocolo de autorização: ' + self.infProt.nProt.valor

    def data_autorizacao_nfce(self):
        if not self.infProt.nProt.valor:
            return u''
        return u'Data de autorização: ' + self.infProt.dhRecbto.formato_danfe()


class RetConsReciNFe(consrecinfe_310.RetConsReciNFe):
    def __init__(self):
        super(RetConsReciNFe, self).__init__()
        self.versao = TagDecimal(nome=u'retConsReciNFe', codigo=u'BR02', propriedade=u'versao', namespace=NAMESPACE_NFE, valor=u'3.10', raiz=u'/')
        self.dhRecbto = TagDataHoraUTC(nome=u'dhRecbto', codigo=u'BR06a1', raiz=u'//retConsReciNFe')
        self.caminho_esquema = os.path.join(DIRNAME, u'schema/', ESQUEMA_ATUAL + u'/')
        self.arquivo_esquema = u'retConsReciNFe_v3.10.xsd'

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += self.versao.xml
        xml += self.tpAmb.xml
        xml += self.verAplic.xml
        xml += self.nRec.xml
        xml += self.cStat.xml
        xml += self.xMotivo.xml
        xml += self.cUF.xml
        xml += self.dhRecbto.xml
        xml += self.cMsg.xml
        xml += self.xMsg.xml

        for pn in self.protNFe:
            xml += pn.xml

        xml += u'</retConsReciNFe>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.versao.xml = arquivo
            self.tpAmb.xml = arquivo
            self.verAplic.xml = arquivo
            self.nRec.xml = arquivo
            self.cStat.xml = arquivo
            self.xMotivo.xml = arquivo
            self.cUF.xml = arquivo
            self.dhRecbto.xml = arquivo
            self.cMsg.xml = arquivo
            self.xMsg.xml = arquivo
            self.protNFe = self.le_grupo('//retConsReciNFe/protNFe', ProtNFe)

            #
            # Build the dictionary of protocols, keyed by NFe access key
            #
            for pn in self.protNFe:
                self.dic_protNFe[pn.infProt.chNFe.valor] = pn

    xml = property(get_xml, set_xml)


class ProcNFe(consrecinfe_310.ProcNFe):
    def __init__(self):
        super(ProcNFe, self).__init__()
        self.versao = TagDecimal(nome=u'nfeProc', propriedade=u'versao', namespace=NAMESPACE_NFE, valor=u'3.10', raiz=u'/')
        self.NFe = NFe()
        self.protNFe = ProtNFe()
        self.caminho_esquema = os.path.join(DIRNAME, u'schema/', ESQUEMA_ATUAL + u'/')
        self.arquivo_esquema = u'procNFe_v3.10.xsd'
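
# A minimal usage sketch, not part of the original module; it assumes a SEFAZ
# receipt-query response already saved to the hypothetical file 'retorno.xml'
# (Python 2, matching the u'' literals used throughout this module):
#
#     ret = RetConsReciNFe()
#     ret.xml = open('retorno.xml').read()   # parsing happens via the xml property
#     for chave, prot in ret.dic_protNFe.items():
#         print chave, prot.infProt.cStat.valor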
thiagopena/PySIGNFe
pysignfe/nfe/manual_600/consrecinfe_310.py
Python
lgpl-2.1
5,380
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art


art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 0, transform = "RelativeDifference", sigma = 0.0, exog_count = 0, ar_order = 12);
antoinecarme/pyaf
tests/artificial/transf_RelativeDifference/trend_MovingAverage/cycle_0/ar_12/test_artificial_32_RelativeDifference_MovingAverage_0_12_0.py
Python
bsd-3-clause
276
import asyncio
import os
import codecs
import time

import cloudbot
from cloudbot import hook
from cloudbot.event import EventType

# +---------+
# | Formats |
# +---------+
from cloudbot.util.formatting import strip_colors

base_formats = {
    EventType.message: "[{server}:{channel}] <{nick}> {content}",
    EventType.notice: "[{server}:{channel}] -{nick}- {content}",
    EventType.action: "[{server}:{channel}] * {nick} {content}",
    EventType.join: "[{server}:{channel}] -!- {nick} [{user}@{host}] has joined",
    EventType.part: "[{server}:{channel}] -!- {nick} [{user}@{host}] has left ({content})",
    EventType.kick: "[{server}:{channel}] -!- {nick} has kicked {target} ({content})",
}

irc_formats = {
    "MODE": "[{server}:{channel}] -!- mode/{channel} [{param_tail}] by {nick}",
    "TOPIC": "[{server}:{channel}] -!- {nick} has changed the topic to: {content}",
    "QUIT": "[{server}] -!- {nick} has quit ({content})",
    "INVITE": "[{server}] -!- {nick} has invited {target} to {chan}",
    "NICK": "[{server}] {nick} is now known as {content}",
}

irc_default = "[{server}] {irc_raw}"

ctcp_known = "[{server}:{channel}] {nick} [{user}@{host}] has requested CTCP {ctcp_command}"
ctcp_known_with_message = ("[{server}:{channel}] {nick} [{user}@{host}] "
                           "has requested CTCP {ctcp_command}: {ctcp_message}")
ctcp_unknown = "[{server}:{channel}] {nick} [{user}@{host}] has requested unknown CTCP {ctcp_command}"
ctcp_unknown_with_message = ("[{server}:{channel}] {nick} [{user}@{host}] "
                             "has requested unknown CTCP {ctcp_command}: {ctcp_message}")


# +------------+
# | Formatting |
# +------------+

def format_event(event):
    """
    Format an event
    :type event: cloudbot.event.Event
    :rtype: str
    """
    # Setup arguments
    args = {
        "server": event.conn.name, "target": event.target, "channel": event.chan,
        "nick": event.nick, "user": event.user, "host": event.host
    }

    if event.content is not None:
        # We can't strip colors from None
        args["content"] = strip_colors(event.content)
    else:
        args["content"] = None

    # Try formatting with non-connection-specific formats
    if event.type in base_formats:
        return base_formats[event.type].format(**args)

    # Try formatting with IRC-formats, if this is an IRC event
    if event.irc_command is not None:
        return format_irc_event(event, args)


def format_irc_event(event, args):
    """
    Format an IRC event
    :param event: The event to format
    :param args: The pre-created arguments
    :return:
    """
    # Add the IRC-specific param_tail argument to the generic arguments
    args["param_tail"] = " ".join(event.irc_paramlist[1:])

    # Try formatting with the IRC command
    if event.irc_command in irc_formats:
        return irc_formats[event.irc_command].format(**args)

    # Try formatting with the CTCP command
    if event.irc_ctcp_text is not None:
        ctcp_args = event.irc_ctcp_text.split(None, 1)
        ctcp_command = ctcp_args[0]
        ctcp_message = None
        if len(ctcp_args) > 1:
            ctcp_message = ctcp_args[1]

        args["ctcp_command"] = ctcp_command
        args["ctcp_message"] = ctcp_message

        if ctcp_command in ("VERSION", "PING", "TIME", "FINGER"):
            if ctcp_message:
                return ctcp_known_with_message.format(**args)
            else:
                return ctcp_known.format(**args)
        else:
            if ctcp_message:
                return ctcp_unknown_with_message.format(**args)
            else:
                return ctcp_unknown.format(**args)

    # No formats have been found, resort to the default

    # Check if the command is blacklisted for raw output
    logging_config = event.bot.config.get("logging", {})

    if not logging_config.get("show_motd", True) and event.irc_command in ("375", "372", "376"):
        return None
    elif not logging_config.get("show_server_info", True) and event.irc_command in (
            "003", "005", "250", "251", "252", "253", "254", "255", "256"):
        return None
    elif event.irc_command == "PING":
        return None

    # Format using the default raw format
    return irc_default.format(server=event.conn.name, irc_raw=event.irc_raw)


# +--------------+
# | File logging |
# +--------------+

file_format = "{server}_{chan}_%Y%m%d.log"
raw_file_format = "{server}_%Y%m%d.log"

folder_format = "%Y"

# Stream cache, (server, chan) -> (file_name, stream)
stream_cache = {}
# Raw stream cache, server -> (file_name, stream)
raw_cache = {}


def get_log_filename(server, chan):
    current_time = time.gmtime()
    folder_name = time.strftime(folder_format, current_time)
    file_name = time.strftime(file_format.format(chan=chan, server=server), current_time).lower()
    return os.path.join(cloudbot.logging_dir, folder_name, file_name)


def get_log_stream(server, chan):
    new_filename = get_log_filename(server, chan)
    cache_key = (server, chan)
    old_filename, log_stream = stream_cache.get(cache_key, (None, None))

    # If the filename has changed since we opened the stream, we should re-open
    if new_filename != old_filename:
        # If we had a stream open before, we should close it
        if log_stream is not None:
            log_stream.flush()
            log_stream.close()

        logging_dir = os.path.dirname(new_filename)
        os.makedirs(logging_dir, exist_ok=True)

        # a dumb hack to bypass the fact windows does not allow * in file names
        new_filename = new_filename.replace("*", "server")

        log_stream = codecs.open(new_filename, mode="a", encoding="utf-8", buffering=1)
        stream_cache[cache_key] = (new_filename, log_stream)

    return log_stream


def get_raw_log_filename(server):
    current_time = time.gmtime()
    folder_name = time.strftime(folder_format, current_time)
    file_name = time.strftime(raw_file_format.format(server=server), current_time).lower()
    return os.path.join(cloudbot.logging_dir, "raw", folder_name, file_name)


def get_raw_log_stream(server):
    new_filename = get_raw_log_filename(server)
    # Use raw_cache (not stream_cache) here: the original stored raw streams in
    # the channel-stream cache, which left raw_cache permanently empty and
    # meant flush_log() never flushed the raw log streams.
    old_filename, log_stream = raw_cache.get(server, (None, None))

    # If the filename has changed since we opened the stream, we should re-open
    if new_filename != old_filename:
        # If we had a stream open before, we should close it
        if log_stream is not None:
            log_stream.flush()
            log_stream.close()

        logging_dir = os.path.dirname(new_filename)
        os.makedirs(logging_dir, exist_ok=True)

        log_stream = codecs.open(new_filename, mode="a", encoding="utf-8", buffering=1)
        raw_cache[server] = (new_filename, log_stream)

    return log_stream


@hook.irc_raw("*", singlethread=True)
def log_raw(event):
    """
    :type event: cloudbot.event.Event
    """
    logging_config = event.bot.config.get("logging", {})
    if not logging_config.get("raw_file_log", False):
        return

    stream = get_raw_log_stream(event.conn.name)
    stream.write(event.irc_raw + os.linesep)
    stream.flush()


@hook.irc_raw("*", singlethread=True)
def log(event):
    """
    :type event: cloudbot.event.Event
    """
    text = format_event(event)

    if text is not None:
        if event.irc_command in ["PRIVMSG", "PART", "JOIN", "MODE", "TOPIC", "QUIT", "NOTICE"] and event.chan:
            stream = get_log_stream(event.conn.name, event.chan)
            stream.write(text + os.linesep)
            stream.flush()


# Log console separately to prevent lag
@asyncio.coroutine
@hook.irc_raw("*")
def console_log(bot, event):
    """
    :type bot: cloudbot.bot.CloudBot
    :type event: cloudbot.event.Event
    """
    text = format_event(event)
    if text is not None:
        bot.logger.info(text)


# TODO: @hook.onstop() for when unloaded
@hook.command("flushlog", permissions=["botcontrol"])
def flush_log():
    for name, stream in stream_cache.values():
        stream.flush()
    for name, stream in raw_cache.values():
        stream.flush()
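
# Illustration, not part of the original plugin: the logging-related keys this
# module reads from the bot config (all optional; defaults match the code above):
#
#     "logging": {
#         "raw_file_log": false,      # enable per-server raw IRC logs
#         "show_motd": true,          # include MOTD numerics in formatted output
#         "show_server_info": true    # include server-info numerics
#     }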
jkramarz/zuombot
plugins/log.py
Python
gpl-3.0
8,130
import numpy as np
from matplotlib import image as img
import requests
from StringIO import StringIO


class Map(object):
    def __init__(self, lat, long, satellite=True, zoom=10,
                 size=(400, 400), sensor=False):
        base = "http://maps.googleapis.com/maps/api/staticmap?"

        params = dict(
            sensor=str(sensor).lower(),
            zoom=zoom,
            size="x".join(map(str, size)),
            center=",".join(map(str, (lat, long))),
            style="feature:all|element:labels|visibility:off"
        )

        if satellite:
            params["maptype"] = "satellite"

        self.image = requests.get(base, params=params).content  # Fetch our PNG image data
        self.pixels = img.imread(StringIO(self.image))  # Parse our PNG image as a numpy array

    def green(self, threshold):
        # Use NumPy to build an element-by-element logical array
        greener_than_red = self.pixels[:, :, 1] > threshold * self.pixels[:, :, 0]
        greener_than_blue = self.pixels[:, :, 1] > threshold * self.pixels[:, :, 2]
        green = np.logical_and(greener_than_red, greener_than_blue)
        return green

    def count_green(self, threshold=1.1):
        return np.sum(self.green(threshold))

    def show_green(self, threshold=1.1):
        # Note: defined with `self` (the original took `data` but used self.green),
        # and uses np.array (the original called a bare, undefined `array`).
        green = self.green(threshold)
        # Paint green pixels pure green on a black background
        out = green[:, :, np.newaxis] * np.array([0, 1, 0])[np.newaxis, np.newaxis, :]
        buffer = StringIO()
        img.imsave(buffer, out, format='png')
        return buffer.getvalue()
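
# A minimal usage sketch, not part of the original module; it needs network
# access to the Static Maps API and a Python 2 environment (StringIO comes from
# the py2 standard library). Coordinates are made up (central London):
if __name__ == "__main__":
    m = Map(51.50, -0.12)
    print m.count_green()  # number of pixels classed as "green" at threshold 1.1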
ernestwcl/greengraph
greengraph/map.py
Python
apache-2.0
1,314
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-21 14:07
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('loans', '0006_auto_20160721_1640'),
    ]

    operations = [
        migrations.AddField(
            model_name='loan',
            name='compound',
            field=models.IntegerField(default=1, help_text='number of compounding periods'),
            preserve_default=False,
        ),
    ]
lubegamark/senkumba
loans/migrations/0007_loan_compound.py
Python
mit
524
# -*- coding: utf-8 -*-

# pylint: disable=invalid-name

# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

"""
local_qiskit_simulator command to snapshot the quantum state.
"""
from qiskit import CompositeGate
from qiskit import Gate
from qiskit import QuantumCircuit
from qiskit._instructionset import InstructionSet
from qiskit._quantumregister import QuantumRegister
from qiskit.qasm import _node as node


class SnapshotGate(Gate):
    """Simulator snapshot operation."""

    def __init__(self, m, qubit, circ=None):
        """Create new snapshot gate."""
        super().__init__("snapshot", [m], [qubit], circ)

    def qasm(self):
        """Return OPENQASM string."""
        qubit = self.arg[0]
        m = self.param[0]
        return self._qasmif("snapshot(%d) %s[%d];" % (m, qubit[0].name, qubit[1]))

    def inverse(self):
        """Invert this gate."""
        return self  # self-inverse

    def reapply(self, circ):
        """Reapply this gate to corresponding qubits in circ."""
        self._modifiers(circ.snapshot(self.param[0], self.arg[0]))


def snapshot(self, m, q):
    """Cache the quantum state of local_qiskit_simulator."""
    if isinstance(q, QuantumRegister):
        gs = InstructionSet()
        for j in range(q.size):
            gs.add(self.snapshot(m, (q, j)))
        return gs
    self._check_qubit(q)
    return self._attach(SnapshotGate(m, q, self))


# Add to QuantumCircuit and CompositeGate classes
QuantumCircuit.snapshot = snapshot
CompositeGate.snapshot = snapshot

# cache quantum state (identity)
QuantumCircuit.definitions["snapshot"] = {
    "print": True,
    "opaque": False,
    "n_args": 1,
    "n_bits": 1,
    "args": ["m"],
    "bits": ["a"],
    # gate snapshot(m) a { }
    "body": node.GateBody([])
}
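
# Usage sketch, not part of the original file: after the monkey-patching above,
# `snapshot` is available as a circuit method on circuits built with this legacy
# SDK. `circ` and register `qr` are assumed to already exist; the snapshot slot
# numbers are arbitrary labels passed through to the simulator.
#
#     circ.snapshot(0, (qr, 0))   # snapshot slot 0, taken at a single qubit
#     circ.snapshot(1, qr)        # applied qubit-by-qubit across a whole register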
atilag/qiskit-sdk-py
qiskit/extensions/qasm_simulator_cpp/snapshot.py
Python
apache-2.0
2,505
from django.forms import CharField, ValidationError
from django.forms.fields import EMPTY_VALUES

import re, string


class TinyMCEField(CharField):
    def clean(self, value):
        "Validates max_length and min_length. Returns a Unicode object."
        if value in EMPTY_VALUES:
            return u''

        # Strip markup and undo the HTML entities TinyMCE inserts, so length
        # validation runs against the visible text only.
        stripped_value = re.sub(r'<.*?>', '', value)
        stripped_value = string.replace(stripped_value, '&nbsp;', ' ')
        stripped_value = string.replace(stripped_value, '&lt;', '<')
        stripped_value = string.replace(stripped_value, '&gt;', '>')
        stripped_value = string.replace(stripped_value, '&amp;', '&')
        stripped_value = string.replace(stripped_value, '\n', '')
        stripped_value = string.replace(stripped_value, '\r', '')

        value_length = len(stripped_value)
        value_length -= 1

        if self.max_length is not None and value_length > self.max_length:
            raise ValidationError(self.error_messages['max_length'] % {'max': self.max_length, 'length': value_length})
        if self.min_length is not None and value_length < self.min_length:
            raise ValidationError(self.error_messages['min_length'] % {'min': self.min_length, 'length': value_length})

        return value
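
# Illustration, not part of the original module: markup does not count toward
# the length limits, only the stripped text does. For a hypothetical field:
#
#     f = TinyMCEField(max_length=5)
#     f.clean(u'<p>hello</p>')   # stripped text is "hello"; passes validation
#
# Note the raw value, tags included, is what clean() returns on success.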
saebyn/django-classifieds
classifieds/forms/fields.py
Python
bsd-3-clause
1,278
# testing/util.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php

import decimal
import gc
import random
import sys
import types

from . import config
from . import mock
from .. import inspect
from ..engine import Connection
from ..schema import Column
from ..schema import DropConstraint
from ..schema import DropTable
from ..schema import ForeignKeyConstraint
from ..schema import MetaData
from ..schema import Table
from ..sql import schema
from ..sql.sqltypes import Integer
from ..util import decorator
from ..util import defaultdict
from ..util import has_refcount_gc
from ..util import inspect_getfullargspec
from ..util import py2k


if not has_refcount_gc:

    def non_refcount_gc_collect(*args):
        gc.collect()
        gc.collect()

    gc_collect = lazy_gc = non_refcount_gc_collect
else:
    # assume CPython - straight gc.collect, lazy_gc() is a pass
    gc_collect = gc.collect

    def lazy_gc():
        pass


def picklers():
    picklers = set()
    if py2k:
        try:
            import cPickle

            picklers.add(cPickle)
        except ImportError:
            pass

    import pickle

    picklers.add(pickle)

    # yes, this thing needs this much testing
    for pickle_ in picklers:
        for protocol in range(-2, pickle.HIGHEST_PROTOCOL):
            yield pickle_.loads, lambda d: pickle_.dumps(d, protocol)


if py2k:

    def random_choices(population, k=1):
        pop = list(population)
        # lame but works :)
        random.shuffle(pop)
        return pop[0:k]

else:

    def random_choices(population, k=1):
        return random.choices(population, k=k)


def round_decimal(value, prec):
    if isinstance(value, float):
        return round(value, prec)

    # can also use shift() here but that is 2.6 only
    return (value * decimal.Decimal("1" + "0" * prec)).to_integral(
        decimal.ROUND_FLOOR
    ) / pow(10, prec)


class RandomSet(set):
    def __iter__(self):
        l = list(set.__iter__(self))
        random.shuffle(l)
        return iter(l)

    def pop(self):
        index = random.randint(0, len(self) - 1)
        item = list(set.__iter__(self))[index]
        self.remove(item)
        return item

    def union(self, other):
        return RandomSet(set.union(self, other))

    def difference(self, other):
        return RandomSet(set.difference(self, other))

    def intersection(self, other):
        return RandomSet(set.intersection(self, other))

    def copy(self):
        return RandomSet(self)


def conforms_partial_ordering(tuples, sorted_elements):
    """True if the given sorting conforms to the given partial ordering."""

    deps = defaultdict(set)
    for parent, child in tuples:
        deps[parent].add(child)
    for i, node in enumerate(sorted_elements):
        for n in sorted_elements[i:]:
            if node in deps[n]:
                return False
    else:
        return True


def all_partial_orderings(tuples, elements):
    edges = defaultdict(set)
    for parent, child in tuples:
        edges[child].add(parent)

    def _all_orderings(elements):

        if len(elements) == 1:
            yield list(elements)
        else:
            for elem in elements:
                subset = set(elements).difference([elem])
                if not subset.intersection(edges[elem]):
                    for sub_ordering in _all_orderings(subset):
                        yield [elem] + sub_ordering

    return iter(_all_orderings(elements))


def function_named(fn, name):
    """Return a function with a given __name__.

    Will assign to __name__ and return the original function if possible on
    the Python implementation, otherwise a new function will be constructed.

    This function should be phased out as much as possible
    in favor of @decorator.  Tests that "generate" many named tests
    should be modernized.

    """
    try:
        fn.__name__ = name
    except TypeError:
        fn = types.FunctionType(
            fn.__code__, fn.__globals__, name, fn.__defaults__, fn.__closure__
        )
    return fn


def run_as_contextmanager(ctx, fn, *arg, **kw):
    """Run the given function under the given contextmanager,
    simulating the behavior of 'with' to support older
    Python versions.

    This is not necessary anymore as we have placed 2.6
    as minimum Python version, however some tests are still using
    this structure.

    """

    obj = ctx.__enter__()
    try:
        result = fn(obj, *arg, **kw)
        ctx.__exit__(None, None, None)
        return result
    except:
        exc_info = sys.exc_info()
        raise_ = ctx.__exit__(*exc_info)
        if not raise_:
            raise
        else:
            return raise_


def rowset(results):
    """Converts the results of sql execution into a plain set of column tuples.

    Useful for asserting the results of an unordered query.
    """

    return {tuple(row) for row in results}


def fail(msg):
    assert False, msg


@decorator
def provide_metadata(fn, *args, **kw):
    """Provide bound MetaData for a single test, dropping afterwards.

    Legacy; use the "metadata" pytest fixture.

    """

    from . import fixtures

    metadata = schema.MetaData()
    self = args[0]
    prev_meta = getattr(self, "metadata", None)
    self.metadata = metadata
    try:
        return fn(*args, **kw)
    finally:
        # close out some things that get in the way of dropping tables.
        # when using the "metadata" fixture, there is a set ordering
        # of things that makes sure things are cleaned up in order, however
        # the simple "decorator" nature of this legacy function means
        # we have to hardcode some of that cleanup ahead of time.

        # close ORM sessions
        fixtures._close_all_sessions()

        # integrate with the "connection" fixture as there are many
        # tests where it is used along with provide_metadata
        if fixtures._connection_fixture_connection:
            # TODO: this warning can be used to find all the places
            # this is used with connection fixture
            # warn("mixing legacy provide metadata with connection fixture")
            drop_all_tables_from_metadata(
                metadata, fixtures._connection_fixture_connection
            )
            # as the provide_metadata fixture is often used with "testing.db",
            # when we do the drop we have to commit the transaction so that
            # the DB is actually updated as the CREATE would have been
            # committed
            fixtures._connection_fixture_connection.get_transaction().commit()
        else:
            drop_all_tables_from_metadata(metadata, config.db)
        self.metadata = prev_meta


def flag_combinations(*combinations):
    """A facade around @testing.combinations() oriented towards boolean
    keyword-based arguments.

    Basically generates a nice looking identifier based on the keywords
    and also sets up the argument names.

    E.g.::

        @testing.flag_combinations(
            dict(lazy=False, passive=False),
            dict(lazy=True, passive=False),
            dict(lazy=False, passive=True),
            dict(lazy=False, passive=True, raiseload=True),
        )

    would result in::

        @testing.combinations(
            ('', False, False, False),
            ('lazy', True, False, False),
            ('lazy_passive', True, True, False),
            ('lazy_passive', True, True, True),
            id_='iaaa',
            argnames='lazy,passive,raiseload'
        )

    """

    keys = set()

    for d in combinations:
        keys.update(d)

    keys = sorted(keys)

    return config.combinations(
        *[
            ("_".join(k for k in keys if d.get(k, False)),)
            + tuple(d.get(k, False) for k in keys)
            for d in combinations
        ],
        id_="i" + ("a" * len(keys)),
        argnames=",".join(keys)
    )


def lambda_combinations(lambda_arg_sets, **kw):
    args = inspect_getfullargspec(lambda_arg_sets)

    arg_sets = lambda_arg_sets(*[mock.Mock() for arg in args[0]])

    def create_fixture(pos):
        def fixture(**kw):
            return lambda_arg_sets(**kw)[pos]

        fixture.__name__ = "fixture_%3.3d" % pos
        return fixture

    return config.combinations(
        *[(create_fixture(i),) for i in range(len(arg_sets))], **kw
    )


def resolve_lambda(__fn, **kw):
    """Given a no-arg lambda and a namespace, return a new lambda that
    has all the values filled in.

    This is used so that we can have module-level fixtures that
    refer to instance-level variables using lambdas.

    """

    pos_args = inspect_getfullargspec(__fn)[0]
    pass_pos_args = {arg: kw.pop(arg) for arg in pos_args}
    glb = dict(__fn.__globals__)
    glb.update(kw)
    new_fn = types.FunctionType(__fn.__code__, glb)
    return new_fn(**pass_pos_args)


def metadata_fixture(ddl="function"):
    """Provide MetaData for a pytest fixture."""

    def decorate(fn):
        def run_ddl(self):

            metadata = self.metadata = schema.MetaData()
            try:
                result = fn(self, metadata)
                metadata.create_all(config.db)
                # TODO:
                # somehow get a per-function dml erase fixture here
                yield result
            finally:
                metadata.drop_all(config.db)

        return config.fixture(scope=ddl)(run_ddl)

    return decorate


def force_drop_names(*names):
    """Force the given table names to be dropped after test complete,
    isolating for foreign key cycles

    """

    @decorator
    def go(fn, *args, **kw):

        try:
            return fn(*args, **kw)
        finally:
            drop_all_tables(config.db, inspect(config.db), include_names=names)

    return go


class adict(dict):
    """Dict keys available as attributes.  Shadows."""

    def __getattribute__(self, key):
        try:
            return self[key]
        except KeyError:
            return dict.__getattribute__(self, key)

    def __call__(self, *keys):
        return tuple([self[key] for key in keys])

    get_all = __call__


def drop_all_tables_from_metadata(metadata, engine_or_connection):
    from . import engines

    def go(connection):
        engines.testing_reaper.prepare_for_drop_tables(connection)

        if not connection.dialect.supports_alter:
            from . import assertions

            with assertions.expect_warnings(
                "Can't sort tables", assert_=False
            ):
                metadata.drop_all(connection)
        else:
            metadata.drop_all(connection)

    if not isinstance(engine_or_connection, Connection):
        with engine_or_connection.begin() as connection:
            go(connection)
    else:
        go(engine_or_connection)


def drop_all_tables(engine, inspector, schema=None, include_names=None):

    if include_names is not None:
        include_names = set(include_names)

    with engine.begin() as conn:
        for tname, fkcs in reversed(
            inspector.get_sorted_table_and_fkc_names(schema=schema)
        ):
            if tname:
                if include_names is not None and tname not in include_names:
                    continue
                conn.execute(
                    DropTable(Table(tname, MetaData(), schema=schema))
                )
            elif fkcs:
                if not engine.dialect.supports_alter:
                    continue
                for tname, fkc in fkcs:
                    if (
                        include_names is not None
                        and tname not in include_names
                    ):
                        continue
                    tb = Table(
                        tname,
                        MetaData(),
                        Column("x", Integer),
                        Column("y", Integer),
                        schema=schema,
                    )
                    conn.execute(
                        DropConstraint(
                            ForeignKeyConstraint([tb.c.x], [tb.c.y], name=fkc)
                        )
                    )


def teardown_events(event_cls):
    @decorator
    def decorate(fn, *arg, **kw):
        try:
            return fn(*arg, **kw)
        finally:
            event_cls._clear()

    return decorate
zzzeek/sqlalchemy
lib/sqlalchemy/testing/util.py
Python
mit
12,503
# -*- coding: utf-8 -*-
#
# Read the Docs Template documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 26 14:19:49 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os
from recommonmark.parser import CommonMarkParser

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

## Add parser for Markdown
source_parsers = {
    '.md': CommonMarkParser,
}

# The suffix of source filenames.
source_suffix = ['.rst', '.md']

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Documentation DigitalSkills'
copyright = u'2017, DigitalSkills'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'
#html_theme = 'sphinx_rtd_theme_digitalskills'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
html_theme_options = {
}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes',]

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'ReadtheDocsTemplatedoc'


# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'ReadtheDocsTemplate.tex', u'Read the Docs Template Documentation',
   u'Read the Docs', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'readthedocstemplate', u'Read the Docs Template Documentation',
     [u'Read the Docs'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'ReadtheDocsTemplate', u'Read the Docs Template Documentation',
   u'Read the Docs', 'ReadtheDocsTemplate', 'One line description of project.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
DigitalSkills-fr/Docs
docs/conf.py
Python
apache-2.0
8,474
# Copyright (c) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from datetime import datetime import logging import unittest2 as unittest from webkitpy.common.net import bugzilla from webkitpy.common.net.layouttestresults import LayoutTestResults from webkitpy.common.system.executive import ScriptError from webkitpy.common.system.outputcapture import OutputCapture from webkitpy.layout_tests.models import test_results from webkitpy.layout_tests.models import test_failures from webkitpy.thirdparty.mock import Mock from webkitpy.tool.bot.commitqueuetask import * from webkitpy.tool.bot.expectedfailures import ExpectedFailures from webkitpy.tool.mocktool import MockTool _log = logging.getLogger(__name__) class MockCommitQueue(CommitQueueTaskDelegate): def __init__(self, error_plan): self._error_plan = error_plan self._failure_status_id = 0 def run_command(self, command): _log.info("run_webkit_patch: %s" % command) if self._error_plan: error = self._error_plan.pop(0) if error: raise error def command_passed(self, success_message, patch): _log.info("command_passed: success_message='%s' patch='%s'" % ( success_message, patch.id())) def command_failed(self, failure_message, script_error, patch): _log.info("command_failed: failure_message='%s' script_error='%s' patch='%s'" % ( failure_message, script_error, patch.id())) self._failure_status_id += 1 return self._failure_status_id def refetch_patch(self, patch): return patch def expected_failures(self): return ExpectedFailures() def test_results(self): return None def report_flaky_tests(self, patch, flaky_results, results_archive): flaky_tests = [result.filename for result in flaky_results] _log.info("report_flaky_tests: patch='%s' flaky_tests='%s' archive='%s'" % (patch.id(), flaky_tests, results_archive.filename)) def archive_last_test_results(self, patch): _log.info("archive_last_test_results: patch='%s'" % patch.id()) archive = Mock() archive.filename = "mock-archive-%s.zip" % patch.id() return archive def build_style(self): return "both" def did_pass_testing_ews(self, patch): return False class 
FailingTestCommitQueue(MockCommitQueue): def __init__(self, error_plan, test_failure_plan): MockCommitQueue.__init__(self, error_plan) self._test_run_counter = -1 # Special value to indicate tests have never been run. self._test_failure_plan = test_failure_plan def run_command(self, command): if command[0] == "build-and-test": self._test_run_counter += 1 MockCommitQueue.run_command(self, command) def _mock_test_result(self, testname): return test_results.TestResult(testname, [test_failures.FailureTextMismatch()]) def test_results(self): # Doesn't make sense to ask for the test_results until the tests have run at least once. assert(self._test_run_counter >= 0) failures_for_run = self._test_failure_plan[self._test_run_counter] results = LayoutTestResults(map(self._mock_test_result, failures_for_run)) # This makes the results trustable by ExpectedFailures. results.set_failure_limit_count(10) return results # We use GoldenScriptError to make sure that the code under test throws the # correct (i.e., golden) exception. class GoldenScriptError(ScriptError): pass class CommitQueueTaskTest(unittest.TestCase): def _run_through_task(self, commit_queue, expected_logs, expected_exception=None, expect_retry=False): self.maxDiff = None tool = MockTool(log_executive=True) patch = tool.bugs.fetch_attachment(10000) task = CommitQueueTask(commit_queue, patch) success = OutputCapture().assert_outputs(self, task.run, expected_logs=expected_logs, expected_exception=expected_exception) if not expected_exception: self.assertEqual(success, not expect_retry) return task def test_success_case(self): commit_queue = MockCommitQueue([]) expected_logs = """run_webkit_patch: ['clean'] command_passed: success_message='Cleaned working directory' patch='10000' run_webkit_patch: ['update'] command_passed: success_message='Updated working directory' patch='10000' run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000] command_passed: success_message='Applied patch' patch='10000' run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000] command_passed: success_message='ChangeLog validated' patch='10000' run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both'] command_passed: success_message='Built patch' patch='10000' run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] command_passed: success_message='Passed tests' patch='10000' run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000] command_passed: success_message='Landed patch' patch='10000' """ self._run_through_task(commit_queue, expected_logs) def test_fast_success_case(self): commit_queue = MockCommitQueue([]) commit_queue.did_pass_testing_ews = lambda patch: True expected_logs = """run_webkit_patch: ['clean'] command_passed: success_message='Cleaned working directory' patch='10000' run_webkit_patch: ['update'] command_passed: success_message='Updated working directory' patch='10000' run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000] command_passed: success_message='Applied patch' patch='10000' run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000] command_passed: success_message='ChangeLog validated' patch='10000' run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both'] command_passed: success_message='Built patch' patch='10000' run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', 
'--parent-command=commit-queue', 10000] command_passed: success_message='Landed patch' patch='10000' """ self._run_through_task(commit_queue, expected_logs) def test_clean_failure(self): commit_queue = MockCommitQueue([ ScriptError("MOCK clean failure"), ]) expected_logs = """run_webkit_patch: ['clean'] command_failed: failure_message='Unable to clean working directory' script_error='MOCK clean failure' patch='10000' """ self._run_through_task(commit_queue, expected_logs, expect_retry=True) def test_update_failure(self): commit_queue = MockCommitQueue([ None, ScriptError("MOCK update failure"), ]) expected_logs = """run_webkit_patch: ['clean'] command_passed: success_message='Cleaned working directory' patch='10000' run_webkit_patch: ['update'] command_failed: failure_message='Unable to update working directory' script_error='MOCK update failure' patch='10000' """ self._run_through_task(commit_queue, expected_logs, expect_retry=True) def test_apply_failure(self): commit_queue = MockCommitQueue([ None, None, GoldenScriptError("MOCK apply failure"), ]) expected_logs = """run_webkit_patch: ['clean'] command_passed: success_message='Cleaned working directory' patch='10000' run_webkit_patch: ['update'] command_passed: success_message='Updated working directory' patch='10000' run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000] command_failed: failure_message='Patch does not apply' script_error='MOCK apply failure' patch='10000' """ self._run_through_task(commit_queue, expected_logs, GoldenScriptError) def test_validate_changelog_failure(self): commit_queue = MockCommitQueue([ None, None, None, GoldenScriptError("MOCK validate failure"), ]) expected_logs = """run_webkit_patch: ['clean'] command_passed: success_message='Cleaned working directory' patch='10000' run_webkit_patch: ['update'] command_passed: success_message='Updated working directory' patch='10000' run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000] command_passed: success_message='Applied patch' patch='10000' run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000] command_failed: failure_message='ChangeLog did not pass validation' script_error='MOCK validate failure' patch='10000' """ self._run_through_task(commit_queue, expected_logs, GoldenScriptError) def test_build_failure(self): commit_queue = MockCommitQueue([ None, None, None, None, GoldenScriptError("MOCK build failure"), ]) expected_logs = """run_webkit_patch: ['clean'] command_passed: success_message='Cleaned working directory' patch='10000' run_webkit_patch: ['update'] command_passed: success_message='Updated working directory' patch='10000' run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000] command_passed: success_message='Applied patch' patch='10000' run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000] command_passed: success_message='ChangeLog validated' patch='10000' run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both'] command_failed: failure_message='Patch does not build' script_error='MOCK build failure' patch='10000' run_webkit_patch: ['build', '--force-clean', '--no-update', '--build-style=both'] command_passed: success_message='Able to build without patch' patch='10000' """ self._run_through_task(commit_queue, expected_logs, GoldenScriptError) def test_red_build_failure(self): commit_queue = MockCommitQueue([ None, None, None, None, ScriptError("MOCK build failure"), ScriptError("MOCK 
clean build failure"), ]) expected_logs = """run_webkit_patch: ['clean'] command_passed: success_message='Cleaned working directory' patch='10000' run_webkit_patch: ['update'] command_passed: success_message='Updated working directory' patch='10000' run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000] command_passed: success_message='Applied patch' patch='10000' run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000] command_passed: success_message='ChangeLog validated' patch='10000' run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both'] command_failed: failure_message='Patch does not build' script_error='MOCK build failure' patch='10000' run_webkit_patch: ['build', '--force-clean', '--no-update', '--build-style=both'] command_failed: failure_message='Unable to build without patch' script_error='MOCK clean build failure' patch='10000' """ self._run_through_task(commit_queue, expected_logs, expect_retry=True) def test_flaky_test_failure(self): commit_queue = MockCommitQueue([ None, None, None, None, None, ScriptError("MOCK tests failure"), ]) # CommitQueueTask will only report flaky tests if we successfully parsed # results.json and returned a LayoutTestResults object, so we fake one. commit_queue.test_results = lambda: LayoutTestResults([]) expected_logs = """run_webkit_patch: ['clean'] command_passed: success_message='Cleaned working directory' patch='10000' run_webkit_patch: ['update'] command_passed: success_message='Updated working directory' patch='10000' run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000] command_passed: success_message='Applied patch' patch='10000' run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000] command_passed: success_message='ChangeLog validated' patch='10000' run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both'] command_passed: success_message='Built patch' patch='10000' run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] command_failed: failure_message='Patch does not pass tests' script_error='MOCK tests failure' patch='10000' archive_last_test_results: patch='10000' run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] command_passed: success_message='Passed tests' patch='10000' report_flaky_tests: patch='10000' flaky_tests='[]' archive='mock-archive-10000.zip' run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000] command_passed: success_message='Landed patch' patch='10000' """ self._run_through_task(commit_queue, expected_logs) def test_failed_archive(self): commit_queue = MockCommitQueue([ None, None, None, None, None, ScriptError("MOCK tests failure"), ]) commit_queue.test_results = lambda: LayoutTestResults([]) # It's possible delegate to fail to archive layout tests, don't try to report # flaky tests when that happens. 
commit_queue.archive_last_test_results = lambda patch: None expected_logs = """run_webkit_patch: ['clean'] command_passed: success_message='Cleaned working directory' patch='10000' run_webkit_patch: ['update'] command_passed: success_message='Updated working directory' patch='10000' run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000] command_passed: success_message='Applied patch' patch='10000' run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000] command_passed: success_message='ChangeLog validated' patch='10000' run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both'] command_passed: success_message='Built patch' patch='10000' run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] command_failed: failure_message='Patch does not pass tests' script_error='MOCK tests failure' patch='10000' run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] command_passed: success_message='Passed tests' patch='10000' run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000] command_passed: success_message='Landed patch' patch='10000' """ self._run_through_task(commit_queue, expected_logs) def test_double_flaky_test_failure(self): commit_queue = FailingTestCommitQueue([ None, None, None, None, None, ScriptError("MOCK test failure"), ScriptError("MOCK test failure again"), ], [ "foo.html", "bar.html", "foo.html", ]) # The (subtle) point of this test is that report_flaky_tests does not appear # in the expected_logs for this run. # Note also that there is no attempt to run the tests w/o the patch. expected_logs = """run_webkit_patch: ['clean'] command_passed: success_message='Cleaned working directory' patch='10000' run_webkit_patch: ['update'] command_passed: success_message='Updated working directory' patch='10000' run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000] command_passed: success_message='Applied patch' patch='10000' run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000] command_passed: success_message='ChangeLog validated' patch='10000' run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both'] command_passed: success_message='Built patch' patch='10000' run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000' archive_last_test_results: patch='10000' run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000' """ tool = MockTool(log_executive=True) patch = tool.bugs.fetch_attachment(10000) task = CommitQueueTask(commit_queue, patch) success = OutputCapture().assert_outputs(self, task.run, expected_logs=expected_logs) self.assertFalse(success) def test_test_failure(self): commit_queue = MockCommitQueue([ None, None, None, None, None, GoldenScriptError("MOCK test failure"), ScriptError("MOCK test failure again"), ]) expected_logs = """run_webkit_patch: ['clean'] command_passed: success_message='Cleaned working directory' patch='10000' run_webkit_patch: ['update'] command_passed: success_message='Updated working directory' patch='10000' run_webkit_patch: ['apply-attachment', '--no-update', 
'--non-interactive', 10000] command_passed: success_message='Applied patch' patch='10000' run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000] command_passed: success_message='ChangeLog validated' patch='10000' run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both'] command_passed: success_message='Built patch' patch='10000' run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000' archive_last_test_results: patch='10000' run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000' archive_last_test_results: patch='10000' run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive'] command_passed: success_message='Able to pass tests without patch' patch='10000' """ self._run_through_task(commit_queue, expected_logs, GoldenScriptError) def test_red_test_failure(self): commit_queue = FailingTestCommitQueue([ None, None, None, None, None, ScriptError("MOCK test failure"), ScriptError("MOCK test failure again"), ScriptError("MOCK clean test failure"), ], [ "foo.html", "foo.html", "foo.html", ]) # Tests always fail, and always return the same results, but we # should still be able to land in this case! expected_logs = """run_webkit_patch: ['clean'] command_passed: success_message='Cleaned working directory' patch='10000' run_webkit_patch: ['update'] command_passed: success_message='Updated working directory' patch='10000' run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000] command_passed: success_message='Applied patch' patch='10000' run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000] command_passed: success_message='ChangeLog validated' patch='10000' run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both'] command_passed: success_message='Built patch' patch='10000' run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000' archive_last_test_results: patch='10000' run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000' archive_last_test_results: patch='10000' run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive'] command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000' run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000] command_passed: success_message='Landed patch' patch='10000' """ self._run_through_task(commit_queue, expected_logs) def test_very_red_tree_retry(self): lots_of_failing_tests = map(lambda num: "test-%s.html" % num, range(0, 100)) commit_queue = FailingTestCommitQueue([ None, None, None, None, None, ScriptError("MOCK test failure"), ScriptError("MOCK test failure again"), ScriptError("MOCK clean test failure"), ], [ lots_of_failing_tests, lots_of_failing_tests, lots_of_failing_tests, ]) # Tests always fail, and 
        # return so many failures that we do not
        # trust the results (see ExpectedFailures._can_trust_results) so we
        # just give up and retry the patch.
        expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000'
"""
        self._run_through_task(commit_queue, expected_logs, expect_retry=True)

    def test_red_tree_patch_rejection(self):
        commit_queue = FailingTestCommitQueue([
            None,
            None,
            None,
            None,
            None,
            GoldenScriptError("MOCK test failure"),
            ScriptError("MOCK test failure again"),
            ScriptError("MOCK clean test failure"),
        ], [
            ["foo.html", "bar.html"],
            ["foo.html", "bar.html"],
            ["foo.html"],
        ])

        # Tests always fail, but the clean tree only fails one test
        # while the patch fails two.  So we should reject the patch!
        expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000'
"""
        task = self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
        self.assertEqual(task.results_from_patch_test_run(task._patch).failing_tests(), ["foo.html", "bar.html"])
        # failure_status_id should be of the test with patch (1), not the test without patch (2).
        self.assertEqual(task.failure_status_id, 1)

    def test_land_failure(self):
        commit_queue = MockCommitQueue([
            None,
            None,
            None,
            None,
            None,
            None,
            GoldenScriptError("MOCK land failure"),
        ])
        expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_passed: success_message='Passed tests' patch='10000'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_failed: failure_message='Unable to land patch' script_error='MOCK land failure' patch='10000'
"""
        # FIXME: This should really be expect_retry=True for a better user experience.
        self._run_through_task(commit_queue, expected_logs, GoldenScriptError)

    def _expect_validate(self, patch, is_valid):
        class MockDelegate(object):
            def refetch_patch(self, patch):
                return patch

            def expected_failures(self):
                return ExpectedFailures()

        task = CommitQueueTask(MockDelegate(), patch)
        self.assertEqual(task.validate(), is_valid)

    def _mock_patch(self, attachment_dict={}, bug_dict={'bug_status': 'NEW'}, committer="fake"):
        bug = bugzilla.Bug(bug_dict, None)
        patch = bugzilla.Attachment(attachment_dict, bug)
        patch._committer = committer
        return patch

    def test_validate(self):
        self._expect_validate(self._mock_patch(), True)
        self._expect_validate(self._mock_patch({'is_obsolete': True}), False)
        self._expect_validate(self._mock_patch(bug_dict={'bug_status': 'CLOSED'}), False)
        self._expect_validate(self._mock_patch(committer=None), False)
        self._expect_validate(self._mock_patch({'review': '-'}), False)
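# Illustrative aside (not part of the original test file): the retry-vs-reject
# decision exercised by test_red_tree_patch_rejection can be sketched as a
# plain set comparison.  The function name below is hypothetical; the real
# logic lives in CommitQueueTask / ExpectedFailures, not reproduced here.
def should_reject_patch(failures_with_patch, failures_without_patch):
    """Reject only if the patch introduces failures beyond the clean tree's."""
    new_failures = set(failures_with_patch) - set(failures_without_patch)
    return bool(new_failures)

# With the fixtures above: the patch fails foo.html and bar.html while the
# clean tree fails only foo.html, so bar.html is a new failure -> reject.
assert should_reject_patch(["foo.html", "bar.html"], ["foo.html"]) is True
assert should_reject_patch(["foo.html"], ["foo.html"]) is False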
klim-iv/phantomjs-qt5
src/webkit/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py
Python
bsd-3-clause
29,255
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class XcbUtilXrm(AutotoolsPackage):
    """XCB util-xrm module provides the 'xrm' library, i.e. utility
    functions for the X resource manager."""

    homepage = "https://github.com/Airblader/xcb-util-xrm"
    git      = "https://github.com/Airblader/xcb-util-xrm.git"

    # This GitHub project includes some git submodules, which must be fetched
    # in order to build it.
    version('1.2', tag='v1.2', submodules=True)

    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    depends_on('m4', type='build')
    depends_on('pkgconfig', type='build')
    depends_on('[email protected]:')
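# Illustrative usage note (not part of the original package file): with this
# recipe on Spack's builtin repo path, one would typically inspect and build
# the package from the command line, e.g.:
#
#   spack spec xcb-util-xrm        # show the concretized dependency DAG
#   spack install xcb-util-xrm     # fetch v1.2 (with submodules) and build it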
rspavel/spack
var/spack/repos/builtin/packages/xcb-util-xrm/package.py
Python
lgpl-2.1
886
from datetime import datetime
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from markdown import markdown
import bleach
from flask import current_app, request, url_for, Markup
from flask.ext.login import UserMixin, AnonymousUserMixin
from app.exceptions import ValidationError
from . import db, login_manager


class Permission:
    FOLLOW = 0x01
    COMMENT = 0x02
    WRITE_ARTICLES = 0x04
    MODERATE_COMMENTS = 0x08
    ADMINISTER = 0x80


class Role(db.Model):
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    default = db.Column(db.Boolean, default=False, index=True)
    permissions = db.Column(db.Integer)
    users = db.relationship('User', backref='role', lazy='dynamic')

    @staticmethod
    def insert_roles():
        roles = {
            'User': (Permission.FOLLOW |
                     Permission.COMMENT |
                     Permission.WRITE_ARTICLES, True),
            'Moderator': (Permission.FOLLOW |
                          Permission.COMMENT |
                          Permission.WRITE_ARTICLES |
                          Permission.MODERATE_COMMENTS, False),
            'Administrator': (0xff, False)
        }
        for r in roles:
            role = Role.query.filter_by(name=r).first()
            if role is None:
                role = Role(name=r)
            role.permissions = roles[r][0]
            role.default = roles[r][1]
            db.session.add(role)
        db.session.commit()

    def __repr__(self):
        return '<Role %r>' % self.name


class Follow(db.Model):
    __tablename__ = 'follows'
    follower_id = db.Column(db.Integer, db.ForeignKey('users.id'),
                            primary_key=True)
    followed_id = db.Column(db.Integer, db.ForeignKey('users.id'),
                            primary_key=True)
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)


class User(UserMixin, db.Model):
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    password_hash = db.Column(db.String(128))
    confirmed = db.Column(db.Boolean, default=False)
    name = db.Column(db.String(64))
    location = db.Column(db.String(64))
    about_me = db.Column(db.Text())
    member_since = db.Column(db.DateTime(), default=datetime.utcnow)
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
    avatar_hash = db.Column(db.String(32))
    posts = db.relationship('Post', backref='author', lazy='dynamic')
    followed = db.relationship('Follow',
                               foreign_keys=[Follow.follower_id],
                               backref=db.backref('follower', lazy='joined'),
                               lazy='dynamic',
                               cascade='all, delete-orphan')
    followers = db.relationship('Follow',
                                foreign_keys=[Follow.followed_id],
                                backref=db.backref('followed', lazy='joined'),
                                lazy='dynamic',
                                cascade='all, delete-orphan')
    comments = db.relationship('Comment', backref='author', lazy='dynamic')

    @staticmethod
    def generate_fake(count=100):
        from sqlalchemy.exc import IntegrityError
        from random import seed
        import forgery_py

        seed()
        for i in range(count):
            u = User(email=forgery_py.internet.email_address(),
                     username=forgery_py.internet.user_name(True),
                     password=forgery_py.lorem_ipsum.word(),
                     confirmed=True,
                     name=forgery_py.name.full_name(),
                     location=forgery_py.address.city(),
                     about_me=forgery_py.lorem_ipsum.sentence(),
                     member_since=forgery_py.date.date(True))
            db.session.add(u)
            try:
                db.session.commit()
            except IntegrityError:
                db.session.rollback()

    @staticmethod
    def add_self_follows():
        for user in User.query.all():
            if not user.is_following(user):
                user.follow(user)
                db.session.add(user)
                db.session.commit()

    def __init__(self, **kwargs):
        super(User, self).__init__(**kwargs)
        if self.role is None:
            if self.email == current_app.config['FLASKY_ADMIN']:
                self.role = Role.query.filter_by(permissions=0xff).first()
            if self.role is None:
                self.role = Role.query.filter_by(default=True).first()
        if self.email is not None and self.avatar_hash is None:
            self.avatar_hash = hashlib.md5(
                self.email.encode('utf-8')).hexdigest()
        self.followed.append(Follow(followed=self))

    @property
    def password(self):
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        return check_password_hash(self.password_hash, password)

    def generate_confirmation_token(self, expiration=3600):
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id})

    def confirm(self, token):
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        return True

    def generate_reset_token(self, expiration=3600):
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'reset': self.id})

    def reset_password(self, token, new_password):
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            return False
        if data.get('reset') != self.id:
            return False
        self.password = new_password
        db.session.add(self)
        return True

    def generate_email_change_token(self, new_email, expiration=3600):
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'change_email': self.id, 'new_email': new_email})

    def change_email(self, token):
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            return False
        if data.get('change_email') != self.id:
            return False
        new_email = data.get('new_email')
        if new_email is None:
            return False
        if self.query.filter_by(email=new_email).first() is not None:
            return False
        self.email = new_email
        self.avatar_hash = hashlib.md5(
            self.email.encode('utf-8')).hexdigest()
        db.session.add(self)
        return True

    def can(self, permissions):
        return self.role is not None and \
            (self.role.permissions & permissions) == permissions

    def is_administrator(self):
        return self.can(Permission.ADMINISTER)

    def ping(self):
        self.last_seen = datetime.utcnow()
        db.session.add(self)

    def gravatar(self, size=100, default='identicon', rating='g'):
        if request.is_secure:
            url = 'https://secure.gravatar.com/avatar'
        else:
            url = 'http://www.gravatar.com/avatar'
        hash = self.avatar_hash or hashlib.md5(
            self.email.encode('utf-8')).hexdigest()
        return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
            url=url, hash=hash, size=size, default=default, rating=rating)

    def follow(self, user):
        if not self.is_following(user):
            f = Follow(follower=self, followed=user)
            db.session.add(f)

    def unfollow(self, user):
        f = self.followed.filter_by(followed_id=user.id).first()
        if f:
            db.session.delete(f)

    def is_following(self, user):
        return self.followed.filter_by(
            followed_id=user.id).first() is not None

    def is_followed_by(self, user):
        return self.followers.filter_by(
            follower_id=user.id).first() is not None

    @property
    def followed_posts(self):
        return Post.query.join(Follow, Follow.followed_id == Post.author_id)\
            .filter(Follow.follower_id == self.id)

    def to_json(self):
        json_user = {
            # bug fix: this endpoint must be the user resource, not a post
            'url': url_for('api.get_user', id=self.id, _external=True),
            'username': self.username,
            'member_since': self.member_since,
            'last_seen': self.last_seen,
            'posts': url_for('api.get_user_posts', id=self.id, _external=True),
            'followed_posts': url_for('api.get_user_followed_posts',
                                      id=self.id, _external=True),
            'post_count': self.posts.count()
        }
        return json_user

    def generate_auth_token(self, expiration):
        s = Serializer(current_app.config['SECRET_KEY'],
                       expires_in=expiration)
        return s.dumps({'id': self.id}).decode('ascii')

    @staticmethod
    def verify_auth_token(token):
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            return None
        return User.query.get(data['id'])

    def __repr__(self):
        return '<User %r>' % self.username


class AnonymousUser(AnonymousUserMixin):
    def can(self, permissions):
        return False

    def is_administrator(self):
        return False

login_manager.anonymous_user = AnonymousUser


@login_manager.user_loader
def load_user(user_id):
    return User.query.get(int(user_id))


class Post(db.Model):
    __tablename__ = 'posts'
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text)
    body_html = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    comments = db.relationship('Comment', backref='post', lazy='dynamic')

    @staticmethod
    def generate_fake(count=100):
        from random import seed, randint
        import forgery_py

        seed()
        user_count = User.query.count()
        for i in range(count):
            u = User.query.offset(randint(0, user_count - 1)).first()
            p = Post(body=forgery_py.lorem_ipsum.sentences(randint(1, 5)),
                     timestamp=forgery_py.date.date(True),
                     author=u)
            db.session.add(p)
            db.session.commit()

    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        # allowed_tags = ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code',
        #                 'em', 'i', 'li', 'ol', 'pre', 'strong', 'ul',
        #                 'h1', 'h2', 'h3', 'p']
        # target.body_html = bleach.linkify(bleach.clean(
        #     markdown(value, output_format='html'),
        #     tags=allowed_tags, strip=True))
        target.body_html = Markup(markdown(value, output_format='html'))

    def to_json(self):
        json_post = {
            'url': url_for('api.get_post', id=self.id, _external=True),
            'body': self.body,
            'body_html': self.body_html,
            'timestamp': self.timestamp,
            'author': url_for('api.get_user', id=self.author_id,
                              _external=True),
            'comments': url_for('api.get_post_comments', id=self.id,
                                _external=True),
            'comment_count': self.comments.count()
        }
        return json_post

    @staticmethod
    def from_json(json_post):
        body = json_post.get('body')
        if body is None or body == '':
            raise ValidationError('post does not have a body')
        return Post(body=body)


db.event.listen(Post.body, 'set', Post.on_changed_body)


class Comment(db.Model):
    __tablename__ = 'comments'
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text)
    body_html = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    disabled = db.Column(db.Boolean)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))

    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        allowed_tags = ['a', 'abbr', 'acronym', 'b', 'code', 'em', 'i',
                        'strong']
        target.body_html = bleach.linkify(bleach.clean(
            markdown(value, output_format='html'),
            tags=allowed_tags, strip=True))

    def to_json(self):
        json_comment = {
            'url': url_for('api.get_comment', id=self.id, _external=True),
            'post': url_for('api.get_post', id=self.post_id, _external=True),
            'body': self.body,
            'body_html': self.body_html,
            'timestamp': self.timestamp,
            'author': url_for('api.get_user', id=self.author_id,
                              _external=True),
        }
        return json_comment

    @staticmethod
    def from_json(json_comment):
        body = json_comment.get('body')
        if body is None or body == '':
            raise ValidationError('comment does not have a body')
        return Comment(body=body)


db.event.listen(Comment.body, 'set', Comment.on_changed_body)
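# Illustrative aside (not part of the original module): the Permission class
# above is a plain bit mask, and User.can() checks that every requested bit
# is set on the user's role.  The same check can be demonstrated without any
# database, using only the integer constants:
FOLLOW, COMMENT, WRITE_ARTICLES = 0x01, 0x02, 0x04

moderator_permissions = FOLLOW | COMMENT | WRITE_ARTICLES  # 0x07

def can(role_permissions, requested):
    # mirrors User.can(): all requested bits must be present at once
    return (role_permissions & requested) == requested

assert can(moderator_permissions, FOLLOW | COMMENT) is True
assert can(moderator_permissions, 0x08) is False  # MODERATE_COMMENTS not granted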
VincentFF/personal-blog
app/models.py
Python
mit
13,862
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateTag
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.

# To install the latest published package dependency, execute the following:
#   python3 -m pip install google-cloud-datacatalog


# [START datacatalog_v1beta1_generated_DataCatalog_CreateTag_async]
from google.cloud import datacatalog_v1beta1


async def sample_create_tag():
    # Create a client
    client = datacatalog_v1beta1.DataCatalogAsyncClient()

    # Initialize request argument(s)
    tag = datacatalog_v1beta1.Tag()
    tag.column = "column_value"
    tag.template = "template_value"

    request = datacatalog_v1beta1.CreateTagRequest(
        parent="parent_value",
        tag=tag,
    )

    # Make the request
    response = await client.create_tag(request=request)

    # Handle the response
    print(response)

# [END datacatalog_v1beta1_generated_DataCatalog_CreateTag_async]
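# Illustrative aside (not part of the generated sample): the coroutine above
# must be driven by an event loop.  A minimal way to run it, assuming
# application-default credentials are configured and "parent_value" is
# replaced with a real entry path:
import asyncio

if __name__ == "__main__":
    asyncio.run(sample_create_tag())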
googleapis/python-datacatalog
samples/generated_samples/datacatalog_v1beta1_generated_data_catalog_create_tag_async.py
Python
apache-2.0
1,604
import pytest
import time
import requests as req
from support.constants import request_detail, launch_detail, workitem_constants, dynamic_vars
import support.helpers as helpers

start_time = time.time()
local_run = False


class TestClass_SetupGettingStarted(object):
    def test_setup_gettingStarted(self, sut, offline_token, userid):
        print("\n\n==>Getting Started Test Setup Start....\n")
        if sut is None:
            launch_detail.base_url[launch_detail.base_wit] = r"https://api.openshift.io"
            print("SUT (WIT Target) not provided!!! Using default production SUT = ", launch_detail.base_url[launch_detail.base_wit])
        else:
            # Identify if its a local run and set the local_run variable to True
            if "localhost" in sut or "0.0.0.0" in sut or "127.0.0.1" in sut:
                global local_run
                local_run = True
            launch_detail.base_url[launch_detail.base_wit] = sut
            print("SUT set to = ", sut)

        if userid is None:
            launch_detail.userid_primary = launch_detail.userid_prod_primary_default
            print("USERID not provided! Going ahead with the default USERID = ", launch_detail.userid_prod_primary_default)
        else:
            launch_detail.userid_primary = userid
            print("USERID set to = ", launch_detail.userid_primary)

        if offline_token in ["", "0", False, 0, None, "None"]:
            if local_run:
                try:
                    launch_detail.token_userid_primary = launch_detail.get_local_token()
                    if launch_detail.token_userid_primary:
                        print("Local ACCESS_TOKEN obtained")
                except:
                    pytest.exit("Failed to generate local ACCESS_TOKEN!!! Terminating the run!!!!!!!!!!!")
            else:
                pytest.exit("REFRESH_TOKEN not provided!!! Terminating the run!!!!!!!!!!!")
        else:
            launch_detail.offref_token_userid_primary = offline_token
            try:
                launch_detail.token_userid_primary = launch_detail.get_access_token_from_refresh()
                if launch_detail.token_userid_primary:
                    print("ACCESS_TOKEN set to = A secret in Jenkins ;)")
            except:
                pytest.exit("Failed to generate ACCESS_TOKEN from OFFLINE_TOKEN!!! Terminating the run!!!!!!!!!!!")

        # Define Request Header, that includes Access Token
        request_detail.headers_default = {request_detail.content_type_key_default: request_detail.content_type_default,
                                          request_detail.authorization_key_default: request_detail.authorization_carrier_default + launch_detail.token_userid_primary}
        print("\n==Getting started Test Setup Complete....\n")


class TestClass_GettingStartedTeardown(object):
    '''Class that dumps data to a file'''
    def test_getting_started_teardown(self, cleanup):
        import os
        import json
        launch_detail.launch_details_dict["token"] = launch_detail.token_userid_primary
        launch_detail.launch_details_dict["offline_token"] = launch_detail.offref_token_userid_primary
        launch_detail.launch_details_dict["user_id"] = launch_detail.userid_primary
        launch_detail.launch_details_dict["sut"] = launch_detail.base_url[launch_detail.base_wit]
        try:
            curr_dir = os.path.dirname(__file__)
            filepath = os.path.join(curr_dir, '..', 'launch_info_dump_getting_started.json')
            with open(filepath, 'w') as f:
                json.dump(launch_detail.launch_details_dict, f, sort_keys=True, indent=4)
        except Exception:
            print("Exception creating launch_info_dump_getting_started.json")
ldimaggi/fabric8-test
EE_API_automation/pytest/src/test_getting_started.py
Python
apache-2.0
3,767
""" Copyright 2017 Dell Inc. or its subsidiaries. All Rights Reserved. Module to hold helper classes and functions to determine run-time test IP information. Currently, """ import flogging import ipaddress import netifaces import socket import fit_common logs = flogging.get_loggers() class TestHostInterfacer(object): _cached = None @classmethod def get_testhost_ip(cls): if cls._cached is None: cls._cached = cls() logs.info('The IP address of %s has been selected as the most likely testhost IP address reachable from the DUT', cls._cached.__alleged_testhost_ip) return cls._cached.__alleged_testhost_ip def __init__(self): self.__alleged_testhost_ip = None s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) ip = fit_common.fitargs()['rackhd_host'] monip = fit_common.fitcfg()["rackhd-config"]["apiServerAddress"] monip_obj = ipaddress.ip_address(monip) logs.irl.debug('Trying to determine testhost IP address. Hitting rackhd_host value %s first', ip) s.connect((ip, 0)) logs.debug(' ip used to generate connection to %s was %s: ', ip, s.getsockname()[0]) alleged_testhost_ip_str = s.getsockname()[0] # python2/3 flake handling. The 'unicode' keyword is gone from p3. However, although # our code is p2, hound uses p3. We can cover both by using the -type- of a unicode string! ucode_type = type(u'unicode_string_to_type') alleged_testhost_ip = ipaddress.ip_address(ucode_type(alleged_testhost_ip_str)) if not alleged_testhost_ip.is_loopback: # A non-loopback address is about the best guess we can get. Use it. logs.irl.debug(' ip used to generate connection to %s is non-loopback. Using %s', ip, alleged_testhost_ip_str) self.__alleged_testhost_ip = alleged_testhost_ip_str return # Localhost. Great. We are either running on the DUT or are on a test-host. # In either case, grabbing pretty much any ip interface that isn't a loop back # should do the trick. docker_net = [] mono_net = [] eform_net = [] vbox_net = [] veth_net = [] extras_net = [] int_list = netifaces.interfaces() for interface in int_list: logs.irl.debug(' checking interface %s', interface) ifaddrs = netifaces.ifaddresses(interface) if netifaces.AF_INET not in ifaddrs: logs.irl.debug(' -- no ifaddrs on it, skipping') else: for net in ifaddrs[netifaces.AF_INET]: logs.irl.debug(' checking %s on %s', net, interface) addr = net['addr'] mask = net['netmask'] inet_form = u'{}/{}'.format(addr, mask) this_iface = ipaddress.ip_interface(inet_form) this_iface.on_name = interface dispo = None if this_iface.is_loopback: dispo = 'loopback-skip' elif monip_obj in this_iface.network: # really the last choice, all things considered! 
dispo = 'added to control-network-list' mono_net.append(this_iface) elif 'docker' in interface: dispo = 'added to docker list' docker_net.append(this_iface) elif interface.startswith('vbox'): dispo = 'added to vbox list' vbox_net.append(this_iface) elif interface.startswith('veth'): dispo = 'added to veth list' veth_net.append(this_iface) elif interface.startswith('eth') or interface.startswith('en'): dispo = 'added to en/eth list' eform_net.append(this_iface) else: logs.irl.debug('unknown interface type-ish %s seen', interface) dispo = 'added to extras list' extras_net.append(this_iface) logs.irl.debug(' -> %s', dispo) ordered_list = [] ordered_list.extend(eform_net) ordered_list.extend(docker_net) ordered_list.extend(vbox_net) ordered_list.extend(veth_net) ordered_list.extend(extras_net) ordered_list.extend(mono_net) logs.irl.debug(' Final list of possible addresses: %s', ordered_list) # note: we could go and ssh over and ping back to check these. For now, just # grab the first. if len(ordered_list) == 0: logs.warning('could not find the test-host ip address and fell back on localhost') self.__alleged_testhost_ip = '127.0.1.1' return picked = ordered_list[0] logs.irl.debug('picked %s on %s', picked.ip, picked.on_name) self.__alleged_testhost_ip = str(picked.ip) def get_testhost_ip(): return TestHostInterfacer.get_testhost_ip()
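# Illustrative aside (not part of the original module): the core trick above
# is that connect() on a UDP socket sends no packet, but it does make the OS
# choose the outbound interface, whose address getsockname() then reveals.
# A self-contained sketch (8.8.8.8 is just an arbitrary routable address):
import socket

def guess_local_ip(probe_addr="8.8.8.8", probe_port=53):
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect((probe_addr, probe_port))  # no traffic is actually sent
        return s.getsockname()[0]
    finally:
        s.close()

if __name__ == "__main__":
    print(guess_local_ip())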
johren/RackHD
test/common/env_ip_helpers.py
Python
apache-2.0
5,200
import os
from time import time

from sklearn.externals import joblib
from sklearn import tree

from preprocessData import getDataXY

trainX, trainY, testX, testY, validX, validY = getDataXY()
# print len(trainX), len(trainY), len(testX), len(testY), len(validX), len(validY)

X = trainX
y = trainY

# clf = SGDClassifier(loss="hinge", penalty="l2", n_iter=800)
clf = tree.DecisionTreeClassifier()

if not os.path.isfile('dt0.58.pkl'):
    clf.fit(X, y)
else:
    clf = joblib.load('dt0.58.pkl')

if not os.path.isfile('dt0.58.pkl'):
    joblib.dump(clf, 'dt0.58.pkl')

# print clf.predict(testX[0:10])
# print testY[0:10]

valid_result = clf.predict(validX)


def get_accuracy(predict, true):
    right_num = 0
    total_num = 0
    for pre, tru in zip(predict, true):
        total_num += 1
        if pre == tru:
            right_num += 1
    # divide by 1.0 to force float division under Python 2 (the original
    # divided by 0.1, which scaled the accuracy by 10)
    return right_num / 1.0 / total_num


start = time()
print get_accuracy(clf.predict(validX), validY)
end = time()
print end - start, 's'
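# Illustrative aside (not part of the original script): scikit-learn already
# ships the same metric, so the hand-rolled loop above can be cross-checked
# against sklearn.metrics.accuracy_score on any pair of label sequences:
from sklearn.metrics import accuracy_score

pred = [0, 1, 1, 0]
true = [0, 1, 0, 0]
assert accuracy_score(true, pred) == 0.75  # 3 of 4 labels match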
TheaGao/SklearnModel
DecisionTree.py
Python
mit
978
# -*- coding: utf-8 -*-

extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.viewcode',
]

intersphinx_mapping = {
    'pyexcel': ('http://pyexcel.readthedocs.org/en/latest/', None)
}

spelling_word_list_filename = 'spelling_wordlist.txt'

templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'

project = u'django-excel'
copyright = u'2015-2016 Onni Software Ltd.'
version = '0.0.5'
release = '0.0.6'

exclude_patterns = []
pygments_style = 'sphinx'

import os  # noqa
import sys  # noqa
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
html_theme = 'djangodocs'
html_static_path = ['_static']
htmlhelp_basename = 'django-exceldoc'

latex_elements = {}
latex_documents = [
    ('index', 'django-excel.tex',
     u'django-excel Documentation',
     'Onni Software Ltd.', 'manual'),
]

man_pages = [
    ('index', 'django-excel',
     u'django-excel Documentation',
     [u'Onni Software Ltd.'], 1)
]

texinfo_documents = [
    ('index', 'django-excel',
     u'django-excel Documentation',
     'Onni Software Ltd.', 'django-excel',
     'One line description of project.',
     'Miscellaneous'),
]
fondelsur/todopinturas
doc/source/conf.py
Python
bsd-3-clause
1,177
#!/usr/bin/env python
from setuptools import setup, find_packages

setup(
    name='calc',
    version='0.0',
    packages=find_packages(),
    scripts=['scripts/calc'])
ewtoombs/python-calc
setup.py
Python
mit
187
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('restaurant', '0020_cuisine_menu'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cuisine',
            name='cuisine_id',
            field=models.AutoField(serialize=False, primary_key=True),
        ),
    ]
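# Illustrative usage note (not part of the migration): Django would preview or
# apply this schema change from the project root, e.g.:
#
#   python manage.py sqlmigrate restaurant 0021   # show the generated SQL
#   python manage.py migrate restaurant           # apply up to this migration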
gauravbose/digital-menu
digimenu2/restaurant/migrations/0021_auto_20150704_1154.py
Python
bsd-3-clause
424
# coding=utf-8 # имена девушек генерируются из списков имен (тип девушки_first) и фамилий (тип девушки_last). Если списка фамилий # нет - генерируется только из списка имен. girls_names = { 'peasant_first': [ u'Жанна', u'Герда', u'Баббета', u'Cюзи', u'Альба', u'Амели', u'Аннета', u'Жоржетта', u'Бетти', u'Бетси', u'Бланка', u'Бьянка', u'Дейзи', u'Джинни', u'Джуди', u'Дороти', u'Зои', u'Ирен', u'Ивет', u'Колет', u'Криси', u'Кэтти', u'Кэт', u'Лили', u'Лиди', u'Лулу' ], 'citizen_first': [ u'Аделия', u'Аврора', u'Альбертина', u'Анджелла', u'Аврелия', u'Беатрис', u'Бернадетт', u'Бриджит', u'Вероник', u'Виолет', u'Вирджиния', u'Габриэлла', u'Джаннет', u'Джулиана', u'Доминика', u'Жаклина', u'Жозефина', u'Джульетта', u'Камилла', u'Каролина', u'Кэйтлин', u'Ирен', u'Мелисса', u'Марджори', u'Натали', u'Пенелопа', u'Розали', u'Розета', u'Селеста', u'Симона', u'Стефани', u'Сюзанна', u'Тереза', u'Флора', u'Эммануэль', u'Адалинда', u'Альбертина', u'Амелинда', u'Гризельда', u'Виктория', u'Ирма', u'Каролина', u'Кристиана', u'Кэтрин', u'Лиона', u'Лорели', u'Маргарита', u'Франциска', u'Хенелора', u'Хильда', u'Элеонора', u'Абигайль', u'Антония', u'Долорес', u'Доротея', u'Женевьева', u'Жозефина', u'Инесс', u'Кармелита', u'Консуэлла', u'Летиция', u'Марселла', u'Присцилла', u'Рамона', u'София', u'Ефимия', u'Ефания', u'Лидия', u'Беатриче', ], 'princess_first': [ u'Аннабель', u'Аделия', u'Авелин', u'Айседора', u'Альбертина', u'Анастасия', u'Антуанетта', u'Беатрис', u'Валентина', u'Виктория', u'Габриэлла', u'Джиневра', u'Доминика', u'Джулианна', u'Джульетта', u'Жюстина', u'Жозефина', u'Ивонна', u'Изабелла', u'Камилла', u'Клариса', u'Клементина', u'Кристина', u'Лукреция', u'Марго', u'Матильда', u'Мелисента', u'Марианна', u'Олимпия', u'Пенелопа', u'Розалинда', u'Розамунда', u'Селестина', u'Серафина', u'Сюзанна', u'Стефания', u'Тереза', u'Флафия', u'Фелиция', u'Генриэтта', u'Гертруда', u'Шарлотта', u'Эмммануэль', u'Альбертина', u'Амелинда', u'Брунгильда', u'Вильгельмина', u'Изольда', u'Рафаэлла', u'Амаранта', u'Дельфиния', u'Доротея', u'Мерседес', u'Офелия', ], 'princess_last': [ u'дэ Мюзи', u'фон Баургафф', u'дэ Альбре', u'дэ Блуа', u'дэ Виржи', u'ди Гиз', u'дэ Бриенн', u'дэ Колиньи', u'дэ Ла Тур', u'дэ Лузиньян', u'дэ Фуа', u'дэ Брисак', u'дэ Круа', u'дэ Лин', u'дэ Кюлот', u'дэ Сен-При', u'фон Баттенберг', u'фон Беннгис', u'фон Вальбиц', u'фон Вительсбах', u'фон Гогеншауфен', u'фон Зальф', u'фон Люденштафф', u'фон Мирбах', u'фон Розен', u'фон Церинген', u'фон Грюнберг', u'фон Штюрберг', u'фон Шелленбург', u'Строцци', u'Сфорца', u'Альбици', u'Барбариго', u'Пацци', u'Бранкаччо', u'да Верана', u'Висконти', u'Гримальди', u'да Полента', u'делла Тори', u'да Камино', u'Монтрефельто', u'Манфреди', u'Фарнезе', u'Фрегозо', u'де Мендоза', u'ла Серда', ], 'elf_first': [ u'Берунвен', u'Фанавен', u'Арвен', u'Лучиэнь', u'Феалиндэ', u'Эстелендиль', u'Астера', u'Теолинвен', u'Куивэн', u'Мрвэн', u'Интиальвен', u'Анарвен', u'Аманиэль', u'Анариэль', u'Лариэль', u'Лотанариэ', u'Исильиндиль', u'Селфарианис', u'Йорингель', u'Оросинвиль', u'Гилэстель', u'Валакирэ' ], 'ogre_first': [ u'Хунн', u'Йорва', u'Дирга', u'Велга', u'Сига', u'Йалгуль', u'Дорба', u'Гирга', u'Давири', u'Шалга', u'Орва', u'Дезра', u'Арга', u'Бигра', u'Варга', u'Енза', u'Зарта', u'Икла', u'Корда', u'Логаза', u'Мирбу', u'Нира', ], 'mermaid_first': [ u'Ариэль', u'Блажена', u'Будимила', u'Ведана', u'Велина', u'Венцеслава', u'Верея', u'Велезара', u'Веселина', u'Витана', u'Влада', u'Весемлиа', u'Годица', u'Горлина', u'Далина', u'Ждана', u'Деяна', u'Дивина', u'Доляна', 
u'Есена', u'Жилена', u'Завида', u'Зоряна', u'Златина', u'Ивица', u'Калёна', u'Красоя', u'Купава', u'Лада', u'Леля', u'Малиша', u'Млава', u'Милана', u'Младлена', u'Мирана', u'Невена', u'Обрица', u'Пава', u'Пригода', u'Рада', u'Ракита', u'Ружана', u'Силимина', u'Серебрина', u'Славена', u'Станимира', u'Стояна', u'Томила', u'Умила', u'Ундина', u'Цветана', u'Чаруна', u'Янина', u'Яромила', u'Ясмания' ], 'siren_first': [ u'Ариэль', u'Блажена', u'Будимила', u'Ведана', u'Велина', u'Венцеслава', u'Верея', u'Велезара', u'Веселина', u'Витана', u'Влада', u'Весемлиа', u'Годица', u'Горлина', u'Далина', u'Ждана', u'Деяна', u'Дивина', u'Доляна', u'Есена', u'Жилена', u'Завида', u'Зоряна', u'Златина', u'Ивица', u'Калёна', u'Красоя', u'Купава', u'Лада', u'Леля', u'Малиша', u'Млава', u'Милана', u'Младлена', u'Мирана', u'Невена', u'Обрица', u'Пава', u'Пригода', u'Рада', u'Ракита', u'Ружана', u'Силимина', u'Серебрина', u'Славена', u'Станимира', u'Стояна', u'Томила', u'Умила', u'Ундина', u'Цветана', u'Чаруна', u'Янина', u'Яромила', u'Ясмания' ], 'ice_first': [ u'Астрид', u'Бригита', u'Боргильда', u'Вигдис', u'Вилла', u'Гурдун', u'Гунхильд', u'Дорта', u'Ингрид', u'Ингеборга', u'Йорнун', u'Матильда', u'Рангильда', u'Руна', u'Сигурд', u'Сванхильда', u'Сигюнд', u'Ульрика', u'Фрида', u'Хлодвен', u'Хильда', u'Эрика' ], 'fire_first': [ u'Азиль', u'Азиза', u'Базайна', u'Багира', u'Будур', u'Бушра', u'Гюльчатай', u'Гуля', u'Гульнара', u'Гулистан', u'Фируза', u'Фатима', u'Ясмин', u'Айгюль', u'Зульфия', u'Ламия', u'Лейла', u'Марьям', u'Самира', u'Хурма', u'Чинара', u'Эльмира' ], 'titan_first': [ u'Агата', u'Адонисия', u'Алексино', u'Амброзия', u'Антигона', u'Ариадна', u'Артемисия', u'Афродита', u'Гликерия', u'Дельфиния', u'Деметра', u'Зиновия', u'Калисто', u'Калипсо', u'Кора', u'Ксения', u'Медея', u'Мельпомена', u'Мнемозина', u'Немезида', u'Олимпия', u'Пандора', u'Персефона', u'Таисия', u'Персея', u'Персея', u'Психея', u'Сапфо', u'Талия', u'Терпсихора', u'Фаломена', u'Гаромония', u'Хрисеида', u'Эфимия', u'Юнона' ] } # Информация о всех типах девушек girls_info = { 'peasant': { 'magic_rating': 0, # магический рейтинг 'regular_spawn': 'poisonous_asp', # идентификатор обычного отродья 'advanced_spawn': 'basilisk', # идентификатор продвинутого отродья 'giantess': False, # является ли великаншей 'avatar': 'peasant', # аватарка 'description': u'селянка', # описание для вывода в текст 't_count_min': 0, # количество сокровищ минимальное 't_count_max': 2, # количество сокровищ максимальное 't_price_min': 1, # минимальная цена предмета 't_price_max': 25, # максимальная цена предмета 't_alignment': 'human', # тип украшений 't_list': [ 'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'earring', 'necklace', 'pendant', 'ring', 'broch', 'armbrace', 'legbrace', 'fibula', 'farting'], # список возможных предметов в сокровищах }, 'citizen': { 'magic_rating': 0, 'regular_spawn': 'winged_asp', 'advanced_spawn': 'kobold', 'giantess': False, 'avatar': 'citizen', 'description': u'горожанка', 't_count_min': 0, 't_count_max': 4, 't_price_min': 25, 't_price_max': 100, 't_alignment': 'human', 't_list': [ 'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring', 'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain', 'fibula', 'taller'], }, 'thief': { 'magic_rating': 0, 'regular_spawn': 'winged_asp', 'advanced_spawn': 'kobold', 'giantess': False, 'avatar': 'thief', 'description': u'воровка', 't_count_min': 2, 't_count_max': 5, 't_price_min': 25, 't_price_max': 250, 't_alignment': 'human', 
't_list': [ 'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring', 'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain', 'fibula', 'taller', 'dublon'], }, 'knight': { 'magic_rating': 1, 'regular_spawn': 'krokk', 'advanced_spawn': 'lizardman', 'giantess': False, 'avatar': 'knight', 'description': u'воительница', 't_count_min': 2, 't_count_max': 5, 't_price_min': 25, 't_price_max': 250, 't_alignment': 'knight', 't_list': [ 'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring', 'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain', 'fibula', 'taller', 'dublon'], }, 'princess': { 'magic_rating': 0, 'regular_spawn': 'krokk', 'advanced_spawn': 'lizardman', 'giantess': False, 'avatar': 'princess', 'description': u'аристократка', 't_count_min': 2, 't_count_max': 5, 't_price_min': 100, 't_price_max': 1000, 't_alignment': 'knight', 't_list': [ 'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring', 'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain', 'fibula'], }, 'elf': { 'magic_rating': 1, 'regular_spawn': 'gargoyle', 'advanced_spawn': 'dragonborn', 'giantess': False, 'avatar': 'elf', 'description': u'эльфийская дева', 't_count_min': 1, 't_count_max': 4, 't_price_min': 250, 't_price_max': 2000, 't_alignment': 'elf', 't_list': [ 'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring', 'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain'], }, 'mermaid': { 'magic_rating': 1, 'regular_spawn': 'octopus', 'advanced_spawn': 'sea_bastard', 'giantess': False, 'avatar': 'mermaid', 'description': u'русалка', 't_count_min': 0, 't_count_max': 4, 't_price_min': 10, 't_price_max': 200, 't_alignment': 'merman', 't_list': [ 'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring', 'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain'], }, 'ogre': { 'magic_rating': 2, 'regular_spawn': 'strigg', 'advanced_spawn': 'minotaur', 'giantess': True, 'avatar': 'ogre', 'description': u'людоедка', 't_count_min': 0, 't_count_max': 3, 't_price_min': 250, 't_price_max': 1500, 't_alignment': 'knight', 't_list': [ 'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring', 'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain', 'fibula', 'farting', 'taller', 'dublon'], }, 'siren': { 'magic_rating': 2, 'regular_spawn': 'murloc', 'advanced_spawn': 'naga', 'giantess': True, 'avatar': 'mermaid', 'description': u'сирена', 't_count_min': 1, 't_count_max': 4, 't_price_min': 250, 't_price_max': 2000, 't_alignment': 'merman', 't_list': [ 'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring', 'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain', 'taller', 'dublon'], }, 'ice': { 'magic_rating': 2, 'regular_spawn': 'ice_worm', 'advanced_spawn': 'yettie', 'giantess': True, 'avatar': 'ice', 'description': u'ледяная великанша', 't_count_min': 1, 't_count_max': 5, 't_price_min': 250, 't_price_max': 2500, 't_alignment': 'human', 't_list': [ 'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring', 'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain', 'taller', 'dublon'], }, 'fire': { 'magic_rating': 2, 'regular_spawn': 'hell_hound', 'advanced_spawn': 'barlog', 'giantess': True, 
'avatar': 'fire', 'description': u'огненная великанша', 't_count_min': 1, 't_count_max': 5, 't_price_min': 250, 't_price_max': 2500, 't_alignment': 'dwarf', 't_list': [ 'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring', 'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain', 'taller', 'dublon'], }, 'titan': { 'magic_rating': 2, 'regular_spawn': 'chimera', 'advanced_spawn': 'troll', 'giantess': True, 'avatar': 'titan', 'description': u'титанида', 't_count_min': 3, 't_count_max': 6, 't_price_min': 500, 't_price_max': 5000, 't_alignment': 'elf', 't_list': [ 'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring', 'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain', 'taller', 'dublon'], }, } # Информация о всех типах отродий spawn_info = { 'goblin': { 'power': 1, # сила 'modifier': [], # возможные роли 'name': u'Гоблин', # название 'born': u'Ошибка', # Описание при рождении }, 'poisonous_asp': { 'power': 1, # сила 'modifier': ['poisonous'], # возможные роли 'name': u'Ядовитый аспид', # название 'born': u'вылупившиеся из продолговатых мягких яиц ядовитые змеи ничем особенно не отличаются от болотных гадюк, разве что крупнее и агрессивнее. Вместо того чтобы прятаться в глухих местах, эти злобные ядовитые твари вечно ищут кого ужалить, будь то люди или домашний скот. От их токсина нет противоядия, а смерь медленная и крайне мучительная.', # Описание при рождении }, 'winged_asp': { 'power': 2, 'modifier': ['poisonous'], 'name': u'Крылатый аспид', 'born': u'эти крупные ядовитые змеи в отличие от обычных пресмыкающихся наделены унаследованными от драконьей крови крыльями. Одно дело случайно наступить на гадюку, у совсем другое когда она приземляется тебе на шею прямо с неба. Яд крылатых аспидов приводит к долгой и мучительной смерти, а кусаться они ох как любят.', # Описание при рождении }, 'krokk': { 'power': 1, 'modifier': ['servant'], 'name': u'Крокк', 'born': u'рождённые от благородной дамы эти существа превосходят тварей которых способна родить любая простолюдинка. Впрочем, несмотря на мощное телосложение и кое-какие мозги в охранники логова Кроккки не годятся. Они подслеповаты и вечно дремлют устроившись на солнышке или нырнув в грязь. Впрочем их можно заставить выполнять работу по дому или на строителсьстве.', # Описание при рождении }, 'basilisk': { 'power': 3, 'modifier': ['poisonous'], 'name': u'Василиск', 'born': u'это жуткого вида птенчики, с петушиными гребнями и змеиными хвостами. Хотя эти твари и безмозглые, они всё же намного опаснее обычных ядовитых аспидов, которых крестьянка могла бы родить от менее коварного дракона. Василиски, так же известные как кокатриксы, способны отравить человека просто поглядев ему в глаза и к тому же они летают хоть и неуклюже.', # Описание при рождении }, 'kobold': { 'power': 2, 'modifier': ['servant'], 'name': u'Кобольд', 'born': u'мелкие, дохлые и скрюченные кобольды это всё что способна произвести на свет обычная женщина даже от очень могучего драконьего семени. Тем не менее эти драконоподобные гуманоиды обладают достаточным интеллектом чтобы выполнять работу по хозяйству. 
В бою они примерно равны обычным гоблинам, но ещё и трусливы до ужаса, так что ставить их на охрану логова было бы опрометчиво.', # Описание при рождении }, 'lizardman': { 'power': 3, 'modifier': ['warrior'], 'name': u'Ящерик', 'born': u'сочетание могучего драконьего семени и чистой благородной крови дало лучших отродий которых только способна родить смертная женщина. Взрослый ящерик куда крупнее и сильнее обычного человека, покрыт прочной чешуёй и не чувствителен к боли. Рептилоиды быстры, наблюдательны и достаточно умны чтобы стать отличными воинами. Они так же любят строить коварные планы по тайному захвату мира, но тут им придётся встать в очередь за своей бабушкой - Владычицей.', # Описание при рождении }, 'dragonborn': { 'power': 3, 'modifier': ['elite'], 'name': u'Драконорождённый', 'born': u'драконорождённый потомок Aein Sidhe, сочетает в себе силу и ярость драконьего рода с колдовским могуществом старшей крови детей богини Дану. Его сила размеры и интеллект далеко превосходят возможности не только людей но и альвов с цвергами. Драконорождённый вполне может померяться силой с великаном и станет отличным элитным стражем сокровищ или воином армии тьмы.', # Описание при рождении }, 'gargoyle': { 'power': 4, 'modifier': ['warrior'], 'name': u'Гаргуйль', 'born': u'драконьему семени не хватило потенциала чтобы полностью раскрыть возможности магической крови детей Дану, но даже такие уродливые горгулии будут полезны в армии тьмы или на охранной службе. Способность к полёту даёт им превосходство над обычными рептилодами, даже не говоря о гоблинах и людях.', # Описание при рождении }, 'sea_bastard': { 'power': 3, 'modifier': ['poisonous', 'marine'], 'name': u'Рыбоглаз', 'born': u'в сочетании с могучим драконьем семенем кровь морского народа дала жуткую пародию на русалку - рыбоглаза. Эти злобные и уродливые твари способны жить лишь в морской воде и это единственное что мешает им стать дополнением к воинству тьмы что собирается в пустошах под рукой Владычицы. К тому же они слишком прожорливы и легко отвлекаются от караульной службы чтобы забить косяк рыб. Но всё же можно принудить их служить в морском логове.', # Описание при рождении }, 'octopus': { 'power': 5, 'modifier': ['poisonous', 'marine'], 'name': u'Ядовитый спрут', 'born': u'похожие на здоровенных лиловых осьминогов, эти безмозглые морские твари отличаются агрессивыным нравом и наличием яда в присосках. Неудачливыми ныряльщикам не поздоровится.', # Описание при рождении }, 'hell_hound': { 'power': 4, 'modifier': ['poisonous'], 'name': u'Адская гончая', 'born': u'Семя дракона сильно пострадало от раскалённой матки огненной великанши. Из яиц на свет появились мутировавшие многоголовые твари напоминающие помесь собаки, ящерицы и газовой горелки. Они слишком дики и тупы для армейской службы, зато способны навести ужас на обжитые земли.', # Описание при рождении }, 'minotaur': { 'power': 5, 'modifier': ['elite'], 'name': u'Минотавр', 'born': u'Семя коварного даркона отлично раскрыло потенциал дикости и ярости в крови людоедки. Рогатый, мохнатый, склонный к припадкам ярости минотавр способен в одиночку разметать отряд тяжелой панцирной пехоты и перетрахать всех женщин деревни за одну ночь. 
Всё же он достаточно умён чтобы служить высшей силе, так что из него выйдёт неплохой страж сокровищницы или элитный боец.', # Описание при рождении }, 'murloc': { 'power': 3, 'modifier': ['warrior', 'marine'], 'name': u'Мурлок', 'born': u'Жутко искажённые пародии сразу на людей, лягушек и рыб, мурлоки стали бы неплохими воинами в армии тьмы, если бы могли жить вдали от воды. Но так, максимум на что они способны - охранять от посягательств подводные логова драконов или терроризировать морской народ.', # Описание при рождении }, 'naga': { 'power': 6, 'modifier': ['elite', 'marine'], 'name': u'Нага', 'born': u'коварство дракона позволило породить от сирены огромную и могучую тварь именуемую Наг (змей). Наг сочетает в себе качества человека и морской змеи, но кроме того он обладает великанским размером, невероятной силой и живучестью. Он мог мы стать элитным бойцом армии тьмы, если бы не пересыхал на суше. Впрочем из него получится отличный страж сокровищницы. ', # Описание при рождении }, 'ice_worm': { 'power': 7, 'modifier': ['poisonous'], 'name': u'Ледяной червь', 'born': u'слишком слабое для ледяной великанши семя, сделало потомство неразумным. Это змеи. Огроменные, уродливые, с холодными как лёд панцирями и жуткими пастями змеи. Человека такая прожуёт не задумываясь, но для службы в армии ей не хватит мозгов. ', # Описание при рождении }, 'yettie': { 'power': 6, 'modifier': ['elite'], 'name': u'Йетти', 'born': u'Продуктом союза дракона и ледяной великанши стал мохнатый, рогатый великан больше похожий на обезьяну чем на разумное существо. И тем не менее, он хоть и дик но весьма умён. Йётти может стать отличным элитным бойцом в армии тьмы.', # Описание при рождении }, 'troll': { 'power': 8, 'modifier': ['elite'], 'name': u'Тролль', 'born': u'Тролль - самое могучее из отродий драконов, которое только может появиться на свет без вмешательства Владычицы. Он крупнее и сильнее чем титан. Практически неуязвим и достаточно умён чтобы служить в воинстве тьмы. А ещё он зелёный, толстый и любит когда его кормят.', # Описание при рождении }, 'strigg': { 'power': 6, 'modifier': ['poisonous'], 'name': u'Стригой', 'born': u'рождение можно считать неудачным. Семя дракона оказалось жидковатым для людоедки и в итоге на свет родились жуткие крылатые уродцы, лишенные каких либо мозгов. Стриги разумеется агрессивны и даже ядовиты, но слишком тупы для армейской службы. ', # Описание при рождении }, 'barlog': { 'power': 6, 'modifier': ['elite'], 'name': u'Дэв', 'born': u'драконьему семени хватило потенциала чтобы грамотно слиться с огненной сущностью великанши. Результатом стал огромный, пеперёк себя шире Дэв. Он не только чудовищно силён, но к тому же обладает властью над огнём. Это просто великолепный элитный воин для армии тьмы.', # Описание при рождении }, 'chimera': { 'power': 10, 'modifier': ['poisonous'], 'name': u'Химера', 'born': u'магическая сущность титаниды с трудом слилась с драконьей кровью, породив уродливую хищную химеру. Хотя эта многоголовая, агрессивная и ядовитая тварь способна разорвать в прямом бою даже великана, она не обладает даже зачатками разума. 
Служить в армии ей не суждено.', # Описание при рождении }, } girl_events = { 'escape': 'lb_event_girl_escape', # событие "побег из заключения" 'spawn': 'lb_event_girl_spawn', # событие "рождение отродий" 'free_spawn': 'lb_event_girl_free_spawn', # событие "рождение отродий на воле" 'hunger_death': 'lb_event_girl_hunger_death', # событие "смерть девушки от голода" 'kill': 'lb_event_girl_kill', # событие "беременную девушку убивают на свободе" } girls_texts = { # Подстановки: # %(dragon_name)s = Краткое имя текущего дракона # %(dragon_name_full)s = Имя дракона с эпитетом # %(dragon_type)s = Тип анатомии дракона (змей, линдвурм и т.п.) # %(girl_name)s = имя текущей женщины (однако, игра слов :) ) # %(girl_title)s = тип женщины (крестьянка, горожанка, леди, русалка, эльфийска дева и т.п.) # %(spawn_name)s - тип отродий для описаний рождения (начинается с заглавной буквы) # %(rob_list)s - список украденного 'girl': { # используется, если нет подходящего текста или отсутствует нужный тип девушки 'shout': ( # Реакция девушки, прямой речью u"Ой, а мне текст не написали (((", ), 'prelude': ( # Описание прелюдий u"Одним неуловимым движением %(dragon_name)s подобрался вплотную к женщине и сбил её с " u"ног, а затем начал рвать зубами её одежду словно остервенелый пёс. %(girl_name)s " u"отчаянно отбивалась и кричала, но толку от этого было не много, изодранная одежда " u"разлетелась клочками оставляя её полностью обнаженной и беззащитной перед охваченным " u"похотью ящером.", ), 'sex': ( # Описание секса с девушкой u"Отчаянно пытаясь спасти свою невинность, %(girl_title)s закрылась руками но %(dragon_type)s " u"предпринял обходной манёвр. Широко разинув свою зубастую пасть он обхватил голову девушки " u"челюстями, так что всё её лицо оказалось внутри, лишаясь доступа к воздуху. Девушка широко " u"открыла рот пытаясь вдохнуть хоть немного кислорода, но вместо этого в её глотку проник " u"длинный раздвоенный язык ящера. Теперь все когда все силы девушки были направлены на то " u"чтобы оторвать смрадную пасть от своего лица она и думать забыла о невинности. Скребя " u"ногтями по твёрдой чешуе дракона и дрыгая ногами %(girl_name)s внезапно почувствовала как " u"снизу в неё проникает что-то большое и твёрдое. Покрытый слизью рептилоидный член с " u"лёгкостью прорвал тонкую плёнку защищавшую вход в тугое молодое влагалище, безжалостно " u"растягивая и продавливая всё на своём пути. Почти теряя сознание от боли и недостатка " u"воздуха, %(girl_name)s внезапно почувствовала что челюсти насильника размыкаются, вновь " u"позволяя ей вдохнуть. %(dragon_name)s хотел насладиться её воплями и плачем.", ), 'impregnate': ( # Оплодотворение u"Сдавленная в безжалостных объятьях ящера, %(girl_title)s почувствовала как он " u"ускоряет темп своих движений. Боль стала практически невыносимой но крик девушки " u"потерялся, перекрытый рёвом наслаждения насильника. Конвульсивно содрагаясь всем " u"телом %(dragon_type)s вливал в истерзанное лоно девушки целые литры липкого и " u"густого семени, заставляя её маленький животик раздуться изнутри. Когда " u"%(dragon_name)s наконец отстранился от своей жертвы из неё вытек целый водопад " u"семени, но тем не менее количества оставшегося внутри было более чем достаточно " u"чтобы гарантировать надёжное оплодотворение. Дело было сделано надёжно.", ), 'new': ( # Описание новой девушки u"%(girl_name)s - %(girl_title)s.", ), 'free': ( # Описание процесса выпускания на свободу u"Пусть сама заботится о себе. 
Если её не убьют свои же, узнав что за отродье растёт в её чреве...", ), 'free_prison': ( # Описание процесса выпускания на свободу из тюрьмы u"Незачем держать её взаперти, охранять ещё... пусть катится на все четыре стороны.", ), 'steal': ( # Описание процесса воровства девушки u"%(dragon_name)s относит пленницу в своё логово...", ), 'jail': ( # Описание процесса заточения в темницу u"...и сажает её под замок.", ), 'jailed': ( # Описание процесса возврата в темницу u"%(dragon_name)s возвращает девушку в темницу.", ), 'eat': ( # Описание процесса поедания девушки. Как же ему не стыдно, червяку подколодному. u"Ты меня съешь?", ), 'rob': ( # Описание процесса ограбления девушки. u"%(dragon_name)s грабит девушку и получает: \n %(rob_list)s.", ), 'traps': ( # Описание процесса побега и гибели в ловушке. u"%(girl_name)s убегает из темницы и гибнет в ловушках.", ), 'escape': ( # Описание успешного побега u"%(girl_name)s спасается бегством", ), 'spawn_common': ( # Описание родов u"%(girl_name)s откладывает яйца из которых под наблюдением слуг вылупятся новые отродья. \n %(spawn_name)s.", ), 'spawn_elite': ( # Описание родов u"%(girl_name)s в мучениях откладывает огромное яйцо с толстой чешуйчатой скорлупой. \n %(spawn_name)s.", ), 'anguish': ( # Описание смерти от тоски u"%(girl_name)s умирает в тоске.", ), 'hunger_death': ( # Описание смерти от голода u"%(girl_name)s умирает от голода.", ), 'kill': ( # Описание смерти от селян u"Люди узнают, что %(girl_name)s беременна от дракона и убивают её.", ), 'free_spawn': ( # Описание родов на свободе u"%(girl_name)s в тайне от людей откладывает яйца из которых вылупляются кровожадные монстры... Теперь они будут резвиться на воле, терроризируя округу и возможно сожрут собственную мать.", ), 'prison': ( # Проведываем девушку в тюрьме u"%(girl_name)s находится в заключении.", ), }, 'peasant': { # используется для крестьянок 'new': ( # описание крестьянки u"Сельская девица по имени %(girl_name)s.", ), 'shout': ( # Реакция девушки, прямой речью u"Ой, божечки!..", u"Ай мамочка!..", u"Ты куда языком своим слюнявым тычешь змеюка поганая?!", u"Ой-ой-ой, только не ешь меня пожалуйста...", u"Ай. Нет-нет-нет, только не туда... ох...", u"Драконьчик, миленький, я всё сделаю тебе, только не кушай меня пожалуйста!", u"Ты что собрался делать этим елдаком, бесстыдник?! Да он не влезет же, ящерица смердячая! Ааааааааай...", u"Ай, что ты делаешь?! Больно... нет, пожалуйста... такой то здоровенный... уууй больно же!!!", u"Ишь что удумал, чудище. Пусти... ай, падла... пусти говорят тебе.", u"Неужто правду бабы говорят что драконы девок портат? Ой, не рычи. Понялая я, поняла. Не кусайся только.", u"Что, люба я тебе змей? Ишь елдаком махает как пастух погонялом!", u"Ох пресвятая дева, срамота то какая...", u"(тихонько плачет и закрывает лицо руками)", u"(яростно отбивается и пыхтит сквозь сжатые зубы)", u"Ой, ну не надо драконьчик, меня же маменька убьёт если узнает что я от тебя понесла. Может я ручками тебя там поглажу?", ), 'eat': ( # Описание процесса поедания девушки. u"Ой, божечки!..", u"Ай мамочка!..", u"Неееееет!...", u"Аааааааа!....", u"Ой не рычи так, мне страшно...", u"Ну и зубищи у тебя... ай нет-нет-нет...", u"Oh shi~", u"Не жри меня,... пожалуйста, я всё сделаю, только не жри!", u"Спаси-ите! Лю-юди!", u"Сожрать меня вздумал, уродина?! Чтобы ты подавился!", u"Я описилась...", u"Ой какой взгляд у тебя голодный...", u"Нет. Фу. Брысь. Ай не кусай меня.", u"Пошел вон скотина! А ну ка брысь-кому говорят. 
Облизывается он, ишь ты!", u"(сдавленно хрипит)", u"(тихонько плачет и закрывает лицо руками)", u"(яростно отбивается и пыхтит сквозь сжатые зубы)", ), }, 'citizen': { # используется для горожанок 'new': ( # описание крестьянки u"%(girl_name)s, дочь богача.", ), 'shout': ( # Реакция девушки, прямой речью u"О, Господи!..", u"Проклятая гадина!", u"Не смей! Мой отец тебя на шашлык за такое пустит, змеюка!", u"Прошу вас, господин дракон, не надо. Отпустите меня, умоляю...", u"Ай. Нет-нет-нет, только не туда... ох...", u"Только не надо зубов, я всё сделаю. Умоляю. Я же знаю чего вы хотите.", u"Ой нет, убеерите эту... это... от меня. Стыд то какой!", u"Ай, что вы делаете?! Больно... нет, умоляю... он же огромный... уууй больно же!!!", u"Ты что задумал, отродье Ехидны?! Пусти... ай, тварь... пусти говорят тебе.", u"Я слышала что драконы делают с девушками... Нет. пожалуйста не надо рычать. Я понимаю. Нет, не рвите я сниму... вот снимаю...", u"Ох, Господи, я такого срама даже у коня в деревне не видала! Жуть то какая...", u"Ох пресвятая дева, спаси и сохрани...", u"(тихонько плачет и закрывает лицо руками)", u"(яростно отбивается и пыхтит сквозь сжатые зубы)", u"Зачем вы сдираете с меня платье? Нет, я не могу. У меня же жених... Это свершенно не... ааааАХ!", ), 'eat': ( # Описание процесса поедания девушки. u"(молится) Отец наш небесный, да святится имя твоё, да пребудет воля твоя...", u"(молится) Если я пойду и долиною смертной тени, не убоюсь зла, потому что Ты со мной...", u"Неееееет!...", u"Аааааааа!....", u"(кашляет от исходящего изо рта дракона смрада)", u"Ну и зубищи у вас... ай нет-нет-нет...", u"Oh shi~", u"Не кушайте меня,... умоляю, я всё сделаю, только не ешьте!", u"Спаси-ите! Помогите! Кто-ниб... аааа....", u"Сожрать меня вздумал, уродина?! Чтобы ты подавился!", u"Нет, пожалуйста... я куплю вам целое стадо свиней... зачем меня то??", u"Ох этот алчный взгляд...", u"Нет. Фу. Брысь. Плохой дракон! Сидеть! Кому сказала сидеть!!!.", u"Пошел вон скотина! А ну ка брысь-кому говорят. Облизывается он, ишь ты!", u"(сдавленно хрипит)", u"(тихонько плачет и закрывает лицо руками)", u"(яростно отбивается и пыхтит сквозь сжатые зубы)", ), }, 'princess': { # используется для благородных дам 'new': ( # описание u"%(girl_name)s, дама благородных кровей.", ), 'shout': ( # Реакция девушки, прямой речью u"О, Господи!..", u"Не тронь меня бесовское исчадие!", u"Не смей! Мой отец тебя на шашлык за такое пустит, змеюка!", u"Некоторые сичтают драконов благородными животными. Может вы будете так добры и перестанете распускать свои лапы и язык?", u"Ай. Нет-нет-нет, только не туда... ох...", u"Только не надо зубов, я всё сделаю. Умоляю. Я же знаю чего вы хотите.", u"Ой нет, убеерите эту... это... от меня. Стыд то какой!", u"Ай, что вы делаете?! Больно... нет, умоляю... он же огромный... уууй больно же!!!", u"Ты что задумал, отродье Ехидны?! Пусти... ай, тварь... пусти говорят тебе.", u"Я слышала что драконы делают с девушками... Нет. пожалуйста не надо рычать. Я понимаю. Нет, не рвите я сниму... вот снимаю...", u"Ох, Господи, я такого срама даже у коня в деревне не видала! Жуть то какая...", u"Ох пресвятая дева, спаси и сохрани...", u"(тихонько плачет и закрывает лицо руками)", u"(яростно отбивается и пыхтит сквозь сжатые зубы)", u"Зачем вы сдираете с меня платье? Нет, я не могу. У меня же жених... Это свершенно не... ааааАХ!", ), 'eat': ( # Описание процесса поедания девушки. u"(молится) Pater noster, qui es in caelis, sanctificetur nomen tuum. Adveniat regnum tuum. 
Fiat voluntas tua,..", u"(молится) Nam etsi ambulavero in medio umbrae mortis, non timebo mala, quoniam tu mecum es. Virga tua, et baculus tuus,..", u"Неееееет!...", u"Аааааааа!....", u"(кашляет от исходящего изо рта дракона смрада)", u"Ну и зубищи у вас... ай нет-нет-нет...", u"Oh shi~", u"Не кушайте меня,... умоляю, я всё сделаю, только не ешьте!", u"Спаси-ите! Помогите! Кто-ниб... аааа....", u"Сожрать меня вздумал, уродина?! Чтобы ты подавился!", u"Нет, пожалуйста... я куплю вам целое стадо свиней... зачем меня то??", u"Ох этот алчный взгляд...", u"Нет. Фу. Брысь. Плохой дракон! Сидеть! Кому сказала сидеть?!!.", u"Пошел вон скотина! А ну ка брысь-кому говорят. Облизывается он, ишь ты!", u"(сдавленно хрипит)", u"(тихонько плачет и закрывает лицо руками)", u"(яростно отбивается и пыхтит сквозь сжатые зубы)", ), }, 'elf': { # используется для лесных дев 'new': ( # описание девы u"%(girl_name)s, прекрасная лесная дева из народа альвов, детей богини Дану.", ), 'shout': ( # Реакция девушки, прямой речью u"О, Дану!..", u"Не тронь меня исчадие скверны!", u"Не смей! Духи леса отомсят за мою поргуанную честь!", u"Уебери от меня эту... этот... Такой союз противен природе!", u"Чем я заслужила такое унижение?!", u"Ты можешь взять моё тело, но моей душой тебе не завладеть!", ), 'eat': ( # Описание процесса поедания девушки u"Неееееет!...", u"Аааааааа!....", u"Если хочешь чтобы я просила пощады - не надейся!", u"(кашляет от исходящего изо рта дракона смрада)", ), }, 'mermaid': { # используется для русалок 'new': ( # описание русалки u"%(girl_name)s, экзотическая морская дева.", ), 'shout': ( # Реакция девушки, прямой речью u"О, Дагон!..", u"Не тронь меня сухопутная ящерица!", u"Не смей! Духи вод отомсят за мою поргуанную честь!", u"Что это за хрень у тебя между ног?! Щупальце???", ), 'eat': ( # Описание процесса поедания девушки u"Неееееет!...", u"Аааааааа!....", ), }, 'siren': { # используется для сирен 'new': ( # описание u"%(girl_name)s, экзотическая морская великанша.", ), 'shout': ( # Реакция девушки, прямой речью u"О, Дагон!..", u"Не тронь меня сухопутная ящерица!", u"Не смей! Духи вод отомсят за мою поргуанную честь!", u"Что это за хрень у тебя между ног?! Щупальце???", ), 'eat': ( # Описание процесса поедания девушки u"Неееееет!...", u"Аааааааа!....", ), }, 'ogre': { # людоедка 'new': ( # описание u"%(girl_name)s, глупая и диковатая людоедка.", ), 'shout': ( # Реакция девушки, прямой речью u"Твоя меня не выебать! Моя сама выебать твоя!!! АРррргх! Смерть через СНУ-СНУ!", ), 'eat': ( # Описание процесса поедания девушки u"Большая ящерица кусать? Я тоже кусать! КТО БОЛЬШЕ ОТКУСИТ?!.", ), }, 'ice': { # ледяная великанша 'new': ( # описание u"%(girl_name)s, холодная и надменная ледяная великашна.", ), 'shout': ( # Реакция девушки, прямой речью u"Хочешь моих обьятий, змей? Твоя чешуя покроется инеем, а стручок скукожится от стужи в моих чреслах. Дерзай...", ), 'eat': ( # Описание процесса поедания девушки u"Ашшшшь... Я отморожу твои ничтожные кишки!..", ), }, 'fire': { # огненная великанша 'new': ( # описание u"%(girl_name)s, темпераментная и страстная огненная великанша.", ), 'shout': ( # Реакция девушки, прямой речью u"Ха! Поглядим какой из тебя любовник, змеюка. Хоть два раунда то выдержишь?", ), 'eat': ( # Описание процесса поедания девушки u"Решил меня сожрать? Без боя я не дамся!!!", ), }, 'titan': { # людоедка 'new': ( # описание u"%(girl_name)s, совершенная и величественная титанида.", ), 'shout': ( # Реакция девушки, прямой речью u"Повелеваю тебе оставить грязные мысли! 
Ты не достоин моей любви, червь!", ), 'eat': ( # Описание процесса поедания девушки u"О Боги, почему вы оставляете меня в смертый час?! Или я не ваша возлюбленная дщерь?", ), }, }
OldHuntsman/DefilerWings
game/pythoncode/girls_data.py
Python
bsd-3-clause
58,860
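The file above (girls_data.py) is a phrase table: a nested dict keyed by character type ('citizen', 'princess', 'elf', ...), then by event ('new' for the description, 'shout' and 'eat' for reactions), each holding a tuple of canned lines containing a %(girl_name)s placeholder. Below is a minimal sketch of how such a table is typically consumed; girls_info and describe are hypothetical names for illustration, not taken from the repo.

# -*- coding: utf-8 -*-
import random

# Hypothetical miniature of the structure above: type -> event -> phrases.
girls_info = {
    'citizen': {
        'new': (u"%(girl_name)s, a rich man's daughter.",),
        'shout': (u"Oh, Lord!..",),
    },
}


def describe(girl_type, event, girl_name):
    # Pick one canned phrase at random and substitute the name via
    # old-style %-formatting, matching the %(girl_name)s placeholders.
    phrase = random.choice(girls_info[girl_type][event])
    return phrase % {'girl_name': girl_name}


print(describe('citizen', 'new', u'Marfa'))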
""" Documentation ------------- Documentation is done with `Sphinx <http://sphinx.pocoo.org/>`_ and some helper functions coming with the software for more customization. The folder that contains all the documentation is called ``docs``. To compile, you first have to install Sphinx 1.1 or a better version and pySPACE (see: :ref:`t_install`). For creating the inheritance diagrams the `Graphviz package <http://www.graphviz.org/>`_ is needed. The documentation can be created by running ``make html`` in the ``docs`` directory (therefore we have a ``Makefile`` in the ``docs`` folder). To view the documentation open the ``index.html`` in the ``.build/html`` folder. A compiled version of the documentation can be found `on the git web page <http://pyspace.github.io/pyspace/index.html>`_. """
pyspace/pyspace
docs/__init__.py
Python
bsd-3-clause
796
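The docstring above describes a manual ``make html`` step. That step is easy to script; here is a minimal sketch, assuming Sphinx is installed and the working copy contains the ``docs`` directory described above (the helper name build_docs is ours, not part of pySPACE).

import subprocess


def build_docs(docs_dir="docs"):
    # Equivalent to running `make html` inside the docs folder;
    # Sphinx then writes the output under .build/html/index.html.
    subprocess.check_call(["make", "html"], cwd=docs_dir)


if __name__ == "__main__":
    build_docs()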
# Copyright (C) 2014 Andrey Antukh <[email protected]>
# Copyright (C) 2014 Jesús Espino <[email protected]>
# Copyright (C) 2014 David Barragán <[email protected]>
# Copyright (C) 2014 Anler Hernández <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import pytest

from unittest.mock import patch, Mock

from taiga_contrib_gitlab_auth import connector as gitlab


def test_url_builder():
    with patch("taiga_contrib_gitlab_auth.connector.URL", "http://localhost:4321"):
        assert (gitlab._build_url("login", "authorize") ==
                "http://localhost:4321/oauth/authorize")
        assert (gitlab._build_url("login", "access-token") ==
                "http://localhost:4321/oauth/token")
        assert (gitlab._build_url("user", "profile") ==
                "http://localhost:4321/api/v3/user")


def test_login_without_settings_params():
    with pytest.raises(gitlab.GitLabApiError) as e, \
            patch("taiga_contrib_gitlab_auth.connector.requests") as m_requests:
        m_requests.post.return_value = m_response = Mock()
        m_response.status_code = 200
        m_response.json.return_value = {"access_token": "xxxxxxxx"}

        gitlab.login("*access-code*", "http://localhost:1234",
                     "**client-id**", "*client-secret*", gitlab.HEADERS)
    assert e.value.status_code == 400
    assert "error_message" in e.value.detail


def test_login_success():
    with patch("taiga_contrib_gitlab_auth.connector.requests") as m_requests, \
            patch("taiga_contrib_gitlab_auth.connector.URL", "http://localhost:4321"), \
            patch("taiga_contrib_gitlab_auth.connector.CLIENT_ID", "*CLIENT_ID*"), \
            patch("taiga_contrib_gitlab_auth.connector.CLIENT_SECRET", "*CLIENT_SECRET*"):
        m_requests.post.return_value = m_response = Mock()
        m_response.status_code = 200
        m_response.json.return_value = {"access_token": "xxxxxxxx"}

        auth_info = gitlab.login("*access-code*", "http://localhost:1234",
                                 "**client-id**", "*client-secret*",
                                 gitlab.HEADERS)

        assert auth_info.access_token == "xxxxxxxx"
        m_requests.post.assert_called_once_with("http://localhost:4321/oauth/token",
                                                headers=gitlab.HEADERS,
                                                params={'code': '*access-code*',
                                                        'client_id': '**client-id**',
                                                        'client_secret': '*client-secret*',
                                                        'grant_type': 'authorization_code',
                                                        'redirect_uri': 'http://localhost:1234'})


def test_login_with_errors():
    with pytest.raises(gitlab.GitLabApiError) as e, \
            patch("taiga_contrib_gitlab_auth.connector.requests") as m_requests, \
            patch("taiga_contrib_gitlab_auth.connector.URL", "http://localhost:4321"), \
            patch("taiga_contrib_gitlab_auth.connector.CLIENT_ID", "*CLIENT_ID*"), \
            patch("taiga_contrib_gitlab_auth.connector.CLIENT_SECRET", "*CLIENT_SECRET*"):
        m_requests.post.return_value = m_response = Mock()
        m_response.status_code = 200
        m_response.json.return_value = {"error": "Invalid credentials"}

        gitlab.login("*access-code*", "http://localhost:1234",
                     "**client-id**", "*client-secret*", gitlab.HEADERS)
    assert e.value.status_code == 400
    assert e.value.detail["status_code"] == 200
    assert e.value.detail["error"] == "Invalid credentials"


def test_get_user_profile_success():
    with patch("taiga_contrib_gitlab_auth.connector.requests") as m_requests, \
            patch("taiga_contrib_gitlab_auth.connector.URL", "http://localhost:4321"), \
            patch("taiga_contrib_gitlab_auth.connector.CLIENT_ID", "*CLIENT_ID*"), \
            patch("taiga_contrib_gitlab_auth.connector.CLIENT_SECRET", "*CLIENT_SECRET*"):
        m_requests.get.return_value = m_response = Mock()
        m_response.status_code = 200
        m_response.json.return_value = {"id": 1955,
                                        "username": "mmcfly",
                                        "name": "martin seamus mcfly",
                                        "bio": "time traveler"}

        user_profile = gitlab.get_user_profile(gitlab.HEADERS)

        assert user_profile.id == 1955
        assert user_profile.username == "mmcfly"
        assert user_profile.full_name == "martin seamus mcfly"
        assert user_profile.bio == "time traveler"
        m_requests.get.assert_called_once_with("http://localhost:4321/api/v3/user",
                                               headers=gitlab.HEADERS)


def test_get_user_profile_with_errors():
    with pytest.raises(gitlab.GitLabApiError) as e, \
            patch("taiga_contrib_gitlab_auth.connector.requests") as m_requests:
        m_requests.get.return_value = m_response = Mock()
        m_response.status_code = 401
        m_response.json.return_value = {"error": "Invalid credentials"}

        gitlab.get_user_profile(gitlab.HEADERS)
    assert e.value.status_code == 400
    assert e.value.detail["status_code"] == 401
    assert e.value.detail["error"] == "Invalid credentials"


def test_me():
    with patch("taiga_contrib_gitlab_auth.connector.login") as m_login, \
            patch("taiga_contrib_gitlab_auth.connector.get_user_profile") as m_get_user_profile:
        m_login.return_value = gitlab.AuthInfo(access_token="xxxxxxxx")
        m_get_user_profile.return_value = gitlab.User(id=1955,
                                                      username="mmcfly",
                                                      full_name="martin seamus mcfly",
                                                      email="[email protected]",
                                                      bio="time traveler")

        email, user = gitlab.me("**access-code**", "http://localhost:1234")

        assert email == "[email protected]"
        assert user.id == 1955
        assert user.username == "mmcfly"
        assert user.full_name == "martin seamus mcfly"
        assert user.bio == "time traveler"

        headers = gitlab.HEADERS.copy()
        headers["Authorization"] = "Bearer xxxxxxxx"
        m_get_user_profile.assert_called_once_with(headers=headers)
taigaio/taiga-contrib-gitlab-auth
back/tests/unit/test_connectors_gitlab.py
Python
agpl-3.0
6,904
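test_url_builder above pins down the contract of gitlab._build_url without showing its body. The sketch below is one implementation consistent with the asserted URLs; it is an illustration reconstructed from the test expectations, not the module's actual source.

# Hypothetical reconstruction from the URLs expected in test_url_builder.
URL = "http://localhost:4321"

_ENDPOINTS = {
    ("login", "authorize"): "/oauth/authorize",
    ("login", "access-token"): "/oauth/token",
    ("user", "profile"): "/api/v3/user",
}


def _build_url(*args):
    # ("login", "authorize") -> http://localhost:4321/oauth/authorize, etc.
    return URL + _ENDPOINTS[args]


assert _build_url("login", "access-token") == "http://localhost:4321/oauth/token"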
# Copyright 2014-2015 University of Chicago # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Repository Management """ import datetime import fnmatch import hashlib import os import os.path import re default_root = "/mcs/globus.org/ftppub/gt6" default_api_root = "/mcs/globus.org/api" default_releases = ["unstable", "testing", "stable"] public_key = """-----BEGIN PGP PUBLIC KEY BLOCK----- Version: GnuPG v1.4.5 (GNU/Linux) mQGiBE0PXQkRBAC12PfwFzMyTKAvCp3AEbzdwwDyEaBHYmd1+Dv+q5c48fEZQrzA PuZ75BnG8BRIo3ZSYJll9Xf5v8A0M6F35msBBdjUpI+PHZvSQ+yru6U3w9XCsmO9 jSGWM1XAw/hcDWOsETOsjJ56AqIKndOXtG2jeOMFD0MwJus9paDcv5pPkwCgk3Fk I+GdLaZf0O6vGUtq2Fo2EgkD/14AQ4SyUufwztQeLwlYXyihdUoIVBl4wm4fndJb TuzTlp3V/oabM8t+V92ftbqlAesFb1FdFQ9NeUEY0VIODR2OTsmEfLUSMK/kRfXM 4FatObXpEp58EydZb3oz/vwASEk1Nno5OW2noGZL3sCk+3j65MstI2q4kMvNSvl+ JEjUBACgNv/mDrn0UjWBuzxOuZrh1r2rBdsjIHx31o/vBF5YLfQhErZQTm6cfpRK W32Nm18btrqgxxHFAMb4wxnVxAxdM3zLSAaiqvi33z2wHReh5TfaVKpJBj7LpMSI hwu50iovsBjE7HiusJQBWBtk8Bqp4g9ic2sPV0caEMCUXU5R9bQjR2xvYnVzIFRv b2xraXQgPHN1cHBvcnRAZ2xvYnVzLm9yZz6IYAQTEQIAIAUCTQ9dCQIbAwYLCQgH AwIEFQIIAwQWAgMBAh4BAheAAAoJEESufsL68kNlb6IAoIemS8dr65xCkA4GQzgJ ngXwZgtvAKCOKs5Ork6HiNKIrWRGMLvA7iktBbkCDQRND10SEAgA37cRQGj/QNcc OjyBrL6e2wPT7UtpXBEHzfjhtmT8+VC+PSbKRxVfawLBtrfzSAAwsmye3c+XK/VB Pa06vSSmezeyNau+XtEVLwrwQwO/kM6wgNtb7zYyI67Y6XEPP+ZlqpZ0W14cTZBD 3SXWuu6zqjdtUnJCg/j/j0zH5TZa40aCfisERxNCQeoePk2gmMTJDJF0ASM3Nhys QIP9qpCA+eOJnKmMeEgDCW9j2mYO4tp9lCSbi15HAb41HKN6xypNWk+EHKyu9n50 88UocRHXLZFujzNTGIokWAcoC0D3qpVQehtAVgt1VPrE6MxFPek8ZN4Ho++92KB7 F6E0OsfF6wADBggAnNPguzYAIztF/EzZANUU/7Eon9zJaD4Lf/mnhB3bMuGvenY0 7HSBAXbUxVXs7uX3S6u9PZ9dytl2Fqh8w47TNcC0ACKLRnhxTJ92LLakzAGVGtNz 2W9l+YJaZ6qIQR9FmYpCyIWp6Vm47yOARThrMtnwUhb53g5ZfxgzpHNUDN/7utTy 3sUaMRiijecmSVhDFbrz7ryY2Btlcr7ZrBo0ODHohDkZVn2UrzE6qg9g5np03zYe 5OUM5Lt5GYZJSKZO81aJ5+9DlkiAev3BFEeCsSOwjrqLZpsr0olbIfeHCi8pvjOJ SCfx4Qs/hI34ykaUn3AgbgxqT0mSKfMasg2bIIhJBBgRAgAJBQJND10SAhsMAAoJ EESufsL68kNlBuAAnRRI5jFAvyjtQaoQpVqSL4/O45D7AJ9WrW/vxTzN0OyZyUU6 8T0dJyXArA== =r6rU -----END PGP PUBLIC KEY BLOCK----- """ uid = os.getuid() gid = None def _digest_file(filename, force=False): """ Compute the md5, sha1, sha512 hashes of a file and write them to disk. Parameters ---------- *filename*:: Name of the file to compute the hash of (str) *force*:: Overwrite existing hash file (bool [False]) """ if fnmatch.fnmatch(filename, "*.md5") or \ fnmatch.fnmatch(filename, "*.sha1") or \ fnmatch.fnmatch(filename, "*.sha512"): return for h in ['md5', 'sha1', 'sha512']: hashname = filename + "." + h if (force or not os.path.exists(hashname) or os.path.getmtime(filename) > os.path.getmtime(hashname)): digester = hashlib.new(h) f = file(filename, "r") digester.update(f.read()) f.close() f = file(hashname, "w") f.write( "%s %s\n" % (digester.hexdigest(), filename.split(os.sep)[-1])) f.close() class Repository(object): """ Repository class =================== This class contains the generic package management features for the various metadata types associated with different repository systems. 
    It contains algorithms for matching packages and selecting ones to copy
    into another repository based on version matches. This is subclassed to
    implement the actual metadata parsing for various metadata formats.
    """
    def __init__(self):
        self.packages = {}

    def get_packages(
            self, name=None, arch=None, version=None, source=None,
            newest_only=False):
        """
        Construct a list of packages that match the optional parameters. If
        source is a Metadata object, match packages that have that package
        as the source package. Otherwise, filter the package list based on
        the name if not None, further filtering on version and arch if they
        are not None. If newest_only is True, only return the highest
        versions of the packages which match.
        """
        package_candidates = []
        if source is not None:
            return [
                (package)
                for package_list in self.packages
                for package in self.packages[package_list]
                if package.source_name == source.source_name
                and package.version == source.version
            ]
        elif name is not None:
            if version is not None:
                package_candidates = [
                    (package)
                    for package_list in self.packages
                    for package in self.packages[package_list]
                    if name == package.name and package.version == version
                ]
            else:
                package_candidates = [
                    (package)
                    for package_list in self.packages
                    for package in self.packages[package_list]
                    if name == package.name
                ]
            if arch is not None:
                package_candidates = [
                    (p) for p in package_candidates if p.arch == arch
                ]
            if newest_only and len(package_candidates) > 0:
                newv = package_candidates[-1].version
                return [p for p in package_candidates if p.version == newv]
            elif newest_only:
                return []
            else:
                return package_candidates
        else:
            package_candidates = []
            for n in self.packages:
                package_candidates.extend(
                    self.get_packages(
                        name=n, arch=arch, newest_only=newest_only))
            return package_candidates

    def is_newer(self, pkg):
        """
        Check to see if *pkg* is newer than any versions of the same package
        name within this repository. Returns 'True' if it is, 'False'
        otherwise.

        Parameters
        ----------
        *self*:
            This Repository object
        *pkg*:
            Package metadata to compare against the versions in *self*.
Returns ------- Boolean """ matches = self.get_packages(pkg.name, arch=pkg.arch, newest_only=True) return matches == [] or pkg > matches[-1] def __contains__(self, pkg): """ Check to see if pkg is included in this Repository """ return len(self.get_packages( name=pkg.name, arch=pkg.arch, version=pkg.version, newest_only=True)) > 0 def __iter__(self): """ Iterate through the packages in this repository """ return self.packages.keys() @staticmethod def create_index(path, recursive=False): for root, dirs, filenames in os.walk(path, topdown=not recursive): if not recursive: del dirs[0:] indexfile = os.path.join(root, "index.html") index_mtime = 0 regenerate_index = False if os.path.exists(indexfile): index_mtime = os.stat(indexfile).st_mtime else: regenerate_index = True if not regenerate_index: for dir in dirs: fulldir = os.path.join(root, dir) if os.stat(fulldir).st_mtime >= index_mtime: regenerate_index = True break if not regenerate_index: for filename in filenames: fullfilename = os.path.join(root, filename) if os.stat(fullfilename).st_mtime >= index_mtime: regenerate_index = True break if regenerate_index: try: f = open(indexfile, "w") f.write( "<html><head><title>{0}</title></head>\n" "<body>" "<table>\n" "<tr>" "<td><a href='../index.html'>Parent Directory</a></td>" "<td>{1}</td></tr>\n" .format( os.path.basename(root), datetime.datetime.fromtimestamp( os.stat( os.path.join( root, "..")).st_mtime).isoformat())) dirs.sort() for dir in dirs: f.write( "<tr>" "<td><a href='{0}/index.html'>{0}/</a></td>" "<td>{1}/</td></tr>\n" .format( dir, datetime.datetime.fromtimestamp( os.stat( os.path.join(root, dir)).st_mtime ).isoformat())) filenames.sort() for pkg in filenames: pkg_filename = os.path.join(root, pkg) if (os.path.isfile(pkg_filename) and not pkg_filename.endswith(".html")): f.write( "<tr>" "<td><a href='{0}'>{0}</a></td>" "<td>{1}</td></tr>\n" .format( pkg, datetime.datetime.fromtimestamp( os.stat( pkg_filename).st_mtime ).isoformat())) f.write("</table></body></html>\n") finally: f.close() os.utime(root, None) class Release(object): """ A Release is a top-level collection of +repo.Repository+ objects for particular package stability ('unstable', 'testing', 'stable') for each operating system. 
""" def __init__(self, name, repositories): self.name = name self.repositories = repositories def get_packages( self, name=None, os=None, version=None, arch=None, source=None, newest_only=False): return [p for repository in self.repositories_for_os_arch(os, arch) for p in repository.get_packages( name=name, arch=arch, version=version, source=source, newest_only=newest_only)] def is_newer(self, package): for repository in self.repositories_for_package(package): if repository.is_newer(package): return True return False def add_package(self, package, update_metadata=False): return [ repository.add_package(package, update_metadata) for repository in self.repositories_for_package(package)] def remove_package(self, package, update_metadata=False): return [ repository.remove_package(package, update_metadata) for repository in self.repositories_for_package(package)] def update_metadata(self, osname=None, arch=None, force=False): for repository in self.repositories_for_os_arch(osname, arch): repository.update_metadata(force) def repositories_for_os_arch(self, osname, arch): if osname is not None: if arch is not None: return [self.repositories[osname][arch]] else: return [ self.repositories[osname][ar] for ar in self.repositories[osname] ] else: return [ self.repositories[osn][ar] for osn in self.repositories for ar in self.repositories[osn] ] def repositories_for_package(self, package): """ Returns a list of repositories where the given package would belong. By default, its a list containing the repository that matches the package's os and arch, but subclasses can override this """ if package.os in self.repositories: return [self.repositories[package.os][package.arch]] else: return [] def get_operating_systems(self): return self.repositories.keys() def get_architectures(self, osname): return self.repositories[osname].keys() def __contains__(self, package): return len(self.get_packages( name=package.name, os=package.os, version=package.version, arch=package.arch)) > 0 class Manager(object): def __init__(self, releases): self.releases = releases def get_release(self, releasename): return self.releases[releasename] def package_name(self, name): return name.replace("_", "-") if name is not None else None def promote_packages( self, from_release=None, to_release="unstable", os=None, name=None, version=None, dryrun=False, exclude_package_names=None): """ Find new packages in the *from_release*, that are not in *to_release* and copy them there and update the distro metadata. The packages to promote can be limited by specifying the package *name*, *version*, and particular *os* to update. Parameters ---------- *from_release*:: The name of a release in this Manager object to copy new packages from. *to_release*:: The name of a release in this Manager object to copy new packages into. *os*:: Optional operating system indicator (either version or codename) to restrict the package promotion to. *name*:: Optional name of the packages to copy. If this is not present, all packages that have a newer source version in *from_release* than *to_release* are copied. *version*:: Optional version of the packages to copy. This is only used if the *name* option is used to additionally limit the packages to copy. *dryrun*:: (Optional) Boolean whether to prepare to promote the packages or just compute which packages are eligible for promotion. *exclude_package_names*:: (Optional) List of regular expressions matching packages to exclude from the promotion list. 
Returns ------- This function returns a list of packages that were promoted (or would have been if dryrun=False) """ from_release = self.get_release(from_release) # Find source packages in the from_release that are newer versions than # those in the to_release src_candidates = [src_info for src_info in from_release.get_packages( name=self.package_name(name), os=os, version=version, newest_only=(version is None))] src_candidates_by_os = {} for src in src_candidates: source_and_os = "{0}:{1}".format(src.source_name, src.os) if (source_and_os not in src_candidates_by_os or src_candidates_by_os[source_and_os].version < src.version): src_candidates_by_os[source_and_os] = src src_candidates = [ src_candidates_by_os[x] for x in src_candidates_by_os ] result = [] seen = {} to_release_object = self.get_release(to_release) # For each package found above, find source and binaries in # from_release and copy them over if they are not in to_release for src in src_candidates: source_and_os = "{0}:{1}".format(src.source_name, src.os) if source_and_os not in seen: seen[source_and_os] = True for package in from_release.get_packages(source=src): skip = False if exclude_package_names is not None: for exclude in exclude_package_names: if re.match(exclude, package.name) is not None: skip = True break if (not skip) and to_release_object.is_newer(package): if not dryrun: to_release_object.add_package( package, update_metadata=False) result.append(package) if not dryrun: to_release_object.update_metadata() return result # vim: filetype=python:
globus/globus-release-tools
share/python/repo/__init__.py
Python
apache-2.0
17,563
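_digest_file above writes .md5/.sha1/.sha512 companion files next to each package, but it relies on the Python 2 file() builtin. Below is a self-contained Python 3 sketch of the same idea; the helper name digest is ours, not part of the module.

import hashlib
import os.path


def digest(filename, algorithms=("md5", "sha1", "sha512")):
    # Hash the file once per algorithm and write "<hexdigest> <basename>"
    # companion files, mirroring what _digest_file produces.
    with open(filename, "rb") as f:
        data = f.read()
    for algo in algorithms:
        hexdigest = hashlib.new(algo, data).hexdigest()
        with open(filename + "." + algo, "w") as out:
            out.write("%s %s\n" % (hexdigest, os.path.basename(filename)))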
# -*- coding: utf-8 -*- # Copyright (c) 2003, Taro Ogawa. All Rights Reserved. # Copyright (c) 2013, Savoir-faire Linux inc. All Rights Reserved. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301 USA from decimal import Decimal from unittest import TestCase from num2words.currency import parse_currency_parts class CurrencyTestCase(TestCase): def test_parse_currency_parts(self): # integer with cents self.assertEqual(parse_currency_parts(101), (1, 1, False)) self.assertEqual(parse_currency_parts(-123), (1, 23, True)) # integer without cents self.assertEqual(parse_currency_parts(101, is_int_with_cents=False), (101, 0, False)) self.assertEqual(parse_currency_parts(-123, is_int_with_cents=False), (123, 0, True)) # float self.assertEqual(parse_currency_parts(1.01), (1, 1, False)) self.assertEqual(parse_currency_parts(-1.23), (1, 23, True)) self.assertEqual(parse_currency_parts(-1.2), (1, 20, True)) self.assertEqual(parse_currency_parts(0.004), (0, 0, False)) self.assertEqual(parse_currency_parts(0.005), (0, 1, False)) self.assertEqual(parse_currency_parts(0.006), (0, 1, False)) self.assertEqual(parse_currency_parts(0.0005), (0, 0, False)) self.assertEqual(parse_currency_parts(0.984), (0, 98, False)) self.assertEqual(parse_currency_parts(0.989), (0, 99, False)) self.assertEqual(parse_currency_parts(0.994), (0, 99, False)) self.assertEqual(parse_currency_parts(0.999), (1, 0, False)) # self.assertEqual(parse_currency_parts(0.985), (0, 99, False)) # self.assertEqual(parse_currency_parts(0.995), (1, 0, False)) # decimal self.assertEqual(parse_currency_parts(Decimal("1.01")), (1, 1, False)) self.assertEqual(parse_currency_parts(Decimal("-1.23")), (1, 23, True)) self.assertEqual(parse_currency_parts(Decimal("-1.233")), (1, 23, True)) self.assertEqual(parse_currency_parts(Decimal("-1.989")), (1, 99, True)) # string self.assertEqual(parse_currency_parts("1.01"), (1, 1, False)) self.assertEqual(parse_currency_parts("-1.23"), (1, 23, True)) self.assertEqual(parse_currency_parts("-1.2"), (1, 20, True)) self.assertEqual(parse_currency_parts("1"), (1, 0, False))
savoirfairelinux/num2words
tests/test_currency.py
Python
lgpl-2.1
3,103
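Read together, the assertions above fully determine the behaviour: split a value into (integer units, cents, is_negative), treating bare ints as cent totals by default, and rounding the fractional part to two places with carry (0.999 becomes (1, 0)). The sketch below satisfies every uncommented assertion in the test; it is an illustration, not the library's actual implementation.

from decimal import Decimal, ROUND_HALF_UP


def parse_currency_parts_sketch(value, is_int_with_cents=True):
    # Bare integers are read as a total number of cents: 101 -> (1, 1, False).
    if isinstance(value, int) and is_int_with_cents:
        return abs(value) // 100, abs(value) % 100, value < 0
    d = Decimal(str(value))
    negative = d < 0
    # Round to whole cents, letting 0.999 carry over into a full unit.
    cents_total = int(abs(d).quantize(Decimal("0.01"),
                                      rounding=ROUND_HALF_UP) * 100)
    return cents_total // 100, cents_total % 100, negative


assert parse_currency_parts_sketch(101) == (1, 1, False)
assert parse_currency_parts_sketch(0.999) == (1, 0, False)
assert parse_currency_parts_sketch("-1.2") == (1, 20, True)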
import pytest import numpy as np import pandas as pd import networkx as nx from neuprint import Client, default_client, set_default_client from neuprint import (fetch_skeleton, heal_skeleton, reorient_skeleton, skeleton_df_to_nx, skeleton_df_to_swc, skeleton_swc_to_df) from neuprint.tests import NEUPRINT_SERVER, DATASET @pytest.fixture(scope='module') def client(): c = Client(NEUPRINT_SERVER, DATASET) set_default_client(c) assert default_client() is c return c @pytest.fixture def linear_skeleton(): """ A test fixture to produce a fake 'skeleton' with no branches, just 10 nodes in a line. """ rows = np.arange(1,11) coords = np.zeros((10,3), dtype=int) coords[:,0] = rows**2 radii = rows.astype(np.float32) links = [-1, *range(1,10)] df = pd.DataFrame({'rowId': rows, 'x': coords[:,0], 'y': coords[:,1], 'z': coords[:,2], 'radius': radii, 'link': links}) return df def test_skeleton_df_to_nx(linear_skeleton): g = skeleton_df_to_nx(linear_skeleton, directed=False) assert not isinstance(g, nx.DiGraph) expected_edges = linear_skeleton[['rowId', 'link']].values[1:] expected_edges.sort(axis=1) assert (np.array(g.edges) == expected_edges).all() g = skeleton_df_to_nx(linear_skeleton, directed=True) assert isinstance(g, nx.DiGraph) assert (np.array(g.edges) == linear_skeleton[['rowId', 'link']].values[1:]).all() g = skeleton_df_to_nx(linear_skeleton, with_attributes=True) assert (np.array(g.edges) == linear_skeleton[['rowId', 'link']].values[1:]).all() for row in linear_skeleton.itertuples(): attrs = g.nodes[row.rowId] assert tuple(attrs[k] for k in [*'xyz', 'radius']) == (row.x, row.y, row.z, row.radius) def test_skeleton_df_to_swc(linear_skeleton): swc = skeleton_df_to_swc(linear_skeleton) roundtrip_df = skeleton_swc_to_df(swc) assert (roundtrip_df == linear_skeleton).all().all() def test_reorient_skeleton(linear_skeleton): s = linear_skeleton.copy() reorient_skeleton(s, 10) assert (s['link'] == [*range(2,11), -1]).all() s = linear_skeleton.copy() reorient_skeleton(s, xyz=(100,0,0)) assert (s['link'] == [*range(2,11), -1]).all() s = linear_skeleton.copy() reorient_skeleton(s, use_max_radius=True) assert (s['link'] == [*range(2,11), -1]).all() def test_reorient_broken_skeleton(linear_skeleton): broken_skeleton = linear_skeleton.copy() broken_skeleton.loc[2, 'link'] = -1 broken_skeleton.loc[7, 'link'] = -1 s = broken_skeleton.copy() reorient_skeleton(s, 10) assert (s['link'].iloc[7:10] == [9,10,-1]).all() # reorienting shouldn't change the number of roots, # though they may change locations. assert len(s.query('link == -1')) == 3 def test_heal_skeleton(linear_skeleton): broken_skeleton = linear_skeleton.copy() broken_skeleton.loc[2, 'link'] = -1 broken_skeleton.loc[7, 'link'] = -1 healed_skeleton = heal_skeleton(broken_skeleton) assert (healed_skeleton == linear_skeleton).all().all() def test_heal_skeleton_with_threshold(linear_skeleton): broken_skeleton = linear_skeleton.copy() broken_skeleton.loc[2, 'link'] = -1 broken_skeleton.loc[7, 'link'] = -1 healed_skeleton = heal_skeleton(broken_skeleton, 10.0) # With a threshold of 10, the first break could be healed, # but not the second. 
expected_skeleton = linear_skeleton.copy() expected_skeleton.loc[7, 'link'] = -1 assert (healed_skeleton == expected_skeleton).all().all() def test_fetch_skeleton(client): orig_df = fetch_skeleton(5813027016, False) healed_df = fetch_skeleton(5813027016, True) assert len(orig_df) == len(healed_df) assert (healed_df['link'] == -1).sum() == 1 assert healed_df['link'].iloc[0] == -1 @pytest.mark.skip("Need to write a test for skeleton_segments()") def test_skeleton_segments(linear_skeleton): pass if __name__ == "__main__": args = ['-s', '--tb=native', '--pyargs', 'neuprint.tests.test_skeleton'] #args += ['-k', 'heal_skeleton'] pytest.main(args)
connectome-neuprint/neuprint-python
neuprint/tests/test_skeleton.py
Python
bsd-3-clause
4,220
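test_heal_skeleton_with_threshold above encodes the healing rule: a fragment root may be re-linked only if another node lies within the threshold distance (with x = rowId**2 in the fixture, the gap at rowId 3 spans 5 units and heals at threshold 10, while the gap at rowId 8 spans 15 and stays broken). Below is a toy nearest-neighbour version of that rule for the linear fixture; it is a sketch, not neuprint's heal_skeleton, and it assumes a default integer index and ignores cycle prevention, which a real fragment-joining healer must handle.

import numpy as np


def heal_naive(df, threshold=np.inf):
    # Attach each extra root (link == -1) to its nearest other node,
    # but only when that node is closer than `threshold`.
    df = df.copy()
    coords = df[['x', 'y', 'z']].to_numpy(dtype=float)
    roots = df.index[df['link'] == -1][1:]  # keep the first root as-is
    for i in roots:
        dists = np.linalg.norm(coords - coords[i], axis=1)
        dists[i] = np.inf  # never link a node to itself
        j = int(dists.argmin())
        if dists[j] <= threshold:
            df.loc[i, 'link'] = int(df.loc[j, 'rowId'])
    return df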
"""Tests for cement.ext.ext_reload_config.""" import platform from cement.utils import test system = platform.system() if not system in ['Linux']: raise test.SkipTest('ext_reload_config not supported on %s' % system) import os import shutil import signal from time import sleep from cement.utils.misc import rando from cement.ext import ext_reload_config APP = rando()[:12] CONFIG1 = """ [%s] foo = bar1 """ % APP CONFIG2 = """ [%s] foo = bar2 """ % APP PLUGIN_CONFIG1 = """ [bogus] enable_plugin = false """ def bogus_hook_func(app): pass class ReloadConfigExtTestCase(test.CementExtTestCase): def setUp(self): super(ReloadConfigExtTestCase, self).setUp() f = open(self.tmp_file, 'w') f.write(CONFIG1) f.close() f = open(os.path.join(self.tmp_dir, 'bogus.conf'), 'w') f.write(PLUGIN_CONFIG1) f.close() self.app = self.make_app(APP, extensions=['reload_config'], config_files=[self.tmp_file], plugin_config_dirs=[self.tmp_dir], ) def test_reload_config(self): self.app.setup() self.app.hook.register('pre_reload_config', bogus_hook_func) self.app.hook.register('post_reload_config', bogus_hook_func) self.app.run() self.eq(self.app.config.get(APP, 'foo'), 'bar1') f = open(self.tmp_file, 'w') f.write(CONFIG2) f.close() sleep(1) try: self.eq(self.app.config.get(APP, 'foo'), 'bar2') finally: self.app.close() def test_no_plugin_dir(self): # coverage.. remove the plugin config dir shutil.rmtree(self.tmp_dir) self.app.setup() self.app.run() self.app.close() def test_signal_handling(self): self.app.setup() self.app.hook.register('pre_reload_config', bogus_hook_func) self.app.hook.register('post_reload_config', bogus_hook_func) self.app.run() sleep(1) try: ext_reload_config.signal_handler(self.app, signal.SIGINT, None) finally: self.app.close()
akhilman/cement
tests/ext/reload_config_tests.py
Python
bsd-3-clause
2,223
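The test above rewrites the config file and, after a short sleep, expects the running app to see the new value; the extension is Linux-only (see the platform guard), which suggests OS-level file watching. Stripped to its portable core, the pattern is "check the mtime, re-read on change". The class below is a simplified stand-in based on mtime polling, not Cement's implementation.

import os
import configparser


class ReloadingConfig:
    """Re-parse an INI file whenever its mtime changes: a toy version
    of what the reload_config extension automates with hooks."""

    def __init__(self, path):
        self.path = path
        self.mtime = None
        self.parser = configparser.ConfigParser()
        self.maybe_reload()

    def maybe_reload(self):
        mtime = os.path.getmtime(self.path)
        if mtime != self.mtime:
            self.mtime = mtime
            # Rebuild the parser so options removed from the file disappear.
            parser = configparser.ConfigParser()
            parser.read(self.path)
            self.parser = parser

    def get(self, section, key):
        self.maybe_reload()
        return self.parser.get(section, key)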
import asyncio
import copy
import json
import math
import os
import pickle
import re
import sys
import time
from datetime import datetime
from functools import partial
from pprint import pformat

import aiocron
import biothings.utils.mongo as mongo
from biothings import config as btconfig
from biothings.hub import BUILDER_CATEGORY, UPLOADER_CATEGORY
from biothings.utils.backend import DocMongoBackend
from biothings.utils.common import (dotdict, find_classes_subclassing,
                                    get_random_string, iter_n,
                                    open_compressed_file, timesofar)
from biothings.utils.dataload import merge_struct
from biothings.utils.hub_db import (get_source_fullname, get_src_build,
                                    get_src_build_config, get_src_dump,
                                    get_src_master)
from biothings.utils.loggers import get_logger
from biothings.utils.manager import BaseManager
from biothings.utils.mongo import doc_feeder, id_feeder

from ..databuild.backend import (LinkTargetDocMongoBackend,
                                 SourceDocMongoBackend,
                                 TargetDocMongoBackend)
from ..dataload.uploader import ResourceNotReady
from .backend import create_backend
from .buildconfig import AutoBuildConfig
from .differ import set_pending_to_diff
from .mapper import TransparentMapper

logging = btconfig.logger


def pending(build_name, action_name):
    src_build = get_src_build()
    src_build.update({"_id": build_name},
                     {"$addToSet": {"pending": action_name}})


class BuilderException(Exception):
    pass


class ResumeException(Exception):
    pass


class DataBuilder(object):
    """
    Generic data builder.
    """

    keep_archive = 10  # number of archived collections to keep. Oldest get dropped first.

    def __init__(self, build_name, source_backend, target_backend, log_folder,
                 doc_root_key="root", mappers=[],
                 default_mapper_class=TransparentMapper, sources=None,
                 target_name=None, **kwargs):
        self.init_state()
        self.build_name = build_name
        self.sources = sources
        self.target_name = target_name
        self._partial_source_backend = None
        self._partial_target_backend = None
        if type(source_backend) == partial:
            self._partial_source_backend = source_backend
        else:
            self._state["source_backend"] = source_backend
        if type(target_backend) == partial:
            self._partial_target_backend = target_backend
        else:
            self._state["target_backend"] = target_backend
        # doc_root_key is a key name within the src_build_config doc.
        # It's a list of datasources that are allowed to create a document
        # even if it doesn't exist yet. If the root documents list is not
        # empty, any other datasource not listed there won't be able to
        # create a document; it will only be able to update existing ones.
        # If there are no root documents, any datasource can create/update
        # a doc, so there's no priority nor limitation.
        # Note: negations can be used, like "!source1", meaning source1 is
        # not a root document datasource. Useful to express "all resources
        # except source1".
        self.doc_root_key = doc_root_key
        # overall merge start time
        self.t0 = time.time()
        # step merge start time
        self.ti = time.time()
        self.logfile = None
        self.log_folder = log_folder
        self.mappers = {}
        self.timestamp = datetime.now()
        self.merge_stats = {}  # keep track of cnt per source, etc...
self.src_meta = { } # sources involved in this build (includes versions) self.stats = {} # can be customized self.mapping = {} # ES mapping (merged from src_master's docs) for mapper in mappers + [default_mapper_class()]: self.mappers[mapper.name] = mapper self.step = kwargs.get("step", 10000) self.prepared = False def init_state(self): self._state = { "logger": None, "source_backend": None, "target_backend": None, "build_config": None, } @property def logger(self): if not self._state["logger"]: self.prepare() return self._state["logger"] @property def source_backend(self): if self._state["source_backend"] is None: self.prepare() self._state["build_config"] = self._state[ "source_backend"].get_build_configuration(self.build_name) self._state["source_backend"].validate_sources(self.sources) return self._state["source_backend"] @property def target_backend(self): if self._state["target_backend"] is None: self.prepare() return self._state["target_backend"] @property def build_config(self): self._state[ "build_config"] = self.source_backend.get_build_configuration( self.build_name) return self._state["build_config"] @logger.setter def logger(self, value): self._state["logger"] = value @build_config.setter def build_config(self, value): self._state["build_config"] = value def prepare(self, state={}): if self.prepared: return if state: # let's be explicit, _state takes what it wants for k in self._state: self._state[k] = state[k] return if self._partial_source_backend: self._state["source_backend"] = self._partial_source_backend() if self._partial_target_backend: self._state["target_backend"] = self._partial_target_backend() self.setup() self.setup_log() self.prepared = True def unprepare(self): """ reset anything that's not pickable (so self can be pickled) return what's been reset as a dict, so self can be restored once pickled """ # TODO: use copy ? state = { "logger": self._state["logger"], "source_backend": self._state["source_backend"], "target_backend": self._state["target_backend"], "build_config": self._state["build_config"], } for k in state: self._state[k] = None self.prepared = False return state def get_predicates(self): """ Return a list of predicates (functions returning true/false, as in math logic) which instructs/dictates if job manager should start a job (process/thread) """ def no_uploader_running(job_manager): """Uploaders could change the data to be merged...""" num_offenders = 0 # self.sources is not populated. thanks. sources = self.build_config.get("sources", []) offending_sources = set() for src in sources: src_full_name = get_source_fullname(src) offending_sources.add(src_full_name) self.logger.debug("no_uploader_running: src full names %s", offending_sources) for job in job_manager.jobs.values(): if job['category'] == UPLOADER_CATEGORY: if 'source' in job: if job['source'] in offending_sources: num_offenders += 1 self.logger.info( "%s uploader running cannot build for now" % job['source'] ) else: num_offenders += 1 self.logger.warning( "uploader with pinfo: %s running, no source info. 
" "cannot build for now" % job ) else: pass # job is not an uploader return num_offenders == 0 #def no_merger_running(): # """ # Mergers use cache files, if more than one running and caches need to be built # both would try to write on the same cache file # """ # return len([j for j in job_manager.jobs.values() if j["category"] == BUILDER_CATEGORY]) == 0 return [no_uploader_running] def get_pinfo(self): """ Return dict containing information about the current process (used to report in the hub) """ pinfo = { "category": BUILDER_CATEGORY, "source": "%s:%s" % (self.build_name, self.target_backend.target_name), "step": "", "description": "" } preds = self.get_predicates() if preds: pinfo["__predicates__"] = preds return pinfo def setup_log(self): self.logger, _ = get_logger('build_%s' % self.build_name) def check_ready(self, force=False): if force: # don't even bother return src_build_config = self.source_backend.build_config src_dump = self.source_backend.dump _cfg = src_build_config.find_one({'_id': self.build_config['name']}) # check if all resources are uploaded for src_name in _cfg["sources"]: fullname = get_source_fullname(src_name) if not fullname: raise ResourceNotReady("Can't find source '%s'" % src_name) main_name = fullname.split(".")[0] src_doc = src_dump.find_one({"_id": main_name}) if not src_doc: raise ResourceNotReady( "Missing information for source '%s' to start merging" % src_name) if not src_doc.get("upload", {}).get("jobs", {}).get( src_name, {}).get("status") == "success": raise ResourceNotReady( "No successful upload found for resource '%s'" % src_name) def get_target_name(self): return '{}_{}_{}'.format(self.build_name, self.get_build_version(), get_random_string()).lower() def get_build_version(self): """ Generate an arbitrary major build version. Default is using a timestamp (YYMMDD) '.' char isn't allowed in build version as it's reserved for minor versions """ d = datetime.fromtimestamp(self.t0) version_fmt = self.build_config.get("build_version") if not version_fmt: version_fmt = "%Y%m%d" bversion = d.strftime(version_fmt) self.logger.info("Build version: %s", bversion) return bversion def register_status(self, status, transient=False, init=False, **extra): """ Register current build status. A build status is a record in src_build The key used in this dict the target_name. Then, any operation acting on this target_name is registered in a "jobs" list. """ assert self.build_config, "build_config needs to be specified first" # get it from source_backend, kind of weird... 
src_build = self.source_backend.build all_sources = self.build_config.get("sources", []) target_name = "%s" % self.target_backend.target_name backend_url = self.target_backend.get_backend_url() build_info = { '_id': target_name, # TODO: deprecate target_backend & target_name, use backed_url instead 'target_backend': self.target_backend.name, 'target_name': target_name, 'backend_url': backend_url, 'build_config': self.build_config, # these are all the sources required to build target # (not just the ones being processed, those are registered in jobs 'sources': all_sources, } job_info = { 'status': status, 'step_started_at': datetime.now().astimezone(), 'logfile': self.logfile, } if transient: # record some "in-progress" information job_info['pid'] = os.getpid() else: # only register time when it's a final state job_info["time"] = timesofar(self.ti) t1 = round(time.time() - self.ti, 0) job_info["time_in_s"] = t1 if "build" in extra: build_info.update(extra["build"]) if "job" in extra: job_info.update(extra["job"]) # create a new build entry in "build" dict if none exists build = src_build.find_one({'_id': target_name}) if not build: # first record for target_name, keep a timestamp build_info["started_at"] = datetime.fromtimestamp(self.t0).astimezone() build_info["jobs"] = [] src_build.insert_one(build_info) if init: # init timer for this step self.ti = time.time() src_build.update({'_id': target_name}, {"$push": { 'jobs': job_info }}) # now refresh/sync build = src_build.find_one({'_id': target_name}) else: # merge extra at root level # (to keep building data...) and update the last one # (it's been properly created before when init=True) build["jobs"] and build["jobs"][-1].update(job_info) # build_info is common to all jobs, so we want to keep # any existing data (well... except if it's explicitely specified) def merge_build_info(target, d): if "__REPLACE__" in d.keys(): d.pop("__REPLACE__") target = d else: for k, v in d.items(): if type(v) == dict: if k in target: target[k] = merge_build_info(target[k], v) else: v.pop("__REPLACE__", None) # merge v with "nothing" just to make sure to remove any "__REPLACE__" v = merge_build_info({}, v) target[k] = v else: target[k] = v return target build = merge_build_info(build, build_info) src_build.replace_one({"_id": build["_id"]}, build) def clean_old_collections(self): # use target_name is given, otherwise build name will be used # as collection name prefix, so they should start like that prefix = "%s_" % (self.target_name or self.build_name) db = mongo.get_target_db() cols = [c for c in db.collection_names() if c.startswith(prefix)] # timestamp is what's after _archive_, YYYYMMDD, so we can sort it safely cols = sorted(cols, reverse=True) to_drop = cols[self.keep_archive:] for colname in to_drop: self.logger.info("Cleaning old archive collection '%s'" % colname) db[colname].drop() def init_mapper(self, mapper_name): if self.mappers[mapper_name].need_load(): if mapper_name is None: self.logger.info("Initializing default mapper") else: self.logger.info("Initializing mapper name '%s'" % mapper_name) self.mappers[mapper_name].load() def generate_document_query(self, src_name): return None def get_root_document_sources(self): root_srcs = self.build_config.get(self.doc_root_key, []) or [] # check for "not this resource" and adjust the list none_root_srcs = [ src.replace("!", "") for src in root_srcs if src.startswith("!") ] if none_root_srcs: if len(none_root_srcs) != len(root_srcs): raise BuilderException("If using '!' 
operator, all datasources must use it (cannot mix), got: %s" % (repr(root_srcs))) # ok, grab sources for this build, srcs = self.build_config.get("sources", []) root_srcs = list(set(srcs).difference(set(none_root_srcs))) #self.logger.info("'except root' sources %s resolves to root source = %s" % (repr(none_root_srcs),root_srcs)) # resolve possible regex based source name (split-collections sources) root_srcs = self.resolve_sources(root_srcs) return root_srcs def setup(self, sources=None, target_name=None): sources = sources or self.sources # try to get target_name from args, otherwise for now generate it # using mongo backend (it'll be set later during merge() call) target_name = target_name or self.target_name self.target_backend.set_target_name(self.target_name, self.build_name) # root key is optional but if set, it must exist in build config if self.doc_root_key and self.doc_root_key not in self.build_config: raise BuilderException( "Root document key '%s' can't be found in build configuration" % self.doc_root_key) def get_stats(self, sources, job_manager): """ Return a dictionnary of metadata for this build. It's usually app-specific and this method may be overridden as needed. By default though, the total number of documents in the merged collection is stored (key "total") Return dictionary will be merged with any existing metadata in src_build collection. This behavior can be changed by setting a special key within metadata dict: {"__REPLACE__" : True} will... replace existing metadata with the one returned here. "job_manager" is passed in case parallelization is needed. Be aware that this method is already running in a dedicated thread, in order to use job_manager, the following code must be used at the very beginning of its implementation: asyncio.set_event_loop(job_manager.loop) """ total = self.target_backend.target_collection.count() return {"total": total} def get_custom_metadata(self, sources, job_manager): """ If more metadata is required, this method can be overridden and should return a dict. Existing metadata dict will be update with that one before storage. """ return {} def get_mapping(self, sources): """ Merge mappings from src_master """ mapping = {} src_master = self.source_backend.master for collection in self.build_config['sources']: meta = src_master.find_one({"_id": collection}) if 'mapping' in meta and meta["mapping"]: mapping = merge_struct(mapping, meta['mapping']) else: raise BuilderException('"%s" has no mapping data' % collection) return mapping def store_metadata(self, res, sources, job_manager): self.target_backend.post_merge() self.src_meta = self.source_backend.get_src_metadata() # now that we have merge stats (count/srcs) + all src involved # we can propagate stats self.update_src_meta_stats() self.mapping = self.get_mapping(sources) self.stats = self.get_stats(sources, job_manager) self.custom_metadata = self.get_custom_metadata(sources, job_manager) # also search for _meta in build_config bmeta = self.build_config.get("_meta") if bmeta: self.logger.info("Found _meta in build_config, merging: %s" % pformat(bmeta)) self.custom_metadata.update(self.build_config.get("_meta", {})) def update_src_meta_stats(self): for src, count in self.merge_stats.items(): mainsrc = get_source_fullname(src).split(".")[0] self.src_meta.setdefault(mainsrc, {}).setdefault("stats", {}) self.src_meta[mainsrc]["stats"].update({src: count}) def resolve_sources(self, sources): """ Source can be a string that may contain regex chars. 
        It's useful when you have plenty of sub-collections prefixed with a
        source name. For instance, given a source named "blah" stored in as
        many collections as chromosomes, instead of passing each name as
        "blah_1", "blah_2", etc... "blah_.*" can be specified in
        build_config. This method resolves a potential regexed source name
        into real, existing collection names.
        """
        if type(sources) == str:
            sources = [sources]
        src_db = mongo.get_src_db()
        cols = src_db.collection_names()
        masters = self.source_backend.get_src_master_docs()
        found = []
        for src in sources:
            # check if master _id and name are different (meaning name is a regex)
            master = masters.get(src)
            if not master:
                raise BuilderException("'%s' could not be found in master documents (%s)"
                                       % (src, repr(list(masters.keys()))))
            search = src
            if master["_id"] != master["name"]:
                search = master["name"]
            # restrict pattern to minimal match
            pat = re.compile("^%s$" % search)
            for col in cols:
                if pat.match(col):
                    found.append(col)
        return found

    def merge(self, sources=None, target_name=None, force=False, ids=None,
              steps=["merge", "post", "metadata"], job_manager=None,
              *args, **kwargs):
        """Merge given sources into a collection named target_name. If the
        sources argument is omitted, all sources defined for this merger
        will be merged together, according to what is defined in
        src_build_config. If target_name is not defined, a unique name will
        be generated.

        Optional parameters:
          - force=True will bypass any safety check
          - ids: list of _ids to merge, specifically. If None, all
            documents are merged.
          - steps:
            * merge: actual merge step, create merged documents and store them
            * post: once merged, run optional post-merge process
            * metadata: generate and store metadata (depends on merger,
              usually specifies the amount of merged data, source versions,
              etc...)
""" assert job_manager # check what to do if type(steps) == str: steps = [steps] self.t0 = time.time() self.check_ready(force) # normalize avail_sources = self.build_config['sources'] if sources is None: self.target_backend.drop() self.target_backend.prepare() sources = avail_sources # merge all elif isinstance(sources, str): sources = [sources] if ids is None: # nothing passed specifically, let's have a look at the config ids = self.build_config.get("ids") if ids: # config calls for a merge on specific _ids if type(ids) == str: # path to a file m = map(lambda l: l.decode().strip(), open_compressed_file(ids).readlines()) ids = [_id for _id in m if not _id.startswith("#")] orig_sources = sources sources = self.resolve_sources(sources) if not sources and "merge" in steps: raise BuilderException("No source found, got %s while available sources are: %s" % (repr(orig_sources), repr(avail_sources))) if not target_name: target_name = self.get_target_name() self.target_name = target_name self.target_backend.set_target_name(self.target_name) self.custom_metadata = {} self.clean_old_collections() self.logger.info("Merging into target collection '%s'" % self.target_backend.target_name) strargs = "[sources=%s,target_name=%s]" % (sources, target_name) try: @asyncio.coroutine def do(): res = None if "merge" in steps or "post" in steps: job = self.merge_sources(source_names=sources, ids=ids, steps=steps, job_manager=job_manager, *args, **kwargs) res = yield from job if "metadata" in steps: pinfo = self.get_pinfo() pinfo["step"] = "metadata" self.register_status("building", transient=True, init=True, job={"step": "metadata"}) postjob = yield from job_manager.defer_to_thread( pinfo, partial(self.store_metadata, res, sources=sources, job_manager=job_manager)) def stored(f): try: nonlocal res if res: res = f.result( ) # consume to trigger exceptions if any strargs = "[sources=%s,stats=%s]" % \ (sources, self.merge_stats) build_version = self.get_build_version() if "." in build_version: raise BuilderException( "Can't use '.' 
in build version '%s', it's reserved for minor versions" % build_version) # get original start dt src_build = self.source_backend.build build = src_build.find_one({'_id': target_name}) _meta = { "biothing_type": build["build_config"]["doc_type"], "src": self.src_meta, "stats": self.stats, "build_version": build_version, "build_date": datetime.fromtimestamp(self.t0).astimezone().isoformat() } # custom _meta.update(self.custom_metadata) self.register_status('success', build={ "merge_stats": self.merge_stats, "mapping": self.mapping, "_meta": _meta, }) self.logger.info("success %s" % strargs, extra={"notify": True}) # set next step build_conf = AutoBuildConfig(build['build_config']) if build_conf.should_diff_new_build(): pending(target_name, 'diff') if build_conf.should_snapshot_new_build(): pending(target_name, 'snapshot') except Exception as e: strargs = "[sources=%s]" % sources self.register_status("failed", job={"err": repr(e)}) self.logger.exception("failed %s: %s" % (strargs, e), extra={"notify": True}) raise postjob.add_done_callback(stored) yield from postjob task = asyncio.ensure_future(do()) return task except (KeyboardInterrupt, Exception) as e: self.logger.exception(e) self.register_status("failed", job={"err": repr(e)}) self.logger.exception("failed %s: %s" % (strargs, e), extra={"notify": True}) raise def get_mapper_for_source(self, src_name, init=True): # src_name can be a regex (when source has split collections, they are merge but # comes from the same "template" sourcek docs = self.source_backend.get_src_master_docs() mapper_name = None for master_name in docs: pat = re.compile("^%s$" % master_name) if pat.match(src_name): mapper_name = docs[master_name].get("mapper") # TODO: this could be a list try: init and self.init_mapper(mapper_name) mapper = self.mappers[mapper_name] self.logger.info("Found mapper '%s' for source '%s'" % (mapper, src_name)) return mapper except KeyError: raise BuilderException( "Found mapper named '%s' but no mapper associated" % mapper_name) @asyncio.coroutine def merge_sources(self, source_names, steps=["merge", "post"], batch_size=100000, ids=None, job_manager=None): """ Merge resources from given source_names or from build config. Identify root document sources from the list to first process them. ids can a be list of documents to be merged in particular. """ assert job_manager # check what to do if type(steps) == str: steps = [steps] do_merge = "merge" in steps do_post_merge = "post" in steps self.merge_stats = {} self.stats = {} self.mapping = {} # try to identify root document sources amongst the list to first # process them (if any) defined_root_sources = self.get_root_document_sources() root_sources = list( set(source_names).intersection(set(defined_root_sources))) other_sources = list(set(source_names).difference(set(root_sources))) # got root doc sources but not part of the merge ? that's weird... if defined_root_sources and not root_sources: self.logger.warning( "Root document sources found (%s) but not part of the merge..." 
% defined_root_sources) source_names = sorted(source_names) root_sources = sorted(root_sources) other_sources = sorted(other_sources) self.logger.info("Sources to be merged: %s" % source_names) self.logger.info("Root sources: %s" % root_sources) self.logger.info("Other sources: %s" % other_sources) got_error = False @asyncio.coroutine def merge(src_names): jobs = [] for i, src_name in enumerate(src_names): yield from asyncio.sleep(0.0) job = self.merge_source(src_name, batch_size=batch_size, ids=ids, job_manager=job_manager) job = asyncio.ensure_future(job) def merged(f, name, stats): try: res = f.result() stats.update(res) except Exception as e: self.logger.exception( "Failed merging source '%s': %s" % (name, e)) nonlocal got_error got_error = e job.add_done_callback( partial(merged, name=src_name, stats=self.merge_stats)) jobs.append(job) yield from asyncio.wait([job]) # raise error as soon as we know something went wrong if got_error: raise got_error tasks = asyncio.gather(*jobs) yield from tasks if do_merge: if root_sources: self.register_status("building", transient=True, init=True, job={ "step": "merge-root", "sources": root_sources }) self.logger.info("Merging root document sources: %s" % root_sources) yield from merge(root_sources) self.register_status("success", job={ "step": "merge-root", "sources": root_sources }) if other_sources: self.register_status("building", transient=True, init=True, job={ "step": "merge-others", "sources": other_sources }) self.logger.info("Merging other resources: %s" % other_sources) yield from merge(other_sources) self.register_status("success", job={ "step": "merge-others", "sources": other_sources }) self.register_status("building", transient=True, init=True, job={"step": "finalizing"}) self.logger.info("Finalizing target backend") self.target_backend.finalize() self.register_status("success", job={"step": "finalizing"}) else: self.logger.info("Skip data merging") if do_post_merge: self.logger.info("Running post-merge process") self.register_status("building", transient=True, init=True, job={"step": "post-merge"}) pinfo = self.get_pinfo() pinfo["step"] = "post-merge" job = yield from job_manager.defer_to_thread( pinfo, partial(self.post_merge, source_names, batch_size, job_manager)) job = asyncio.ensure_future(job) def postmerged(f): try: self.logger.info("Post-merge completed [%s]" % f.result()) self.register_status("success", job={"step": "post-merge"}) except Exception as e: self.logger.exception("Failed post-merging source: %s" % e) nonlocal got_error got_error = e job.add_done_callback(postmerged) yield from job if got_error: raise got_error else: self.logger.info("Skip post-merge process") yield from asyncio.sleep(0.0) return self.merge_stats def document_cleaner(self, src_name, *args, **kwargs): """ Return a function taking a document as argument, cleaning the doc as needed, and returning that doc. If no function is needed, None. Note: the returned function must be pickleable, careful with lambdas and closures. """ return None @asyncio.coroutine def merge_source(self, src_name, batch_size=100000, ids=None, job_manager=None): # it's actually not optional assert job_manager _query = self.generate_document_query(src_name) # Note: no need to check if there's an existing document with _id (we want to merge only with an existing document) # if the document doesn't exist then the update() call will silently fail. # That being said... 
if no root documents, then there won't be any previously inserted # documents, and this update() would just do nothing. So if no root docs, then upsert # (update or insert, but do something) defined_root_sources = self.get_root_document_sources() upsert = not defined_root_sources or src_name in defined_root_sources if not upsert: self.logger.debug( "Documents from source '%s' will be stored only if a previous document exists with same _id" % src_name) jobs = [] total = self.source_backend[src_name].count() btotal = math.ceil(total / batch_size) bnum = 1 cnt = 0 got_error = False # grab ids only, so we can get more, let's say 10 times more id_batch_size = batch_size * 10 if ids: self.logger.info( "Merging '%s' specific list of _ids, create merger job with batch_size=%d" % (src_name, batch_size)) id_provider = [ids] else: self.logger.info( "Fetch _ids from '%s' with batch_size=%d, and create merger job with batch_size=%d" % (src_name, id_batch_size, batch_size)) id_provider = id_feeder(self.source_backend[src_name], batch_size=id_batch_size) if _query and ids is not None: self.logger.info( "Query/filter involved, but also specific list of _ids. Ignoring query and use _ids" ) if _query and ids is None: self.logger.info( "Query/filter involved, can't use cache to fetch _ids") # use doc_feeder but post-process doc to keep only the _id id_provider = map( lambda docs: [d["_id"] for d in docs], doc_feeder(self.source_backend[src_name], query=_query, step=batch_size, inbatch=True, fields={"_id": 1})) else: # when passing a list of _ids, IDs will be sent to the query, so we need to reduce the batch size id_provider = ids and iter_n(ids, int( batch_size / 100)) or id_feeder(self.source_backend[src_name], batch_size=id_batch_size, logger=self.logger) src_master = self.source_backend.master meta = src_master.find_one({"_id": src_name}) or {} merger = meta.get("merger", "upsert") self.logger.info("Documents from source '%s' will be merged using %s" % (src_name, merger)) doc_cleaner = self.document_cleaner(src_name) for big_doc_ids in id_provider: for doc_ids in iter_n(big_doc_ids, batch_size): # try to put some async here to give control back # (but everybody knows it's a blocking call: doc_feeder) yield from asyncio.sleep(0.1) cnt += len(doc_ids) pinfo = self.get_pinfo() pinfo["step"] = src_name pinfo["description"] = "#%d/%d (%.1f%%)" % (bnum, btotal, (cnt / total * 100)) self.logger.info("Creating merger job #%d/%d, to process '%s' %d/%d (%.1f%%)" % (bnum, btotal, src_name, cnt, total, (cnt/total*100.))) job = yield from job_manager.defer_to_process( pinfo, partial(merger_worker, self.source_backend[src_name].name, self.target_backend.target_name, doc_ids, self.get_mapper_for_source(src_name, init=False), doc_cleaner, upsert, merger, bnum)) def batch_merged(f, batch_num): nonlocal got_error if type(f.result()) != int: got_error = Exception( "Batch #%s failed while merging source '%s' [%s]" % (batch_num, src_name, f.result())) job.add_done_callback(partial(batch_merged, batch_num=bnum)) jobs.append(job) bnum += 1 # raise error as soon as we know if got_error: raise got_error self.logger.info("%d jobs created for merging step" % len(jobs)) tasks = asyncio.gather(*jobs) def done(f): nonlocal got_error if None in f.result(): got_error = Exception("Some batches failed") return # compute overall inserted/updated records (consume result() and check summable) _ = sum(f.result()) tasks.add_done_callback(done) yield from tasks if got_error: raise got_error else: return {"%s" % src_name: cnt} def 
post_merge(self, source_names, batch_size, job_manager): pass class LinkDataBuilder(DataBuilder): """ LinkDataBuilder creates a link to the original datasource to be merged, without actually copying the data (merged collection remains empty). This builder is only valid when using only one datasource (thus no real merge) is declared in the list of sources to be merged, and is useful to prevent data duplication between the datasource itself and the resulting merged collection. """ def __init__(self, build_name, source_backend, target_backend, *args, **kwargs): super().__init__(build_name, source_backend, target_backend=partial(LinkTargetDocMongoBackend), *args, **kwargs) conf = self.source_backend.get_build_configuration(self.build_name) assert len(conf["sources"]) == 1, \ "Found more than one source to link, not allowed: %s" % conf["sources"] assert hasattr(self.target_backend, "datasource_name") self.target_backend.datasource_name = conf["sources"][0] self.target_backend.source_db = self.source_backend @asyncio.coroutine def merge_source(self, src_name, *args, **kwargs): total = self.source_backend[src_name].count() return {"%s" % src_name: total} def fix_batch_duplicates(docs, fail_if_struct_is_different=False): """ Remove duplicates from docs based on _id. If _id's the same but structure is different (not real "duplicates", but different documents with the same _ids), merge docs all together (dict.update) or raise an error if fail_if_struct_is_different. """ dids = {} # docs per _id for d in docs: dids.setdefault(d["_id"], []).append(d) # now check doc structure for each duplicates # if same structure, replace with one occurence of the docs # if not the same, log all the docs as warning, and merge them all # as we would do if we were upserting doc one-by-one (no batch) # note: dict are unhashable (no set) so either compare one each other (n^2-ish) # or use json strings (let's try json...) for _id in dids: jl = set([json.dumps(e, sort_keys=True) for e in dids[_id]]) if len(jl) > 1: # different structure if fail_if_struct_is_different: raise ValueError( "Found duplicated with different document structure: %s" % dids[_id]) else: logging.warning( "Found duplicated with different document structure, merging them altogether: %s" % dids[_id]) # merge docs on top of each other dupdocs = dids[_id] merged = {} [merged.update(d) for d in dupdocs] dids[_id] = merged else: assert len(jl) == 1 # normalize to scalar dids[_id] = dids[_id][0] return list(dids.values()) def merger_worker(col_name, dest_name, ids, mapper, cleaner, upsert, merger, batch_num): try: src = mongo.get_src_db() tgt = mongo.get_target_db() col = src[col_name] dest = DocMongoBackend(tgt, tgt[dest_name]) cur = doc_feeder(col, step=len(ids), inbatch=False, query={'_id': { '$in': ids }}) if cleaner: cur = map(cleaner, cur) mapper.load() docs = [d for d in mapper.process(cur)] # while documents from cursor "cur" are unique, at this point, due to the use # a mapper, documents can be converted and there now can be duplicates (same _id) # (ex: mygene, ensembl -> entrez conversion). "docs" could produce a duplicated error # within the batch, so we need to remove duplicates. 
        all_ids = [d["_id"] for d in docs]
        uniq_ids = set(all_ids)
        if len(all_ids) != len(uniq_ids):
            logging.warning("Found duplicated IDs within batch, trying to fix")
            docs = fix_batch_duplicates(docs)
        if merger == "merge_struct":
            stored_docs = dest.mget_from_ids([d["_id"] for d in docs])
            ddocs = dict([(d["_id"], d) for d in docs])
            for d in stored_docs:
                ddocs[d["_id"]] = merge_struct(d, ddocs[d["_id"]])
            docs = list(ddocs.values())
        cnt = dest.update(docs, upsert=upsert)
        return cnt
    except Exception as e:
        logger_name = "build_%s_%s_batch_%s" % (dest_name, col_name, batch_num)
        logger, _ = get_logger(logger_name, btconfig.LOG_FOLDER)
        logger.exception(e)
        logger.error("col_name: %s, dest_name: %s, ids: see pickle, " % (col_name, dest_name) +
                     "mapper: %s, cleaner: %s, upsert: %s, " % (mapper, cleaner, upsert) +
                     "merger: %s, batch_num: %s" % (merger, batch_num))
        exc_fn = os.path.join(btconfig.LOG_FOLDER, "%s.exc.pick" % logger_name)
        pickle.dump(e, open(exc_fn, "wb"))
        logger.info("Exception was dumped in pickle file '%s'" % exc_fn)
        ids_fn = os.path.join(btconfig.LOG_FOLDER, "%s.ids.pick" % logger_name)
        pickle.dump(ids, open(ids_fn, "wb"))
        logger.info("IDs dumped in pickle file '%s'" % ids_fn)
        dat_fn = os.path.join(btconfig.LOG_FOLDER, "%s.docs.pick" % logger_name)
        pickle.dump(docs, open(dat_fn, "wb"))
        logger.info("Data (batch of docs) dumped in pickle file '%s'" % dat_fn)
        raise


def set_pending_to_build(conf_name=None):
    src_build_config = get_src_build_config()
    qfilter = {}
    if conf_name:
        qfilter = {"_id": conf_name}
    logging.info("Setting pending_to_build flag for configuration(s): %s" %
                 (conf_name and conf_name or "all configurations"))
    src_build_config.update(qfilter, {"$addToSet": {"pending": "build"}})


class BuilderManager(BaseManager):
    def __init__(self,
                 source_backend_factory=None,
                 target_backend_factory=None,
                 builder_class=None,
                 poll_schedule=None,
                 *args,
                 **kwargs):
        """
        BuilderManager deals with the different builders used to merge
        datasources. It is connected to src_build() via sync(), where it grabs
        build information and registers builder classes, ready to be
        instantiated when triggering builds.

        source_backend_factory can be an optional factory function (like a
        partial) that the builder can call without any argument to generate a
        SourceBackend.
        Same for target_backend_factory for the TargetBackend.

        builder_class, if given, will be used as the actual Builder class used
        for the merge and will be passed the same arguments as the base
        DataBuilder. It can also be a list of classes, in which case the
        default used one is the first, when it's necessary to define
        multiple builders.
""" super(BuilderManager, self).__init__(*args, **kwargs) self.src_build_config = get_src_build_config() self.source_backend_factory = source_backend_factory self.target_backend_factory = target_backend_factory builder_class = builder_class or DataBuilder if isinstance(builder_class, list): self.arg_builder_classes = builder_class else: self.arg_builder_classes = [builder_class] self.default_builder_class = self.arg_builder_classes[0] or DataBuilder self.builder_classes = {} self.poll_schedule = poll_schedule self.setup_log() def clean_stale_status(self): src_build = get_src_build() for build in src_build.find(): dirty = False for job in build.get("jobs", []): if job.get("status") == "building": logging.warning( "Found stale build '%s', marking build status as 'canceled'" % build["_id"]) job["status"] = "canceled" dirty = True if dirty: src_build.replace_one({"_id": build["_id"]}, build) @property def source_backend(self): source_backend = self.source_backend_factory and self.source_backend_factory() or \ partial(SourceDocMongoBackend, build_config=partial(get_src_build_config), build=partial(get_src_build), master=partial(get_src_master), dump=partial(get_src_dump), sources=partial(mongo.get_src_db)) return source_backend @property def target_backend(self): target_backend = self.target_backend_factory and self.target_backend_factory() or \ partial(TargetDocMongoBackend, target_db=partial(mongo.get_target_db)) return target_backend def get_builder_class(self, build_config_name): """ builder class can be specified different way (in order): 1. within the build_config document (so, per configuration) 2. or defined in the builder manager (so, per manager) 3. or default to DataBuilder """ builder_class = None conf = self.src_build_config.find_one({"_id": build_config_name}) if conf.get("builder_class"): builder_class = self.builder_classes[ conf["builder_class"]]["class"] elif self.default_builder_class: builder_class = self.default_builder_class else: builder_class = DataBuilder return builder_class def register_builder(self, build_name): # will use partial to postponse object creations and their db connection # as we don't want to keep connection alive for undetermined amount of time # declare source backend def create(build_name): # postpone config import so app had time to set it up # before actual call time from biothings import config # assemble the whole klass = self.get_builder_class(build_name) self.logger.info("Build config '%s' will use builder class %s", build_name, klass) bdr = klass(build_name, source_backend=self.source_backend, target_backend=self.target_backend, log_folder=config.LOG_FOLDER) return bdr self.register[build_name] = partial(create, build_name) def get_builder(self, col_name): doc = get_src_build().find_one({"_id": col_name}) if not doc: raise BuilderException("No such build named '%s'" % repr(col_name)) assert "build_config" in doc, "Expecting build_config information" klass = self.get_builder_class(doc["build_config"]["name"]) bdr = klass(doc["build_config"]["name"], source_backend=self.source_backend, target_backend=self.target_backend, log_folder=btconfig.LOG_FOLDER) # overwrite with existing values bdr.build_config = doc["build_config"] bdr.target_backend.set_target_name(col_name) return bdr def delete_merged_data(self, merge_name): target_db = mongo.get_target_db() col = target_db[merge_name] col.drop() def delete_merge(self, merge_name): """Delete merged collections and associated metadata""" db = get_src_build() meta = db.find_one({"_id": merge_name}) if 
meta: db.remove({"_id": merge_name}) else: self.logger.warning( "No metadata found for merged collection '%s'" % merge_name) self.delete_merged_data(merge_name) def archive_merge(self, merge_name): """Delete merged collections and associated metadata""" db = get_src_build() meta = db.find_one({"_id": merge_name}) if meta: meta["archived"] = datetime.now() db.replace_one({"_id": merge_name}, meta) else: self.logger.warning( "No metadata found for merged collection '%s'" % merge_name) self.delete_merged_data(merge_name) def get_query_for_list_merge(self, only_archived): q = {"archived": {"$exists": 0}} if only_archived: q = {"archived": {"$exists": 1}} return q def list_merge(self, build_config=None, only_archived=False): q = self.get_query_for_list_merge(only_archived) docs = get_src_build().find(q) by_confs = {} for d in docs: by_confs.setdefault( d.get("build_config", {}).get("name", None), []).append(d["_id"]) if build_config: return sorted(by_confs.get(build_config, [])) else: for conf in by_confs: by_confs[conf] = sorted(by_confs[conf]) return by_confs def setup_log(self): self.logger, self.logfile = get_logger("buildmanager") def __getitem__(self, build_name): """ Return an instance of a builder for the build named 'build_name' Note: each call returns a different instance (factory call behind the scene...) """ # we'll get a partial class but will return an instance pclass = BaseManager.__getitem__(self, build_name) return pclass() def configure(self): """Sync with src_build_config and register all build config""" self.register = {} self.builder_classes = {} for conf in self.src_build_config.find(): self.register_builder(conf["_id"]) self.find_builder_classes() def resolve_builder_class(self, klass): """ Resolve class/partial definition to (obj,"type","mod.class") where names (class name, module, docstring, etc...) can directly be accessed whether it's a standard class or not """ obj = klass if type(klass) == partial: assert type(klass.func) == type btype = "partial" obj = klass.func elif type(klass) == type: btype = "class" else: raise TypeError("Unknown type for builder %s" % repr(klass)) modstr = obj.__module__ classstr = obj.__name__ classpathstr = "%s.%s" % (modstr, classstr) return (obj, btype, classpathstr) def find_builder_classes(self): """ Find all available build class: 1. classes passed during manager init (build_class) (that includes the default builder) 2. all subclassing DataBuilder in: a. biothings.hub.databuilder.* b. hub.databuilder.* (app-specific) """ bclasses = set(self.arg_builder_classes) mods = [sys.modules[__name__]] try: import hub.databuild as m mods.append(m) except ImportError: pass for klass in find_classes_subclassing(mods, DataBuilder): bclasses.add(klass) for klass in bclasses: try: obj, btype, classpathstr = self.resolve_builder_class(klass) helpstr = obj.__doc__ and " ".join( map(str.strip, obj.__doc__.splitlines())) self.builder_classes[classpathstr] = { "desc": helpstr, "type": btype, "class": klass, "default": klass == self.default_builder_class, } except Exception as e: logging.exception( "Can't extract information from builder class %s: %s" % (repr(klass), e)) def merge(self, build_name, sources=None, target_name=None, **kwargs): """ Trigger a merge for build named 'build_name'. Optional list of sources can be passed (one single or a list). target_name is the target collection name used to store to merge data. If none, each call will generate a unique target_name. 
""" try: bdr = self[build_name] job = bdr.merge(sources, target_name, job_manager=self.job_manager, **kwargs) return job except KeyError: raise BuilderException("No such builder for '%s'" % build_name) except ResourceNotReady as e: raise BuilderException( "Some datasources aren't ready for the merge: %s" % e) def list_sources(self, build_name): """ List all registered sources used to trigger a build named 'build_name' """ info = self.src_build_config.find_one({"_id": build_name}) return info and info["sources"] or [] def whatsnew(self, build_name=None, old=None): """ Return datasources which have changed since last time (last time is datasource information from metadata, either from given old src_build doc name, or the latest found if old=None) """ dbbuild = get_src_build() dbdump = get_src_dump() def whatsnewcomparedto(build_name, old=None): if old is None: # TODO: this will get big... but needs to be generic # because we handle different hub db backends (or it needs to be a # specific helper func to be defined all backends # FIXME: this gets slower as hub gets more builds, we are # finding all builds of all build configs when /whatsnew gets # requested builds = dbbuild.find({"build_config.name": build_name}) builds = sorted(builds, key=lambda e: e["started_at"]) if builds: old = builds[-1] else: raise BuilderException( "Can't find a build associated to config '%s'" % build_name) else: old = dbbuild.find_one({"_id": old}) meta_srcs = old.get("_meta", {}).get("src", {}) new = { "old_build": { "name": old["_id"], "built_at": old["started_at"] }, "sources": {} } for src_name, data in meta_srcs.items(): try: srcd = dbdump.find_one({"_id": src_name}) try: if srcd and srcd.get('download'): if not srcd['download']['status'] == "success": srcd = None if srcd and srcd.get('upload'): for sub in srcd['upload']['jobs'].values(): if not sub['status'] == "success": srcd = None break except KeyError as e: self.logger.warning( "whatsnew: src_dump:%s missing keys: %s, not touching", src_name, e ) if srcd and not srcd.get("download") and srcd.get( "upload"): # this is a collection only source, find all releases in sub sources, hopefully all are the same rels = [ sub["release"] for sub in srcd["upload"]["jobs"].values() ] srels = set(rels) if len(srels) != 1: raise ValueError( "Found different releases in sub-sources, expected only one common: %s" % repr(rels)) rel = rels[0] if data.get("version") and rel != data["version"]: new["sources"][src_name] = { "old": { "version": data["version"] }, "new": { "version": rel } } elif srcd and srcd.get("download", {}).get( "release" ) and srcd["download"]["release"] != data["version"]: new["sources"][src_name] = { "old": { "version": data["version"] }, "new": { "version": srcd["download"]["release"], "downloaded_at": srcd["download"].get("started_at") } } except Exception as e: self.logger.warning( "Can't check what's new for source '%s': %s" % (src_name, e)) return {build_name: new} if old is None and build_name is None: # do this for all build configs dbbuildconfig = get_src_build_config() configs = {} for d in dbbuildconfig.find({ "$or": [{ "archived": { "$exists": 0 } }, { "archived": False }]}): try: news = whatsnewcomparedto(d["_id"]) if news[d["_id"]]["sources"]: configs.update(news) except BuilderException: continue return configs else: return whatsnewcomparedto(build_name, old) def clean_temp_collections(self, build_name, date=None, prefix=''): """ Delete all target collections created from builder named "build_name" at given date (or any date is none given 
-- carefull...). Date is a string (YYYYMMDD or regex) Common collection name prefix can also be specified if needed. """ target_db = mongo.get_target_db() for col_name in target_db.collection_names(): search = prefix and prefix + "_" or "" search += build_name + '_' search += date and date + '_' or '' pat = re.compile(search) if pat.match(col_name) and 'current' not in col_name: logging.info("Dropping target collection '%s" % col_name) target_db[col_name].drop() def poll(self): """ Check "whatsnew()" to idenfity builds which could be automatically built, if {"autobuild" : {...}} is part of the build configuration. "autobuild" contains a dict with "schedule" (aiocron/crontab format), so each build configuration can have a different polling schedule. """ # don't use $exists in find(), not all hub backend implements that logger, _ = get_logger('autobuild') schedules = { conf["_id"]: conf["autobuild"]["schedule"] for conf in get_src_build_config().find() if conf.get("autobuild", {}).get("schedule") } @asyncio.coroutine def _autobuild(conf_name): new = self.whatsnew(conf_name) logger.info(f"{conf_name}:{schedules[conf_name]}") logger.info(f"{conf_name}:{new}") if new[conf_name]["sources"]: self.merge(conf_name) logger.info(f"{conf_name}:merge(*)") else: logger.info(f"{conf_name}:pass") logger.info(datetime.now().astimezone()) logger.info(schedules) # all schedules for _id, _schedule in schedules.items(): try: aiocron.crontab( _schedule, func=partial(_autobuild, _id), start=True, loop=self.job_manager.loop ) except Exception: logger.exception((_id, _schedule)) def trigger_merge(self, doc): return self.merge(doc["_id"]) def build_config_info(self): configs = {} err = None for name in self.register: try: builder = self[name] except Exception as e: conf = get_src_build_config().find_one({"_id": name}) if conf: builder = dotdict({"build_config": conf}) else: builder = None err = str(e) if not builder or issubclass(builder.target_backend.__class__, LinkTargetDocMongoBackend) or \ issubclass(builder.__class__, dict): # fake builder obj target_db = None # it's not a traditional target database, it's pointing to # somewhere else (TODO: maybe LinkTargetDocMongoBackend should # implement more methods to return info about that else: target_db = builder.target_backend.target_collection.database.client.address configs[name] = { "build_config": builder and builder.build_config, "archived": "archived" in (builder and builder.build_config or []) } if builder and builder.source_backend: configs[name]["source_backend"] = { "type": builder and builder.source_backend.__class__.__name__, "source_db": builder and builder.source_backend.sources.client.address, } if builder and builder.target_backend: configs[name]["target_backend"] = { "type": builder and builder.target_backend.__class__.__name__, "target_db": target_db } if err: configs[name]["error"] = err if builder and builder.mappers: configs[name]["mapper"] = {} for mappername, mapper in builder.mappers.items(): configs[name]["mapper"][ mappername] = mapper.__class__.__name__ res = {"build_configs": configs} # dict contains an actual class, non-serializable, so adjust: bclasses = copy.deepcopy(self.builder_classes) for k, v in bclasses.items(): v.pop("class") res["builder_classes"] = bclasses return res def build_info(self, id=None, conf_name=None, fields=None, only_archived=False): """ Return build information given an build _id, or all builds if _id is None. 
"fields" can be passed to select which fields to return or not (mongo notation for projections), if None return everything except: - "mapping" (too long) If id is None, more are filtered: - "sources" and some of "build_config" only_archived=True will return archived merges only """ res = {} q = self.get_query_for_list_merge(only_archived) if id is not None: q = {"_id": id} else: fields = {} fields["mapping"] = 0 fields["sources"] = 0 fields["build_config.sources"] = 0 fields["build_config.root"] = 0 if conf_name is not None: q["build_config._id"] = conf_name builds = [b for b in get_src_build().find(q, fields)] res = [ b for b in sorted( builds, key=lambda e: str(e["started_at"]), reverse=True) ] # set a global status (ie. latest job's status) # + get total #docs db = mongo.get_target_db() for b in res: jobs = b.get("jobs", []) b["status"] = "unknown" if jobs: b["status"] = jobs[-1]["status"] try: backend = create_backend(b["backend_url"]) b["count"] = backend.count() except KeyError: b["count"] = db[b["_id"]].count() if id: if res: return res.pop() else: raise ValueError("No such build named '%s'" % id) else: return res def upsert_build_conf(self, name, doc_type, sources, roots, builder_class, params, archived): col = get_src_build_config() builder_class = builder_class or self.resolve_builder_class( self.default_builder_class)[2] # class path string doc = { "_id": name, "name": name, "doc_type": doc_type, "sources": sources, "root": roots, "builder_class": builder_class } if archived: doc["archived"] = True else: doc.pop("archived", None) doc.update(params) col.save(doc) self.configure() def create_build_configuration(self, name, doc_type, sources, roots=[], builder_class=None, params={}, archived=False): col = get_src_build_config() # check conf doesn't exist yet if [d for d in col.find({"_id": name})]: raise ValueError("Configuration named '%s' already exists" % name) self.upsert_build_conf(name, doc_type, sources, roots, builder_class, params, archived) def update_build_configuration(self, name, doc_type, sources, roots=[], builder_class=None, params={}, archived=False): self.upsert_build_conf(name, doc_type, sources, roots, builder_class, params, archived) def delete_build_configuration(self, name): col = get_src_build_config() col.remove({"_id": name}) self.configure() def save_mapping(self, name, mapping=None, dest="build", mode="mapping"): logging.debug("Saving mapping for build '%s' destination='%s':\n%s" % (name, dest, pformat(mapping))) src_build = get_src_build() m = src_build.find_one({"_id": name}) assert m, "Can't find build document for '%s'" % name # either given a fully qualified source or just sub-source if dest == "build": m["mapping"] = mapping src_build.save(m) elif dest == "inspect": try: m["inspect"]["results"][mode] = mapping src_build.save(m) except KeyError as e: raise ValueError( "Can't save mapping, document doesn't contain expected inspection data" % e) else: raise ValueError("Unknow saving destination: %s" % repr(dest))
biothings/biothings.api
biothings/hub/databuild/builder.py
Python
apache-2.0
74,414
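The duplicate handling in fix_batch_duplicates above is easy to check in isolation. Below is a minimal standalone sketch of the same merge-on-conflict idea, using only the stdlib; the function name dedup_batch and the sample docs are made up for illustration, they are not part of biothings.

import json

def dedup_batch(docs, fail_if_struct_is_different=False):
    # Group docs by _id, as fix_batch_duplicates does.
    dids = {}
    for d in docs:
        dids.setdefault(d["_id"], []).append(d)
    for _id, dups in dids.items():
        # dicts aren't hashable, so compare canonical JSON strings
        structs = set(json.dumps(d, sort_keys=True) for d in dups)
        if len(structs) > 1:
            if fail_if_struct_is_different:
                raise ValueError("conflicting docs for %s" % _id)
            merged = {}
            for d in dups:      # later docs win, just like dict.update
                merged.update(d)
            dids[_id] = merged
        else:
            dids[_id] = dups[0]
    return list(dids.values())

docs = [{"_id": "a", "x": 1}, {"_id": "a", "y": 2}, {"_id": "b", "x": 3}]
print(dedup_batch(docs))
# -> [{'_id': 'a', 'x': 1, 'y': 2}, {'_id': 'b', 'x': 3}]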
"""Unit tests for the ``tables`` module. Each test case in this module tests a single table. For example, the ``CampaignTableTestCase`` tests just the ``CampaignTable`` table. """ from django.test import TestCase from gurps_manager import factories, models, tables # pylint: disable=E1101 # Class 'FooForm' has no 'create' member (no-member) # Instance of 'FooForm' has no 'is_valid' member (no-member) # Instance of 'FooFormTestCase' has no 'assertTrue' member (no-member) # # pylint: disable=R0904 # Classes inheriting from TestCase will have 60+ too many public methods, and # that's not something I have control over. Ignore it. class CampaignTableTestCase(TestCase): """Tests for ``CampaignTable``.""" def setUp(self): """Instantiate a ``CampaignTable`` object.""" self.user = factories.UserFactory.create() table_cls = tables.campaign_table(self.user) self.table = table_cls(models.Campaign.objects.all()) def test_render_description(self): """Test method ``render_description``.""" string = factories._random_str(130, 150) self.assertEqual( self.table.render_description(string), tables._truncate_string(string) # pylint: disable=W0212 ) def test_render_actions_v1(self): """Test method ``render_actions``.""" campaign = factories.CampaignFactory.create() self.assertTrue(issubclass( type(self.table.render_actions(campaign)), str )) def test_render_actions_v2(self): """Test method ``render_actions``.""" campaign = factories.CampaignFactory.create(owner=self.user) self.assertTrue(issubclass( type(self.table.render_actions(campaign)), str )) class CharacterTableTestCase(TestCase): """Tests for ``CharacterTable``.""" def setUp(self): """Instantiate a ``CharacterTable`` object.""" self.user = factories.UserFactory.create() table_cls = tables.character_table(self.user) self.table = table_cls(models.Character.objects.all()) def test_render_spent_points(self): """Test method ``render_spent_points``.""" character = factories.CharacterFactory.create() self.assertEqual( self.table.render_spent_points(character), character.total_points_spent() ) def test_render_description(self): """Test method ``render_description``.""" string = factories._random_str(130, 150) self.assertEqual( self.table.render_description(string), tables._truncate_string(string) # pylint: disable=W0212 ) def test_render_story(self): """Test method ``render_story``.""" string = factories._random_str(130, 150) self.assertEqual( self.table.render_story(string), tables._truncate_string(string) # pylint: disable=W0212 ) def test_render_actions_v1(self): """Test method ``render_actions``.""" character = factories.CharacterFactory.create() self.assertTrue(issubclass( type(self.table.render_actions(character)), str )) def test_render_actions_v2(self): """Test method ``render_actions``.""" character = factories.CharacterFactory.create(owner=self.user) self.assertTrue(issubclass( type(self.table.render_actions(character)), str )) class TraitTableTestCase(TestCase): """Tests for ``TraitTable``.""" def setUp(self): """Instantiate a ``TraitTable`` object.""" self.table = tables.TraitTable(models.Trait.objects.all()) def test_render_description(self): """Test method ``render_description``.""" string = factories._random_str(130, 150) self.assertEqual( self.table.render_description(string), tables._truncate_string(string) # pylint: disable=W0212 ) class HitLocationTableTestCase(TestCase): """Tests for ``HitLocationTable``.""" def setUp(self): """Instantiate a ``HitLocationTable`` object.""" self.table = tables.HitLocationTable(models.HitLocation.objects.all()) 
    def test_render_status(self):
        """Test method ``render_status``."""
        string = factories._random_str(130, 150)
        self.assertEqual(
            self.table.render_status(string),
            tables._truncate_string(string) # pylint: disable=W0212
        )

class ItemTableTestCase(TestCase):
    """Tests for ``ItemTable``."""
    def setUp(self):
        """Instantiate an ``ItemTable`` object."""
        self.table = tables.ItemTable(models.Item.objects.all())

    def test_render_description(self):
        """Test method ``render_description``."""
        string = factories._random_str(130, 150)
        self.assertEqual(
            self.table.render_description(string),
            tables._truncate_string(string) # pylint: disable=W0212
        )
Ichimonji10/gurps-manager
apps/gurps_manager/test_tables.py
Python
gpl-3.0
5,019
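These tests compare render output against tables._truncate_string, which is not shown in this file. A plausible sketch of such a helper, assuming it cuts long strings to a fixed width and appends an ellipsis; the 128-character cutoff is only inferred from the 130-150 character test strings, not taken from the source.

def _truncate_string(string, max_length=128):
    # Hypothetical sketch: the real gurps_manager helper may differ.
    # Return the string unchanged if it is short enough, otherwise cut
    # it and append an ellipsis so table cells stay readable.
    if len(string) <= max_length:
        return string
    return string[:max_length] + '...'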
#!/usr/bin/env python # -*- coding: UTF-8 -*- import unittest from satellite_sanity_lib.rules import sat6_task_pending class TestSat6TaskPending(unittest.TestCase): def test_match(self): input_data = {} input_data['hammer_task_list_paused_pending'] = \ """Id,Name,Owner,Started at,Ended at,State,Result,Task action,Task errors 9b4eb9a5-e21f-4fad-bd82-401ea28f0a71,,admin,2016/01/15 02:22:48,"",paused,pending,Synchronize,Abnormal termination (previous state: suspended) c055be91-4e31-4d21-91f1-3f09cb23a300,,,2016/01/15 02:21:37,"",paused,pending,Listen on candlepin events,Abnormal termination (previous state: suspended) 990c21f3-ee59-45e8-bad2-5d4d86ff2057,,admin,2016/01/15 02:08:13,"",paused,pending,Publish,Abnormal termination (previous state: running) 863557b2-091d-4b95-8606-3d95ac90257f,,admin,2016/01/15 02:07:59,"",paused,pending,Generate Capsule Metadata and Sync,Abnormal termination (previous state: suspended) bec05459-bb11-453c-b264-6a9dec4bba60,,,2016/01/14 18:32:01,"",paused,pending,Listen on candlepin events,Abnormal termination (previous state: suspended)""".split("\n") self.assertEqual({'count': 5}, sat6_task_pending.main(input_data)) def test_notmatch(self): input_data = {} input_data['hammer_task_list_paused_pending'] = ["Id,Name,Owner,Started at,Ended at,State,Result,Task action,Task errors"] self.assertEqual(None, sat6_task_pending.main(input_data))
RedHatSatellite/satellite-sanity
satellite_sanity_lib/rules/tests/test_sat6_task_pending.py
Python
gpl-3.0
1,420
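The rule module under test is not shown; judging from the fixture, it counts the data rows of hammer's CSV output and returns None when only the header row is left. A standalone sketch of that counting logic (the function name and everything beyond the fixture's dict-in/dict-out shape are assumptions):

import csv

def count_paused_pending(input_data):
    # input_data mimics the test fixture: a list of CSV lines whose
    # first entry is the header row.
    lines = input_data['hammer_task_list_paused_pending']
    rows = list(csv.reader(lines))
    count = len(rows) - 1  # drop the header
    if count <= 0:
        return None        # no paused/pending tasks -> rule doesn't match
    return {'count': count}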
# Demonstrates Python 2 closure scoping: g() reads `a` from the
# enclosing scope of f() (a = 2), not the module-level a = 1.
a = 1

def f():
    a = 2
    def g():
        print a  # resolved in f()'s enclosing scope -> prints 2
    return g

func = f()
func()  # -> 2
wonghoifung/learning-python
python_yuanmapouxi/C.py
Python
mit
65
import json from kraken.core.maths import * mat33 = Euler().toMat33() print "mat33:" + str(mat33) print "clone:" + str(mat33.clone()) otherMat33 = Mat33() otherMat33.row0 = Vec3(0, 1, 0) print "equal:" + str(mat33 == otherMat33) print "not equal:" + str(mat33 != otherMat33) print "add:" + str(mat33 + otherMat33) print "subtract:" + str(mat33 - otherMat33) print "multiply:" + str(mat33 * otherMat33)
goshow-jp/Kraken
tests/MathTests/mat33.py
Python
bsd-3-clause
408
# -*- coding: utf-8 -*-


class Solution:
    # @param m, an integer
    # @param n, an integer
    # @return an integer
    def rangeBitwiseAnd(self, m, n):
        # The AND of all integers in [m, n] keeps only the common binary
        # prefix of m and n: any bit that changes somewhere in the range
        # ends up 0 in the result. Shift right until m == n to isolate
        # that prefix, then shift it back into place.
        offset = 0
        while m != n:
            m = m >> 1
            n = n >> 1
            offset = offset + 1
        return m << offset
AtlantisFox/Green-Point-Challenge
leetcode/Bitwise_AND_of_Numbers_Range.py
Python
mit
299
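A few quick checks of the solution above; the expected values are verified by hand:

s = Solution()
print(s.rangeBitwiseAnd(5, 7))    # 101 & 110 & 111 = 100 -> 4
print(s.rangeBitwiseAnd(12, 15))  # 1100..1111 share prefix 11 -> 12
print(s.rangeBitwiseAnd(0, 1))    # any range crossing a power of 2 -> 0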
""" This page is in the table of contents. Cleave is a script to cleave a shape into svg slice layers. ==Settings== ===Add Layer Template to SVG=== Default is on. When selected, the layer template will be added to the svg output, which adds javascript control boxes. So 'Add Layer Template to SVG' should be selected when the svg will be viewed in a browser. When off, no controls will be added, the svg output will only include the fabrication paths. So 'Add Layer Template to SVG' should be deselected when the svg will be used by other software, like Inkscape. ===Extra Decimal Places=== Default is one. Defines the number of extra decimal places export will output compared to the number of decimal places in the layer thickness. The higher the 'Extra Decimal Places', the more significant figures the output numbers will have. ===Import Coarseness=== Default is one. When a triangle mesh has holes in it, the triangle mesh slicer switches over to a slow algorithm that spans gaps in the mesh. The higher the 'Import Coarseness' setting, the wider the gaps in the mesh it will span. An import coarseness of one means it will span gaps of the perimeter width. ===Layer Thickness=== Default is 0.4 mm. Defines the thickness of the layer, this is the most important cleave setting. ===Layers=== Cleave slices from bottom to top. To get a single layer, set the "Layers From" to zero and the "Layers To" to one. The layer from until layer to range is a python slice. ====Layers From==== Default is zero. Defines the index of the bottom layer that will be cleaved. If the layer from is the default zero, the carving will start from the lowest layer. If the 'Layers From' index is negative, then the carving will start from the 'Layers From' index below the top layer. ====Layers To==== Default is a huge number, which will be limited to the highest index layer. Defines the index of the top layer that will be cleaved. If the 'Layers To' index is a huge number like the default, the carving will go to the top of the model. If the 'Layers To' index is negative, then the carving will go to the 'Layers To' index below the top layer. ===Mesh Type=== Default is 'Correct Mesh'. ====Correct Mesh==== When selected, the mesh will be accurately cleaved, and if a hole is found, cleave will switch over to the algorithm that spans gaps. ====Unproven Mesh==== When selected, cleave will use the gap spanning algorithm from the start. The problem with the gap spanning algothm is that it will span gaps, even if there is not actually a gap in the model. ===Perimeter Width=== Default is two millimeters. Defines the width of the perimeter. ===SVG Viewer=== Default is webbrowser. If the 'SVG Viewer' is set to the default 'webbrowser', the scalable vector graphics file will be sent to the default browser to be opened. If the 'SVG Viewer' is set to a program name, the scalable vector graphics file will be sent to that program to be opened. ==Examples== The following examples cleave the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and cleave.py. > python cleave.py This brings up the cleave dialog. > python cleave.py Screw Holder Bottom.stl The cleave tool is parsing the file: Screw Holder Bottom.stl .. The cleave tool has created the file: .. Screw Holder Bottom_cleave.svg > python Python 2.5.1 (r251:54863, Sep 22 2007, 01:43:31) [GCC 4.2.1 (SUSE Linux)] on linux2 Type "help", "copyright", "credits" or "license" for more information. 
>>> import cleave >>> cleave.main() This brings up the cleave dialog. >>> cleave.writeOutput('Screw Holder Bottom.stl') The cleave tool is parsing the file: Screw Holder Bottom.stl .. The cleave tool has created the file: .. Screw Holder Bottom_cleave.svg """ from __future__ import absolute_import try: import psyco psyco.full() except: pass #Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module. import __init__ from fabmetheus_utilities import euclidean from fabmetheus_utilities import gcodec from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret from fabmetheus_utilities import settings from fabmetheus_utilities import svg_writer from skeinforge_application.skeinforge_utilities import skeinforge_polyfile from skeinforge_application.skeinforge_utilities import skeinforge_profile import math import os import sys import time __author__ = 'Enrique Perez ([email protected])' __date__ = "$Date: 2008/02/05 $" __license__ = 'GPL 3.0' def getCraftedText( fileName, gcodeText = '', repository = None ): "Get cleaved text." if fileName.endswith('.svg'): gcodeText = gcodec.getTextIfEmpty(fileName, gcodeText) if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'cleave'): return gcodeText carving = svg_writer.getCarving(fileName) if carving == None: return '' if repository == None: repository = CleaveRepository() settings.getReadRepository(repository) return CleaveSkein().getCarvedSVG( carving, fileName, repository ) def getNewRepository(): "Get the repository constructor." return CleaveRepository() def writeOutput( fileName = ''): "Cleave a GNU Triangulated Surface file. If no fileName is specified, cleave the first GNU Triangulated Surface file in this folder." startTime = time.time() print('File ' + gcodec.getSummarizedFileName(fileName) + ' is being cleaved.') repository = CleaveRepository() settings.getReadRepository(repository) cleaveGcode = getCraftedText( fileName, '', repository ) if cleaveGcode == '': return suffixFileName = fileName[ : fileName.rfind('.') ] + '_cleave.svg' suffixDirectoryName = os.path.dirname(suffixFileName) suffixReplacedBaseName = os.path.basename(suffixFileName).replace(' ', '_') suffixFileName = os.path.join( suffixDirectoryName, suffixReplacedBaseName ) gcodec.writeFileText( suffixFileName, cleaveGcode ) print('The cleaved file is saved as ' + gcodec.getSummarizedFileName(suffixFileName) ) print('It took %s to cleave the file.' % euclidean.getDurationString( time.time() - startTime ) ) settings.openSVGPage( suffixFileName, repository.svgViewer.value ) class CleaveRepository: "A class to handle the cleave settings." def __init__(self): "Set the default settings, execute title & settings fileName." 
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.cleave.html', self ) self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getTranslatorFileTypeTuples(), 'Open File to be Cleaved', self, '') self.addLayerTemplateToSVG = settings.BooleanSetting().getFromValue('Add Layer Template to SVG', self, True) self.extraDecimalPlaces = settings.IntSpin().getFromValue( 0, 'Extra Decimal Places (integer):', self, 2, 1 ) self.importCoarseness = settings.FloatSpin().getFromValue( 0.5, 'Import Coarseness (ratio):', self, 2.0, 1.0 ) self.layerThickness = settings.FloatSpin().getFromValue( 0.1, 'Layer Thickness (mm):', self, 1.0, 0.4 ) self.layersFrom = settings.IntSpin().getFromValue( 0, 'Layers From (index):', self, 20, 0 ) self.layersTo = settings.IntSpin().getSingleIncrementFromValue( 0, 'Layers To (index):', self, 912345678, 912345678 ) self.meshTypeLabel = settings.LabelDisplay().getFromName('Mesh Type: ', self, ) importLatentStringVar = settings.LatentStringVar() self.correctMesh = settings.Radio().getFromRadio( importLatentStringVar, 'Correct Mesh', self, True ) self.unprovenMesh = settings.Radio().getFromRadio( importLatentStringVar, 'Unproven Mesh', self, False ) self.perimeterWidth = settings.FloatSpin().getFromValue( 0.4, 'Perimeter Width (mm):', self, 4.0, 2.0 ) settings.LabelSeparator().getFromRepository(self) self.svgViewer = settings.StringSetting().getFromValue('SVG Viewer:', self, 'webbrowser') settings.LabelSeparator().getFromRepository(self) self.executeTitle = 'Cleave' def execute(self): "Cleave button has been clicked." fileNames = skeinforge_polyfile.getFileOrDirectoryTypes( self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled ) for fileName in fileNames: writeOutput(fileName) class CleaveSkein: "A class to cleave a carving." def getCarvedSVG( self, carving, fileName, repository ): "Parse gnu triangulated surface text and store the cleaved gcode." layerThickness = repository.layerThickness.value perimeterWidth = repository.perimeterWidth.value carving.setCarveLayerThickness( layerThickness ) importRadius = 0.5 * repository.importCoarseness.value * abs( perimeterWidth ) carving.setCarveImportRadius( max( importRadius, 0.01 * layerThickness ) ) carving.setCarveIsCorrectMesh( repository.correctMesh.value ) rotatedBoundaryLayers = carving.getCarveRotatedBoundaryLayers() if len( rotatedBoundaryLayers ) < 1: print('There are no slices for the model, this could be because the model is too small.') return '' layerThickness = carving.getCarveLayerThickness() decimalPlacesCarried = max( 0, 1 + repository.extraDecimalPlaces.value - int( math.floor( math.log10( layerThickness ) ) ) ) svgWriter = svg_writer.SVGWriter(repository.addLayerTemplateToSVG.value, carving, decimalPlacesCarried, perimeterWidth) truncatedRotatedBoundaryLayers = svg_writer.getTruncatedRotatedBoundaryLayers(repository, rotatedBoundaryLayers) return svgWriter.getReplacedSVGTemplate( fileName, 'cleave', truncatedRotatedBoundaryLayers, carving.getFabmetheusXML()) def main(): "Display the cleave dialog." if len( sys.argv ) > 1: writeOutput(' '.join( sys.argv[1 :] ) ) else: settings.startMainLoopFromConstructor( getNewRepository() ) if __name__ == "__main__": main()
natetrue/ReplicatorG
skein_engines/skeinforge-31/skeinforge_application/skeinforge_plugins/craft_plugins/cleave.py
Python
gpl-2.0
9,814
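The decimal-places computation in getCarvedSVG is easier to follow with concrete numbers. Using the default settings from CleaveRepository (layer thickness 0.4 mm, one extra decimal place):

import math

layerThickness = 0.4      # default 'Layer Thickness'
extraDecimalPlaces = 1    # default 'Extra Decimal Places'

# floor(log10(0.4)) == -1, so the carving carries
# max(0, 1 + 1 - (-1)) == 3 decimal places.
decimalPlacesCarried = max(0, 1 + extraDecimalPlaces
                           - int(math.floor(math.log10(layerThickness))))
print(decimalPlacesCarried)  # -> 3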
# -*- coding: utf-8 -*- # Generated by Django 1.10.6 on 2017-05-23 16:52 from __future__ import unicode_literals import django.core.validators from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('bookings', '0004_auto_20170523_1637'), ] operations = [ migrations.AlterField( model_name='bookingoccurrence', name='end', field=models.DateTimeField(verbose_name='fin'), ), migrations.AlterField( model_name='bookingoccurrence', name='start', field=models.DateTimeField(verbose_name='début'), ), migrations.AlterField( model_name='resource', name='number', field=models.PositiveIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)], verbose_name='quantité'), ), migrations.AlterField( model_name='resourcelock', name='end', field=models.DateTimeField(verbose_name='fin'), ), migrations.AlterField( model_name='resourcelock', name='start', field=models.DateTimeField(verbose_name='début'), ), ]
BdEINSALyon/resa
bookings/migrations/0005_auto_20170523_1652.py
Python
gpl-3.0
1,255
import sqlalchemy as sa from sqlalchemy.ext.declarative import declarative_base from sqlalchemy_utils import get_columns class TestGetColumns(object): def setup_method(self, method): Base = declarative_base() class Building(Base): __tablename__ = 'building' id = sa.Column('_id', sa.Integer, primary_key=True) name = sa.Column('_name', sa.Unicode(255)) self.Building = Building def test_table(self): assert isinstance( get_columns(self.Building.__table__), sa.sql.base.ImmutableColumnCollection ) def test_instrumented_attribute(self): assert get_columns(self.Building.id) == [self.Building.__table__.c._id] def test_column_property(self): assert get_columns(self.Building.id.property) == [ self.Building.__table__.c._id ] def test_column(self): assert get_columns(self.Building.__table__.c._id) == [ self.Building.__table__.c._id ] def test_declarative_class(self): assert isinstance( get_columns(self.Building), sa.util._collections.OrderedProperties ) def test_declarative_object(self): assert isinstance( get_columns(self.Building()), sa.util._collections.OrderedProperties ) def test_mapper(self): assert isinstance( get_columns(self.Building.__mapper__), sa.util._collections.OrderedProperties ) def test_class_alias(self): assert isinstance( get_columns(sa.orm.aliased(self.Building)), sa.util._collections.OrderedProperties ) def test_table_alias(self): alias = sa.orm.aliased(self.Building.__table__) assert isinstance( get_columns(alias), sa.sql.base.ImmutableColumnCollection )
rmoorman/sqlalchemy-utils
tests/functions/test_get_columns.py
Python
bsd-3-clause
1,915
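Read as usage documentation, the tests above say get_columns accepts tables, columns, instrumented attributes, declarative classes and instances, mappers and aliases. A condensed usage sketch built from the same fixture (outputs depend on installed SQLAlchemy versions):

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_utils import get_columns

Base = declarative_base()

class Building(Base):
    __tablename__ = 'building'
    id = sa.Column('_id', sa.Integer, primary_key=True)
    name = sa.Column('_name', sa.Unicode(255))

# Works on the class, the table, or a single mapped attribute.
print(list(get_columns(Building)))            # column properties
print(list(get_columns(Building.__table__)))  # raw table columns
print(get_columns(Building.id))               # [building._id]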
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""Support routing table configuration on Router

Revision ID: 1c33fa3cd1a1
Revises: 45680af419f9
Create Date: 2013-01-17 14:35:09.386975

"""

# revision identifiers, used by Alembic.
revision = '1c33fa3cd1a1'
down_revision = '45680af419f9'

# Change to ['*'] if this migration applies to all plugins

migration_for_plugins = [
    'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2',
    'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2',
    'neutron.plugins.nec.nec_plugin.NECPluginV2',
    'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2',
    'neutron.plugins.metaplugin.meta_neutron_plugin.MetaPluginV2'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugin=None, options=None):
    if not migration.should_run(active_plugin, migration_for_plugins):
        return

    op.rename_table(
        'routes',
        'subnetroutes',
    )
    op.create_table(
        'routerroutes',
        sa.Column('destination', sa.String(length=64), nullable=False),
        sa.Column(
            'nexthop', sa.String(length=64), nullable=False),
        sa.Column('router_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(
            ['router_id'], ['routers.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('destination', 'nexthop', 'router_id')
    )


def downgrade(active_plugin=None, options=None):
    if not migration.should_run(active_plugin, migration_for_plugins):
        return

    op.rename_table(
        'subnetroutes',
        'routes',
    )
    op.drop_table('routerroutes')
armando-migliaccio/neutron
neutron/db/migration/alembic_migrations/versions/1c33fa3cd1a1_extra_route_config.py
Python
apache-2.0
2,289
from twisted.trial import unittest from opennsa import nsa class LabelTest(unittest.TestCase): def testLabelParsing(self): self.assertEquals(nsa.Label('', '1,2').values, [ (1,2) ] ) self.assertEquals(nsa.Label('', '1,2,3').values, [ (1,3) ] ) self.assertEquals(nsa.Label('', '1-2,3').values, [ (1,3) ] ) self.assertEquals(nsa.Label('', '1-3,2').values, [ (1,3) ] ) self.assertEquals(nsa.Label('', '1-3,3,1-2').values, [ (1,3) ] ) self.assertEquals(nsa.Label('', '2-4,8,1-3').values, [ (1,4), (8,8) ] ) def testLabelIntersection(self): l12 = nsa.Label('', '1,2') l123 = nsa.Label('', '1,2,3') l234 = nsa.Label('', '2-4') l48 = nsa.Label('', '4-8') self.assertEquals( l12.intersect(l12).values, [ (1,2) ] ) self.assertEquals( l12.intersect(l123).values, [ (1,2) ] ) self.assertEquals( l12.intersect(l234).values, [ (2,2) ] ) self.assertEquals( l123.intersect(l234).values, [ (2,3) ] ) self.assertEquals( l234.intersect(l48).values, [ (4,4) ] ) self.assertRaises(nsa.EmptyLabelSet, l12.intersect, l48) def testLabelValueEnumeration(self): self.assertEquals(nsa.Label('', '1-2,3').enumerateValues(), [ 1,2,3 ] ) self.assertEquals(nsa.Label('', '1-3,2').enumerateValues(), [ 1,2,3 ] ) self.assertEquals(nsa.Label('', '1-3,3,1-2').enumerateValues(), [ 1,2,3 ] ) self.assertEquals(nsa.Label('', '2-4,8,1-3').enumerateValues(), [ 1,2,3,4,8 ] ) def testContainedLabelsIntersection(self): self.failUnlessEquals(nsa.Label('', '80-89').intersect(nsa.Label('','81-82')).enumerateValues(), [ 81,82] ) def testIntersectedLabelUnderAndSingleValued(self): self.failUnlessRaises(nsa.EmptyLabelSet, nsa.Label('', '1781-1784').intersect, nsa.Label('', '1780-1780') )
jab1982/opennsa
test/test_nsa.py
Python
bsd-3-clause
1,927
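The parsing tests pin down Label's normalization rule: a value string collapses to sorted ranges, with overlapping and adjacent ranges merged. A standalone sketch reproducing that behavior (this is not opennsa's implementation, just the same contract, checked against the test expectations):

def parse_label_values(spec):
    # '2-4,8,1-3' -> [(1, 4), (8, 8)]
    ranges = []
    for part in spec.split(','):
        if '-' in part:
            lo, hi = (int(x) for x in part.split('-'))
        else:
            lo = hi = int(part)
        ranges.append((lo, hi))
    ranges.sort()
    merged = [ranges[0]]
    for lo, hi in ranges[1:]:
        last_lo, last_hi = merged[-1]
        if lo <= last_hi + 1:   # overlapping or adjacent -> merge
            merged[-1] = (last_lo, max(last_hi, hi))
        else:
            merged.append((lo, hi))
    return merged

assert parse_label_values('2-4,8,1-3') == [(1, 4), (8, 8)]
assert parse_label_values('1-3,3,1-2') == [(1, 3)]
assert parse_label_values('1,2') == [(1, 2)]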
import experiment
from ..util import dirs
from ..util import file_handling as fh

from optparse import OptionParser
import sys


def main():
    usage = "%prog project logfile "
    parser = OptionParser(usage=usage)
    parser.add_option('-n', dest='new_name', default=None,
                      help='New name for experiment: default= old name + _rerun')
    #parser.add_option('--boolarg', action="store_true", dest="boolarg", default=False,
    #                  help='Keyword argument: default=%default')

    (options, args) = parser.parse_args()
    project = args[0]
    log_filename = args[1]
    new_name = options.new_name

    log = fh.read_json(log_filename)
    if new_name is None:
        new_name = log['name'] + '_rerun'
    log['name'] = new_name

    float_vars = ['best_alpha', 'alpha_exp_base', 'max_alpha_exp', 'min_alpha_exp', 'orig_T', 'tau']
    for v in float_vars:
        if v in log:
            if log[v] is not None:
                log[v] = float(log[v])
            else:
                log[v] = None

    #if log['reuse'] == 'False':
    #    log['reuse'] = False
    #else:
    #    log['reuse'] = True

    # convert list string to list
    #list_vars = ['feature_list', 'additional_label_files', 'additional_label_weights']
    #for v in list_vars:
    #    if v in log:
    #        print v
    #        print log[v]
    #        quoted_strings = [p.strip() for p in log[v][1:-1].split(',')]
    #        print quoted_strings
    #        log[v] = [p[1:-1] for p in quoted_strings]
    #        print log[v]
    #        print '\n'

    #print log
    #if 'additional_label_weights' in log:
    #    log['additional_label_weights'] = [float(w) for w in log['additional_label_weights']]

    dirs.make_base_dir(project)

    print log
    result = experiment.run_experiment(**log)
    print result


if __name__ == '__main__':
    main()
dallascard/guac
core/experiment/rerun.py
Python
apache-2.0
1,879
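The float-coercion loop in main() exists because numeric values read back from a JSON log may have been stringified when the log was written. A tiny self-contained illustration of the same round trip, with made-up log contents:

import json

# A log whose numeric fields were saved as strings at some point.
log = json.loads('{"name": "exp1", "tau": "0.5", "orig_T": null}')

for v in ('tau', 'orig_T'):
    if v in log and log[v] is not None:
        log[v] = float(log[v])

print(log)  # {'name': 'exp1', 'tau': 0.5, 'orig_T': None}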
# -*- coding: utf-8 -*- # # Copyright (C) 2009-2012: # Gabes Jean, [email protected] # Gerhard Lausser, [email protected] # Gregory Starck, [email protected] # Hartmut Goebel, [email protected] # # This file is part of Shinken. # # Shinken is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Shinken is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Shinken. If not, see <http://www.gnu.org/licenses/>. import Queue class MyLifoQueue(Queue.Queue): """A class that implements a Fifo. Python versions < 2.5 do not have the Queue.LifoQueue class. MyLifoQueue overwrites methods of the Queue.Queue class and then behaves like Queue.LifoQueue. """ def _init(self, maxsize): self.maxsize = maxsize self.queue = [] def _qsize(self, len=len): return len(self.queue) def _put(self, item): self.queue.append(item) def _get(self): return self.queue.pop() class TopBaseLiveStatusStack(object): pass class LiveStatusStack(TopBaseLiveStatusStack): """A Lifo queue for filter functions. This class inherits either from MyLifoQueue or Queue.LifoQueue whatever is available with the current python version. Public functions: and_elements -- takes a certain number (given as argument) of filters from the stack, creates a new filter and puts this filter on the stack. If these filters are lambda functions, the new filter is a boolean and of the underlying filters. If the filters are sql where-conditions, they are also concatenated with and to form a new string containing a more complex where-condition. or_elements --- the same, only that the single filters are combined with a logical or. """ def __xinit__(self, *args, **kw): self.type = 'lambda' print "i am a", type(self) print "my parents are", [c.__name__ for c in self.__class__.__bases__] print "my first parent is", self.__class__.__bases__[0].__name__ if self.__class__.__name__ == 'LiveStatusStack': self.__class__.__bases__[0].__init__(self, *args, **kw) def not_elements(self): top_filter = self.get_stack() def negate_filter(ref): return not top_filter(ref) self.put_stack(negate_filter) def and_elements(self, num): """Take num filters from the stack, and them and put the result back""" if num > 1: filters = [] for _ in range(num): filters.append(self.get_stack()) # Take from the stack: # Make a combined anded function # Put it on the stack # List of functions taking parameter ref def and_filter(ref): myfilters = filters failed = False for filt in myfilters: if not filt(ref): failed = True break else: pass return not failed self.put_stack(and_filter) def or_elements(self, num): """Take num filters from the stack, or them and put the result back""" if num > 1: filters = [] for _ in range(num): filters.append(self.get_stack()) def or_filter(ref): myfilters = filters failed = True # Applying the filters in reversed order is faster. 
# (Shown by measuring runtime)
                for filt in reversed(myfilters):
                    if filt(ref):
                        failed = False
                        break
                    else:
                        pass
                return not failed

            self.put_stack(or_filter)

    def get_stack(self):
        """Return the top element from the stack or a filter which is always true"""
        if self.qsize() == 0:
            return lambda x: True
        else:
            return self.get()

    def put_stack(self, element):
        """Wrapper for a stack put operation which corresponds to get_stack"""
        self.put(element)


try:
    Queue.LifoQueue
    TopBaseLiveStatusStack.__bases__ = (Queue.LifoQueue, object)
    #LiveStatusStack.__bases__ += (Queue.LifoQueue, )
except AttributeError:
    # Python 2.4 and 2.5 do not have it.
    # Use our own implementation.
    TopBaseLiveStatusStack.__bases__ = (MyLifoQueue, object)
    #LiveStatusStack.__bases__ += (MyLifoQueue, )
baloo/shinken
shinken/modules/livestatus_broker/livestatus_stack.py
Python
agpl-3.0
4,999
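The and_elements/or_elements machinery above combines filter functions held on a stack. A standalone miniature of the same idea without the Queue subclassing (FilterStack is illustrative only, not Shinken's API):

class FilterStack(object):
    def __init__(self):
        self.stack = []

    def put_stack(self, f):
        self.stack.append(f)

    def get_stack(self):
        # Same fallback as LiveStatusStack: empty stack -> always-true filter
        return self.stack.pop() if self.stack else (lambda ref: True)

    def and_elements(self, num):
        filters = [self.get_stack() for _ in range(num)]
        self.put_stack(lambda ref: all(f(ref) for f in filters))

    def or_elements(self, num):
        filters = [self.get_stack() for _ in range(num)]
        self.put_stack(lambda ref: any(f(ref) for f in filters))

s = FilterStack()
s.put_stack(lambda host: host['state'] == 0)
s.put_stack(lambda host: host['name'].startswith('web'))
s.and_elements(2)
ok = s.get_stack()
print(ok({'state': 0, 'name': 'web01'}))  # True
print(ok({'state': 1, 'name': 'web01'}))  # False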
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Utility functions for sum_adjoint in pypaw :copyright: Wenjie Lei ([email protected]), 2016 :license: GNU Lesser General Public License, version 3 (LGPLv3) (http://www.gnu.org/licenses/lgpl-3.0.en.html) """ from __future__ import print_function, division, absolute_import import numpy as np import copy from obspy import UTCDateTime from pytomo3d.signal.rotate import rotate_one_station_stream from pytomo3d.adjoint.process_adjsrc import convert_stream_to_adjs from pytomo3d.adjoint.process_adjsrc import convert_adjs_to_stream from pytomo3d.adjoint.process_adjsrc import add_missing_components from pyadjoint import AdjointSource def check_adj_consistency(adj_base, adj): """ Check the consistency of adj_base and adj If passed, return, then adj could be added into adj_base If not, raise ValueError """ if adj_base.network != adj.network or \ adj_base.station != adj.station or \ adj_base.component != adj.component: raise ValueError("Adjoint source network or station is different:" "%s, %s" % (adj_base.id, adj.id)) if not np.isclose(adj_base.dt, adj.dt): raise ValueError("DeltaT of current adjoint source(%f)" "and new added adj(%f) not the same" % (adj_base.dt, adj.dt)) if np.abs(adj_base.starttime - adj.starttime) > 0.5 * adj.dt: raise ValueError("Start time of current adjoint source({})" "and new added adj({}) not the same (dt: {})".format( adj_base.starttime, adj.starttime, adj.dt)) if len(adj_base.adjoint_source) != len(adj.adjoint_source): raise ValueError("Dimension of current adjoint_source(%d)" "and new added adj(%d) not the same" % (len(adj_base.adjoint_source), len(adj.adjoint_source))) def check_events_consistent(events): """ Check all events are consistent(same with each other) """ fn_base = list(events.keys())[0] event_base = events[fn_base] diffs = [] for asdf_fn, event in events.items(): if event_base != event: diffs.append(asdf_fn) if len(diffs) != 0: raise ValueError("Event information in %s not the same as others: %s" % (diffs, fn_base)) def load_to_adjsrc(adj): """ Load from asdf file adjoint source to pyadjoint.AdjointSources """ starttime = UTCDateTime(adj.parameters["starttime"]) _id = adj.parameters["station_id"] nw, sta = _id.split(".") comp = adj.parameters["component"] loc = adj.parameters["location"] new_adj = AdjointSource(adj.parameters["adjoint_source_type"], adj.parameters["misfit"], adj.parameters["dt"], adj.parameters["min_period"], adj.parameters["max_period"], comp, adjoint_source=np.array(adj.data), network=nw, station=sta, location=loc, starttime=starttime) station_info = {"latitude": adj.parameters["latitude"], "longitude": adj.parameters["longitude"], "elevation_in_m": adj.parameters["elevation_in_m"], "depth_in_m": adj.parameters["depth_in_m"], "station": sta, "network": nw, "location": loc} return new_adj, station_info def dump_adjsrc(adj, station_info): """ Combine the adj(pyadjoint.AdjointSource) and station information to form the adjoint content for ASDF file """ adj_array = np.asarray(adj.adjoint_source, dtype=np.float32) station_id = "%s.%s" % (adj.network, adj.station) starttime = "T".join(str(adj.starttime).split()) parameters = \ {"dt": adj.dt, "starttime": starttime, "misfit": adj.misfit, "adjoint_source_type": adj.adj_src_type, "min_period": adj.min_period, "max_period": adj.max_period, "location": adj.location, "latitude": station_info["latitude"], "longitude": station_info["longitude"], "elevation_in_m": station_info["elevation_in_m"], "depth_in_m": station_info["depth_in_m"], "station_id": 
station_id, "component": adj.component, "units": "m"} adj_path = "%s_%s_%s" % (adj.network, adj.station, adj.component) return adj_array, adj_path, parameters def create_weighted_adj(adj, weight): new_adj = copy.deepcopy(adj) new_adj.adjoint_source *= weight new_adj.misfit *= weight new_adj.location = "" return new_adj def sum_adj_to_base(adj_base, adj, weight): check_adj_consistency(adj_base, adj) adj_base.adjoint_source += weight * adj.adjoint_source adj_base.misfit += weight * adj.misfit adj_base.min_period = min(adj.min_period, adj_base.min_period) adj_base.max_period = max(adj.max_period, adj_base.max_period) def check_station_consistent(sta1, sta2): for key in sta1: if key == "location": # don't check location continue if key not in sta2: return False if isinstance(sta1[key], float): if not np.isclose(sta1[key], sta2[key]): return False else: if sta1[key] != sta2[key]: return False return True def get_station_adjsrcs(adjsrcs, sta_tag): """ Extract three components for a specific sta_tag """ comp_list = ["MXR", "MXT", "MXZ"] adj_list = [] for comp in comp_list: adj_name = "%s_%s" % (sta_tag, comp) if adj_name in adjsrcs: adj_list.append(adjsrcs[adj_name]) return adj_list def rotate_one_station_adjsrcs(sta_adjs, slat, slon, elat, elon): adj_stream, meta_info = convert_adjs_to_stream(sta_adjs) add_missing_components(adj_stream) rotate_one_station_stream( adj_stream, elat, elon, station_latitude=slat, station_longitude=slon, mode="RT->NE") new_adjs = convert_stream_to_adjs(adj_stream, meta_info) adj_dict = {} for _adj in new_adjs: adj_id = "%s_%s_%s" % (_adj.network, _adj.station, _adj.component) adj_dict[adj_id] = _adj return adj_dict def rotate_adjoint_sources(old_adjs, stations, event_latitude, event_longitude): print("="*15 + "\nRotate adjoint sources from RT to EN") done_sta_list = [] new_adjs = {} for adj_id, adj in old_adjs.items(): network = adj.network station = adj.station sta_tag = "%s_%s" % (network, station) if sta_tag not in done_sta_list: slat = stations[sta_tag]["latitude"] slon = stations[sta_tag]["longitude"] sta_adjs = get_station_adjsrcs(old_adjs, sta_tag) adj_dict = rotate_one_station_adjsrcs( sta_adjs, slat, slon, event_latitude, event_longitude) new_adjs.update(adj_dict) return new_adjs
wjlei1990/pytomo3d
pytomo3d/adjoint/sum_adjoint.py
Python
lgpl-3.0
7,230
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution Addon # Copyright (C) 2009-2013 IRSID (<http://irsid.ru>), # Paul Korotkov ([email protected]). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import osv, fields class edu_time_category(osv.Model): _name = 'edu.time.category' _description = 'Time Category' # Fields _columns = { 'name': fields.char( 'Name', size = 64, required = True, ), 'code': fields.char( 'Code', size = 16, required = True, ), }
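
# Editor's note -- hypothetical usage sketch, not part of the original addon.
# Under the old OpenERP 7 ORM API, a record of this model could be created
# from server-side code roughly like this (`cr` and `uid` come from the
# calling context; the field values are illustrative):
#
#     category_obj = self.pool.get('edu.time.category')
#     category_id = category_obj.create(
#         cr, uid, {'name': 'Lecture', 'code': 'LEC'})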
prospwro/odoo
addons/irsid_edu_training/models/time_category.py
Python
agpl-3.0
1,424
import random


class TetrisObject:
    """A TetrisObject represents the actual object that the player can
    control. It consists of four blocks (positioned depending on the shape)
    and can move down, to the sides or be rotated."""

    def __init__(self):
        """The constructor; randomly picks one of the possible shapes."""
        # Actual position
        self.x = 3
        self.y = 0
        # The current state of rotation
        self.rotation = 0

        rand = random.randint(0, 6)
        # Every shape has a series of possible rotations and a color
        if rand == 0:
            # J-shape
            self.shape = (((0, 1), (1, 1), (2, 1), (2, 2)),
                          ((1, 0), (1, 1), (1, 2), (0, 2)),
                          ((0, 0), (0, 1), (1, 1), (2, 1)),
                          ((2, 0), (1, 0), (1, 1), (1, 2)))
            self.color = (0, 255, 0)
        elif rand == 1:
            # L-shape
            self.shape = (((0, 2), (0, 1), (1, 1), (2, 1)),
                          ((0, 0), (1, 0), (1, 1), (1, 2)),
                          ((0, 1), (1, 1), (2, 1), (2, 0)),
                          ((1, 0), (1, 1), (1, 2), (2, 2)))
            self.color = (0, 0, 255)
        elif rand == 2:
            # S-shape
            self.shape = (((0, 2), (1, 2), (1, 1), (2, 1)),
                          ((1, 0), (1, 1), (2, 1), (2, 2)))
            self.color = (0, 255, 255)
        elif rand == 3:
            # T-shape
            self.shape = (((0, 1), (1, 1), (1, 2), (2, 1)),
                          ((1, 0), (1, 1), (0, 1), (1, 2)),
                          ((0, 1), (1, 1), (1, 0), (2, 1)),
                          ((1, 0), (1, 1), (2, 1), (1, 2)))
            self.color = (255, 255, 0)
        elif rand == 4:
            # Z-shape
            self.shape = (((0, 1), (1, 1), (1, 2), (2, 2)),
                          ((1, 2), (1, 1), (2, 1), (2, 0)))
            self.color = (255, 0, 255)
        elif rand == 5:
            # |-shape
            self.shape = (((0, 2), (1, 2), (2, 2), (3, 2)),
                          ((2, 0), (2, 1), (2, 2), (2, 3)))
            self.color = (128, 128, 128)
        elif rand == 6:
            # box-shape
            self.shape = (((1, 1), (1, 2), (2, 1), (2, 2)),
                          ((1, 1), (1, 2), (2, 1), (2, 2)))
            self.color = (0, 0, 0)

    def get_pos(self):
        """Returns a tuple of the positions of the shape's 4 blocks"""
        return tuple((bx + self.x, by + self.y)
                     for bx, by in self.shape[self.rotation])

    def rotate(self, blocks):
        """Rotates the shape of this object if the resulting shape after
        rotation is at a valid position."""
        new_shape = self.shape[(self.rotation + 1) % len(self.shape)]
        for bx, by in new_shape:
            if (bx + self.x < 0 or bx + self.x >= len(blocks[0])
                    or blocks[by + self.y][bx + self.x]):
                return
        self.rotation = (self.rotation + 1) % len(self.shape)

    def move_x(self, dx, blocks):
        """Moves this object to the left (negative dx) or to the right
        (positive dx) if possible"""
        # If the new position won't be valid, set dx to 0
        for px, py in self.get_pos():
            if (px + dx < 0 or px + dx >= len(blocks[0])
                    or blocks[py][px + dx]):
                dx = 0
        # Update the position
        self.x += dx

    def move_y(self, blocks):
        """Moves this object downwards if possible. Returns True if it was
        successfully moved, False otherwise"""
        # If the new position is not valid, return False
        for px, py in self.get_pos():
            if py + 1 >= len(blocks) or blocks[py + 1][px]:
                return False
        # Update the position and return True when finished
        self.y += 1
        return True
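

# Editor's usage sketch -- not part of the original file. Drives one randomly
# chosen piece on an empty 10x20 board; `blocks` is the grid of settled
# cells, indexed as blocks[y][x], where 0 means empty.
if __name__ == "__main__":
    blocks = [[0] * 10 for _ in range(20)]
    piece = TetrisObject()
    piece.move_x(1, blocks)      # shift one column to the right
    piece.rotate(blocks)         # rotate if the new footprint is free
    while piece.move_y(blocks):  # drop until the piece hits the floor
        pass
    print(piece.get_pos())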
introprogramming/exercises
exercises/tetris/TetrisObject.py
Python
mit
4,329
import logging from prompt_toolkit.keys import Keys from prompt_toolkit.key_binding.manager import KeyBindingManager from prompt_toolkit.filters import Condition from .filters import HasSelectedCompletion _logger = logging.getLogger(__name__) def pgcli_bindings(get_vi_mode_enabled, set_vi_mode_enabled): """ Custom key bindings for pgcli. """ assert callable(get_vi_mode_enabled) assert callable(set_vi_mode_enabled) key_binding_manager = KeyBindingManager( enable_open_in_editor=True, enable_system_bindings=True, enable_vi_mode=Condition(lambda cli: get_vi_mode_enabled())) @key_binding_manager.registry.add_binding(Keys.F2) def _(event): """ Enable/Disable SmartCompletion Mode. """ _logger.debug('Detected F2 key.') buf = event.cli.current_buffer buf.completer.smart_completion = not buf.completer.smart_completion @key_binding_manager.registry.add_binding(Keys.F3) def _(event): """ Enable/Disable Multiline Mode. """ _logger.debug('Detected F3 key.') buf = event.cli.current_buffer buf.always_multiline = not buf.always_multiline @key_binding_manager.registry.add_binding(Keys.F4) def _(event): """ Toggle between Vi and Emacs mode. """ _logger.debug('Detected F4 key.') set_vi_mode_enabled(not get_vi_mode_enabled()) @key_binding_manager.registry.add_binding(Keys.Tab) def _(event): """ Force autocompletion at cursor. """ _logger.debug('Detected <Tab> key.') b = event.cli.current_buffer if b.complete_state: b.complete_next() else: event.cli.start_completion(select_first=True) @key_binding_manager.registry.add_binding(Keys.ControlSpace) def _(event): """ Initialize autocompletion at cursor. If the autocompletion menu is not showing, display it with the appropriate completions for the context. If the menu is showing, select the next completion. """ _logger.debug('Detected <C-Space> key.') b = event.cli.current_buffer if b.complete_state: b.complete_next() else: event.cli.start_completion(select_first=False) @key_binding_manager.registry.add_binding(Keys.ControlJ, filter=HasSelectedCompletion()) def _(event): """ Makes the enter key work as the tab key only when showing the menu. """ _logger.debug('Detected <C-J> key.') event.current_buffer.complete_state = None b = event.cli.current_buffer b.complete_state = None return key_binding_manager
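

# Editor's usage sketch -- not part of the original file. In pgcli proper the
# getter/setter pair comes from the main application object; here two trivial
# callables closing over a dict stand in for them.
if __name__ == '__main__':
    _vi = {'enabled': False}
    manager = pgcli_bindings(lambda: _vi['enabled'],
                             lambda value: _vi.update(enabled=value))
    # manager.registry is what gets handed to the prompt_toolkit application.
    print(manager.registry)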
d33tah/pgcli
pgcli/key_bindings.py
Python
bsd-3-clause
2,756
from demosys.test.testcase import DemosysTestCase from demosys.effects.registry import effects class TextTestCase(DemosysTestCase): """Crude test executing text code""" def setUp(self): effects.add_package('demosys.effects.text') self.project.load() def test_create(self): instance = self.project.create_effect( 'TextWriter2D', 'TextWriter2D', (4, 4), text_lines=[ "ABCD", "!@#$", "abcd", "1234", ] ) instance.draw((0, 0), size=1.0)
Contraz/demosys-py
tests/test_text.py
Python
isc
613
'''
Created on Dec 23, 2013

@author: Chris
'''

import sys
import wx

from gooey.gui.lang import i18n
from gooey.gui.message_event import EVT_MSG


class MessagePump(object):
    def __init__(self):
        # self.queue = queue
        self.stdout = sys.stdout

    # Overrides stdout's write method
    def write(self, text):
        raise NotImplementedError


class RuntimeDisplay(wx.Panel):
    def __init__(self, parent, build_spec, **kwargs):
        wx.Panel.__init__(self, parent, **kwargs)
        self.build_spec = build_spec

        self._init_properties()
        self._init_components()
        self._do_layout()
        # self._HookStdout()

    def _init_properties(self):
        self.SetBackgroundColour('#F0F0F0')

    def _init_components(self):
        self.text = wx.StaticText(self, label=i18n._("status"))
        self.cmd_textbox = wx.TextCtrl(
            self, -1, "",
            style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_RICH)
        if self.build_spec.get('monospace_display'):
            pointsize = self.cmd_textbox.GetFont().GetPointSize()
            # wx.Font signature: (pointSize, family, style, weight, underline)
            font = wx.Font(pointsize, wx.FONTFAMILY_MODERN,
                           wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False)
            self.cmd_textbox.SetFont(font)

    def _do_layout(self):
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.AddSpacer(10)
        sizer.Add(self.text, 0, wx.LEFT, 30)
        sizer.AddSpacer(10)
        sizer.Add(self.cmd_textbox, 1,
                  wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND, 30)
        sizer.AddSpacer(20)
        self.SetSizer(sizer)

        self.Bind(EVT_MSG, self.OnMsg)

    def _HookStdout(self):
        _stdout = sys.stdout
        _stdout_write = _stdout.write
        sys.stdout = MessagePump()
        sys.stdout.write = self.WriteToDisplayBox

    def AppendText(self, txt):
        self.cmd_textbox.AppendText(txt)

    def WriteToDisplayBox(self, txt):
        if txt != '':
            self.AppendText(txt)

    def OnMsg(self, evt):
        pass
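

# Editor's smoke-test sketch -- not part of the original file. Hosts the
# panel in a bare frame; `build_spec` is a stand-in dict carrying only the
# key this module reads, and i18n._("status") assumes Gooey's translation
# strings have already been loaded.
if __name__ == '__main__':
    app = wx.App(False)
    frame = wx.Frame(None, title='RuntimeDisplay demo')
    panel = RuntimeDisplay(frame, {'monospace_display': False})
    panel.AppendText('hello from the runtime display\n')
    frame.Show()
    app.MainLoop()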
lrq3000/pyFileFixity
pyFileFixity/lib/gooey/gui/windows/runtime_display_panel.py
Python
mit
1,831
from __future__ import print_function, division import matplotlib import logging from sys import stdout matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab! from neuralnilm import (Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer, BidirectionalRecurrentLayer) from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff from neuralnilm.experiment import run_experiment, init_experiment from neuralnilm.net import TrainingError from neuralnilm.layers import MixtureDensityLayer from neuralnilm.objectives import (scaled_cost, mdn_nll, scaled_cost_ignore_inactive, ignore_inactive, scaled_cost3) from neuralnilm.plot import MDNPlotter, CentralOutputPlotter from lasagne.nonlinearities import sigmoid, rectify, tanh from lasagne.objectives import mse, binary_crossentropy from lasagne.init import Uniform, Normal from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer, RecurrentLayer) from lasagne.updates import nesterov_momentum, momentum from functools import partial import os import __main__ from copy import deepcopy from math import sqrt import numpy as np import theano.tensor as T NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0] #PATH = "/homes/dk3810/workspace/python/neuralnilm/figures" PATH = "/data/dk3810/figures" SAVE_PLOT_INTERVAL = 500 GRADIENT_STEPS = 100 source_dict = dict( filename='/data/dk3810/ukdale.h5', appliances=[ ['fridge freezer', 'fridge', 'freezer'], 'hair straighteners', 'television', 'dish washer', ['washer dryer', 'washing machine'] ], on_power_thresholds=[5] * 5, min_on_durations=[60, 60, 60, 1800, 1800], min_off_durations=[12, 12, 12, 1800, 600], window=("2013-06-01", "2014-07-01"), seq_length=1024, # random_window=64, output_one_appliance=False, boolean_targets=False, train_buildings=[1], validation_buildings=[1], skip_probability=0.9, one_target_per_seq=False, n_seq_per_batch=16, subsample_target=4, include_diff=False, include_power=True, # clip_appliance_power=True, target_is_prediction=False, # independently_center_inputs = True, standardise_input=True, unit_variance_targets=True, input_padding=2, lag=0 # classification=True # reshape_target_to_2D=True # input_stats={'mean': np.array([ 0.05526326], dtype=np.float32), # 'std': np.array([ 0.12636775], dtype=np.float32)}, # target_stats={ # 'mean': np.array([ 0.04066789, 0.01881946, # 0.24639061, 0.17608672, 0.10273963], # dtype=np.float32), # 'std': np.array([ 0.11449792, 0.07338708, # 0.26608968, 0.33463112, 0.21250485], # dtype=np.float32)} ) N = 50 net_dict = dict( save_plot_interval=SAVE_PLOT_INTERVAL, # loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH), # loss_function=lambda x, t: mdn_nll(x, t).mean(), loss_function=lambda x, t: mse(x, t).mean(), # loss_function=lambda x, t: binary_crossentropy(x, t).mean(), # loss_function=partial(scaled_cost, loss_func=mse), # loss_function=ignore_inactive, # loss_function=partial(scaled_cost3, ignore_inactive=False), updates_func=momentum, learning_rate=1e-1, learning_rate_changes_by_iteration={ 100: 1e-2, # 400: 1e-3, # 800: 1e-4 # 500: 1e-3 # 4000: 1e-03, # 6000: 5e-06, # 7000: 1e-06 # 2000: 5e-06 # 3000: 1e-05 # 7000: 5e-06, # 10000: 1e-06, # 15000: 5e-07, # 50000: 1e-07 }, do_save_activations=True # auto_reshape=False, # plotter=CentralOutputPlotter # plotter=MDNPlotter ) def exp_a(name): global source source_dict_copy = deepcopy(source_dict) source = RealApplianceSource(**source_dict_copy) net_dict_copy = deepcopy(net_dict) 
net_dict_copy.update(dict( experiment_name=name, source=source )) net_dict_copy['layers_config'] = [ { 'type': BLSTMLayer, 'num_units': 40, 'gradient_steps': GRADIENT_STEPS, 'peepholes': False }, { 'type': DimshuffleLayer, 'pattern': (0, 2, 1) }, { 'type': Conv1DLayer, 'num_filters': 20, 'filter_length': 4, 'stride': 4, 'nonlinearity': rectify }, { 'type': DimshuffleLayer, 'pattern': (0, 2, 1) }, { 'type': BLSTMLayer, 'num_units': 80, 'gradient_steps': GRADIENT_STEPS, 'peepholes': False }, { 'type': DenseLayer, 'num_units': source.n_outputs, 'nonlinearity': T.nnet.softplus } ] net = Net(**net_dict_copy) return net def main(): # EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz') EXPERIMENTS = list('a') for experiment in EXPERIMENTS: full_exp_name = NAME + experiment func_call = init_experiment(PATH, experiment, full_exp_name) logger = logging.getLogger(full_exp_name) try: net = eval(func_call) run_experiment(net, epochs=100000) except KeyboardInterrupt: logger.info("KeyboardInterrupt") break except Exception as exception: logger.exception("Exception") # raise finally: logging.shutdown() if __name__ == "__main__": main()
JackKelly/neuralnilm_prototype
scripts/e367.py
Python
mit
5,841
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap

import pyroms
import pyroms_toolbox


def plot_mask(gridid, Cpos='rho', proj=None, **kwargs):

    # get grid
    if type(gridid).__name__ == 'ROMS_Grid':
        grd = gridid
    else:
        grd = pyroms.grid.get_ROMS_grid(gridid)

    Cpos = str(Cpos)

    # get grid information
    if Cpos == 'rho':
        lon = grd.hgrid.lon_vert
        lat = grd.hgrid.lat_vert
        mask = grd.hgrid.mask_rho
    elif Cpos == 'u':
        lon = 0.5 * (grd.hgrid.lon_vert[:,:-1] + grd.hgrid.lon_vert[:,1:])
        lat = 0.5 * (grd.hgrid.lat_vert[:,:-1] + grd.hgrid.lat_vert[:,1:])
        mask = grd.hgrid.mask_u
    elif Cpos == 'v':
        lon = 0.5 * (grd.hgrid.lon_vert[:-1,:] + grd.hgrid.lon_vert[1:,:])
        lat = 0.5 * (grd.hgrid.lat_vert[:-1,:] + grd.hgrid.lat_vert[1:,:])
        mask = grd.hgrid.mask_v
    else:
        raise ValueError('Cpos must be rho, u or v')

    # define the color map
    land_color = kwargs.pop('land_color', (0.6, 1.0, 0.6))
    sea_color = kwargs.pop('sea_color', (0.6, 0.6, 1.0))
    cm = plt.matplotlib.colors.ListedColormap([land_color, sea_color],
                                              name='land/sea')

    if proj is None:
        plt.pcolor(lon, lat, mask, cmap=cm, vmin=0, vmax=1, \
                   edgecolor='k', **kwargs)
        pyroms_toolbox.plot_coast_line(grd)
    else:
        x, y = proj(lon, lat)
        Basemap.pcolor(proj, x, y, mask, cmap=cm, vmin=0, vmax=1, \
                       edgecolor='k', **kwargs)
        pyroms_toolbox.plot_coast_line(grd, proj=proj)

        lon_min = lon.min()
        lon_max = lon.max()
        lat_min = lat.min()
        lat_max = lat.max()

        proj.drawmeridians(np.arange(lon_min, lon_max,
                                     (lon_max - lon_min) / 5.001),
                           labels=[0, 0, 0, 1], fmt='%.1f')
        proj.drawparallels(np.arange(lat_min, lat_max,
                                     (lat_max - lat_min) / 5.001),
                           labels=[1, 0, 0, 0], fmt='%.1f')
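

# Editor's usage sketch -- not part of the original file. 'CORAL' is a
# placeholder grid id: substitute an id that pyroms.grid.get_ROMS_grid
# knows about on your system (it needs the matching grid file).
if __name__ == '__main__':
    plot_mask('CORAL', Cpos='rho')
    plt.savefig('mask_rho.png')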
kshedstrom/pyroms
pyroms_toolbox/pyroms_toolbox/plot_mask.py
Python
bsd-3-clause
2,034
""" WSGI config for webApp project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/ """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webApp.settings") from django.core.wsgi import get_wsgi_application application = get_wsgi_application()
ctames/conference-host
webApp/wsgi.py
Python
mit
387