| text | repo_name | path | language | license | size | score |
---|---|---|---|---|---|---|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Apple's Speedometer 2 performance benchmark.
"""
import os
import re
from benchmarks import press
from core import path_util
from telemetry import benchmark
from telemetry import story
from page_sets import speedometer2_pages
_SPEEDOMETER_DIR = os.path.join(path_util.GetChromiumSrcDir(),
'third_party', 'blink', 'perf_tests', 'speedometer')
@benchmark.Info(emails=['[email protected]'],
component='Blink')
class Speedometer2(press._PressBenchmark): # pylint: disable=protected-access
"""Speedometer2 Benchmark.
  Runs all the Speedometer 2 suites by default. Add --suite=<regex> to run
  only those suites whose names match the given regular expression.
"""
enable_smoke_test_mode = False
@classmethod
def Name(cls):
return 'speedometer2'
def CreateStorySet(self, options):
should_filter_suites = bool(options.suite)
filtered_suite_names = map(
speedometer2_pages.Speedometer2Story.GetFullSuiteName,
speedometer2_pages.Speedometer2Story.GetSuites(options.suite))
ps = story.StorySet(base_dir=_SPEEDOMETER_DIR)
ps.AddStory(speedometer2_pages.Speedometer2Story(ps, should_filter_suites,
filtered_suite_names, self.enable_smoke_test_mode))
return ps
@classmethod
def AddBenchmarkCommandLineArgs(cls, parser):
parser.add_option('--suite', type="string",
help="Only runs suites that match regex provided")
@classmethod
def ProcessCommandLineArgs(cls, parser, args):
if args.suite:
try:
if not speedometer2_pages.Speedometer2Story.GetSuites(args.suite):
raise parser.error('--suite: No matches.')
except re.error:
raise parser.error('--suite: Invalid regex.')
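# Illustrative invocation only (assumes Chromium's Telemetry runner script;
# the suite regex below is hypothetical):
#   tools/perf/run_benchmark speedometer2 --suite=.*TodoMVC.*
# Only suites whose full names match the given regex are run.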
@benchmark.Info(emails=['[email protected]'],
component='Blink')
class V8Speedometer2Future(Speedometer2):
"""Speedometer2 benchmark with the V8 flag --future.
Shows the performance of upcoming V8 VM features.
"""
@classmethod
def Name(cls):
return 'speedometer2-future'
def SetExtraBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--enable-features=V8VmFuture')
| endlessm/chromium-browser | tools/perf/benchmarks/speedometer2.py | Python | bsd-3-clause | 2,350 | 0.008936 |
#!/usr/bin/env python
#
# Copyright 2014 Institute for Theoretical Information Technology,
# RWTH Aachen University
# www.ti.rwth-aachen.de
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import ofdm_swig as ofdm
class qa_moms_ff (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
self.tb.run ()
# check data
if __name__ == '__main__':
gr_unittest.run(qa_moms_ff, "qa_moms_ff.xml")
| rwth-ti/gr-ofdm | python/ofdm/qa_moms_ff.py | Python | gpl-3.0 | 1,256 | 0.007962 |
import os.path
from robot.errors import DataError
from robot.utils import secs_to_timestr, timestr_to_secs
from selenium import webdriver
from selenium.common.exceptions import NoSuchWindowException
from Selenium2Library import webdrivermonkeypatches
from Selenium2Library.utils import BrowserCache
from Selenium2Library.locators import WindowManager
from .keywordgroup import KeywordGroup
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
FIREFOX_PROFILE_DIR = os.path.join(ROOT_DIR, 'resources', 'firefoxprofile')
BROWSER_NAMES = {'ff': "_make_ff",
'firefox': "_make_ff",
'ie': "_make_ie",
'internetexplorer': "_make_ie",
'googlechrome': "_make_chrome",
'gc': "_make_chrome",
'chrome': "_make_chrome",
'opera' : "_make_opera",
'phantomjs' : "_make_phantomjs",
'htmlunit' : "_make_htmlunit",
'htmlunitwithjs' : "_make_htmlunitwithjs",
'android': "_make_android",
'iphone': "_make_iphone",
'safari': "_make_safari",
'edge': "_make_edge"
}
class _BrowserManagementKeywords(KeywordGroup):
def __init__(self):
self._cache = BrowserCache()
self._window_manager = WindowManager()
self._speed_in_secs = float(0)
self._timeout_in_secs = float(5)
self._implicit_wait_in_secs = float(0)
# Public, open and close
def close_all_browsers(self):
"""Closes all open browsers and resets the browser cache.
After this keyword new indexes returned from `Open Browser` keyword
are reset to 1.
This keyword should be used in test or suite teardown to make sure
all browsers are closed.
"""
self._debug('Closing all browsers')
self._cache.close_all()
def close_browser(self):
"""Closes the current browser."""
if self._cache.current:
self._debug('Closing browser with session id %s'
% self._cache.current.session_id)
self._cache.close()
def open_browser(self, url, browser='firefox', alias=None,remote_url=False,
desired_capabilities=None,ff_profile_dir=None):
"""Opens a new browser instance to given URL.
Returns the index of this browser instance which can be used later to
switch back to it. Index starts from 1 and is reset back to it when
`Close All Browsers` keyword is used. See `Switch Browser` for
example.
Optional alias is an alias for the browser instance and it can be used
for switching between browsers (just as index can be used). See `Switch
Browser` for more details.
Possible values for `browser` are as follows:
| firefox | FireFox |
| ff | FireFox |
| internetexplorer | Internet Explorer |
| ie | Internet Explorer |
| googlechrome | Google Chrome |
| gc | Google Chrome |
| chrome | Google Chrome |
| opera | Opera |
| phantomjs | PhantomJS |
| htmlunit | HTMLUnit |
        | htmlunitwithjs | HTMLUnit with JavaScript support |
| android | Android |
| iphone | Iphone |
| safari | Safari |
| edge | Edge |
        Note that you will encounter strange behavior if you open
        multiple Internet Explorer browser instances. That is also why
        `Switch Browser` only works with one IE browser at most.
For more information see:
http://selenium-grid.seleniumhq.org/faq.html#i_get_some_strange_errors_when_i_run_multiple_internet_explorer_instances_on_the_same_machine
Optional 'remote_url' is the url for a remote selenium server for example
http://127.0.0.1:4444/wd/hub. If you specify a value for remote you can
also specify 'desired_capabilities' which is a string in the form
key1:val1,key2:val2 that will be used to specify desired_capabilities
        to the remote server. This is useful for doing things like specifying a
        proxy server for Internet Explorer, or specifying the browser and OS if
        you're using saucelabs.com. 'desired_capabilities' can also be a dictionary
(created with 'Create Dictionary') to allow for more complex configurations.
Optional 'ff_profile_dir' is the path to the firefox profile dir if you
wish to overwrite the default.
"""
if remote_url:
self._info("Opening browser '%s' to base url '%s' through remote server at '%s'"
% (browser, url, remote_url))
else:
self._info("Opening browser '%s' to base url '%s'" % (browser, url))
browser_name = browser
browser = self._make_browser(browser_name,desired_capabilities,ff_profile_dir,remote_url)
try:
browser.get(url)
except:
self._cache.register(browser, alias)
self._debug("Opened browser with session id %s but failed to open url '%s'"
% (browser.session_id, url))
raise
self._debug('Opened browser with session id %s'
% browser.session_id)
return self._cache.register(browser, alias)
def create_webdriver(self, driver_name, alias=None, kwargs={}, **init_kwargs):
"""Creates an instance of a WebDriver.
Like `Open Browser`, but allows passing arguments to a WebDriver's
__init__. _Open Browser_ is preferred over _Create Webdriver_ when
feasible.
Returns the index of this browser instance which can be used later to
switch back to it. Index starts from 1 and is reset back to it when
`Close All Browsers` keyword is used. See `Switch Browser` for
example.
`driver_name` must be the exact name of a WebDriver in
_selenium.webdriver_ to use. WebDriver names include: Firefox, Chrome,
Ie, Opera, Safari, PhantomJS, and Remote.
Use keyword arguments to specify the arguments you want to pass to
the WebDriver's __init__. The values of the arguments are not
processed in any way before being passed on. For Robot Framework
< 2.8, which does not support keyword arguments, create a keyword
dictionary and pass it in as argument `kwargs`. See the
[http://selenium.googlecode.com/git/docs/api/py/api.html|Selenium API Documentation]
for information about argument names and appropriate argument values.
Examples:
| # use proxy for Firefox | | | |
| ${proxy}= | Evaluate | sys.modules['selenium.webdriver'].Proxy() | sys, selenium.webdriver |
| ${proxy.http_proxy}= | Set Variable | localhost:8888 | |
| Create Webdriver | Firefox | proxy=${proxy} | |
| # use a proxy for PhantomJS | | | |
| ${service args}= | Create List | --proxy=192.168.132.104:8888 | |
| Create Webdriver | PhantomJS | service_args=${service args} | |
Example for Robot Framework < 2.8:
| # debug IE driver | | | |
| ${kwargs}= | Create Dictionary | log_level=DEBUG | log_file=%{HOMEPATH}${/}ie.log |
| Create Webdriver | Ie | kwargs=${kwargs} | |
"""
if not isinstance(kwargs, dict):
raise RuntimeError("kwargs must be a dictionary.")
for arg_name in kwargs:
if arg_name in init_kwargs:
raise RuntimeError("Got multiple values for argument '%s'." % arg_name)
init_kwargs[arg_name] = kwargs[arg_name]
driver_name = driver_name.strip()
try:
creation_func = getattr(webdriver, driver_name)
except AttributeError:
raise RuntimeError("'%s' is not a valid WebDriver name" % driver_name)
self._info("Creating an instance of the %s WebDriver" % driver_name)
driver = creation_func(**init_kwargs)
self._debug("Created %s WebDriver instance with session id %s" % (driver_name, driver.session_id))
return self._cache.register(driver, alias)
def switch_browser(self, index_or_alias):
"""Switches between active browsers using index or alias.
Index is returned from `Open Browser` and alias can be given to it.
Example:
| Open Browser | http://google.com | ff |
| Location Should Be | http://google.com | |
| Open Browser | http://yahoo.com | ie | 2nd conn |
| Location Should Be | http://yahoo.com | |
| Switch Browser | 1 | # index |
| Page Should Contain | I'm feeling lucky | |
| Switch Browser | 2nd conn | # alias |
| Page Should Contain | More Yahoo! | |
| Close All Browsers | | |
        The above example expects that there were no other open browsers when
        the first one was opened, because it uses index '1' when switching to it
        later. If you aren't sure about that, you can store the index in
a variable as below.
| ${id} = | Open Browser | http://google.com | *firefox |
| # Do something ... |
| Switch Browser | ${id} | | |
"""
try:
self._cache.switch(index_or_alias)
self._debug('Switched to browser with Selenium session id %s'
% self._cache.current.session_id)
except (RuntimeError, DataError): # RF 2.6 uses RE, earlier DE
raise RuntimeError("No browser with index or alias '%s' found."
% index_or_alias)
# Public, window management
def close_window(self):
"""Closes currently opened pop-up window."""
self._current_browser().close()
def get_window_identifiers(self):
"""Returns and logs id attributes of all windows known to the browser."""
return self._log_list(self._window_manager.get_window_ids(self._current_browser()))
def get_window_names(self):
"""Returns and logs names of all windows known to the browser."""
values = self._window_manager.get_window_names(self._current_browser())
# for backward compatibility, since Selenium 1 would always
# return this constant value for the main window
if len(values) and values[0] == 'undefined':
values[0] = 'selenium_main_app_window'
return self._log_list(values)
def get_window_titles(self):
"""Returns and logs titles of all windows known to the browser."""
return self._log_list(self._window_manager.get_window_titles(self._current_browser()))
def maximize_browser_window(self):
"""Maximizes current browser window."""
self._current_browser().maximize_window()
def get_window_size(self):
"""Returns current window size as `width` then `height`.
Example:
| ${width} | ${height}= | Get Window Size |
"""
size = self._current_browser().get_window_size()
return size['width'], size['height']
def set_window_size(self, width, height):
"""Sets the `width` and `height` of the current window to the specified values.
Example:
| Set Window Size | ${800} | ${600} |
| ${width} | ${height}= | Get Window Size |
| Should Be Equal | ${width} | ${800} |
| Should Be Equal | ${height} | ${600} |
"""
return self._current_browser().set_window_size(width, height)
def get_window_position(self):
"""Returns current window position as `x` then `y` (relative to the left and top of the screen).
Example:
| ${x} | ${y}= | Get Window Position |
"""
position = self._current_browser().get_window_position()
return position['x'], position['y']
def set_window_position(self, x, y):
"""Sets the position x and y of the current window (relative to the left and top of the screen) to the specified values.
Example:
| Set Window Position | ${8} | ${10} |
| ${x} | ${y}= | Get Window Position |
| Should Be Equal | ${x} | ${8} |
| Should Be Equal | ${y} | ${10} |
"""
return self._current_browser().set_window_position(x, y)
def select_frame(self, locator):
"""Sets frame identified by `locator` as current frame.
        Key attributes for frames are `id` and `name`. See `introduction` for
details about locating elements.
"""
self._info("Selecting frame '%s'." % locator)
element = self._element_find(locator, True, True)
self._current_browser().switch_to_frame(element)
def select_window(self, locator=None):
"""Selects the window matching locator and return previous window handle.
locator: any of name, title, url, window handle, excluded handle's list, or special words.
return: either current window handle before selecting, or None if no current window.
If the window is found, all subsequent commands use that window, until
this keyword is used again. If the window is not found, this keyword fails.
By default, when a locator value is provided,
it is matched against the title of the window and the
        javascript name of the window. If multiple windows with the
        same identifier are found, the first one is selected.
There are some special locators for searching target window:
string 'main' (default): select the main window;
string 'self': only return current window handle;
string 'new': select the last-indexed window assuming it is the newest opened window
window list: select the first window not in given list (See 'List Windows' to get the list)
It is also possible to specify the approach Selenium2Library should take
to find a window by specifying a locator strategy:
| *Strategy* | *Example* | *Description* |
| title | Select Window `|` title=My Document | Matches by window title |
| name | Select Window `|` name=${name} | Matches by window javascript name |
| url | Select Window `|` url=http://google.com | Matches by window's current URL |
Example:
| Click Link | popup_link | # opens new window |
| Select Window | popupName |
| Title Should Be | Popup Title |
| Select Window | | | # Chooses the main window again |
"""
try:
return self._current_browser().get_current_window_handle()
except NoSuchWindowException:
pass
finally:
self._window_manager.select(self._current_browser(), locator)
def list_windows(self):
"""Return all current window handles as a list"""
return self._current_browser().get_window_handles()
def unselect_frame(self):
"""Sets the top frame as the current frame."""
self._current_browser().switch_to_default_content()
# Public, browser/current page properties
def get_location(self):
"""Returns the current location."""
return self._current_browser().get_current_url()
def get_locations(self):
"""Returns and logs current locations of all windows known to the browser."""
return self._log_list(
[window_info[4] for window_info in
self._window_manager._get_window_infos(self._current_browser())]
)
def get_source(self):
"""Returns the entire html source of the current page or frame."""
return self._current_browser().get_page_source()
def get_title(self):
"""Returns title of current page."""
return self._current_browser().get_title()
def location_should_be(self, url):
"""Verifies that current URL is exactly `url`."""
actual = self.get_location()
if actual != url:
raise AssertionError("Location should have been '%s' but was '%s'"
% (url, actual))
self._info("Current location is '%s'." % url)
def location_should_contain(self, expected):
"""Verifies that current URL contains `expected`."""
actual = self.get_location()
if not expected in actual:
raise AssertionError("Location should have contained '%s' "
"but it was '%s'." % (expected, actual))
self._info("Current location contains '%s'." % expected)
def log_location(self):
"""Logs and returns the current location."""
url = self.get_location()
self._info(url)
return url
def log_source(self, loglevel='INFO'):
"""Logs and returns the entire html source of the current page or frame.
The `loglevel` argument defines the used log level. Valid log levels are
WARN, INFO (default), DEBUG, and NONE (no logging).
"""
source = self.get_source()
self._log(source, loglevel.upper())
return source
def log_title(self):
"""Logs and returns the title of current page."""
title = self.get_title()
self._info(title)
return title
def title_should_be(self, title):
"""Verifies that current page title equals `title`."""
actual = self.get_title()
if actual != title:
raise AssertionError("Title should have been '%s' but was '%s'"
% (title, actual))
self._info("Page title is '%s'." % title)
# Public, navigation
def go_back(self):
"""Simulates the user clicking the "back" button on their browser."""
self._current_browser().back()
def go_to(self, url):
"""Navigates the active browser instance to the provided URL."""
self._info("Opening url '%s'" % url)
self._current_browser().get(url)
def reload_page(self):
"""Simulates user reloading page."""
self._current_browser().refresh()
# Public, execution properties
def get_selenium_speed(self):
"""Gets the delay in seconds that is waited after each Selenium command.
See `Set Selenium Speed` for an explanation."""
return secs_to_timestr(self._speed_in_secs)
def get_selenium_timeout(self):
"""Gets the timeout in seconds that is used by various keywords.
See `Set Selenium Timeout` for an explanation."""
return secs_to_timestr(self._timeout_in_secs)
def get_selenium_implicit_wait(self):
"""Gets the wait in seconds that is waited by Selenium.
See `Set Selenium Implicit Wait` for an explanation."""
return secs_to_timestr(self._implicit_wait_in_secs)
def set_selenium_speed(self, seconds):
"""Sets the delay in seconds that is waited after each Selenium command.
This is useful mainly in slowing down the test execution to be able to
view the execution. `seconds` may be given in Robot Framework time
format. Returns the previous speed value.
Example:
| Set Selenium Speed | .5 seconds |
"""
old_speed = self.get_selenium_speed()
self._speed_in_secs = timestr_to_secs(seconds)
for browser in self._cache.browsers:
browser.set_speed(self._speed_in_secs)
return old_speed
def set_selenium_timeout(self, seconds):
"""Sets the timeout in seconds used by various keywords.
There are several `Wait ...` keywords that take timeout as an
argument. All of these timeout arguments are optional. The timeout
used by all of them can be set globally using this keyword.
See `Timeouts` for more information about timeouts.
The previous timeout value is returned by this keyword and can
be used to set the old value back later. The default timeout
is 5 seconds, but it can be altered in `importing`.
Example:
| ${orig timeout} = | Set Selenium Timeout | 15 seconds |
| Open page that loads slowly |
| Set Selenium Timeout | ${orig timeout} |
"""
old_timeout = self.get_selenium_timeout()
self._timeout_in_secs = timestr_to_secs(seconds)
for browser in self._cache.get_open_browsers():
browser.set_script_timeout(self._timeout_in_secs)
return old_timeout
def set_selenium_implicit_wait(self, seconds):
"""Sets Selenium 2's default implicit wait in seconds and
sets the implicit wait for all open browsers.
From selenium 2 function 'Sets a sticky timeout to implicitly
wait for an element to be found, or a command to complete.
This method only needs to be called one time per session.'
Example:
| ${orig wait} = | Set Selenium Implicit Wait | 10 seconds |
| Perform AJAX call that is slow |
| Set Selenium Implicit Wait | ${orig wait} |
"""
old_wait = self.get_selenium_implicit_wait()
self._implicit_wait_in_secs = timestr_to_secs(seconds)
for browser in self._cache.get_open_browsers():
browser.implicitly_wait(self._implicit_wait_in_secs)
return old_wait
def set_browser_implicit_wait(self, seconds):
"""Sets current browser's implicit wait in seconds.
From selenium 2 function 'Sets a sticky timeout to implicitly
wait for an element to be found, or a command to complete.
This method only needs to be called one time per session.'
Example:
| Set Browser Implicit Wait | 10 seconds |
See also `Set Selenium Implicit Wait`.
"""
implicit_wait_in_secs = timestr_to_secs(seconds)
self._current_browser().implicitly_wait(implicit_wait_in_secs)
# Private
def _current_browser(self):
if not self._cache.current:
raise RuntimeError('No browser is open')
return self._cache.current
def _get_browser_creation_function(self, browser_name):
func_name = BROWSER_NAMES.get(browser_name.lower().replace(' ', ''))
return getattr(self, func_name) if func_name else None
def _make_browser(self, browser_name, desired_capabilities=None,
profile_dir=None, remote=None):
creation_func = self._get_browser_creation_function(browser_name)
if not creation_func:
raise ValueError(browser_name + " is not a supported browser.")
browser = creation_func(remote, desired_capabilities, profile_dir)
browser.set_speed(self._speed_in_secs)
browser.set_script_timeout(self._timeout_in_secs)
browser.implicitly_wait(self._implicit_wait_in_secs)
return browser
    def _make_ff(self, remote, desired_capabilities, profile_dir):
        if not profile_dir: profile_dir = FIREFOX_PROFILE_DIR
        profile = webdriver.FirefoxProfile(profile_dir)
        if remote:
            browser = self._create_remote_web_driver(webdriver.DesiredCapabilities.FIREFOX,
                remote, desired_capabilities, profile)
        else:
            browser = webdriver.Firefox(firefox_profile=profile)
        return browser
    def _make_ie(self, remote, desired_capabilities, profile_dir):
        return self._generic_make_browser(webdriver.Ie,
            webdriver.DesiredCapabilities.INTERNETEXPLORER, remote, desired_capabilities)
    def _make_chrome(self, remote, desired_capabilities, profile_dir):
        return self._generic_make_browser(webdriver.Chrome,
            webdriver.DesiredCapabilities.CHROME, remote, desired_capabilities)
    def _make_opera(self, remote, desired_capabilities, profile_dir):
        return self._generic_make_browser(webdriver.Opera,
            webdriver.DesiredCapabilities.OPERA, remote, desired_capabilities)
    def _make_phantomjs(self, remote, desired_capabilities, profile_dir):
        return self._generic_make_browser(webdriver.PhantomJS,
            webdriver.DesiredCapabilities.PHANTOMJS, remote, desired_capabilities)
    def _make_htmlunit(self, remote, desired_capabilities, profile_dir):
        return self._generic_make_browser(webdriver.Remote,
            webdriver.DesiredCapabilities.HTMLUNIT, remote, desired_capabilities)
    def _make_htmlunitwithjs(self, remote, desired_capabilities, profile_dir):
        return self._generic_make_browser(webdriver.Remote,
            webdriver.DesiredCapabilities.HTMLUNITWITHJS, remote, desired_capabilities)
    def _make_android(self, remote, desired_capabilities, profile_dir):
        return self._generic_make_browser(webdriver.Remote,
            webdriver.DesiredCapabilities.ANDROID, remote, desired_capabilities)
    def _make_iphone(self, remote, desired_capabilities, profile_dir):
        return self._generic_make_browser(webdriver.Remote,
            webdriver.DesiredCapabilities.IPHONE, remote, desired_capabilities)
    def _make_safari(self, remote, desired_capabilities, profile_dir):
        return self._generic_make_browser(webdriver.Safari,
            webdriver.DesiredCapabilities.SAFARI, remote, desired_capabilities)
    def _make_edge(self, remote, desired_capabilities, profile_dir):
        if hasattr(webdriver, 'Edge'):
            return self._generic_make_browser(webdriver.Edge,
                webdriver.DesiredCapabilities.EDGE, remote, desired_capabilities)
        else:
            raise ValueError("Edge is not a supported browser with your version of Selenium python library. Please, upgrade to minimum required version 2.47.0.")
def _generic_make_browser(self, webdriver_type , desired_cap_type, remote_url, desired_caps):
        '''Most of the _make_* browser functions just call this function, which
        creates the appropriate WebDriver.'''
if not remote_url:
browser = webdriver_type()
else:
browser = self._create_remote_web_driver(desired_cap_type,remote_url , desired_caps)
return browser
def _create_remote_web_driver(self , capabilities_type , remote_url , desired_capabilities=None , profile=None):
        '''Parses the string-based desired_capabilities if necessary and
        creates the associated remote web driver.'''
desired_capabilities_object = capabilities_type.copy()
if type(desired_capabilities) in (str, unicode):
desired_capabilities = self._parse_capabilities_string(desired_capabilities)
desired_capabilities_object.update(desired_capabilities or {})
return webdriver.Remote(desired_capabilities=desired_capabilities_object,
command_executor=str(remote_url), browser_profile=profile)
def _parse_capabilities_string(self, capabilities_string):
        '''Parses the string-based desired_capabilities, which should be in the
        form key1:val1,key2:val2
        '''
desired_capabilities = {}
if not capabilities_string:
return desired_capabilities
for cap in capabilities_string.split(","):
(key, value) = cap.split(":", 1)
desired_capabilities[key.strip()] = value.strip()
return desired_capabilities
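        # Hand-traced example (not from the library's docs):
        #   _parse_capabilities_string("platform:Windows 8,browserName:chrome")
        #   returns {'platform': 'Windows 8', 'browserName': 'chrome'}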
| Gaurang033/Selenium2Library | src/Selenium2Library/keywords/_browsermanagement.py | Python | apache-2.0 | 28,305 | 0.004911 |
# python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base classes for GP testbed."""
import abc
from typing import Any, Dict, NamedTuple, Optional
import chex
import dataclasses
import typing_extensions
# Maybe this Data class needs to be a tf.Dataset
class Data(NamedTuple):
x: chex.Array
y: chex.Array
@dataclasses.dataclass
class PriorKnowledge:
input_dim: int
num_train: int
num_classes: int = 1
layers: Optional[int] = None
noise_std: Optional[float] = None
temperature: Optional[float] = None
extra: Optional[Dict[str, Any]] = None
@dataclasses.dataclass
class ENNQuality:
kl_estimate: float
extra: Optional[Dict[str, Any]] = None
class EpistemicSampler(typing_extensions.Protocol):
"""Interface for drawing posterior samples from distribution.
We are considering a model of data: y_i = f(x_i) + e_i.
In this case the sampler should only model f(x), not aleatoric y.
"""
def __call__(self, x: chex.Array, seed: int = 0) -> chex.Array:
"""Generate a random sample for epistemic f(x)."""
class TestbedAgent(typing_extensions.Protocol):
"""An interface for specifying a testbed agent."""
def __call__(self,
data: Data,
prior: Optional[PriorKnowledge] = None) -> EpistemicSampler:
"""Sets up a training procedure given ENN prior knowledge."""
class TestbedProblem(abc.ABC):
"""An interface for specifying a generative GP model of data."""
@abc.abstractproperty
def train_data(self) -> Data:
"""Access training data from the GP for ENN training."""
@abc.abstractmethod
def evaluate_quality(self, enn_sampler: EpistemicSampler) -> ENNQuality:
"""Evaluate the quality of a posterior sampler."""
@abc.abstractproperty
def prior_knowledge(self) -> PriorKnowledge:
"""Information describing the problem instance."""
| deepmind/enn | enn/experiments/neurips_2021/base.py | Python | apache-2.0 | 2,522 | 0.00912 |
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
The models defined in this file represent the resource JSON description
format and provide a layer of abstraction from the raw JSON. The advantages
of this are:
* Pythonic interface (e.g. ``action.request.operation``)
* Consumers need not change for minor JSON changes (e.g. renamed field)
These models are used both by the resource factory to generate resource
classes as well as by the documentation generator.
"""
import logging
from botocore import xform_name
logger = logging.getLogger(__name__)
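# Hand-written sketch (not copied from any real service definition) of the
# resource JSON shape these models wrap. An Action built from "Delete" below
# would expose request.operation == 'DeleteQueue' and one auto-filled
# Parameter(target='QueueUrl', source='identifier', name='Url'):
#
#   {
#       "Queue": {
#           "identifiers": [{"name": "Url"}],
#           "actions": {
#               "Delete": {
#                   "request": {
#                       "operation": "DeleteQueue",
#                       "params": [{"target": "QueueUrl",
#                                   "source": "identifier",
#                                   "name": "Url"}]
#                   }
#               }
#           }
#       }
#   }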
class Identifier(object):
"""
A resource identifier, given by its name.
:type name: string
:param name: The name of the identifier
"""
def __init__(self, name, member_name=None):
#: (``string``) The name of the identifier
self.name = name
self.member_name = member_name
class Action(object):
"""
A service operation action.
:type name: string
:param name: The name of the action
:type definition: dict
:param definition: The JSON definition
:type resource_defs: dict
:param resource_defs: All resources defined in the service
"""
def __init__(self, name, definition, resource_defs):
self._definition = definition
#: (``string``) The name of the action
self.name = name
#: (:py:class:`Request`) This action's request or ``None``
self.request = None
if 'request' in definition:
self.request = Request(definition.get('request', {}))
#: (:py:class:`ResponseResource`) This action's resource or ``None``
self.resource = None
if 'resource' in definition:
self.resource = ResponseResource(definition.get('resource', {}),
resource_defs)
#: (``string``) The JMESPath search path or ``None``
self.path = definition.get('path')
class DefinitionWithParams(object):
"""
An item which has parameters exposed via the ``params`` property.
A request has an operation and parameters, while a waiter has
a name, a low-level waiter name and parameters.
:type definition: dict
:param definition: The JSON definition
"""
def __init__(self, definition):
self._definition = definition
@property
def params(self):
"""
Get a list of auto-filled parameters for this request.
:type: list(:py:class:`Parameter`)
"""
params = []
for item in self._definition.get('params', []):
params.append(Parameter(**item))
return params
class Parameter(object):
"""
An auto-filled parameter which has a source and target. For example,
the ``QueueUrl`` may be auto-filled from a resource's ``url`` identifier
when making calls to ``queue.receive_messages``.
:type target: string
:param target: The destination parameter name, e.g. ``QueueUrl``
:type source_type: string
:param source_type: Where the source is defined.
:type source: string
:param source: The source name, e.g. ``Url``
"""
def __init__(self, target, source, name=None, path=None, value=None,
**kwargs):
#: (``string``) The destination parameter name
self.target = target
#: (``string``) Where the source is defined
self.source = source
#: (``string``) The name of the source, if given
self.name = name
#: (``string``) The JMESPath query of the source
self.path = path
#: (``string|int|float|bool``) The source constant value
self.value = value
# Complain if we encounter any unknown values.
if kwargs:
logger.warning('Unknown parameter options found: %s', kwargs)
class Request(DefinitionWithParams):
"""
A service operation action request.
:type definition: dict
:param definition: The JSON definition
"""
def __init__(self, definition):
super(Request, self).__init__(definition)
#: (``string``) The name of the low-level service operation
self.operation = definition.get('operation')
class Waiter(DefinitionWithParams):
"""
An event waiter specification.
:type name: string
:param name: Name of the waiter
:type definition: dict
:param definition: The JSON definition
"""
PREFIX = 'WaitUntil'
def __init__(self, name, definition):
super(Waiter, self).__init__(definition)
#: (``string``) The name of this waiter
self.name = name
#: (``string``) The name of the underlying event waiter
self.waiter_name = definition.get('waiterName')
class ResponseResource(object):
"""
A resource response to create after performing an action.
:type definition: dict
:param definition: The JSON definition
:type resource_defs: dict
:param resource_defs: All resources defined in the service
"""
def __init__(self, definition, resource_defs):
self._definition = definition
self._resource_defs = resource_defs
#: (``string``) The name of the response resource type
self.type = definition.get('type')
#: (``string``) The JMESPath search query or ``None``
self.path = definition.get('path')
@property
def identifiers(self):
"""
A list of resource identifiers.
:type: list(:py:class:`Identifier`)
"""
identifiers = []
for item in self._definition.get('identifiers', []):
identifiers.append(
Parameter(**item))
return identifiers
@property
def model(self):
"""
Get the resource model for the response resource.
:type: :py:class:`ResourceModel`
"""
return ResourceModel(self.type, self._resource_defs[self.type],
self._resource_defs)
class Collection(Action):
"""
A group of resources. See :py:class:`Action`.
:type name: string
:param name: The name of the collection
:type definition: dict
:param definition: The JSON definition
:type resource_defs: dict
:param resource_defs: All resources defined in the service
"""
@property
def batch_actions(self):
"""
Get a list of batch actions supported by the resource type
contained in this action. This is a shortcut for accessing
the same information through the resource model.
:rtype: list(:py:class:`Action`)
"""
return self.resource.model.batch_actions
class ResourceModel(object):
"""
A model representing a resource, defined via a JSON description
format. A resource has identifiers, attributes, actions,
sub-resources, references and collections. For more information
on resources, see :ref:`guide_resources`.
:type name: string
:param name: The name of this resource, e.g. ``sqs`` or ``Queue``
:type definition: dict
:param definition: The JSON definition
:type resource_defs: dict
:param resource_defs: All resources defined in the service
"""
def __init__(self, name, definition, resource_defs):
self._definition = definition
self._resource_defs = resource_defs
self._renamed = {}
#: (``string``) The name of this resource
self.name = name
#: (``string``) The service shape name for this resource or ``None``
self.shape = definition.get('shape')
def load_rename_map(self, shape=None):
"""
Load a name translation map given a shape. This will set
up renamed values for any collisions, e.g. if the shape,
an action, and a subresource all are all named ``foo``
then the resource will have an action ``foo``, a subresource
named ``Foo`` and a property named ``foo_attribute``.
This is the order of precedence, from most important to
least important:
* Load action (resource.load)
* Identifiers
* Actions
* Subresources
* References
* Collections
* Waiters
* Attributes (shape members)
Batch actions are only exposed on collections, so do not
get modified here. Subresources use upper camel casing, so
are unlikely to collide with anything but other subresources.
Creates a structure like this::
renames = {
('action', 'id'): 'id_action',
('collection', 'id'): 'id_collection',
('attribute', 'id'): 'id_attribute'
}
# Get the final name for an action named 'id'
name = renames.get(('action', 'id'), 'id')
:type shape: botocore.model.Shape
:param shape: The underlying shape for this resource.
"""
# Meta is a reserved name for resources
names = set(['meta'])
self._renamed = {}
if self._definition.get('load'):
names.add('load')
for item in self._definition.get('identifiers', []):
self._load_name_with_category(names, item['name'], 'identifier')
for name in self._definition.get('actions', {}):
self._load_name_with_category(names, name, 'action')
for name, ref in self._get_has_definition().items():
# Subresources require no data members, just typically
# identifiers and user input.
data_required = False
for identifier in ref['resource']['identifiers']:
if identifier['source'] == 'data':
data_required = True
break
if not data_required:
self._load_name_with_category(names, name, 'subresource',
snake_case=False)
else:
self._load_name_with_category(names, name, 'reference')
for name in self._definition.get('hasMany', {}):
self._load_name_with_category(names, name, 'collection')
for name in self._definition.get('waiters', {}):
self._load_name_with_category(names, Waiter.PREFIX + name,
'waiter')
if shape is not None:
for name in shape.members.keys():
self._load_name_with_category(names, name, 'attribute')
def _load_name_with_category(self, names, name, category,
snake_case=True):
"""
Load a name with a given category, possibly renaming it
if that name is already in use. The name will be stored
in ``names`` and possibly be set up in ``self._renamed``.
:type names: set
:param names: Existing names (Python attributes, properties, or
methods) on the resource.
:type name: string
:param name: The original name of the value.
:type category: string
:param category: The value type, such as 'identifier' or 'action'
:type snake_case: bool
:param snake_case: True (default) if the name should be snake cased.
"""
if snake_case:
name = xform_name(name)
if name in names:
logger.debug('Renaming %s %s %s' % (self.name, category, name))
self._renamed[(category, name)] = name + '_' + category
name += '_' + category
if name in names:
# This isn't good, let's raise instead of trying to keep
# renaming this value.
raise ValueError('Problem renaming {0} {1} to {2}!'.format(
self.name, category, name))
names.add(name)
def _get_name(self, category, name, snake_case=True):
"""
Get a possibly renamed value given a category and name. This
uses the rename map set up in ``load_rename_map``, so that
method must be called once first.
:type category: string
:param category: The value type, such as 'identifier' or 'action'
:type name: string
:param name: The original name of the value
:type snake_case: bool
:param snake_case: True (default) if the name should be snake cased.
:rtype: string
:return: Either the renamed value if it is set, otherwise the
original name.
"""
if snake_case:
name = xform_name(name)
return self._renamed.get((category, name), name)
def get_attributes(self, shape):
"""
Get a dictionary of attribute names to original name and shape
models that represent the attributes of this resource. Looks
like the following:
{
'some_name': ('SomeName', <Shape...>)
}
:type shape: botocore.model.Shape
:param shape: The underlying shape for this resource.
:rtype: dict
:return: Mapping of resource attributes.
"""
attributes = {}
identifier_names = [i.name for i in self.identifiers]
for name, member in shape.members.items():
snake_cased = xform_name(name)
if snake_cased in identifier_names:
# Skip identifiers, these are set through other means
continue
snake_cased = self._get_name('attribute', snake_cased,
snake_case=False)
attributes[snake_cased] = (name, member)
return attributes
@property
def identifiers(self):
"""
Get a list of resource identifiers.
:type: list(:py:class:`Identifier`)
"""
identifiers = []
for item in self._definition.get('identifiers', []):
name = self._get_name('identifier', item['name'])
member_name = item.get('memberName', None)
if member_name:
member_name = self._get_name('attribute', member_name)
identifiers.append(Identifier(name, member_name))
return identifiers
@property
def load(self):
"""
Get the load action for this resource, if it is defined.
:type: :py:class:`Action` or ``None``
"""
action = self._definition.get('load')
if action is not None:
action = Action('load', action, self._resource_defs)
return action
@property
def actions(self):
"""
Get a list of actions for this resource.
:type: list(:py:class:`Action`)
"""
actions = []
for name, item in self._definition.get('actions', {}).items():
name = self._get_name('action', name)
actions.append(Action(name, item, self._resource_defs))
return actions
@property
def batch_actions(self):
"""
Get a list of batch actions for this resource.
:type: list(:py:class:`Action`)
"""
actions = []
for name, item in self._definition.get('batchActions', {}).items():
name = self._get_name('batch_action', name)
actions.append(Action(name, item, self._resource_defs))
return actions
def _get_has_definition(self):
"""
Get a ``has`` relationship definition from a model, where the
service resource model is treated special in that it contains
a relationship to every resource defined for the service. This
allows things like ``s3.Object('bucket-name', 'key')`` to
work even though the JSON doesn't define it explicitly.
:rtype: dict
:return: Mapping of names to subresource and reference
definitions.
"""
if self.name not in self._resource_defs:
# This is the service resource, so let us expose all of
# the defined resources as subresources.
definition = {}
for name, resource_def in self._resource_defs.items():
# It's possible for the service to have renamed a
# resource or to have defined multiple names that
# point to the same resource type, so we need to
# take that into account.
found = False
has_items = self._definition.get('has', {}).items()
for has_name, has_def in has_items:
if has_def.get('resource', {}).get('type') == name:
definition[has_name] = has_def
found = True
if not found:
# Create a relationship definition and attach it
# to the model, such that all identifiers must be
# supplied by the user. It will look something like:
#
# {
# 'resource': {
# 'type': 'ResourceName',
# 'identifiers': [
# {'target': 'Name1', 'source': 'input'},
# {'target': 'Name2', 'source': 'input'},
# ...
# ]
# }
# }
#
fake_has = {
'resource': {
'type': name,
'identifiers': []
}
}
for identifier in resource_def.get('identifiers', []):
fake_has['resource']['identifiers'].append({
'target': identifier['name'], 'source': 'input'
})
definition[name] = fake_has
else:
definition = self._definition.get('has', {})
return definition
def _get_related_resources(self, subresources):
"""
Get a list of sub-resources or references.
:type subresources: bool
:param subresources: ``True`` to get sub-resources, ``False`` to
get references.
:rtype: list(:py:class:`ResponseResource`)
"""
resources = []
for name, definition in self._get_has_definition().items():
if subresources:
name = self._get_name('subresource', name, snake_case=False)
else:
name = self._get_name('reference', name)
action = Action(name, definition, self._resource_defs)
data_required = False
for identifier in action.resource.identifiers:
if identifier.source == 'data':
data_required = True
break
if subresources and not data_required:
resources.append(action)
elif not subresources and data_required:
resources.append(action)
return resources
@property
def subresources(self):
"""
Get a list of sub-resources.
:type: list(:py:class`ResponseResource`)
"""
return self._get_related_resources(True)
@property
def references(self):
"""
Get a list of reference resources.
:type: list(:py:class:`ResponseResource`)
"""
return self._get_related_resources(False)
@property
def collections(self):
"""
Get a list of collections for this resource.
:type: list(:py:class:`Collection`)
"""
collections = []
for name, item in self._definition.get('hasMany', {}).items():
name = self._get_name('collection', name)
collections.append(Collection(name, item, self._resource_defs))
return collections
@property
def waiters(self):
"""
Get a list of waiters for this resource.
:type: list(:py:class:`Waiter`)
"""
waiters = []
for name, item in self._definition.get('waiters', {}).items():
name = self._get_name('waiter', Waiter.PREFIX + name)
waiters.append(Waiter(name, item))
return waiters
| morissette/devopsdays-hackathon-2016 | venv/lib/python2.7/site-packages/boto3/resources/model.py | Python | gpl-3.0 | 20,675 | 0.000048 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Create a TCP-connection-based socket
import socket
# Create a socket.
# AF_INET selects the IPv4 protocol; use AF_INET6 for IPv6.
# SOCK_STREAM selects the stream-oriented TCP protocol.
# At this point the socket object is created, but no connection exists yet.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Establish the connection; the argument is a tuple of (address, port)
s.connect(('www.baidu.com', 80))
# A TCP connection is a two-way channel; who sends first and who answers is
# decided by the specific protocol. HTTP requires the client to send a
# request first, and only then does the server return data to the client.
s.send('GET / HTTP/1.1\r\nHost: www.baidu.com\r\nConnection: close\r\n\r\n')
# The request must conform to the HTTP standard; if the format is right,
# the server's response can then be received.
buffer = []
while True:
    d = s.recv(1024)  # receive at most 1K bytes per call
    if d:
        buffer.append(d)
    else:
        break
data = ''.join(buffer)
# Close the socket once all data has been received
s.close()
# The received data contains both the HTTP header and the page itself; split
# them apart, print the header, and save the page content to a file.
header, html = data.split('\r\n\r\n', 1)
print header
# Write the received page to a file
#with open('baidu.html', 'wb') as f:
#    f.write(html)
# Client program for tcp_server.py
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Establish the connection
s.connect(('127.0.0.1', 9999))
# Receive the welcome message
print s.recv(1024)
for data in ['Michael', 'Tracy', 'Sarah']:
    s.send(data)  # send data
    print s.recv(1024)
s.send('exit')
s.close()
| yddgit/hello-python | network/tcp_client.py | Python | apache-2.0 | 1,642 | 0.003584 |
import time
import logging
from autotest.client.shared import error
from virttest import utils_test
from generic.tests import file_transfer
def run(test, params, env):
"""
live_snapshot test:
1). Create live snapshot during big file creating
2). Create live snapshot when guest reboot
3). Check if live snapshot is created
4). Shutdown guest
:param test: Kvm test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment.
"""
@error.context_aware
def create_snapshot(vm):
"""
Create live snapshot:
1). Check which monitor is used
2). Get device info
3). Create snapshot
"""
error.context("Creating live snapshot ...", logging.info)
block_info = vm.monitor.info("block")
if vm.monitor.protocol == 'qmp':
device = block_info[0]["device"]
else:
device = "".join(block_info).split(":")[0]
snapshot_name = params.get("snapshot_name")
format = params.get("snapshot_format", "qcow2")
vm.monitor.live_snapshot(device, snapshot_name, format)
logging.info("Check snapshot is created ...")
snapshot_info = str(vm.monitor.info("block"))
if snapshot_name not in snapshot_info:
logging.error(snapshot_info)
raise error.TestFail("Snapshot doesn't exist")
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
dd_timeout = int(params.get("dd_timeout", 900))
session = vm.wait_for_login(timeout=timeout)
def runtime_test():
try:
clean_cmd = params.get("clean_cmd")
file_create = params.get("file_create")
clean_cmd += " %s" % file_create
logging.info("Clean file before creation")
session.cmd(clean_cmd)
logging.info("Creating big file...")
create_cmd = params.get("create_cmd") % file_create
args = (create_cmd, dd_timeout)
bg = utils_test.BackgroundTest(session.cmd_output, args)
bg.start()
time.sleep(5)
create_snapshot(vm)
if bg.is_alive():
try:
bg.join()
except Exception:
raise
finally:
session.close()
def reboot_test():
try:
bg = utils_test.BackgroundTest(vm.reboot, (session,))
logging.info("Rebooting guest ...")
bg.start()
sleep_time = int(params.get("sleep_time"))
time.sleep(sleep_time)
create_snapshot(vm)
finally:
bg.join()
def file_transfer_test():
try:
bg_cmd = file_transfer.run_file_transfer
args = (test, params, env)
bg = utils_test.BackgroundTest(bg_cmd, args)
bg.start()
sleep_time = int(params.get("sleep_time"))
time.sleep(sleep_time)
create_snapshot(vm)
if bg.is_alive():
try:
bg.join()
except Exception:
raise
finally:
session.close()
subcommand = params.get("subcommand")
eval("%s_test()" % subcommand)
| ypu/tp-qemu | qemu/tests/live_snapshot.py | Python | gpl-2.0 | 3,334 | 0 |
import os
TOP_STORIES_KEY = b'top_30'
TITLE = 'title'
URL = 'url'
BODY = 'body'
SENTENCES = 'sentences'
HACKER_NEWS_ID = 'hn_id'
TEXT = 'text'
DATE_FOUND = 'date_found'
AYLIEN_ID = 'AYLIENID'
AYLIEN_KEY = 'AYLIENKEY'
REDIS_HOST = 'REDIS_HOST'
REDIS_PORT = 'REDIS_PORT'
REDIS_PASS = 'REDIS_PASS'
def get_environment():
return {
AYLIEN_ID: os.environ.get(AYLIEN_ID),
AYLIEN_KEY: os.environ.get(AYLIEN_KEY),
REDIS_HOST: os.environ.get(REDIS_HOST),
        REDIS_PORT: int(os.environ.get(REDIS_PORT, 0)),  # default to 0 if unset
REDIS_PASS: os.environ.get(REDIS_PASS),
}
| Bachmann1234/hn-tldr | constants.py | Python | apache-2.0 | 586 | 0 |
#!/usr/bin/env python3
# Fortwrangler is a tool that attempts to resolve issues with Fortran lines over the standard length.
# Global libraries
import sys
# Global variables
# Strings inserted for continuation
CONTINUATION_ENDLINE = "&\n"
CONTINUATION_STARTLINE = " &"
# Line length settings
MIN_LENGTH = len(CONTINUATION_STARTLINE) + len(CONTINUATION_ENDLINE) + 1
FIXED_LINE_LENGTH = 80 # We don't actually do fixed format files, but I prefer 80 col anyway.
FREE_LINE_LENGTH = 132
DEFAULT_LINE_LENGTH = FREE_LINE_LENGTH
# I/O settings
STDERR = sys.stderr
STDOUT = sys.stdout
# We can't use Python's string splitter as we want to handle string literals properly.
def string_split(s, sep=" "):
inquotes=False
retlist = []
token = ""
for character in s.strip():
if character == sep and not inquotes:
if not (token == ""):
token = token + sep
retlist.append(token)
token = ""
else:
token = token + character
elif character == '"' and not inquotes:
inquotes = True
token = token + character
elif character == '"' and inquotes:
inquotes = False
token = token + character
else:
token = token + character
if not (token == ""):
retlist.append(token)
return retlist
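# Hand-traced example: string_split('print "a b" x') returns
#   ['print ', '"a b" ', 'x']
# Tokens keep their trailing separator, and the quoted literal stays intact.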
# Fix a given file.
def force_fix_file(filename, maxlength=DEFAULT_LINE_LENGTH, output=STDOUT):
with open(filename) as infile:
for line in infile:
if len(line) > maxlength + 1:
                # Capture the line's leading whitespace so continuations keep
                # the indent; the slice length equals len(line) - len(line.lstrip()).
                tempstr = line[:(len(line) - (len(line.lstrip()) - 1) - 1)]
tokens = string_split(line)
index = 0
for t in tokens:
if t == "!":
# Comments can be longer because the compiler just ignores them.
tempstr = tempstr + " ".join(tokens[index:len(tokens)])
break
else:
if (len(tempstr + t + " " + CONTINUATION_ENDLINE)) < maxlength + 1:
tempstr = tempstr + t + " "
else:
if (t.startswith('"') and t.endswith('"')):
tempstr = tempstr + t + " "
while (len(tempstr) > maxlength + 1):
outstr = tempstr[:(maxlength-1)] + CONTINUATION_ENDLINE
output.write(outstr)
tempstr = CONTINUATION_STARTLINE + tempstr[(maxlength-1):]
output.write(tempstr)
tempstr=""
else:
output.write(tempstr + " " + CONTINUATION_ENDLINE)
tempstr=CONTINUATION_STARTLINE + " " + t + " "
index += 1
output.write(tempstr + "\n")
else:
output.write(line)
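# Net effect: an over-length statement is split at token boundaries; each
# emitted piece ends with CONTINUATION_ENDLINE ("&\n") and each continuation
# line begins with CONTINUATION_STARTLINE, which is how free-format Fortran
# marks continued lines. Over-length comments are passed through unchanged,
# since compilers ignore them.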
# Only fix files if the violate the length rules!
def fix_file(filename, maxlength=DEFAULT_LINE_LENGTH, output=STDOUT):
if not check_file(filename):
force_fix_file(filename, maxlength, output)
else:
STDERR.write(filename + " not over line length, not modifying\n")
# Check to see if a file has lines longer than allowed, optionally report.
def check_file(filename, maxlength=DEFAULT_LINE_LENGTH, report=None):
overlengthlines = {}
counter = 0
with open(filename) as f:
for line in f:
counter += 1
if (len(line)) > maxlength + 1: # New lines count in Python line length.
overlengthlines[counter] = len(line)
if report != None:
report.write(filename + ": " + str(len(overlengthlines)) + "\n")
for a in sorted(overlengthlines.keys()):
report.write(str(a) + ": " + str(overlengthlines[a]) + "\n")
return len(overlengthlines) == 0
# Our main procedure.
# Arguments at the command-line:
# -o <file> - write out to file instead of stdout
# -i <extension> - do in place
# -c - check only
# -w <number> - set line length
def main():
import argparse
#check_file("example.f90", report=STDERR)
#fix_file("example.f")
maxlength = DEFAULT_LINE_LENGTH
output = STDOUT
parser = argparse.ArgumentParser(description="Fix free format Fortran files with invalid line lengths.")
parser.add_argument("-c", action="store_true", help="Check only.")
parser.add_argument("-i", metavar="ext", type=str, help="Do in place, back up copy with extension specified.")
parser.add_argument("-w", metavar="linelength", type=int, help="Custom line length.")
parser.add_argument("-o", metavar="outputfilename", type=str, help="Output to a file instead of STDOUT.")
parser.add_argument("files", metavar="file", type=str, nargs="+",help="Files to fix.")
args=parser.parse_args()
if args.w != None:
if args.w >= MIN_LENGTH:
maxlength = args.w
else:
STDERR.write("Error - you have specified a length [" + str(args.w) + "] smaller than the minimum possible ["+ str(MIN_LENGTH) + "]\n")
sys.exit(2)
if args.o and args.i:
STDERR.write("Error - you cannot both write output to a separate file and write it in place.\n")
sys.exit(1)
else:
if args.o != None:
outfile = open(args.o, 'w')
output = outfile
if args.c:
for a in args.files:
check_file(a, maxlength=maxlength, report=output)
elif args.i != None:
import os
for a in args.files:
if not check_file(a):
STDERR.write("Fixing file: " + a + "\n")
os.rename(a, a + args.i)
inplacefile = open(a, 'w')
force_fix_file(a + args.i, maxlength=maxlength, output=inplacefile)
inplacefile.close()
else:
for a in args.files:
fix_file(a, maxlength=maxlength, output=output)
if args.o != None:
outfile.close()
if __name__ == "__main__":
main() | owainkenwayucl/utils | src/fortwrangler.py | Python | mit | 6,251 | 0.007359 |
#**********************************************************************
# CS 325 - Project Group 9
# Joshua Atchley
# Aalon Cole
# Patrick Kilgore
#
# Project - Solving the Travelling Salesman Problem with Approximation
#
# Algorithm - Simulated Annealing as described in:
# Hansen, Per Brinch, "Simulated Annealing" (1992). Electrical
# Engineering and Computer Science Technical Reports. Paper 170.
# http://surface.syr.edu/eecs_techreports/170
#**********************************************************************
import math
import sys
import time
import random
from timeit import default_timer as timer
class City:
def __init__(self, number, xc, yc):
self.cNum = number
self.x = xc
self.y = yc
def distanceTo(self, endpoint):
xdiff = endpoint.x - self.x
ydiff = endpoint.y - self.y
dist = math.sqrt(xdiff*xdiff + ydiff*ydiff)
return int(round(dist))
def tourLength(tourArray):
n = len(tourArray)
dSum = tourArray[n-1].distanceTo(tourArray[0])
for i in range(n-1):
dSum += tourArray[i].distanceTo(tourArray[i+1])
return dSum
def initTour(inFile):
cities = []
for line in inFile:
if line != "":
cParams = [int(n) for n in line.split()]
cities.append(City(cParams[0], cParams[1], cParams[2]))
return cities
def anneal(tour, Tmax, alpha, steps, attempts, changes, startTime):
temp = Tmax
for k in range(steps):
        # Loop for a fixed number of cooling steps rather than until the
        # temperature underflows (previously: while temp > 1e-6).
print("Temperature = {}, Tour Length = {}, Time Elapsed = {}".format(temp, tourLength(tour), timer() - startTime))
tour = tSearch(tour, temp, attempts, changes)
temp *= alpha
return tour
def tSearch(tour, temp, attempts, changes):
nAtt = 0
nChg = 0
while nAtt < attempts and nChg < changes:
# tSelect will return the tuple ci, cj, dE
selectionTuple = tSelect(tour)
if accept(selectionTuple[2], temp):
tour = tChange(tour, selectionTuple[0], selectionTuple[1])
nChg += 1
nAtt += 1
if nAtt >= attempts:
print("Max number of attempts reached, cooling...")
if nChg >= changes:
print("Max number of tour changes reached, cooling...")
return tour
def tSelect(tour):
# pick random cities in tour
ci = random.randint(0, len(tour) - 1)
cj = random.randint(0, len(tour) - 1)
# find the cities directly after ci and cj
cinx = (ci + 1) % len(tour)
cjnx = (cj + 1) % len(tour)
# calculate energy change , i.e. tour length change, for reversing the sequence
# between ci and cj
if ci != cj:
dE = (tour[ci].distanceTo(tour[cj]) + tour[cinx].distanceTo(tour[cjnx])
- tour[ci].distanceTo(tour[cinx]) - tour[cj].distanceTo(tour[cjnx]))
else:
dE = 0
return ci, cj, float(dE)
def accept(dE, temp):
if dE > 0:
acceptance = (math.exp(-dE / temp) > random.random())
else:
acceptance = True
return acceptance
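# accept() implements the Metropolis criterion: moves that lengthen the tour
# (dE > 0) pass with probability exp(-dE / temp); all other moves always pass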
def tChange(tour, ci, cj):
n = len(tour)
# snippet does not wrap around end of list
if ci < cj:
tSnip = tour[(ci+1):(cj+1)]
rSnip = list(reversed(tSnip))
tour[(ci + 1):(cj + 1)] = rSnip[:]
else:
# the snippet wraps around the end of the list, so ninjutsu is needed...
tSnip = tour[(ci+1):] + tour[:(cj+1)]
rSnip = list(reversed(tSnip))
divider = len(tour[(ci+1):])
tour[(ci+1):] = rSnip[:divider]
tour[:(cj + 1)] = rSnip[divider:]
return tour
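# tChange is a 2-opt move: reversing the segment between ci and cj swaps the
# edges (ci, ci+1) and (cj, cj+1) for (ci, cj) and (ci+1, cj+1), which is
# exactly the energy change dE computed in tSelect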
def main():
    random.seed(time.time())  # time.clock() is deprecated and removed in Python 3.8
# set up I/O files
inputFileName = str(sys.argv[1])
#inputFileName = sys.path[0] + "/tsp_example_3.txt"
inputFile = open(inputFileName, 'r')
outputFileName = inputFileName + ".tour"
outputFile = open(outputFileName, 'w')
# the cityTour list will hold the current tour sequence
cityTour = initTour(inputFile)
random.shuffle(cityTour)
# initialize simulation parameters per recommendations by Hansen
n = len(cityTour)
Tmax = round(math.sqrt(n))
alpha = 0.95
steps = 20 * int(round(math.log1p(n + 1)))
attempts = 100 * n
changes = 10 * n
# call the annealing function with the defined parameters
startTime = timer()
cityTour = anneal(cityTour, Tmax, alpha, steps, attempts, changes, startTime)
end = timer()
print("Algorithm ran in {} seconds".format(end - startTime))
# write output file
outputFile.write(str(tourLength(cityTour)) + '\n')
for k in range(n):
outstring = str(cityTour[k].cNum) + '\n'
outputFile.write(outstring)
inputFile.close()
outputFile.close()
if __name__ == '__main__':
main()
| jatchley/OSU-Online-CS | 325/Final Project/Final.py | Python | mit | 4,765 | 0.004617 |
import unittest
from powerline_shell.utils import RepoStats
class RepoStatsTest(unittest.TestCase):
def setUp(self):
self.repo_stats = RepoStats()
self.repo_stats.changed = 1
self.repo_stats.conflicted = 4
def test_dirty(self):
self.assertTrue(self.repo_stats.dirty)
def test_simple(self):
self.assertEqual(self.repo_stats.new, 0)
def test_n_or_empty__empty(self):
self.assertEqual(self.repo_stats.n_or_empty("changed"), u"")
def test_n_or_empty__n(self):
self.assertEqual(self.repo_stats.n_or_empty("conflicted"), u"4")
def test_index(self):
self.assertEqual(self.repo_stats["changed"], 1)
| banga/powerline-shell | test/repo_stats_test.py | Python | mit | 687 | 0 |
"""
WSGI config for project_2do project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project_2do.settings")
application = get_wsgi_application()
| Dru89/2do | project_2do/wsgi.py | Python | mit | 399 | 0 |
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.txt
# -*- coding: utf-8 -*-
"""
Lizard-security's ``admin.py`` contains two kinds of model admins:
- Our own model admins to make editing data sets, permission mappers and user
groups easier.
- ``SecurityFilteredAdmin`` as a base class for admins of models that use
lizard-security's data set mechanism.
"""
from django.contrib import admin
from django.contrib.auth.models import Permission
from tls import request as tls_request
from django.forms import ModelForm
from lizard_security.models import DataSet
from lizard_security.models import PermissionMapper
from lizard_security.models import UserGroup
from lizard_security.middleware import USER_GROUP_IDS
class DataSetAdmin(admin.ModelAdmin):
"""Unmodified admin for data sets."""
model = DataSet
class UserGroupAdminForm(ModelForm):
"""Custom form for user groups: ensures managers are also members.
A user group's manager should also automatically be a member. Otherwise
we'd need two queries to determine user group membership, now only one.
"""
class Meta:
model = UserGroup
def clean(self):
"""Make sure all managers are also members."""
members = list(self.cleaned_data['members'])
for manager in self.cleaned_data['managers']:
if manager not in members:
members.append(manager)
self.cleaned_data['members'] = members
return self.cleaned_data
class UserGroupAdmin(admin.ModelAdmin):
"""Custom admin for user groups: show manager/membership info directly.
User groups are also filtered to only those you are a manager of.
"""
model = UserGroup
form = UserGroupAdminForm
list_display = ('name', 'manager_info', 'number_of_members')
search_fields = ('name', )
filter_horizontal = ('managers', 'members')
def queryset(self, request):
"""Limit user groups to those you manage.
The superuser can edit all user groups, of course.
"""
qs = super(UserGroupAdmin, self).queryset(request)
if request.user.is_superuser:
return qs
return qs.filter(id__in=request.user.managed_user_groups.all())
class PermissionMapperAdmin(admin.ModelAdmin):
"""Custom admin for permission mapper: editable in the list display.
The most important items, data set and permission group, are editable in
the list display. The list display also gives you a good view on all data,
which is needed to keep track of all the various security settings if you
have more than a handful of permission mappers.
"""
model = PermissionMapper
list_display = ('name', 'user_group', 'data_set', 'permission_group')
list_editable = ('user_group', 'data_set', 'permission_group')
list_filter = ('user_group', 'data_set', 'permission_group')
search_fields = ('name', 'data_set__name')
class SecurityFilteredAdmin(admin.ModelAdmin):
"""Custom admin base class for models that use lizard-security data sets.
Django's default admin looks at global permissions to determine if you can
even view a certain model in the admin. SecurityFilteredAdmin takes
lizard-security's permission mapper into account.
"""
def _available_permissions(self):
"""Return all permissions we have through user group membership.
This method is used by the ``has_{add|change|delete}_permission()``
methods. They have to determine whether we have rights to
add/change/delete *some* instance of the model we're the admin for. So
we don't have to look at data sets, only at which permissions are
somehow connected to the user groups we're a member of.
"""
user_group_ids = getattr(tls_request, USER_GROUP_IDS, None)
if user_group_ids:
permissions = Permission.objects.filter(
group__permissionmapper__user_group__id__in=user_group_ids)
permissions = [(perm.content_type.app_label + '.' + perm.codename)
for perm in permissions]
return permissions
return []
def has_add_permission(self, request):
"""Return True if the given request has permission to add an object.
"""
opts = self.opts
perm = opts.app_label + '.' + opts.get_add_permission()
if request.user.has_perm(perm):
return True
return perm in self._available_permissions()
def has_change_permission(self, request, obj=None):
"""Return True if we have permission to change the object.
If ``obj`` is None, we just have to check if we have global
permissions or if we have the permission through a permission mapper.
TODO: specific check for object permissions.
"""
opts = self.opts
perm = opts.app_label + '.' + opts.get_change_permission()
# TODO: object permissions
if request.user.has_perm(perm):
return True
        return perm in self._available_permissions()
def has_delete_permission(self, request, obj=None):
"""Return True if we have permission to delete the object.
If ``obj`` is None, we just have to check if we have global
permissions or if we have the permission through a permission mapper.
TODO: specific check for object permissions.
"""
opts = self.opts
perm = opts.app_label + '.' + opts.get_delete_permission()
# TODO: object permissions
if request.user.has_perm(perm):
return True
return perm in self._available_permissions()
admin.site.register(DataSet, DataSetAdmin)
admin.site.register(UserGroup, UserGroupAdmin)
admin.site.register(PermissionMapper, PermissionMapperAdmin)
| lizardsystem/lizard-security | lizard_security/admin.py | Python | gpl-3.0 | 5,909 | 0 |
import astropy.io.fits as fits
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
import numpy as n
import os
import sys
from scipy.stats import scoreatpercentile as sc
from scipy.interpolate import interp1d
survey = sys.argv[1]
z_min, z_max = 0., 1.6
imfs = ["Chabrier_ELODIE_", "Chabrier_MILES_", "Chabrier_STELIB_", "Kroupa_ELODIE_", "Kroupa_MILES_", "Kroupa_STELIB_", "Salpeter_ELODIE_", "Salpeter_MILES_", "Salpeter_STELIB_" ]
z_bins = n.array([0, 0.025, 0.375, 0.7, 0.85, 1.6])
key_SNR = 'SNR_ALL'
SNR_keys = n.array([ 'SNR_32_35', 'SNR_35_39', 'SNR_39_41', 'SNR_41_55', 'SNR_55_68', 'SNR_68_74', 'SNR_74_93' ])
SNR_w_min = n.array([ 32, 35, 39, 41, 55, 68, 74 ])
SNR_w_max = n.array([ 35, 39, 41, 55, 68, 74, 93 ])
wl_40 = ((z_bins[1:]+z_bins[:-1]) * 0.5 + 1)*40.
snr_ids = n.searchsorted(SNR_w_max, wl_40)
print(SNR_keys[snr_ids])
out_dir = os.path.join(os.environ['OBS_REPO'], 'spm', 'results')
#path_2_MAG_cat = os.path.join( os.environ['HOME'], 'SDSS', "dr14_specphot_gri.fits" )
#hd = fits.open(path_2_MAG_cat)
#path_2_sdss_cat = os.path.join( os.environ['HOME'], 'SDSS', '26', 'catalogs', "FireFly.fits" )
#path_2_eboss_cat = os.path.join( os.environ['HOME'], 'SDSS', 'v5_10_0', 'catalogs', "FireFly.fits" )
path_2_sdss_cat = os.path.join( os.environ['OBS_REPO'], 'SDSS', '26', 'catalogs', "FireFly.fits" )
path_2_eboss_cat = os.path.join( os.environ['OBS_REPO'], 'SDSS', 'v5_10_0', 'catalogs', "FireFly.fits" )
# OPENS THE CATALOGS
print("Loads catalog")
if survey =='deep2':
deep2_dir = os.path.join(os.environ['OBS_REPO'], 'DEEP2')
path_2_deep2_cat = os.path.join( deep2_dir, "zcat.deep2.dr4.v4.LFcatalogTC.Planck13.spm.fits" )
catalog = fits.open(path_2_deep2_cat)[1].data
if survey =='sdss':
catalog = fits.open(path_2_sdss_cat)[1].data
z_name, z_err_name, class_name, zwarning = 'Z', 'Z_ERR', 'CLASS', 'ZWARNING'
if survey =='boss':
catalog = fits.open(path_2_eboss_cat)[1].data
z_name, z_err_name, class_name, zwarning = 'Z_NOQSO', 'Z_ERR_NOQSO', 'CLASS_NOQSO', 'ZWARNING_NOQSO'
IMF = imfs[0]
prf = IMF.split('_')[0]+' & '+IMF.split('_')[1]
print(IMF, prf)
name, zflg_val, prefix = prf, 0., IMF
catalog_0 = (catalog[z_err_name] > 0.) & (catalog[z_name] > catalog[z_err_name]) & (catalog[class_name]=='GALAXY') & (catalog[zwarning]==zflg_val) & (catalog[z_name] > z_min) & (catalog[z_name] < z_max)
catalog_zOk = catalog_0 & (catalog['SNR_ALL']>0)
converged = (catalog_zOk)&(catalog[prefix+'stellar_mass'] < 10**13. ) & (catalog[prefix+'stellar_mass'] > 10**4 ) & (catalog[prefix+'stellar_mass'] > catalog[prefix+'stellar_mass_low_1sig'] ) & (catalog[prefix+'stellar_mass'] < catalog[prefix+'stellar_mass_up_1sig'] )
dex04 = (converged) & (catalog[prefix+'stellar_mass'] < 10**14. ) & (catalog[prefix+'stellar_mass'] > 0 ) & (catalog[prefix+'stellar_mass'] > catalog[prefix+'stellar_mass_low_1sig'] ) & (catalog[prefix+'stellar_mass'] < catalog[prefix+'stellar_mass_up_1sig'] ) & ( - n.log10(catalog[prefix+'stellar_mass_low_1sig']) + n.log10(catalog[prefix+'stellar_mass_up_1sig']) < 0.8 )
dex02 = (dex04) & ( - n.log10(catalog[prefix+'stellar_mass_low_1sig']) + n.log10(catalog[prefix+'stellar_mass_up_1sig']) < 0.4 )
#target_bits
program_names = n.array(list(set( catalog['PROGRAMNAME'] )))
program_names.sort()
sourcetypes = n.array(list(set( catalog['SOURCETYPE'] )))
sourcetypes.sort()
length = lambda selection : len(selection.nonzero()[0])
pcs_ref = list(n.arange(0., 101, 5))
g = lambda key, s1, pcs = pcs_ref : n.hstack(( length(s1), sc(catalog[key][s1], pcs) ))
sel_pg = lambda pgr : (catalog_zOk) & (catalog['PROGRAMNAME']==pgr)
sel_st = lambda pgr : (catalog_zOk) & (catalog['SOURCETYPE']==pgr)
sel0_pg = lambda pgr : (catalog_0) & (catalog['PROGRAMNAME']==pgr)
sel0_st = lambda pgr : (catalog_0) & (catalog['SOURCETYPE']==pgr)
all_galaxies = []
tpps = []
for pg in sourcetypes:
sel_all = sel_st(pg)
n_all = length( sel_all )
if n_all > 100 :
#print(pg, n_all)
all_galaxies.append(n_all)
all_out = []
for z_Min, z_Max, snr_key in zip(z_bins[:-1], z_bins[1:], SNR_keys[snr_ids]):
s_z = sel_all &(catalog[z_name] >= z_Min) & (catalog[z_name] < z_Max)
n_z = length(s_z)
#print(z_Min, z_Max, n_z)
if n_z > 0 :
#print(n.min(catalog[snr_key][s_z]), n.max(catalog[snr_key][s_z]))
itp = interp1d(sc(catalog[snr_key][s_z], pcs_ref), pcs_ref, kind='linear', fill_value= 100., bounds_error=False)
#print(itp.x, itp.y)
all_out.append( [n_z, itp(5), itp(20)] )
else :
all_out.append([0., -1, -1])
all_out = n.hstack((all_out))
tpp = pg + " & " + str(int(n_all)) + " & " + " & ".join(n.array([ str(int(el)) for el in all_out]) ) + ' \\\\ \n'
print( tpp)
tpps.append(tpp)
all_galaxies = n.array(all_galaxies)
tpps = n.array(tpps)
ids = n.argsort(all_galaxies)[::-1]
out_file = os.path.join(os.environ['OBS_REPO'], 'spm', 'results', "table_comp_"+survey+"_snr_all_sourcetype_SNR_moments.tex")
f=open(out_file, 'w')
#f.write('source type & N & \multicolumn{2}{c}{N galaxies} && \multicolumn{2}{c}{SNR ALL$>0$} & \multicolumn{2}{c}{firefly converged} & \multicolumn{2}{c}{$\sigma_{\log M}<0.4$} & \multicolumn{2}{c}{$\sigma_{\log M}<0.2$} \\\\ \n')
#f.write(' & & N & % & & N & % & N & % & N & % \\\\ \n')
for jj in ids :
f.write( tpps[jj] )
f.close()
sys.exit()
#converged = (catalog_zOk)&(catalog[prefix+'stellar_mass'] < 10**13. ) & (catalog[prefix+'stellar_mass'] > 10**4 ) & (catalog[prefix+'stellar_mass'] > catalog[prefix+'stellar_mass_low_1sig'] ) & (catalog[prefix+'stellar_mass'] < catalog[prefix+'stellar_mass_up_1sig'] )
#dex04 = (converged) & (catalog[prefix+'stellar_mass'] < 10**14. ) & (catalog[prefix+'stellar_mass'] > 0 ) & (catalog[prefix+'stellar_mass'] > catalog[prefix+'stellar_mass_low_1sig'] ) & (catalog[prefix+'stellar_mass'] < catalog[prefix+'stellar_mass_up_1sig'] ) & ( - n.log10(catalog[prefix+'stellar_mass_low_1sig']) + n.log10(catalog[prefix+'stellar_mass_up_1sig']) < 0.8 )
#dex02 = (dex04) & ( - n.log10(catalog[prefix+'stellar_mass_low_1sig']) + n.log10(catalog[prefix+'stellar_mass_up_1sig']) < 0.4 )
#m_catalog = n.log10(catalog[prefix+'stellar_mass'])
#w_catalog = n.ones_like(catalog[prefix+'stellar_mass'])
#print(ld(catalog_zOk))
#return name + " & $"+ sld(converged)+"$ ("+str(n.round(ld(converged)/ld(catalog_zOk)*100.,1))+") & $"+ sld(dex04)+"$ ("+str(n.round(ld(dex04)/ld(catalog_zOk)*100.,1))+") & $"+ sld(dex02)+ "$ ("+str(n.round(ld(dex02)/ld(catalog_zOk)*100.,1))+r") \\\\"
##return catalog_sel, m_catalog, w_catalog
sys.exit()
for IMF in imfs :
prf = IMF.split('_')[0]+' & '+IMF.split('_')[1]
l2w = get_basic_stat_deep2(deep2, 'ZBEST', 'ZQUALITY', prf, 2., IMF, o2=False)
f.write(l2w + " \n")
f.write('\\hline \n')
#l2w = get_basic_stat_DR12(boss_12_portSF_kr, 'Z', 'Z_ERR', 'Portsmouth Kroupa Star-Forming & BOSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(boss_12_portPA_kr, 'Z', 'Z_ERR', 'Portsmouth Kroupa Passive & BOSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(boss_12_portSF_sa, 'Z', 'Z_ERR', 'Portsmouth Salpeter Star-Forming & BOSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(boss_12_portPA_sa, 'Z', 'Z_ERR', 'Portsmouth Salpeter Passive & BOSS & 12 ', 0.)
#f.write(l2w + " \n")
for IMF in imfs :
prf = IMF.split('_')[0]+' & '+IMF.split('_')[1]
l2w = get_basic_stat_firefly_DR14(boss, 'Z_NOQSO', 'Z_ERR_NOQSO', 'CLASS_NOQSO', 'ZWARNING_NOQSO', prf, 0., IMF)
f.write(l2w + " \n")
f.write('\\hline \n')
#l2w = get_basic_stat_DR12(sdss_12_portSF_kr, 'Z', 'Z_ERR', 'Portsmouth Kroupa Star-Forming & SDSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(sdss_12_portPA_kr, 'Z', 'Z_ERR', 'Portsmouth Kroupa Passive & SDSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(sdss_12_portSF_sa, 'Z', 'Z_ERR', 'Portsmouth Salpeter Star-Forming & SDSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(sdss_12_portPA_sa, 'Z', 'Z_ERR', 'Portsmouth Salpeter Passive & SDSS & 12 ', 0.)
#f.write(l2w + " \n")
for IMF in imfs :
prf = IMF.split('_')[0]+' & '+IMF.split('_')[1]
l2w = get_basic_stat_firefly_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', prf, 0., IMF)
f.write(l2w + " \n")
f.write('\\hline \n')
f.close()
#"""
out_file = os.path.join(os.environ['OBS_REPO'], 'spm', 'results', "table_2_r.tex")
f=open(out_file, 'w')
for IMF in imfs :
prf = IMF.split('_')[0]+' & '+IMF.split('_')[1]
l2w = get_basic_stat_deep2(deep2, 'ZBEST', 'ZQUALITY', prf, 2., IMF, o2=True)
f.write(l2w + " \n")
f.close()
| JohanComparat/pySU | spm/bin_SMF/create_table_snr.py | Python | cc0-1.0 | 8,566 | 0.025683 |
# -*- coding: utf-8 -*-
import re,urlparse,json
from liveresolver.modules import client
from BeautifulSoup import BeautifulSoup as bs
import xbmcgui
def resolve(url):
try:
result = client.request(url)
html = result
result = json.loads(result)
try:
f4m=result['content']['media']['f4m']
except:
reg=re.compile('"src":"http://(.+?).f4m"')
            pom=re.findall(reg,html)[0]
            f4m='http://'+pom+'.f4m'
result = client.request(f4m)
soup = bs(result)
try:
base=soup.find('baseURL').getText()+'/'
except:
base=soup.find('baseurl').getText()+'/'
linklist = soup.findAll('media')
choices,links=[],[]
for link in linklist:
url = base + link['url']
bitrate = link['bitrate']
choices.append(bitrate)
links.append(url)
if len(links)==1:
return links[0]
if len(links)>1:
dialog = xbmcgui.Dialog()
index = dialog.select('Select bitrate', choices)
if index>-1:
return links[index]
return
except:
return
| sanctuaryaddon/sanctuary | script.module.liveresolver/lib/liveresolver/resolvers/playwire.py | Python | gpl-2.0 | 1,286 | 0.01944 |
__all__ = ["speedtest_exceptions", "speedtest"]
from . import sendtest
| Awesomecase/Speedtest | speedtest_sendtest/__init__.py | Python | gpl-3.0 | 71 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ======================================================================
# Copyright 2016 Julien LE CLEACH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================================================
import threading
import zmq
from supervisor import loggers
from supervisor.loggers import LevelsByName
from supvisors.supvisorszmq import EventSubscriber
from supvisors.utils import EventHeaders
def create_logger(logfile=r'subscriber.log', loglevel=LevelsByName.INFO,
fmt='%(asctime)s %(levelname)s %(message)s\n',
rotating=True, maxbytes=10 * 1024 * 1024, backups=1, stdout=True):
""" Return a Supervisor logger. """
logger = loggers.getLogger(loglevel)
if stdout:
loggers.handle_stdout(logger, fmt)
loggers.handle_file(logger, logfile, fmt, rotating, maxbytes, backups)
return logger
class SupvisorsEventInterface(threading.Thread):
""" The SupvisorsEventInterface is a python thread that connects
to **Supvisors** and receives the events published.
The subscriber attribute shall be used to define the event types of interest.
The SupvisorsEventInterface requires:
- a ZeroMQ context,
- the event port number used by **Supvisors** to publish its events,
- a logger reference to log traces.
This event port number MUST correspond to the ``event_port`` value set
in the ``[supvisors]`` section of the Supervisor configuration file.
The default behaviour is to print the messages received.
For any other behaviour, just specialize the methods `on_xxx_status`.
Attributes:
- logger: the reference to the logger,
- subscriber: the wrapper of the ZeroMQ socket connected to **Supvisors**,
- stop_event: when set, breaks the infinite loop of the thread.
Constants:
    - _Poll_timeout: duration used to time out the ZeroMQ poller, defaulting to 500 milliseconds.
"""
_Poll_timeout = 500
def __init__(self, zmq_context, event_port, logger):
""" Initialization of the attributes. """
# thread attributes
threading.Thread.__init__(self)
# store the parameters
self.zmq_context = zmq_context
self.event_port = event_port
self.logger = logger
# create stop event
self.stop_event = threading.Event()
def stop(self):
""" This method stops the main loop of the thread. """
self.logger.info('request to stop main loop')
self.stop_event.set()
def run(self):
""" Main loop of the thread. """
# create event socket
self.subscriber = EventSubscriber(self.zmq_context, self.event_port, self.logger)
self.configure()
# create poller and register event subscriber
poller = zmq.Poller()
poller.register(self.subscriber.socket, zmq.POLLIN)
        # poll for events, timing out every _Poll_timeout milliseconds
self.logger.info('entering main loop')
while not self.stop_event.is_set():
socks = dict(poller.poll(self._Poll_timeout))
# check if something happened on the socket
if self.subscriber.socket in socks and \
socks[self.subscriber.socket] == zmq.POLLIN:
self.logger.debug('got message on subscriber')
try:
message = self.subscriber.receive()
except Exception as e:
self.logger.error(
'failed to get data from subscriber: {}'.format(e.message))
else:
if message[0] == EventHeaders.SUPVISORS:
self.on_supvisors_status(message[1])
elif message[0] == EventHeaders.ADDRESS:
self.on_address_status(message[1])
elif message[0] == EventHeaders.APPLICATION:
self.on_application_status(message[1])
elif message[0] == EventHeaders.PROCESS_EVENT:
self.on_process_event(message[1])
elif message[0] == EventHeaders.PROCESS_STATUS:
self.on_process_status(message[1])
self.logger.warn('exiting main loop')
self.subscriber.close()
def configure(self):
""" Default is subscription to everything. """
self.logger.info('subscribe to all messages')
self.subscriber.subscribe_all()
def on_supvisors_status(self, data):
""" Just logs the contents of the Supvisors Status message. """
self.logger.info('got Supvisors Status message: {}'.format(data))
def on_address_status(self, data):
""" Just logs the contents of the Address Status message. """
self.logger.info('got Address Status message: {}'.format(data))
def on_application_status(self, data):
""" Just logs the contents of the Application Status message. """
self.logger.info('got Application Status message: {}'.format(data))
def on_process_event(self, data):
""" Just logs the contents of the Process Event message. """
self.logger.info('got Process Event message: {}'.format(data))
def on_process_status(self, data):
""" Just logs the contents of the Process Status message. """
self.logger.info('got Process Status message: {}'.format(data))
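# A minimal specialization sketch (illustrative only; the payload layout is
# whatever Supvisors publishes for each event type):
#
# class MySubscriber(SupvisorsEventInterface):
#     def on_process_status(self, data):
#         self.logger.info('custom handling of Process Status: {}'.format(data))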
if __name__ == '__main__':
import argparse
import time
# get arguments
parser = argparse.ArgumentParser(description='Start a subscriber to Supvisors events.')
parser.add_argument('-p', '--port', type=int, default=60002,
help="the event port of Supvisors")
parser.add_argument('-s', '--sleep', type=int, metavar='SEC', default=10,
help="the duration of the subscription")
args = parser.parse_args()
# create test subscriber
loop = SupvisorsEventInterface(zmq.Context.instance(), args.port, create_logger())
loop.subscriber.subscribe_all()
# start thread and sleep for a while
loop.start()
time.sleep(args.sleep)
# stop thread and halt
loop.stop()
loop.join()
| julien6387/supervisors | supvisors/client/subscriber.py | Python | apache-2.0 | 6,689 | 0.001196 |
#-*- coding: utf-8 -*-
'''
Keep the last N items
'''
| williechen/DailyApp | 12/py201501/Ch01_03/__init__.py | Python | lgpl-3.0 | 62 | 0.020833 |
import uuid
from datetime import datetime
from unittest import TestCase
import pytz
import colander
from unicore.comments.service.models import (
COMMENT_MAX_LENGTH, COMMENT_CONTENT_TYPES, COMMENT_MODERATION_STATES,
COMMENT_STREAM_STATES)
from unicore.comments.service.schema import (
Comment, Flag, BannedUser, StreamMetadata)
from unicore.comments.service.tests.test_models import (
comment_data as comment_model_data,
flag_data as flag_model_data,
banneduser_data as banneduser_model_data,
streammetadata_data as streammetadata_model_data)
def simple_serialize(data):
for key in data.keys():
value = data[key]
if isinstance(value, bool):
data[key] = 'true' if value else 'false'
elif isinstance(value, int):
data[key] = str(value)
elif isinstance(value, datetime):
data[key] = value.isoformat()
elif isinstance(value, uuid.UUID):
data[key] = value.hex
elif isinstance(value, dict):
data[key] = value.copy()
else:
data[key] = unicode(value)
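# e.g. simple_serialize({'flag': True, 'count': 2, 'id': some_uuid}) rewrites
# the dict in place to {'flag': 'true', 'count': '2', 'id': some_uuid.hex}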
comment_data = comment_model_data.copy()
flag_data = flag_model_data.copy()
banneduser_data = banneduser_model_data.copy()
streammetadata_data = streammetadata_model_data.copy()
for data in (comment_data, flag_data, banneduser_data, streammetadata_data):
simple_serialize(data)
class CommentTestCase(TestCase):
def test_deserialize(self):
schema = Comment().bind()
clean = schema.deserialize(comment_data)
# must remove flag_count so that it doesn't get updated directly
self.assertNotIn('flag_count', clean)
# check typed fields
self.assertIsInstance(clean.pop('submit_datetime'), datetime)
self.assertEqual(clean.pop('is_removed'), False)
self.assertEqual(len(clean), len(comment_model_data) - 3)
self.assertDictContainsSubset(clean, comment_model_data)
# check that missing required fields raise an exception
incomplete_data = comment_data.copy()
required_fields = (
'app_uuid', 'content_uuid', 'user_uuid', 'comment', 'user_name',
'submit_datetime', 'content_type', 'content_title', 'content_url',
'locale')
for field in required_fields:
del incomplete_data[field]
try:
schema.deserialize(incomplete_data)
self.fail('Expected colander.Invalid to be raised')
except colander.Invalid as e:
self.assertEqual(len(e.children), len(required_fields))
# check that missing fields with model defaults are dropped
missing_data = comment_data.copy()
fields_with_model_default = (
'uuid', 'flag_count', 'is_removed', 'moderation_state',
'ip_address')
for field in fields_with_model_default:
del missing_data[field]
clean = schema.deserialize(missing_data)
for field in fields_with_model_default:
self.assertNotIn(field, clean)
def test_serialize(self):
schema = Comment(include_all=True).bind()
clean = schema.serialize(comment_model_data)
self.assertEqual(clean, comment_data)
# check that flag_count got serialized
self.assertIn('flag_count', clean)
# check that missing/None fields are 'None'
missing_and_none_data = comment_model_data.copy()
del missing_and_none_data['ip_address']
clean = schema.serialize(missing_and_none_data)
self.assertEqual(clean['ip_address'], 'None')
missing_and_none_data['ip_address'] = None
clean = schema.serialize(missing_and_none_data)
self.assertEqual(clean['ip_address'], 'None')
class FlagTestCase(TestCase):
def test_deserialize(self):
schema = Flag().bind()
clean = schema.deserialize(flag_data)
self.assertEqual(
clean.pop('submit_datetime'),
flag_model_data['submit_datetime'].replace(tzinfo=pytz.UTC))
self.assertEqual(len(clean), len(flag_model_data) - 1)
self.assertDictContainsSubset(clean, flag_model_data)
# check that missing required fields raise an exception
# all flag fields are required
incomplete_data = {}
try:
schema.deserialize(incomplete_data)
self.fail('Expected colander.Invalid to be raised')
except colander.Invalid as e:
self.assertEqual(len(e.children), len(flag_data))
def test_serialize(self):
schema = Flag().bind()
clean = schema.serialize(flag_model_data)
self.assertEqual(clean, flag_data)
class BannedUserTestCase(TestCase):
def test_deserialize(self):
schema = BannedUser().bind()
clean = schema.deserialize(banneduser_data)
self.assertEqual(
clean.pop('created'),
banneduser_model_data['created'].replace(tzinfo=pytz.UTC))
self.assertEqual(len(clean), len(banneduser_model_data) - 1)
self.assertDictContainsSubset(clean, banneduser_model_data)
copy = banneduser_data.copy()
del copy['created']
clean = schema.deserialize(copy)
self.assertNotIn('created', clean)
def test_serialize(self):
schema = BannedUser().bind()
clean = schema.serialize(banneduser_model_data)
self.assertEqual(clean, banneduser_data)
class StreamMetadataTestCase(TestCase):
def test_deserialize(self):
schema = StreamMetadata().bind()
clean = schema.deserialize(streammetadata_data)
self.assertEqual(clean, streammetadata_model_data)
copy = streammetadata_data.copy()
del copy['metadata']
clean = schema.deserialize(copy)
self.assertEqual(clean.get('metadata', None), {})
# dropped because unknown and no X- prefix
copy['metadata'] = {'unknown': 'value'}
clean = schema.deserialize(copy)
self.assertEqual(clean.get('metadata', None), {})
def test_serialize(self):
schema = StreamMetadata().bind()
clean = schema.serialize(streammetadata_model_data)
self.assertEqual(clean, streammetadata_data)
class ValidatorTestCase(TestCase):
schema_flag = Flag().bind()
schema_comment = Comment().bind()
schema_streammetadata = StreamMetadata().bind()
def setUp(self):
self.data_flag = flag_data.copy()
self.data_comment = comment_data.copy()
def test_uuid_validator(self):
self.data_flag['app_uuid'] = 'notauuid'
self.assertRaisesRegexp(
colander.Invalid, "'app_uuid'",
self.schema_flag.deserialize, self.data_flag)
def test_comment_uuid_validator(self):
comment_uuid = self.data_flag['comment_uuid']
schema = Flag().bind(comment_uuid=comment_uuid)
self.assertEqual(
schema.deserialize(self.data_flag)['comment_uuid'],
uuid.UUID(comment_uuid))
other_uuid = uuid.uuid4().hex
schema = Flag().bind(comment_uuid=other_uuid)
self.assertRaisesRegexp(
colander.Invalid, "is not one of %s" % uuid.UUID(other_uuid),
schema.deserialize, self.data_flag)
def test_ip_address_validator(self):
self.data_comment['ip_address'] = 'notanipaddress'
self.assertRaisesRegexp(
colander.Invalid, "'ip_address'",
self.schema_comment.deserialize, self.data_comment)
def test_locale_validator(self):
self.data_comment['locale'] = 'notalocale'
self.assertRaisesRegexp(
colander.Invalid, "'locale'",
self.schema_comment.deserialize, self.data_comment)
def test_comment_validator(self):
for val in ('', 'a' * (COMMENT_MAX_LENGTH + 1)):
self.data_comment['comment'] = val
self.assertRaisesRegexp(
colander.Invalid, "'comment'",
self.schema_comment.deserialize, self.data_comment)
def test_content_type_validator(self):
self.data_comment['content_type'] = 'notacontenttype'
types = ', '.join(COMMENT_CONTENT_TYPES)
self.assertRaisesRegexp(
colander.Invalid, 'is not one of %s' % types,
self.schema_comment.deserialize, self.data_comment)
def test_content_url_validator(self):
self.data_comment['content_url'] = 'notacontenturl'
self.assertRaisesRegexp(
colander.Invalid, "'content_url'",
self.schema_comment.deserialize, self.data_comment)
def test_moderation_state_validator(self):
self.data_comment['moderation_state'] = 'notamoderationstate'
states = ', '.join(map(lambda t: t[0], COMMENT_MODERATION_STATES))
self.assertRaisesRegexp(
colander.Invalid, 'is not one of %s' % states,
self.schema_comment.deserialize, self.data_comment)
def test_stream_state_validator(self):
smd_data = streammetadata_data.copy()
smd_data['metadata'] = smd_data['metadata'].copy()
smd_data['metadata']['state'] = 'invalid'
states = ', '.join(COMMENT_STREAM_STATES)
self.assertRaisesRegexp(
colander.Invalid, 'is not one of %s' % states,
self.schema_streammetadata.deserialize, smd_data)
| universalcore/unicore.comments | unicore/comments/service/tests/test_schema.py | Python | bsd-2-clause | 9,303 | 0 |
from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def parquet_parse_simple():
"""
Tests Parquet parser by comparing the summary of the original csv frame with the h2o parsed Parquet frame.
Basic use case of importing files with auto-detection of column types.
:return: None if passed. Otherwise, an exception will be thrown.
"""
csv = h2o.import_file(path=pyunit_utils.locate("smalldata/airlines/AirlinesTrain.csv.zip"))
parquet = h2o.import_file(path=pyunit_utils.locate("smalldata/parser/parquet/airlines-simple.snappy.parquet"))
csv.summary()
csv_summary = h2o.frame(csv.frame_id)["frames"][0]["columns"]
parquet.summary()
parquet_summary = h2o.frame(parquet.frame_id)["frames"][0]["columns"]
pyunit_utils.compare_frame_summary(csv_summary, parquet_summary)
if __name__ == "__main__":
pyunit_utils.standalone_test(parquet_parse_simple)
else:
parquet_parse_simple() | mathemage/h2o-3 | h2o-py/tests/testdir_parser/pyunit_parquet_parser_simple.py | Python | apache-2.0 | 999 | 0.008008 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Chat',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content', models.CharField(max_length=200)),
('created', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
]
| scott-w/pyne-django-tutorial | chatter/chatter/base/migrations/0001_initial.py | Python | mit | 799 | 0.001252 |
from multiprocessing import Process, Pipe, Value, Queue
from time import sleep, clock
from solr import Solr
#### EVERY connection must be a class with a .commit() method.
#### Starbase and solr already have these. If you want to make
#### a csv method, you need to define it as a custom class.
####
#### commit() would either open the file and append every 20 lines or so,
#### OR you would append every line as it comes in and commit() would be a
#### dummy function, but it still needs to be there.
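#### A minimal sketch of such a custom connection (illustrative only; the
#### class name and file handling below are assumptions, not part of sharky):
class ExampleCSVConnection():
    def __init__(self, filename):
        self.filename = filename
    def write(self, line):
        # append every line as it comes in
        with open(self.filename, 'a') as f:
            f.write(line + '\n')
    def commit(self):
        # dummy commit: lines were already appended on write
        pass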
class SharkyWriterConnection():
def __init__(self, name):
self.name= name
def establishConnection(self):
# Expected Parameters ()
pass
def setEstablishConnectionFn(self, fn):
self.establishConnection= fn
def processMessage(self, message, target_queue):
# Expected Parameters (message, target_queue)
pass
def setProcessMessage(self, fn):
self.processMessage= fn
def writeOne(self,message,batch):
pass
def setWriteOne(self, fn):
self.writeOne= fn
def writeBatch(self):
pass
def setWriteBatch(self,fn):
self.writeBatch= fn
class SharkyWriter(Process):
def __init__(self, queue, health_pipe, sharky_writer_conn, beaver_shark_q):
Process.__init__(self)
self.queue= queue ## type: Queue (multiprocessor)
self.health_pipe= health_pipe ## type: Pipe (multiprocessor)
self.sharky_writer_conn= sharky_writer_conn
self.beaver_shark_q= beaver_shark_q ## Info for the logger.
self.batch = []
self.MaxBatchSize= 20
def run(self):
self.writeOne= self.sharky_writer_conn.writeOne
self.writeBatch= self.sharky_writer_conn.writeBatch
try:
self.conn= self.sharky_writer_conn.establishConnection(self)
self.beaver_shark_q.put(['info','Write connection %s established' % self.sharky_writer_conn.name])
except Exception,e:
self.beaver_shark_q.put(['exception',e])
while True:
while not self.queue.empty():
doc= self.queue.get()
self.writeOne(self, doc)
if len(self.batch) > self.MaxBatchSize:
# try/except built into function
self.writeBatch(self)
sleep(5)
| rawkintrevo/sharky | mysharky/writers.py | Python | apache-2.0 | 2,074 | 0.047734 |
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import py_utils
from telemetry import story as story_module
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
class LeakDetectionSharedState(shared_page_state.SharedDesktopPageState):
def ShouldReuseBrowserForAllStoryRuns(self):
return True
class LeakDetectionPage(page_module.Page):
def __init__(self, url, page_set, name=''):
super(LeakDetectionPage, self).__init__(
url=url, page_set=page_set, name=name,
shared_page_state_class=LeakDetectionSharedState)
def RunNavigateSteps(self, action_runner):
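    # Leak detection flow: take a baseline measurement in a fresh about:blank
    # tab, navigate to the story URL, then return to about:blank and measure
    # again so that objects retained by the page show up as leaks.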
tabs = action_runner.tab.browser.tabs
new_tab = tabs.New()
new_tab.action_runner.Navigate('about:blank')
new_tab.action_runner.PrepareForLeakDetection()
new_tab.action_runner.MeasureMemory()
new_tab.action_runner.Navigate(self.url)
self._WaitForPageLoadToComplete(new_tab.action_runner)
new_tab.action_runner.Navigate('about:blank')
new_tab.action_runner.PrepareForLeakDetection()
new_tab.action_runner.MeasureMemory()
new_tab.Close()
def _WaitForPageLoadToComplete(self, action_runner):
py_utils.WaitFor(action_runner.tab.HasReachedQuiescence, timeout=30)
# Some websites have a script that loads resources continuously, in which cases
# HasReachedQuiescence would not be reached. This class waits for document ready
# state to be complete to avoid timeout for those pages.
class ResourceLoadingLeakDetectionPage(LeakDetectionPage):
def _WaitForPageLoadToComplete(self, action_runner):
action_runner.tab.WaitForDocumentReadyStateToBeComplete()
class LeakDetectionStorySet(story_module.StorySet):
def __init__(self):
super(LeakDetectionStorySet, self).__init__(
archive_data_file='data/leak_detection.json',
cloud_storage_bucket=story_module.PARTNER_BUCKET)
urls_list = [
# Alexa top websites
'https://www.google.com',
'https://www.youtube.com',
'https://www.facebook.com',
'https://www.baidu.com',
'https://www.wikipedia.org',
'https://world.taobao.com/',
'https://www.tmall.com/',
'http://www.amazon.com',
'http://www.twitter.com',
'https://www.instagram.com/',
'http://www.jd.com/',
'https://vk.com/',
'https://outlook.live.com',
'https://www.reddit.com/',
'https://weibo.com/',
'https://www.sina.com.cn/',
'https://www.360.cn/',
'https://yandex.ru/',
'https://www.blogger.com/',
'https://www.netflix.com/',
'https://www.pornhub.com/',
'https://www.linkedin.com/',
'https://www.yahoo.co.jp/',
'https://www.csdn.net/',
'https://www.alipay.com/',
'https://www.twitch.tv/',
# TODO(keishi): Memory dump fails flakily crbug.com/963273
#'https://www.ebay.com/',
# TODO(keishi): Memory dump fails flakily crbug.com/963273
#'https://www.microsoft.com/',
# TODO(keishi): Memory dump fails flakily crbug.com/963273
#'https://www.xvideos.com/',
'https://mail.ru/',
'https://www.bing.com/',
'http://www.wikia.com/',
'https://www.office.com/',
'https://www.imdb.com/',
'https://www.aliexpress.com/',
'https://www.msn.com/',
'https://news.google.com/',
'https://www.theguardian.com/',
'https://www.indiatimes.com/',
# TODO(keishi): Memory dump fails flakily crbug.com/963273
#'http://www.foxnews.com/',
'https://weather.com/',
'https://www.shutterstock.com/',
'https://docs.google.com/',
'https://wordpress.com/',
# TODO(yuzus): This test crashes.
# 'https://www.apple.com/',
'https://play.google.com/store',
'https://www.dropbox.com/',
'https://soundcloud.com/',
'https://vimeo.com/',
'https://www.slideshare.net/',
'https://www.mediafire.com/',
'https://www.etsy.com/',
'https://www.ikea.com/',
'https://www.bestbuy.com/',
'https://www.homedepot.com/',
# TODO(keishi): Memory dump fails flakily crbug.com/963273
#'https://www.target.com/',
'https://www.booking.com/',
'https://www.tripadvisor.com/',
'https://9gag.com/',
'https://www.expedia.com/',
'https://www.roblox.com/',
'https://www.gamespot.com/',
'https://www.blizzard.com',
# TODO(keishi): Memory dump fails flakily crbug.com/963273
#'https://ign.com/',
'https://www.yelp.com/',
# Times out waiting for HasReachedQuiescence - crbug.com/927427
# 'https://gizmodo.com/',
'https://www.gsmarena.com/',
'https://www.theverge.com/',
'https://www.nlm.nih.gov/',
'https://archive.org/',
'https://www.udemy.com/',
'https://answers.yahoo.com/',
# TODO(crbug.com/985552): Memory dump fails flakily.
# 'https://www.goodreads.com/',
'https://www.cricbuzz.com/',
'http://www.goal.com/',
'http://siteadvisor.com/',
'https://www.patreon.com/',
'https://www.jw.org/',
'http://europa.eu/',
'https://translate.google.com/',
'https://www.epicgames.com/',
'http://www.reverso.net/',
'https://play.na.leagueoflegends.com/',
'https://www.thesaurus.com/',
'https://www.weebly.com/',
'https://www.deviantart.com/',
'https://www.scribd.com/',
'https://www.hulu.com/',
'https://www.xfinity.com/',
# India Alexa top websites
'https://porn555.com/',
'https://www.onlinesbi.com/',
'https://www.flipkart.com/',
'https://www.hotstar.com/',
'https://www.incometaxindiaefiling.gov.in/',
'https://stackoverflow.com/',
# TODO(crbug.com/1005035) Memory dump fails flakily.
# 'https://www.irctc.co.in/nget/',
'https://www.hdfcbank.com/',
'https://www.whatsapp.com/',
'https://uidai.gov.in/',
'https://billdesk.com/',
'https://www.icicibank.com/',
# US Alexa top websites
'https://imgur.com/',
'https://www.craigslist.org/',
'https://www.chase.com/',
# TODO(892352): tumblr started timing out due to a catapult roll. See
# https://crbug.com/892352
# 'https://www.tumblr.com/',
'https://www.paypal.com/',
# TODO(yuzus): espn.com is flaky. https://crbug.com/959796
#'http://www.espn.com/',
'https://edition.cnn.com/',
'https://www.pinterest.com/',
# TODO(keishi): Memory dump fails flakily crbug.com/963273
#'https://www.nytimes.com/',
'https://github.com/',
'https://www.salesforce.com/',
# Japan Alexa top websites
'https://www.rakuten.co.jp/',
'http://www.nicovideo.jp/',
'https://fc2.com/',
'https://ameblo.jp/',
'http://kakaku.com/',
'https://www.goo.ne.jp/',
'https://www.pixiv.net/',
# websites which were found to be leaking in the past
'https://www.prezi.com',
# TODO(keishi): Memory dump fails flakily crbug.com/963273
#'http://www.time.com',
'http://www.cheapoair.com',
'http://www.onlinedown.net',
'http://www.dailypost.ng',
'http://www.aljazeera.net',
'http://www.googleapps.com',
'http://www.airbnb.ch',
'http://www.livedoor.jp',
'http://www.blu-ray.com',
# TODO(953195): Test times out.
# 'http://www.block.io',
'http://www.hockeybuzz.com',
'http://www.silverpop.com',
'http://www.ansa.it',
'http://www.gulfair.com',
'http://www.nusatrip.com',
'http://www.samsung-fun.ru',
'http://www.opentable.com',
'http://www.magnetmail.net',
'http://zzz.com.ua',
'http://a-rakumo.appspot.com',
'http://www.sakurafile.com',
'http://www.psiexams.com',
'http://www.contentful.com',
'http://www.estibot.com',
'http://www.mbs.de',
'http://www.zhengjie.com',
'http://www.sjp.pl',
'http://www.mastodon.social',
'http://www.horairetrain.net',
'http://www.torrentzeu.to',
'http://www.inbank.it',
'http://www.gradpoint.com',
'http://www.mail.bg',
'http://www.aaannunci.it',
'http://www.leandomainsearch.com',
'http://www.wpjam.com',
'http://www.nigma.ru',
'http://www.do-search.com',
'http://www.omniboxes.com',
'http://whu.edu.cn',
'http://support.wordpress.com',
'http://www.webwebweb.com',
'http://www.sick.com',
'http://www.iowacconline.com',
'http://hdu.edu.cn',
'http://www.register.com',
'http://www.careesma.in',
'http://www.bestdic.ir',
'http://www.privacyassistant.net',
'http://www.sklavenzentrale.com',
'http://www.podbay.fm',
'http://www.coco.fr',
'http://www.skipaas.com',
'http://www.chatword.org',
'http://www.ezcardinfo.com',
'http://www.daydao.com',
'http://www.expediapartnercentral.com',
'http://www.22find.com',
'http://www.e-shop.gr',
'http://www.indeed.com',
'http://www.highwaybus.com',
'http://www.pingpang.info',
'http://www.besgold.com',
'http://www.arabam.com',
'http://makfax.com.mk',
'http://game.co.za',
'http://www.savaari.com',
'http://www.railsguides.jp',
]
resource_loading_urls_list = [
'https://www.hotels.com/',
'https://www.livejournal.com/',
# TODO(keishi): Memory dump fails flakily crbug.com/963273
#'https://www.yahoo.com',
'http://www.quora.com',
'https://www.macys.com',
'http://infomoney.com.br',
'http://www.listindiario.com',
'https://www.engadget.com/',
'https://www.sohu.com/',
'http://www.qq.com',
'http://www.benzworld.org',
'http://www.520mojing.com',
]
for url in urls_list:
self.AddStory(LeakDetectionPage(url, self, url))
for url in resource_loading_urls_list:
self.AddStory(ResourceLoadingLeakDetectionPage(url, self, url))
| chromium/chromium | tools/perf/contrib/leak_detection/page_sets.py | Python | bsd-3-clause | 10,066 | 0.001987 |
#!/usr/bin/python3
################################
# File Name: unittestExample.py
# Author: Chadd Williams
# Date: 10/20/2014
# Class: CS 360
# Assignment: Lecture Examples
# Purpose: Demonstrate unit tests
################################
# adapted from https://docs.python.org/3/library/unittest.html
# python3 -m unittest unittestExample -v
import random
import unittest
class TestListFunctions(unittest.TestCase):
listSize = 10
def setUp(self):
""" the text fixture, necessary setup for the tests to run
"""
self.theList = list(range(self.listSize))
# shuffle the list
random.shuffle(self.theList)
def tearDown(self):
""" nothing to tear down here
If your test created a database or built a network connection
you might delete the database or close the network connection
here. You might also close files you opened, close your
TK windows if this is GUI program, or kill threads if this is
a multithreaded application
"""
pass # nothing to do
def test_sort(self):
""" make sure sort works correctly
"""
self.theList.sort()
self.assertEqual(self.theList, list(range(self.listSize)))
def test_append(self):
""" make sure append works correctly
"""
self.theList.append(self.listSize+1)
self.assertEqual(self.theList[-1], self.listSize+1)
def test_exceptions(self):
"""test some exceptions
"""
# theList does not contain -1. Make sure remove
# raised the correct exception
self.assertRaises(ValueError, self.theList.remove, -1)
"""def test_thistestwillfail(self):
# theList DOES contain 1.
# remove will not raise the expected exception
self.assertRaises(ValueError, self.theList.remove, 0)"""
| NicLew/CS360-Practive-CI-Testing | unittestExample.py | Python | gpl-2.0 | 1,699 | 0.030606 |
'''
Created on 10 March 2015
@author: Remi Cattiau
'''
from nxdrive.logging_config import get_logger
from nxdrive.wui.dialog import WebDialog, WebDriveApi
from nxdrive.wui.translator import Translator
from PyQt4 import QtCore
log = get_logger(__name__)
class WebConflictsApi(WebDriveApi):
def __init__(self, application, engine, dlg=None):
super(WebConflictsApi, self).__init__(application, dlg)
self._manager = application.manager
self._application = application
self._dialog = dlg
self._engine = engine
def set_engine(self, engine):
self._engine = engine
@QtCore.pyqtSlot(result=str)
def get_errors(self):
return super(WebConflictsApi, self).get_errors(self._engine._uid)
@QtCore.pyqtSlot(result=str)
def get_conflicts(self):
return super(WebConflictsApi, self).get_conflicts(self._engine._uid)
@QtCore.pyqtSlot(int)
def resolve_with_local(self, state_id):
try:
self._engine.resolve_with_local(state_id)
except Exception as e:
log.exception(e)
@QtCore.pyqtSlot(int)
def resolve_with_remote(self, state_id):
try:
self._engine.resolve_with_remote(state_id)
except Exception as e:
log.exception(e)
@QtCore.pyqtSlot(int)
def resolve_with_duplicate(self, state_id):
try:
self._engine.resolve_with_duplicate(state_id)
except Exception as e:
log.exception(e)
@QtCore.pyqtSlot(int)
def retry_pair(self, state_id):
try:
self._engine.retry_pair(int(state_id))
except Exception as e:
log.exception(e)
@QtCore.pyqtSlot(int)
def unsynchronize_pair(self, state_id):
try:
self._engine.unsynchronize_pair(int(state_id))
except Exception as e:
log.exception(e)
@QtCore.pyqtSlot(str, result=str)
def open_local(self, path):
return super(WebConflictsApi, self).open_local(self._engine._uid, path)
@QtCore.pyqtSlot(str, str, result=str)
def open_remote(self, remote_ref, remote_name):
remote_ref = str(remote_ref)
remote_name = unicode(remote_name)
log.debug("Should open this : %s (%s)", remote_name, remote_ref)
try:
self._engine.open_edit(remote_ref, remote_name)
except Exception as e:
log.exception(e)
return ""
def _export_state(self, state):
if state is None:
return None
result = super(WebConflictsApi, self)._export_state(state)
result["last_contributor"] = " " if state.last_remote_modifier is None \
else self._engine.get_user_full_name(state.last_remote_modifier)
date_time = self.get_date_from_sqlite(state.last_remote_updated)
result["last_remote_update"] = "" if date_time == 0 else Translator.format_datetime(date_time)
date_time = self.get_date_from_sqlite(state.last_local_updated)
result["last_local_update"] = "" if date_time == 0 else Translator.format_datetime(date_time)
result["remote_can_update"] = state.remote_can_update
return result
class WebConflictsDialog(WebDialog):
def set_engine(self, engine):
self._api.set_engine(engine)
| rsoumyassdi/nuxeo-drive | nuxeo-drive-client/nxdrive/wui/conflicts.py | Python | lgpl-2.1 | 3,320 | 0.001506 |
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Identity v2 EC2 Credentials action implementations"""
import logging
import six
from cliff import command
from cliff import lister
from cliff import show
from openstackclient.common import utils
from openstackclient.i18n import _ # noqa
class CreateEC2Creds(show.ShowOne):
"""Create EC2 credentials"""
log = logging.getLogger(__name__ + ".CreateEC2Creds")
def get_parser(self, prog_name):
parser = super(CreateEC2Creds, self).get_parser(prog_name)
parser.add_argument(
'--project',
metavar='<project>',
help=_('Specify a project [admin only]'),
)
parser.add_argument(
'--user',
metavar='<user>',
help=_('Specify a user [admin only]'),
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)', parsed_args)
identity_client = self.app.client_manager.identity
if parsed_args.project:
project = utils.find_resource(
identity_client.tenants,
parsed_args.project,
).id
else:
# Get the project from the current auth
project = identity_client.auth_tenant_id
if parsed_args.user:
user = utils.find_resource(
identity_client.users,
parsed_args.user,
).id
else:
# Get the user from the current auth
user = identity_client.auth_user_id
creds = identity_client.ec2.create(user, project)
info = {}
info.update(creds._info)
return zip(*sorted(six.iteritems(info)))
class DeleteEC2Creds(command.Command):
"""Delete EC2 credentials"""
log = logging.getLogger(__name__ + '.DeleteEC2Creds')
def get_parser(self, prog_name):
parser = super(DeleteEC2Creds, self).get_parser(prog_name)
parser.add_argument(
'access_key',
metavar='<access-key>',
help=_('Credentials access key'),
)
parser.add_argument(
'--user',
metavar='<user>',
help=_('Specify a user [admin only]'),
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)', parsed_args)
identity_client = self.app.client_manager.identity
if parsed_args.user:
user = utils.find_resource(
identity_client.users,
parsed_args.user,
).id
else:
# Get the user from the current auth
user = identity_client.auth_user_id
identity_client.ec2.delete(user, parsed_args.access_key)
class ListEC2Creds(lister.Lister):
"""List EC2 credentials"""
log = logging.getLogger(__name__ + '.ListEC2Creds')
def get_parser(self, prog_name):
parser = super(ListEC2Creds, self).get_parser(prog_name)
parser.add_argument(
'--user',
metavar='<user>',
help=_('Specify a user [admin only]'),
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)', parsed_args)
identity_client = self.app.client_manager.identity
if parsed_args.user:
user = utils.find_resource(
identity_client.users,
parsed_args.user,
).id
else:
# Get the user from the current auth
user = identity_client.auth_user_id
columns = ('access', 'secret', 'tenant_id', 'user_id')
column_headers = ('Access', 'Secret', 'Project ID', 'User ID')
data = identity_client.ec2.list(user)
return (column_headers,
(utils.get_item_properties(
s, columns,
formatters={},
) for s in data))
class ShowEC2Creds(show.ShowOne):
"""Show EC2 credentials"""
log = logging.getLogger(__name__ + '.ShowEC2Creds')
def get_parser(self, prog_name):
parser = super(ShowEC2Creds, self).get_parser(prog_name)
parser.add_argument(
'access_key',
metavar='<access-key>',
help=_('Credentials access key'),
)
parser.add_argument(
'--user',
metavar='<user>',
help=_('Specify a user [admin only]'),
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)', parsed_args)
identity_client = self.app.client_manager.identity
if parsed_args.user:
user = utils.find_resource(
identity_client.users,
parsed_args.user,
).id
else:
# Get the user from the current auth
user = identity_client.auth_user_id
creds = identity_client.ec2.get(user, parsed_args.access_key)
info = {}
info.update(creds._info)
return zip(*sorted(six.iteritems(info)))
| varunarya10/python-openstackclient | openstackclient/identity/v2_0/ec2creds.py | Python | apache-2.0 | 5,662 | 0 |
import functools
class Solution:
def convert(self, s: str, numRows: int) -> str:
if numRows == 1:
return s
ret = [[] for _ in range(numRows)]
pattern = numRows*2 - 2
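        # one zigzag period = numRows chars going down + (numRows - 2) going up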
for i in range(len(s)):
if i % pattern < numRows:
ret[i % pattern].append(s[i])
else:
ret[pattern - (i % pattern)].append(s[i])
        return functools.reduce(lambda a, b: a + b, [''.join(c) for c in ret])
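# Example: Solution().convert("PAYPALISHIRING", 3) == "PAHNAPLSIIGYIR"
# (the sample case for LeetCode problem 6)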
| MingfeiPan/leetcode | string/6.py | Python | apache-2.0 | 526 | 0.015209 |
import question_template
game_type = 'input_output'
source_language = 'C'
parameter_list = [
['$x0','string'],['$x1','string'],['$x2','string'],
['$y0','string'],['$y1','string'],['$y2','string']
]
tuple_list = [
['echo_io_forward_',['a','b','c',None,None,None]],
]
global_code_template = '''\
d #include <stdio.h>
x #include <stdio.h>
dx
'''
main_code_template = '''\
dx int i;
dx
dx for (i = 1; i < argc; i++)
dx printf("%s\\n", argv[i]);
'''
argv_template = '$x0 $x1 $x2'
stdin_template = '''\
'''
stdout_template = '''\
$y0
$y1
$y2
'''
question = question_template.Question_template(game_type,source_language,
parameter_list,tuple_list,global_code_template,main_code_template,
argv_template,stdin_template,stdout_template)
| stryder199/RyarkAssignments | Assignment2/ttt/archive/_old/other/echo_io.py | Python | mit | 750 | 0.042667 |
# Author: Drone
import web
from app.helpers import utils
from app.helpers import formatting
projectName = 'Remote Function Trainer'
listLimit = 40
# connect to database
db = web.database(dbn='mysql', db='rft', user='root', passwd='1234')
t = db.transaction()
#t.commit()
# in development, debug error messages and the auto-reloader are controlled here
web.config.debug = False
# in development, template caching is set to False
cache = False
# template global functions
globals = utils.get_all_functions(formatting)
# set global base template
view = web.template.render('app/views', cache=cache, globals=globals)
# in production the internal errors are emailed to us
web.config.email_errors = '' | gcobos/rft | config.py | Python | agpl-3.0 | 673 | 0.002972 |
import argparse, json
import simpleamt
if __name__ == '__main__':
parser = argparse.ArgumentParser(parents=[simpleamt.get_parent_parser()])
args = parser.parse_args()
mtc = simpleamt.get_mturk_connection_from_args(args)
reject_ids = []
if args.hit_ids_file is None:
parser.error('Must specify --hit_ids_file.')
with open(args.hit_ids_file, 'r') as f:
hit_ids = [line.strip() for line in f]
for hit_id in hit_ids:
for a in mtc.get_assignments(hit_id):
reject_ids.append(a.AssignmentId)
print ('This will reject %d assignments with '
'sandbox=%s' % (len(reject_ids), str(args.sandbox)))
print 'Continue?'
s = raw_input('(Y/N): ')
if s == 'Y' or s == 'y':
print 'Rejecting assignments'
for idx, assignment_id in enumerate(reject_ids):
print 'Rejecting assignment %d / %d' % (idx + 1, len(reject_ids))
mtc.reject_assignment(assignment_id, feedback='Invalid results')
else:
print 'Aborting'
| achalddave/simple-amt | reject_assignments.py | Python | mit | 970 | 0.016495 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class MariadbCClient(CMakePackage):
"""MariaDB turns data into structured information in a wide array of
applications, ranging from banking to websites. It is an enhanced,
drop-in replacement for MySQL. MariaDB is used because it is fast,
scalable and robust, with a rich ecosystem of storage engines,
plugins and many other tools make it very versatile for a wide
variety of use cases. This package comprises only the standalone 'C
Connector', which enables connections to MariaDB and MySQL servers.
"""
homepage = "http://mariadb.org/about/"
url = "https://downloads.mariadb.com/Connectors/c/connector-c-3.0.3/mariadb-connector-c-3.0.3-src.tar.gz"
list_url = "https://downloads.mariadb.com/Connectors/c/"
list_depth = 1
version('3.1.9', sha256='108d99bf2add434dcb3bd9526ba1d89a2b9a943b62dcd9d0a41fcbef8ffbf2c7')
version('3.1.6', sha256='d266bb67df83c088c4fb05392713d2504c67be620894cedaf758a9561c116720')
version('3.1.5', sha256='a9de5fedd1a7805c86e23be49b9ceb79a86b090ad560d51495d7ba5952a9d9d5')
version('3.1.4', sha256='7a1a72fee00e4c28060f96c3efbbf38aabcbbab17903e82fce85a85002565316')
version('3.0.9', sha256='7277c0caba6f50b1d07e1d682baf0b962a63e2e6af9e00e09b8dcf36a7858641')
version('3.0.8', sha256='2ca368fd79e87e80497a5c9fd18922d8316af8584d87cecb35bd5897cb1efd05')
version('3.0.7', sha256='f63883c9360675d111646fba5c97feb0d08e0def5873dd189d78bafbb75fa004')
version('3.0.6', sha256='2b2d18dc969dc385f7f740e4db112300e11bc626c9ba9aa05c284704095b9e48')
version('3.0.5', sha256='940017f13a13846153eb9d36290824c4615c8a8be4142b6bbaeb698609f02667')
version('3.0.4', sha256='6eff680cd429fdb32940f6ea4755a997dda1bb00f142f439071f752fd0b200cf')
version('3.0.3', sha256='210f0ee3414b235d3db8e98e9e5a0a98381ecf771e67ca4a688036368984eeea')
version('3.0.2', sha256='518d14b8d77838370767d73f9bf1674f46232e1a2a34d4195bd38f52a3033758')
version('2.3.7', sha256='94f9582da738809ae1d9f1813185165ec7c8caf9195bdd04e511f6bdcb883f8e')
version('2.3.6', sha256='6b271d25dddda15f1c2328eee64f646a2e8b116ea21b04ece24b5a70712c3e96')
version('2.3.5', sha256='2f3bf4c326d74284debf7099f30cf3615f7978d1ec22b8c1083676688a76746f')
version('2.3.4', sha256='8beb0513da8a24ed2cb47836564c8b57045c3b36f933362f74b3676567c13abc')
version('2.3.3', sha256='82a5710134e7654b9cad58964d6a25ed91b3dc1804ff51e8be2def0032914089')
version('2.3.2', sha256='4063c8655dc37608d4eade981e25b76f67f5d36e8426dc7f20d59e48ebba628a')
version('2.3.1', sha256='6ab7e1477ae1484939675a3b499f98148980a0bd340d15d22df00a5c6656c633')
version('2.3.0', sha256='37faae901ca77bd48d2c6286f2e19e8c1abe7cac6fb1b128bd556617f4335c8a')
version('2.2.3', sha256='cd01ce2c418382f90fd0b21c3c756b89643880efe3447507bf740569b9d08eed')
version('2.2.2', sha256='93f56ad9f08bbaf0da8ef03bc96f7093c426ae40dede60575d485e1b99e6406b')
version('2.2.1', sha256='c30ba19be03a6ac8688ef7620aed0eabdf34ca9ee886c017c56b013b5f8ee06a')
version('2.2.0', sha256='3825b068d38bc19d6ad1eaecdd74bcd49d6ddd9d00559fb150e4e851a55bbbd4')
version('2.1.0', sha256='568050b89463af7610d458669fd9eee06dcc9405689aca8a526ac8c013b59167')
provides('mariadb-client')
provides('mysql-client')
depends_on('[email protected]:', type='build')
depends_on('curl')
depends_on('pcre')
depends_on('openssl')
depends_on('zlib')
def url_for_version(self, version):
url = "https://downloads.mariadb.com/Connectors/c/connector-c-{0}/mariadb-connector-c-{1}-src.tar.gz"
return url.format(version.up_to(3), version)
def cmake_args(self):
args = ['-DWITH_EXTERNAL_ZLIB=ON', '-DWITH_MYSQLCOMPAT=ON']
return args
| iulian787/spack | var/spack/repos/builtin/packages/mariadb-c-client/package.py | Python | lgpl-2.1 | 3,917 | 0.007148 |
### @author Rishi Jatia
import json
import re
import string
def decode_unicode(data, replace_boo=True):
    # dictionary which directly maps unicode values to their letters
dictionary = {'0030':'0','0031':'1','0032':'2','0033':'3','0034':'4','0035':'5','0036':'6','0037':'7','0038':'8','0039':'9','0024':'$','0040':'@','00A2':'cents','00A3':'pounds','00A5':'yen','00C7':'C','00D0':'D','00D1':'N','00DD':'Y','00E7':'c','00F1':'n','00FD':'y','00FF':'y','010E':'D','010F':'F','0110':'D','0111':'D','0130':'I','0134':'J','0135':'J','0136':'K','0137':'K','0138':'K','0160':'S','0161':'S','0191':'F','0192':'F','0193':'G','0198':'K','0199':'K',
'019D':'N','019E':'N','01A4':'P','01A5':'P','01AC':'T','01AF':'U','01B5':'Z','01CD':'A','01CE':'A','01CF':'I','01D0':'I','01D1':'O','01D2':'O','01DE':'A','01DF':'A','01E0':'A','01E1':'A','01F4':'G','01F5':'G','01F8':'N','01F9':'N','01FA':'A','01FB':'A','021E':'H',
'021F':'H','0224':'Z','2113':'L','2718':'X','0225':'Z','2134':'O','0226':'A','0227':'A','0228':'E','0229':'E','0386':'A','0388':'E','0389':'H','038A':'I','0391':'A','0392':'B','0395':'E','0396':'Z','0397':'H','0399':'I','039A':'K','039C':'M','039D':'N','039F':'O','03A1':'P','03A4':'T','03A5':'Y','03A7':'X','03AA':'I','03AB':'B','1E10':'D','1E11':'D','1E12':'D','1E13':'D','1E1E':'F','1E1F':'F','1E20':'G','1E21':'H','1E2C':'I','1E2D':'I','1E2E':'I','1E2F':'I','1E3E':'M','1E3F':'M','1E70':'T','1E71':'T','1E8E':'Y','1E8F':'Y','1EE0':'O','1EE1':'O','1EE2':'O','1EE3':'O','1EE4':'O','1EF0':'U','1EF1':'U'}
# dictionary in which patterns (prefixes and suffixes) are matched to possible letter choices
pattern_dict = {'00C':'AEI', '00D':'OU','00E':'AEI','00F':'OU','010':'AC','011':'EG','012':'GHI','013':'L','014':'LNO','015':'RS','016':'TU','017':'UWYZ', '018':'BCD','01D':'U','01E':'GKO','020':'AEIO','021':'RUST','022':'O','1E0':'ABCD','1E1':'E','1E3':'KL','1E4':'MNO','1E5':'OPR','1E6':'ST','1E7':'UV','1E8':'WX','1E9':'Z','1EB':'A','1EC':'EIO','1ED':'O','1EE':'U','1EF':'Y','216':'greeknum','217':'greeknum','246':'consecnum','247':'numfrom17'}
#dictionary which matches patterns for emoticons
hex_dict = {'A':'10','B':'11','C':'12','D':'13','E':'14','F':'15','a':'10','b':'11','c':'12','d':'13','e':'14','f':'15'}
happy_dict = ['1F600','263A','1F601','1F602','1F603','1F604','1F605','1F606','1F60A','263A','1F642','1F607','1F60C','1F643','1F62C','1F63A','1F638','1F639']
sad_dict = ['1F610','1F611','1F623','1F494','1F625','1F62B','1F613','1F614','1F615','2639','1F641','1F616','1F61E','1F61F','1F624','1F622','1F62D','1F629','1F630','1F620']
sexual_dict = ['1F609','1F6C0','2B50','1F445','1F525','1F36D','2606','1F60D','1F460','1F618','1F617','1F61A','1F917','1F60F','1F63B','1F63D','1F483','1F46F','1F48F','1F444','1F48B','1F459','1F484','1F34C','1F4AF','264B']
hearts=['1F498','2664','2764','2661','2665','1F493','1F495','1F496','1F497','1F499','1F49A','1F49B','1F49C','1F49D','1F49E','1F49F','2763']
baseball_dict=['26BE', '1F3C0', '1F3CF']
count=0
misc_code = ' *misc* '
if not replace_boo:
misc_code = ''
retval=''
    # collect every unicode escape sequence present in the data
regex=re.compile(r'\\u[0-9ABCDEFabcdef]{1,4}')
regex2=re.compile(r'\\U[0-9ABCDEFabcdef]{1,8}') #this is so that both types of unicode representations are filtered
lowers = list('abcdef')
uppers = [c.upper() for c in lowers]
ndata = set()
data = data.encode('unicode-escape').decode('utf-8')
data = re.sub(r'(?:\\x(?:[0-9]|[a-f]){2})+', ' ', data, flags=re.IGNORECASE)
    for val in re.finditer(regex, data):
        # the short \uXXXX escapes are already in the standard form
        ndata.add(val.group())
    for val in re.finditer(regex2, data):
        # normalize the long \UXXXXXXXX escapes to the short \uXXXX form
        ndata.add('\u' + val.group()[5:])
ndata = list(ndata)
"""
Process of parsing:
-> Convert unicode into standard form
-> Convert each character of the unicode symbol to its numerical equivalent
-> Mapping Process:
- First check in pattern dictionary to map suffix/prefix
- Check Emoticon Dictionary
- Replace value pair with Key whenever found
- Then check direct dictionary
- Append to .txt file if unicode not found in any dictionary
"""
for unicode_str in ndata:
uni=unicode_str[2:]
if unicode_str not in data:
unicode_str='\U000' + unicode_str[2:]
#converting to standard representation
for c in uppers:
if c in unicode_str:
unicode_str = unicode_str.replace(c, c.lower())
if uni in baseball_dict:
retval+=' *baseball* '
#detecting baseball emoticons and converting to '*baseball*' and similar conversions for other categories of emoticons
data=string.replace(data,unicode_str,' *baseball* ')
if uni in happy_dict:
retval+=' *happy* '
if replace_boo:
data=string.replace(data,unicode_str,' *happy* ')
else:
data=string.replace(data,unicode_str,' ')
elif uni in sad_dict:
retval+=' *sad* '
if replace_boo:
data=string.replace(data,unicode_str,' *sad* ')
else:
data=string.replace(data,unicode_str,' ')
elif uni in sexual_dict:
retval+=' *sexual* '
if replace_boo:
data=string.replace(data,unicode_str,' *sexual* ')
else:
data=string.replace(data,unicode_str,' ')
elif uni in hearts:
retval+=' *hearts* '
if replace_boo:
data=string.replace(data,unicode_str,' *hearts* ')
else:
data=string.replace(data,unicode_str,' ')
elif uni in dictionary:
retval+=dictionary[uni]
data=string.replace(data,unicode_str,dictionary[uni])
elif uni[0:3]=='004' or uni[0:3]=='005':
#replacing unicodes for digits and before that, replacing hexadecimals with their numerical value
last_dig=uni[3:]
if last_dig in hex_dict:
last_dig=int(hex_dict[last_dig])
else:
last_dig=int(last_dig)
second_last_dig= int(uni[2:3])
num= (second_last_dig-4)*16 + last_dig
retval+=chr(64+num)
data=string.replace(data,unicode_str,chr(64+num))
elif uni[0:3]=='006' or uni[0:3]=='007':
last_dig=uni[3:]
if last_dig in hex_dict:
last_dig=int(hex_dict[last_dig])
else:
last_dig=int(last_dig)
second_last_dig= int(uni[2:3])
#parsing letters
num= (second_last_dig-6)*16 + last_dig
retval+=chr(64+num)
data=string.replace(data,unicode_str,chr(64+num))
elif uni[0:3] in pattern_dict:
val = pattern_dict[uni[0:3]]
if len(val)==1:
retval+=val
data=string.replace(data,unicode_str,val)
elif uni[0:3]=='00C':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
#parsing miscelleneous
data=string.replace(data,unicode_str,misc_code)
if last>=0 and last<=5:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last<=11:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
else:
retval+=val[2]
data=string.replace(data,unicode_str,val[2])
elif uni[0:3]=='00D':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
pass
if last>=2 and last<=6:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last>=9 and last<=12:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='00E':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if last>=0 and last<=5:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last<=11:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
else:
retval+=val[2]
data=string.replace(data,unicode_str,val[2])
elif uni[0:3]=='00F':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if last>=2 and last<=6:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last>=9 and last<=12:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='010':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if last>=0 and last<=5:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last>=6 and last<=13:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='011':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if last>=2 and last<=11:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last>=12 and last<=15:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='012':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if last>=0 and last<=3:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last<=7:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
elif last<=15:
retval+=val[2]
data=string.replace(data,unicode_str,val[2])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='014':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if last>=0 and last<=2:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last<=11:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
elif last<=15:
retval+=val[2]
data=string.replace(data,unicode_str,val[2])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='015':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if last>=4 and last<=9:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last>=10 and last<=15:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='016':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if last>=2 and last<=7:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last>=8 and last<=15:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='017':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if last>=0 and last<=3:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last<=5:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
elif last<=8:
retval+=val[2]
data=string.replace(data,unicode_str,val[2])
elif last<=14:
retval+=val[3]
data=string.replace(data,unicode_str,val[3])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='018':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if last>=0 and last<=5:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last<=8:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
elif last<=12:
retval+=val[2]
data=string.replace(data,unicode_str,val[2])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='01E':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if last>=4 and last<=7:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last<=9:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
elif last<=13:
retval+=val[2]
data=string.replace(data,unicode_str,val[2])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='020':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if last>=0 and last<=3:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last<=7:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
elif last<=11:
retval+=val[2]
data=string.replace(data,unicode_str,val[2])
elif last<=15:
retval+=val[3]
data=string.replace(data,unicode_str,val[3])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='021':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if last>=0 and last<=3:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last<=7:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
elif last<=9:
retval+=val[2]
data=string.replace(data,unicode_str,val[2])
elif last<=11:
retval+=val[3]
data=string.replace(data,unicode_str,val[3])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='1E0':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if last>=0 and last<=1:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last<=7:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
elif last<=9:
retval+=val[2]
data=string.replace(data,unicode_str,val[2])
elif last<=15:
retval+=val[3]
data=string.replace(data,unicode_str,val[3])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='1E3':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if last>=0 and last<=5:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last<=13:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='1E4':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
pass
if last>=0 and last<=3:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last<=11:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
elif last<=15:
retval+=val[2]
data=string.replace(data,unicode_str,val[2])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='1E5':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
pass
if last>=0 and last<=3:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last<=7:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
elif last<=15:
retval+=val[2]
data=string.replace(data,unicode_str,val[2])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='1E6':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if last>=0 and last<=9:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last>=10 and last<=15:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='1E7':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if last>=2 and last<=11:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last>=12 and last<=15:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='1E8':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if last>=0 and last<=9:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last>=10 and last<=13:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='1EC':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if last>=0 and last<=7:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last<=11:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
elif last<=15:
retval+=val[2]
data=string.replace(data,unicode_str,val[2])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='216' or uni[0:3]=='217':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if last<=12:
retval+=str(last+1)
data=string.replace(data,unicode_str,str(last+1))
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='246' or uni[0:3]=='247':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if uni[0:3]=='246':
retval+=str(last+1)
data=string.replace(data,unicode_str,str(last+1))
elif last<=3:
retval+=str(last+17)
data=string.replace(data,unicode_str,str(last+17))
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
else:
retval+=misc_code
data = data.replace(unicode_str,misc_code)
if len(retval)==0:
retval="Sorry, no unicode strings were present"
try:
data = data.decode('unicode-escape')
except UnicodeDecodeError:
pass
retval = retval.encode('unicode-escape').decode('unicode-escape')
return (retval, data)
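# Minimal usage sketch (an assumption, not part of the original module). The
# sample below is illustrative: it carries literal escape sequences the way a
# raw tweet/JSON dump would, so the basketball emoji maps to ' *baseball* '
# and \u00E9 (e-acute) maps to 'E'.
if __name__ == '__main__':
    sample = 'great game \\U0001F3C0 tonight, \\u00E9quipe!'
    tokens, cleaned = decode_unicode(sample)
    print tokens
    print cleaned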
| usc-isi-i2/etk | etk/data_extractors/htiExtractors/unicode_decoder.py | Python | mit | 22,630 | 0.068714 |
import json
import time
import settings
from shared import common
from datetime import datetime
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient
import logging
handler = None
pin_door1 = 11 #BCM17
pin_door2 = 12 # BCM18
pin_door1_led = 13 # BCM27
pin_door2_led = 15 # BCM22
def init():
global handler
if settings.is_fake():
from doorpi import gpio_faker
handler = gpio_faker
else:
from doorpi import gpio
handler = gpio
return
# Custom Shadow callback
def customShadowCallback_Update(payload, responseStatus, token):
if responseStatus == "timeout":
print("Update request " + token + " time out!")
if responseStatus == "accepted":
payloadDict = json.loads(payload)
print("~~~~~~~~~~~~~~~~~~~~~~~")
print("Update request with token: " + token + " accepted!")
reported = payloadDict["state"]["reported"]
if "FemaleDoor" in reported:
print("FemaleDoor: " + str(payloadDict["state"]["reported"]["FemaleDoor"]))
if "MaleDoor" in reported:
print("MaleDoor: " + str(payloadDict["state"]["reported"]["MaleDoor"]))
if "connected" in reported:
print("connected: " + str(payloadDict["state"]["reported"]["connected"]))
print("~~~~~~~~~~~~~~~~~~~~~~~\n\n")
if responseStatus == "rejected":
print("Update request " + token + " rejected!")
def handle_command(client, message):
payload = message.payload.decode('utf-8')
print("Command received:")
print(payload)
#cmd = json.loads(payload)
#command = cmd["command"]
#cmd_id = cmd["id"]
#if command == "ping":
# common.send_pong(client, cmd_id, settings.topic_doorpi_event)
def handle_notification(message):
print("Notification received: " + str(message.payload))
def on_message(client, userdata, msg):
if msg.topic == settings.topic_doorpi_command:
handle_command(client, msg)
return
if msg.topic == settings.topic_doorpi_notify:
handle_notification(msg)
return
print("Spam received: " + str(msg.payload))
def send_data(client, door1_closed, door2_closed):
if door1_closed:
door1_message = "closed"
else:
door1_message = "open"
if door2_closed:
door2_message = "closed"
else:
door2_message = "open"
# Prepare our sensor data in JSON format.
payload = json.dumps({
"state": {
"reported": {
"FemaleDoor": door1_message,
"MaleDoor": door2_message
}
}
})
client.shadowUpdate(payload, customShadowCallback_Update, 5)
def new_state(pin, old_state):
new_state_first = handler.get_state(pin)
if new_state_first != old_state:
time.sleep(0.5)
new_state_verify = handler.get_state(pin)
if new_state_verify != old_state:
return True, new_state_verify
else:
return False, old_state
else:
return False, old_state
def set_led_state(door1_state, door2_state):
handler.set_state(pin_door1_led, door1_state)
handler.set_state(pin_door2_led, door2_state)
def start():
shadow, client = common.setup_aws_shadow_client(settings.aws_endpoint,
settings.aws_root_certificate,
settings.aws_private_key,
settings.aws_certificate,
settings.device_name)
JSONPayload = '{"state":{"reported":{"connected":"true"}}}'
client.shadowUpdate(JSONPayload, customShadowCallback_Update, 5)
handler.setup(pin_door1, pin_door2, pin_door1_led, pin_door2_led)
handler.signal_startup(pin_door1_led, pin_door2_led)
# Get initial state
door1 = handler.get_state(pin_door1)
door2 = handler.get_state(pin_door2)
set_led_state(door1, door2)
send_data(client, door1, door2)
time.sleep(2)
states_reported = 1
try:
while True:
door1_changed, door1_state = new_state(pin_door1, door1)
door2_changed, door2_state = new_state(pin_door2, door2)
if door1_changed or door2_changed:
door1 = door1_state
door2 = door2_state
set_led_state(door1, door2)
send_data(client, door1, door2)
states_reported += 1
print('States reported: '+str(states_reported))
time.sleep(0.2)
except KeyboardInterrupt:
JSONPayload = '{"state":{"reported":{"connected":"false"}}}'
client.shadowUpdate(JSONPayload, customShadowCallback_Update, 5)
shadow.disconnect()
handler.cleanup()
print('stopped')
def stop():
return
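# Typical wiring (an assumption based on init()/start() above): a caller is
# expected to select the GPIO handler first, then enter the monitoring loop.
#   from doorpi import door_runner
#   door_runner.init()
#   door_runner.start()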
| royveshovda/pifog | source/piclient/doorpi/door_runner.py | Python | apache-2.0 | 4,850 | 0.003093 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('changeset', '0022_auto_20160222_2358'),
]
operations = [
migrations.AddField(
model_name='userdetail',
name='contributor_uid',
field=models.IntegerField(db_index=True, null=True, blank=True),
),
]
| batpad/osmcha-django | osmchadjango/changeset/migrations/0023_userdetail_contributor_uid.py | Python | gpl-3.0 | 441 | 0 |
from flask import Blueprint, render_template, g, request, jsonify
from time import gmtime, strftime
from flask_login import login_required, current_user
from app.models import Month, DreamDay, Dream
import datetime
main_module = Blueprint('main', __name__, template_folder='templates')
@main_module.route('/old')
@login_required
def index():
current_month = strftime("%B", gmtime())
current_n_month = datetime.datetime.today().month
current_year = datetime.datetime.today().year
exist = False
for m in g.user.months:
if m.n_month == current_n_month and m.year == current_year:
exist = True
break
if not exist:
month = Month(title=current_month, n_month=current_n_month, year=current_year)
month.dreams.append(Dream(title="be better than yesterday"))
month.dreams.append(Dream(title="collect all pokemons"))
month.dreams.append(Dream(title="learn to fly"))
g.user.months.append(month)
g.user.save()
return render_template('index.html', current_n_month=current_n_month)
@main_module.route('/add_half_hour', methods=['POST'])
@login_required
def add_half_hour():
dream_id = request.form['dream_id']
curr_month = g.user.get_current_month()
curr_dream = next(x for x in curr_month.dreams if str(x.id_) == dream_id)
curr_dream.current_time += 1
curr_dream_day = next((x for x in curr_month.dream_days if
x.number == datetime.datetime.today().day and x.dream_id == curr_dream.id_), None)
if curr_dream_day:
curr_dream_day.current_time += 1
else:
dream_day = DreamDay(dream_id=dream_id)
curr_month.dream_days.append(dream_day)
g.user.save()
return jsonify({'id_': dream_id, 'day_number': datetime.datetime.today().day})
@main_module.route('/change_slogan', methods=['POST'])
@login_required
def change_slogan():
new_slogan = request.form['slogan_value']
curr_month = g.user.get_current_month()
if curr_month.slogan != new_slogan:
curr_month.slogan = new_slogan
g.user.save()
return jsonify({'slogan_value': new_slogan})
@main_module.before_request
def before_request():
g.user = current_user
| Kwentar/Dream-Crusher | app/main_views.py | Python | apache-2.0 | 2,225 | 0.001348 |
import abc
from OHA.helpers.converters.BaseConverter import BaseConverter
__author__ = 'indrajit'
__email__ = '[email protected]'
class LengthConverter(BaseConverter):
def __init__(self, _value, _from=None, _to=None):
super(LengthConverter, self).__init__(_value, _from, _to)
def _supported_units(self):
return ['ft', 'in', 'm', 'cm']
@abc.abstractmethod
def _default_from_unit(self):
raise NotImplementedError('method not implemented')
@abc.abstractmethod
def _default_to_unit(self):
raise NotImplementedError('method not implemented')
    def _convert(self):
        if self._from == self._to:
            return self._value
        elif self._to == 'm' and self._from == 'ft':
            return self._value * 0.3048
        elif self._to == 'm' and self._from == 'in':
            return self._value * 0.0254
        elif self._to == 'cm' and self._from == 'ft':
            return self._value * 30.48
        elif self._to == 'cm' and self._from == 'in':
            return self._value * 2.54
        elif self._to == 'ft' and self._from == 'm':
            return self._value * 3.28084
        elif self._to == 'ft' and self._from == 'cm':
            return self._value * 0.0328084
        elif self._to == 'in' and self._from == 'm':
            return self._value * 39.3701
        elif self._to == 'in' and self._from == 'cm':
            return self._value * 0.393701
        else:
            return None
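
# Hypothetical subclass sketch (HeightConverter is an assumed name, not part
# of the package): a concrete converter only has to supply its default units.
class HeightConverter(LengthConverter):
    def _default_from_unit(self):
        return 'cm'

    def _default_to_unit(self):
        return 'in'

# Assuming BaseConverter stores the constructor arguments on _value/_from/_to,
# HeightConverter(180, 'cm', 'in')._convert() yields roughly 70.87 inches.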
| openhealthalgorithms/openhealthalgorithms | OHA/helpers/converters/LengthConverter.py | Python | apache-2.0 | 1,483 | 0 |
#!/usr/bin/python
"""This script run the pathologic """
try:
import optparse, sys, re, csv, traceback
from optparse import OptionGroup
import pickle
import math
from libs.python_modules.taxonomy.LCAComputation import *
import operator
from os import path, _exit, remove, rename
import logging.handlers
from glob import glob
from libs.python_modules.utils.sysutil import pathDelim
from libs.python_modules.utils.metapathways_utils import fprintf, printf, eprintf, exit_process
from libs.python_modules.utils.sysutil import getstatusoutput
from libs.python_modules.utils.pathwaytoolsutils import *
except:
print """ Could not load some user defined module functions"""
print """ Make sure your typed 'source MetaPathwaysrc'"""
print """ """
print traceback.print_exc(10)
sys.exit(3)
PATHDELIM=pathDelim()
def fprintf(file, fmt, *args):
file.write(fmt % args)
def printf(fmt, *args):
sys.stdout.write(fmt % args)
def files_exist( files , errorlogger = None):
status = True
for file in files:
if not path.exists(file):
if errorlogger:
errorlogger.write( 'ERROR\tCould not find ptools input file : ' + file )
status = False
    # True only when every listed file exists
    return status
usage = sys.argv[0] + """ -s sample -p pgdb_dir --ptoolsExec pathwaytools_executable """
parser = None
def createParser():
global parser
epilog = """The flat file extraction script"""
epilog = re.sub(r'\s+', ' ', epilog)
parser = optparse.OptionParser(usage=usage, epilog = epilog)
standard_options_group = OptionGroup(parser, "Standard Ptools group" )
# Input options
standard_options_group.add_option('-s', '--sample', dest='sample_name', default=None,
help='sample name')
standard_options_group.add_option('-p', '--pgdb', dest='pgdbdir', default=None,
help='folder of the PGDB')
standard_options_group.add_option('--ptoolsExec', dest='ptoolsExec', default=None,
help='PathoLogic Executable')
standard_options_group.add_option("-o", "--output-pwy-table", dest="table_out",
help='the output table for the pathways [REQUIRED]')
import os, signal
TIME = 10
def __StopPathwayTools():
processPATT = re.compile(r'pathway-tools-runtime')
for line in os.popen("ps xa"):
fields = line.split()
pid = fields[0]
process = fields[4]
result = processPATT.search(process)
if result :
os.kill(int(pid), signal.SIGHUP)
def StopPathwayTools():
try:
__StopPathwayTools()
time.sleep(TIME)
__StopPathwayTools()
time.sleep(TIME)
if path.exists("/tmp/ptools-socket"):
remove("/tmp/ptools-socket")
except:
pass
def main(argv, errorlogger = None, runcommand = None, runstatslogger = None):
global parser
options, args = parser.parse_args(argv)
# is there a pathwaytools executable installed
if False and not path.exists(options.ptoolsExec):
eprintf("ERROR\tPathwayTools executable %s not found!\n", options.ptoolsExec)
if errorlogger:
errorlogger.printf("ERROR\tPathwayTools executable %s not found!\n", options.ptoolsExec)
exit_process("ERROR\tPathwayTools executable %s not found!\n" %(options.ptoolsExec))
# command to build the ePGDB
command = "%s " %(options.ptoolsExec)
command += " -api"
pythonCyc = startPathwayTools(options.sample_name.lower(), options.ptoolsExec, True)
#resultLines = pythonCyc.getReactionListLines()
resultLines = pythonCyc.getFlatFiles()
StopPathwayTools()
try:
if False:
pythonCyc = startPathwayTools(options.sample_name.lower(), options.ptoolsExec, True)
pythonCyc.setDebug() # disable pathway debug statements
printf("INFO\tExtracting the reaction list from ePGDB " + options.sample_name + "\n")
resultLines = pythonCyc.getReactionListLines()
#pythonCyc.stopPathwayTools()
reaction_list_file = open(options.reactions_list + ".tmp", 'w')
for line in resultLines:
fprintf(reaction_list_file,"%s\n",line.strip())
reaction_list_file.close()
StopPathwayTools()
except:
print traceback.print_exc(10)
eprintf("ERROR\tFailed to run extract pathways for %s : \n" %(options.sample_name))
eprintf("INFO\tKill any other PathwayTools instance running on the machine and try again")
if errorlogger:
errorlogger.write("ERROR\tFailed to run extract pathways for %s : " %(options.sample_name))
errorlogger.write("INFO\tKill any other PathwayTools instance running on the machine and try again\n")
StopPathwayTools()
def startPathwayTools(organism, ptoolsExec, debug):
StopPathwayTools()
pythonCyc = PythonCyc()
pythonCyc.setDebug(debug = debug)
pythonCyc.setOrganism(organism)
pythonCyc.setPToolsExec(ptoolsExec)
pythonCyc.startPathwayTools()
return pythonCyc
def runPathologicCommand(runcommand = None):
if runcommand == None:
return False
result = getstatusoutput(runcommand)
return result[0]
# this is the portion of the code that fixes the name
def split_attributes(str, attributes):
rawattributes = re.split(';', str)
for attribStr in rawattributes:
insert_attribute(attributes, attribStr)
return attributes
def fixLine(line, id):
fields = line.split('\t')
if len(fields)==2:
return fields[0]+'\t' + id
def getID(line):
fields = line.split('\t')
if len(fields)==2:
return fields[1]
def write_new_file(lines, output_file):
print "Fixing file " + output_file
    try:
        outputfile = open(output_file,'w')
    except IOError:
        print "ERROR :Cannot open output file " + output_file
        return
for line in lines:
fprintf(outputfile, "%s\n", line)
outputfile.close()
def cleanup(string):
"""
Cleans up pathway long-names for presentation.
:param string:
:return:
"""
string = re.sub("|", "", string) # vertical bar
string = re.sub("&", "", string) # ampersand
string = re.sub(";", "", string) # semicolon
string = re.sub("<[^<]+?>", '', string) # HTML tags
string = re.sub("\'", "", string) # remove quotes
return string
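# Illustrative behaviour (hypothetical input): with the substitutions above,
# cleanup("<i>no&pe;|</i>'x'") strips the vertical bar, ampersand, semicolon,
# HTML tags and quotes, returning "nopex".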
def get_preferred_taxa_name(taxa_id, megan_map, id_to_name):
"""
Helper function to format NCBI IDs into preferred names. First checks for MEGAN name,
if not found moves to current taxonomy in loaded NCBI taxonomy tree, failing that
gives the taxonomy of 'Unknown', but still provides the id, e.g., 'Unknown (12345)'.
:param taxa_id: numeric taxa id to translate
:param megan_map: preferred megan mapping hash
:param id_to_name: local ncbi tree hash
:return: "perferred name (id)"
"""
taxa_id = str(taxa_id)
if taxa_id in megan_map:
taxa = megan_map[ taxa_id ] + " (" + taxa_id + ")"
elif taxa_id in id_to_name:
taxa = id_to_name[ taxa_id ] + " (" + taxa_id + ")"
else:
taxa = "Unknown" + " (" + taxa_id + ")"
return taxa
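# Illustrative calls (hypothetical inputs):
#   get_preferred_taxa_name(562, {'562': 'E. coli'}, {})  ->  'E. coli (562)'
#   get_preferred_taxa_name(42, {}, {})                   ->  'Unknown (42)'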
def MetaPathways_run_pathologic(argv, extra_command = None, errorlogger = None, runstatslogger =None):
if errorlogger != None:
errorlogger.write("#STEP\tBUILD_PGDB\n")
createParser()
main(argv, errorlogger = errorlogger, runcommand= extra_command, runstatslogger = runstatslogger)
return (0,'')
if __name__ == '__main__':
createParser()
main(sys.argv[1:])
| kishori82/MetaPathways_Python.3.0 | utilities/extract_flat_files.py | Python | mit | 7,627 | 0.023994 |
# Copyright 2021 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Mozaik Mass Mailing Access Rights",
"summary": """
New group: Mass Mailing Manager. Managers can edit
and unlink mass mailings.""",
"version": "14.0.1.0.0",
"license": "AGPL-3",
"author": "ACSONE SA/NV",
"website": "https://github.com/OCA/mozaik",
"depends": [
"mass_mailing",
],
"data": [
"security/groups.xml",
"security/ir.model.access.csv",
"views/mailing_mailing.xml",
"views/mail_template.xml",
],
"demo": [],
}
| mozaik-association/mozaik | mozaik_mass_mailing_access_rights/__manifest__.py | Python | agpl-3.0 | 625 | 0 |
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
from django.conf import settings
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'TouristicEventType.pictogram'
db.add_column('t_b_evenement_touristique_type', 'pictogram',
self.gf('django.db.models.fields.files.FileField')(max_length=512, null=True, db_column='picto'),
keep_default=False)
# Adding field 'TouristicContentType.pictogram'
db.add_column('t_b_contenu_touristique_type', 'pictogram',
self.gf('django.db.models.fields.files.FileField')(max_length=512, null=True, db_column='picto'),
keep_default=False)
def backwards(self, orm):
# Deleting field 'TouristicEventType.pictogram'
db.delete_column('t_b_evenement_touristique_type', 'picto')
# Deleting field 'TouristicContentType.pictogram'
db.delete_column('t_b_contenu_touristique_type', 'picto')
models = {
u'authent.structure': {
'Meta': {'ordering': "['name']", 'object_name': 'Structure'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'common.theme': {
'Meta': {'ordering': "['label']", 'object_name': 'Theme', 'db_table': "'o_b_theme'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'theme'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'"})
},
u'tourism.datasource': {
'Meta': {'ordering': "['title', 'url']", 'object_name': 'DataSource', 'db_table': "'t_t_source_donnees'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'db_column': "'picto'"}),
'targets': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'titre'"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_column': "'type'"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '400', 'db_column': "'url'"})
},
u'tourism.informationdesk': {
'Meta': {'ordering': "['name']", 'object_name': 'InformationDesk', 'db_table': "'t_b_renseignement'"},
'description': ('django.db.models.fields.TextField', [], {'db_column': "'description'", 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '256', 'null': 'True', 'db_column': "'email'", 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': str(settings.SRID), 'null': 'True', 'spatial_index': 'False', 'db_column': "'geom'", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipality': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_column': "'commune'", 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_column': "'nom'"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'telephone'", 'blank': 'True'}),
'photo': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'photo'", 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'db_column': "'code'", 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_column': "'rue'", 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'desks'", 'db_column': "'type'", 'to': u"orm['tourism.InformationDeskType']"}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '256', 'null': 'True', 'db_column': "'website'", 'blank': 'True'})
},
u'tourism.informationdesktype': {
'Meta': {'ordering': "['label']", 'object_name': 'InformationDeskType', 'db_table': "'t_b_type_renseignement'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'label'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'"})
},
u'tourism.touristiccontent': {
'Meta': {'object_name': 'TouristicContent', 'db_table': "'t_t_contenu_touristique'"},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contents'", 'db_column': "'categorie'", 'to': u"orm['tourism.TouristicContentCategory']"}),
'contact': ('django.db.models.fields.TextField', [], {'db_column': "'contact'", 'blank': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_column': "'date_insert'", 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_column': "'date_update'", 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'supprime'"}),
'description': ('django.db.models.fields.TextField', [], {'db_column': "'description'", 'blank': 'True'}),
'description_teaser': ('django.db.models.fields.TextField', [], {'db_column': "'chapeau'", 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '256', 'null': 'True', 'db_column': "'email'", 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'srid': str(settings.SRID)}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'practical_info': ('django.db.models.fields.TextField', [], {'db_column': "'infos_pratiques'", 'blank': 'True'}),
'publication_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'db_column': "'date_publication'", 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'public'"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
'themes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'touristiccontents'", 'to': u"orm['common.Theme']", 'db_table': "'t_r_contenu_touristique_theme'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'type1': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'contents1'", 'blank': 'True', 'db_table': "'t_r_contenu_touristique_type1'", 'to': u"orm['tourism.TouristicContentType']"}),
'type2': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'contents2'", 'blank': 'True', 'db_table': "'t_r_contenu_touristique_type2'", 'to': u"orm['tourism.TouristicContentType']"}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '256', 'null': 'True', 'db_column': "'website'", 'blank': 'True'})
},
u'tourism.touristiccontentcategory': {
'Meta': {'ordering': "['label']", 'object_name': 'TouristicContentCategory', 'db_table': "'t_b_contenu_touristique_categorie'"},
'geometry_type': ('django.db.models.fields.CharField', [], {'default': "'point'", 'max_length': '16', 'db_column': "'type_geometrie'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'"}),
'type1_label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'label_type1'", 'blank': 'True'}),
'type2_label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'label_type2'", 'blank': 'True'})
},
u'tourism.touristiccontenttype': {
'Meta': {'ordering': "['label']", 'object_name': 'TouristicContentType', 'db_table': "'t_b_contenu_touristique_type'"},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'types'", 'db_column': "'categorie'", 'to': u"orm['tourism.TouristicContentCategory']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_list': ('django.db.models.fields.IntegerField', [], {'db_column': "'liste_choix'"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'"})
},
u'tourism.touristicevent': {
'Meta': {'ordering': "['-begin_date']", 'object_name': 'TouristicEvent', 'db_table': "'t_t_evenement_touristique'"},
'accessibility': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_column': "'accessibilite'", 'blank': 'True'}),
'begin_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'db_column': "'date_debut'", 'blank': 'True'}),
'booking': ('django.db.models.fields.TextField', [], {'db_column': "'reservation'", 'blank': 'True'}),
'contact': ('django.db.models.fields.TextField', [], {'db_column': "'contact'", 'blank': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_column': "'date_insert'", 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_column': "'date_update'", 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'supprime'"}),
'description': ('django.db.models.fields.TextField', [], {'db_column': "'description'", 'blank': 'True'}),
'description_teaser': ('django.db.models.fields.TextField', [], {'db_column': "'chapeau'", 'blank': 'True'}),
'duration': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_column': "'duree'", 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '256', 'null': 'True', 'db_column': "'email'", 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'db_column': "'date_fin'", 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': str(settings.SRID)}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meeting_point': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_column': "'point_rdv'", 'blank': 'True'}),
'meeting_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'db_column': "'heure_rdv'", 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'organizer': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_column': "'organisateur'", 'blank': 'True'}),
'participant_number': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_column': "'nb_places'", 'blank': 'True'}),
'practical_info': ('django.db.models.fields.TextField', [], {'db_column': "'infos_pratiques'", 'blank': 'True'}),
'publication_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'db_column': "'date_publication'", 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'public'"}),
'speaker': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_column': "'intervenant'", 'blank': 'True'}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
'target_audience': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_column': "'public_vise'", 'blank': 'True'}),
'themes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'touristic_events'", 'to': u"orm['common.Theme']", 'db_table': "'t_r_evenement_touristique_theme'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tourism.TouristicEventType']", 'null': 'True', 'db_column': "'type'", 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '256', 'null': 'True', 'db_column': "'website'", 'blank': 'True'})
},
u'tourism.touristiceventtype': {
'Meta': {'ordering': "['type']", 'object_name': 'TouristicEventType', 'db_table': "'t_b_evenement_touristique_type'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'type'"})
}
}
complete_apps = ['tourism']
| johan--/Geotrek | geotrek/tourism/migrations/0022_auto__add_field_touristiceventtype_pictogram__add_field_touristicconte.py | Python | bsd-2-clause | 14,288 | 0.006859 |
# Copyright (c) 2016 Czech National Corpus
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import smtplib
from email.mime.text import MIMEText
import logging
import settings
def smtp_factory():
"""
    Create a new SMTP connection configured from the 'mailing' settings
    section (server, port, optional TLS and authentication)
    :return: a connected smtplib.SMTP instance
"""
username = settings.get('mailing', 'auth_username')
password = settings.get('mailing', 'auth_password')
port = settings.get_int('mailing', 'smtp_port', 25)
use_tls = settings.get_bool('mailing', 'use_tls', False)
server = smtplib.SMTP(settings.get('mailing', 'smtp_server'), port=port)
if use_tls:
server.starttls()
if username and password:
server.login(username, password)
return server
def message_factory(recipients, subject, text, reply_to=None):
"""
Create message instance with some predefined properties
"""
msg = MIMEText(text, 'plain', 'utf-8')
msg['Subject'] = subject
msg['From'] = settings.get('mailing', 'sender')
msg['To'] = recipients[0]
if reply_to:
msg.add_header('Reply-To', reply_to)
return msg
def send_mail(server, msg, recipients):
sender = settings.get('mailing', 'sender')
try:
server.sendmail(sender, recipients, msg.as_string())
ans = True
except Exception as ex:
        logging.getLogger(__name__).warning(
'There were errors sending e-mail: %s' % (ex,))
ans = False
finally:
server.quit()
return ans
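# Minimal usage sketch (an assumption -- the addresses are illustrative and a
# configured 'mailing' settings section must already be loaded):
if __name__ == '__main__':
    server = smtp_factory()
    msg = message_factory(['[email protected]'], 'KonText test', 'Hello from KonText')
    send_mail(server, msg, ['[email protected]'])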
| tomachalek/kontext | lib/mailing.py | Python | gpl-2.0 | 2,086 | 0.000479 |
request = {
"method": "GET",
"uri": uri("/silly"),
"version": (1, 1),
"headers": [
("AAAAAAAAAAAAA", "++++++++++")
],
"body": b""
}
| DataDog/gunicorn | tests/requests/valid/004.py | Python | mit | 164 | 0 |
from fastpm.state import StateVector, Matter, Baryon, CDM, NCDM
from runtests.mpi import MPITest
from nbodykit.cosmology import Planck15 as cosmo
import numpy
BoxSize = 100.
Q = numpy.zeros((100, 3))
@MPITest([1, 4])
def test_create(comm):
matter = Matter(cosmo, BoxSize, Q, comm)
cdm = CDM(cosmo, BoxSize, Q, comm)
cdm.a['S'] = 1.0
cdm.a['P'] = 1.0
baryon = Baryon(cosmo, BoxSize, Q, comm)
baryon.a['S'] = 1.0
baryon.a['P'] = 1.0
state = StateVector(cosmo, {'0': baryon, '1' : cdm}, comm)
state.a['S'] = 1.0
state.a['P'] = 1.0
state.save("state")
| rainwoodman/fastpm-python | fastpm/tests/test_state.py | Python | gpl-3.0 | 600 | 0.005 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_propeller_demo.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| tfroehlich82/django-propeller | manage.py | Python | mit | 819 | 0.001221 |
def extractLimostnWordpressCom(item):
'''
Parser for 'limostn.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
if "The Outcast" in item['tags']:
return buildReleaseMessageWithType(item, "The Outcast", vol, chp, frag=frag, postfix=postfix)
	return False
| fake-name/ReadableWebProxy WebMirror/management/rss_parser_funcs/feed_parse_extractLimostnWordpressCom.py | Python | bsd-3-clause | 380 | 0.028947 |
from grafo import Digraph
from CaminoMinimo import CaminoMinimo
import heapq
class Dijkstra(CaminoMinimo):
def __init__(self, grafo, origen, destino):
CaminoMinimo.__init__(self, grafo, origen, destino)
        for i in xrange(self.grafo.V()):
            self.distancias.append(self.INFINITE)
self.distancia_actual = 0
self._camino_minimo()
def _camino_minimo(self):
vertice = self.origen
self.distancias[vertice] = self.distancia_actual
self.padre[vertice] = None
while (len(self.visitados) < self.grafo.V()):
for vecino in self.grafo.adj(vertice):
nueva_dist = self.distancia_actual + self.grafo.obtener_arista(vertice, vecino).weight()
                # relax only if this path improves the tentative distance
                if nueva_dist < self.distancias[vecino]:
self.distancias[vecino] = nueva_dist
self.padre[vecino] = vertice
self.visitados.add(vertice)
self.distancia_actual, vertice = self._obtener_siguiente()
def _obtener_siguiente(self):
heap = []
heapq.heapify(heap)
for i in xrange(self.grafo.V()):
if (i not in self.visitados):
heapq.heappush(heap, [self.distancia(i), i])
if len(heap) == 0:
return self.distancia_actual, None
return heapq.heappop(heap)
graph = Digraph(8)
graph.add_edge(0, 1, 1) # 1 --
graph.add_edge(0, 2, 3) # 3
graph.add_edge(2, 3, 1) # 1
graph.add_edge(3, 4, 1) # 1
graph.add_edge(1, 4, 1) # 4
graph.add_edge(1, 2, 1) # 1
graph.add_edge(4, 5, 5) # 2
graph.add_edge(5, 6, 1) # 1
graph.add_edge(5, 7, 4) # 4
graph.add_edge(6, 7, 1) # 1
search = Dijkstra(graph, 0, 7)
print search.camino(7)
print search.distancia(7)
| GFibrizo/TPS_7529 TDA Grafo/Dijkstra.py | Python | apache-2.0 | 1960 | 0.003061 |
versions = {
'7.1.1': ['2022-03-04', {
'fix': {
'1649': 'Minor label error in model image',
'1650': 'Obsolete database version in install SQL'}}],
'7.1.0': ['2022-02-15', {
'feature': {
'1506': 'Update CIDOC CRM to 7.1.1',
'1285': 'Improved value types display',
'1593': 'Adding multiple aliases at once instead one at a time',
'1629': 'Improved mail function',
'1624': 'Minor improvements and refactor',
'1599': 'API: Search parameter include all subtypes',
'1600': 'API: Search for values in value types',
'1623': 'API: Show types for view name'},
'fix': {
'1626':
"API: typed_entities doesn't show types in geojson format"}}],
'7.0.4': ['2022-02-10', {
'fix': {
'1616': 'Error at inserting an administrative unit'}}],
'7.0.3': ['2022-02-02', {
'fix': {
'1634': 'Value type with subtype error'}}],
'7.0.2': ['2022-01-20', {
'fix': {
'1632': 'Multiple flag gets lost when updating a hierarchy'}}],
'7.0.1': ['2022-01-05', {
'fix': {
'1627': 'Error when creating a source from file view'}}],
'7.0.0': ['2022-01-01', {
'feature': {
'1566': 'Update OpenAtlas software to Debian/bullseye',
'1297': 'Connecting events sequentially',
'1577': 'Show actors at places from events',
'1615': 'Additional step by step examples in manual',
'1549': 'API: deprecation of node and subunit functions',
'1579': 'API: Include Swagger documentation',
'1598': 'API: Offset Pagination',
'1603': 'API: Specialized GeoJSON format for subunits',
'1622': 'API: search with dates',
'1605': 'Refactor'},
'fix': {
'1614': 'Custom folders in uploads causing errors'}}],
'6.6.4': ['2021-12-23', {
'fix': {'1621': 'Error at CSV export'}}],
'6.6.3': ['2021-12-08', {
'fix': {'1616': 'Error at inserting an administrative unit'}}],
'6.6.2': ['2021-11-23', {
'fix': {'1609': 'Problem with types'}}],
'6.6.1': ['2021-11-20', {
'fix': {'1607': 'Error at profile for readonly users'}}],
'6.6.0': ['2021-11-18', {
'feature': {
'1500': 'Production of artifacts',
'1563': 'OpenAtlas model to database',
'1597': 'Join artifacts and finds',
'1584': 'Track needed and actual database version',
'1589': 'Additional and improved system warnings',
'1546': 'API: New search parameter',
'1583': 'Refactor'}}],
'6.5.2': ['2021-11-07', {
        'fix': {'1596': 'Sometimes unavailable add custom type button'}}],
'6.5.1': ['2021-10-28', {
        'fix': {'1592': 'Error at import when using type ids'}}],
'6.5.0': ['2021-09-19', {
'feature': {
'1462': 'Current owner of artifacts',
'1562': 'Update manual overlay',
'1184': 'API: add additional output format RDFS',
'1551': 'API: Relation type adaptions, adding relationDescription',
'1561': 'Refactor'},
'fix': {
'1557': 'Save buttons blocked by map',
'1580': 'Hidden error messages for reference systems',
'1570': 'API: Wrong type signature in OpenApi'}}],
'6.4.1': ['2021-08-11', {
'fix': {
'1559': 'Installation problem because of missing upload folder'}}],
'6.4.0': ['2021-08-10', {
'feature': {
'1280': 'Picture Preview',
'1492': 'Image Processing',
'1552': 'External reference systems for sources',
'1538': 'Focus on table filter at overview pages',
'1531': 'Map overlay improved',
'1523': 'Performance issues while linking pictures',
'1558': 'Manual entry profession',
'1536': 'Refactor',
'1426': 'API: Display image smaller size',
'1495': 'API: Additional Geojson output for QGIS imports',
'1529': 'API: Increase request performance',
'1530': 'API: Geometries endpoint for frontend map',
'1535': 'API: Get all entities linked to an entity',
'1537': 'API: Type entities for actor types',
'1545': 'API: Filter entities by types'},
'fix': {
'1414': 'Enlarged Description Field Covers up Entry Buttons',
'1539': 'Pagination not shown for tables sometimes',
'1554': 'Error at value type view'}}],
'6.3.0': ['2021-06-13', {
'feature': {
'1513': 'Add reference page for multiple files',
'1520': 'Better value type display',
'1527': 'Improved tab system',
'1502': 'Show count of finds when entering additional',
'1509': 'Manual - examples for use cases',
'1512': 'Refactor',
'1478': 'API: latest with pagination',
'1516': 'API: implement Google JSON style',
'1526': 'API: Refactor'},
'fix': {
'1515': 'API: Paging count faulty'}}],
'6.2.1': ['2021-05-12', {
'fix': {
'1514': 'End dates of entities are not displayed correctly'}}],
'6.2.0': ['2021-05-08', {
'feature': {
'940': 'Multiple file upload',
'1284': 'Show image when editing a place or artifact',
'1428': 'Configuration of frontend site name',
'1476': 'Show/hide button for multiple reference systems',
'1494': 'Refactor',
'1496': 'API: Endpoints for entities of type',
'1490': 'API: Refactor'}}],
'6.1.0': ['2021-04-05', {
'feature': {
'1215': 'Time spans for types',
'1443': 'List view for untyped entities',
'1457': 'Public notes',
'963': 'API: Add type of places to export',
'1402': 'API: CSV export in API',
'1487': 'API: Endpoint for type hierarchies',
'1489': 'API: Geometry for artifacts'}}],
'6.0.1': ['2021-03-15', {
'fix': {
'1485': 'Cannot choose multiple for custom type'}}],
'6.0.0': ['2021-03-13', {
'feature': {
'1091': 'Reference systems for types',
'1109': 'Sustainable web map services',
'1456': 'Artifacts',
'1187': 'Add files for artifacts',
'1465':
'Merge legal body to group, information carrier to artifact',
'1461': 'Also search in date comments',
'1398': 'Compress SQL export files',
'1274': 'API: Automatic documentation for code',
'1390': 'API: Swagger file in OpenAtlas repository',
'1479': 'API: get by view name and system class',
'1484': 'API: Add new functions',
'1447': 'Refactor'},
'fix': {
'1477': 'Unable to select an entity with single quote in name',
'1452': 'API: "type" is empty if more entities are requested',
'1471': 'API: Url to linked places deprecated'}}],
'5.7.2': ['2021-01-27', {
'fix': {
'1455': 'Network graphic error'}}],
'5.7.1': ['2021-01-26', {
'fix': {
'1454': 'Error in install instructions'}}],
'5.7.0': ['2021-01-16', {
'feature': {
'1292': 'External reference systems',
'1440': 'Search with unaccented characters',
'1386': 'API: Flask restful framework'},
'fix': {
'1434': 'Errors with types if named like standard types',
'1427': 'API: Paging is broken'}}],
'5.6.0': ['2020-11-30', {
'feature': {
'930': 'Wikidata API',
'1409': 'Redesign forms',
'1393': 'Split profile display options',
'1395': 'Content for frontends',
'1347': 'All icons to Font Awesome icons',
'1379': 'Feature votes',
'1407': 'Extend session availability (prevent CSRF token timeout)',
'1412': 'API: Include Wikidata',
'1350': 'API: Pagination in an extra array',
'1375': 'API: Download result of request path',
'1401': 'API: file access',
'1377': 'API: IP restrictions',
'1348': 'Refactor'},
'fix': {
'1383': 'Map in tab views too big',
'1408': 'Link checker broken'}}],
'5.5.1': ['2020-10-09', {
'fix': {'1380': "Empty date comment is saved as 'None'"}}],
'5.5.0': ['2020-09-26', {
'feature': {
'929': 'Module options',
'999': 'Navigation for archeological subunits',
'1189': 'User interface improvements',
'1222': 'Usability improvement at select tables',
'1289': 'Citation example for edition and bibliography',
'1206': 'API: Show entities of subtypes',
'1331': 'API: Front end queries'}}],
'5.4.0': ['2020-09-09', {
'feature': {
'1317': 'Additional manual content',
'1321': 'Import with types',
'1255': 'API: Content Negotiation',
'1275': 'API: parameters',
'1299': 'API: Setting for CORS allowance in UI',
'1318': 'API: Selective requests'},
'fix': {
'1306':
'Search results: small table, missing mouse over description',
'1308': 'Missing connection for actors created from place',
'1319': 'Data table error in view',
'1326': 'User table in admin is not responsive',
'1328': 'Table layout error at check link duplicates function'}}],
'5.3.0': ['2020-07-15', {
'feature': {
'1272': 'Tabs redesign',
'1279': 'Change "Add" button label to "Link"',
'1229': 'Show descriptions in all tables',
'1282': 'Additional submit buttons for sub units',
'1283': 'More detailed type display',
'1286': 'Notifications for date field',
'1287': 'Map tiles max zoom',
'1276': 'Show child nodes in tree search',
'1258': 'Manual Text for Subunits',
'1211': 'API: CORS handler',
'1232': 'API: Error/Exception Handling'},
'fix': {
'547': 'Prevent double submit',
'1235': 'Layout issues with forms on smaller screens',
'1278': 'Broken table list views on smaller screens',
'1267': 'Model link checker bug',
'1288': 'Forward upon failed form validation',
'1291': "Data tables mouse over doesn't work if filtered",
'1234': "API: Relation inverse doesn't work proper"}}],
'5.2.0': ['2020-05-11', {
'feature': {
'1065': 'User manual in application',
'1167': 'Settings and profile'},
'fix': {
'1208': 'CSV export error with BC dates',
'1218': 'Resizeable form elements vanish below map',
'1223': 'Visibility of Full Entries'}}],
'5.1.1': ['2020-04-12', {
'fix': {
'1190': 'Tabs not shown in file view',
'1191': 'Show/Hide date is switched'}}],
'5.1.0': ['2020-04-11', {
'feature': {
'991': 'Images/Files for Types',
'1183': 'Mouse over effect for table rows'},
'fix': {
'1182': 'Type view: links and redirect',
'1188': 'Missing map in add feature view'}}],
'5.0.0': ['2020-03-24', {
'feature': {
'1048': 'Bootstrap layout',
'1050': 'API',
'1089': 'Human Remains',
'1136': 'Map enhancements',
'1138': 'Display usage of CIDOC CRM classes and properties',
'1175': 'Additional date checks',
'1066': 'Package Manager for JavaScript Libraries'},
'fix': {
'1134': 'Overlay maps: not enabled if Geonames disabled',
'1139': 'Breadcrumbs show place twice',
'1140': 'HTML Code is showing in description text Actions',
'1152': "Menu item isn't marked as active in entity view"}}],
'4.1.0': ['2020-01-30', {
'feature': {
'1070': 'Enhance network visualization',
'952': 'Show subunits on map'}}],
'4.0.0': ['2020-01-01', {
'feature': {
'905': 'Upgrade CIDOC CRM to 6.2.1.',
'1049': 'Upgrade Python to 3.7',
'1003': 'Import with dates',
'1068': 'Place import with point coordinates',
'1072': 'Show places of movement at the person view',
'1079': 'Static type checking with Mypy',
'1101': 'Disable showing default images for reference'},
'fix': {
'1069': 'Overlay maps: interchanged easting and northing',
'1071': "Filter function in jsTree doesn't clear correctly",
'1100': 'Save geometry not working'}}],
'3.20.1': ['2019-10-13', {
'fix': {
'1069': 'Overlay maps: interchanged easting and northing'}}],
'3.20.0': ['2019-10-06', {
'feature': {
'978': 'Image overlays for maps',
'1038': 'Move events',
'1060': 'New menu item "Object"',
'1061': 'More options to link entries',
'1058': 'SQL interface',
'1043': 'DataTables',
'1056': 'Additional codes for GeoNames search'}}],
'3.19.0': ['2019-08-26', {
'feature': {
'928': 'GeoNames links for places',
'1042': 'Personal notes for entities',
'1040': 'New user group: Contributor',
'1055': 'Add finds to overview count',
'1044': 'Hide date form fields only if they are empty',
'1041': 'Remove color themes',
'1054': 'Remove Production and Destruction'}}],
'3.18.1': ['2019-08-20', {
'fix': {
'1053': 'Bug at file upload with special characters'}}],
'3.18.0': ['2019-07-07', {
'feature': {
'1036': 'Search for similar names',
'1034': 'Advanced data integrity check functions',
'1025': 'New OpenAtlas project site'}}],
'3.17.1': ['2019-05-21', {
'fix': {
'1033': 'Map editing breaks upon save w/o edit'}}],
'3.17.0': ['2019-05-13', {
'feature': {
'597': 'Option to display aliases in tables',
'1026': 'Check function for duplicate links'},
'fix': {
'1015': "Multiple Place Add in Entity doesn't work correct",
'1016': 'Lines cannot be deleted or edited',
'1017': 'Lines, Areas and Shapes get sometimes deleted'}}],
'3.16.0': ['2019-04-19', {
'feature': {
'994': 'Line drawing in map',
'1011': 'Additional security features',
'1012': 'Update GeoNames search for map'}}],
'3.15.0': ['2019-04-04', {
'feature': {
'983': 'External References'},
'fix': {
'1010': 'Missing or misspelled map feature descriptions'}}],
'3.14.1': ['2019-03-14', {
'feature': {
'997': 'Advanced date completion'},
'fix': {
'1000': 'Bookmarks not working',
'987': 'Open Type in new Tab or Window'}}],
'3.14.0': ['2019-03-12', {
'feature': {
'988': 'Search with date filter',
'996': 'Better structured info tabs',
'981': 'Documentation for dates in model'},
'fix': {
'995': 'Double Search Results'}}],
'3.13.0': ['2019-02-28', {
'feature': {
'590': 'Search - advanced features',
'975': 'Unit labels for value types',
'985': 'Check for invalid dates at actor/event participation',
'936': 'Refactor dates',
'993': 'Refactor links'}}],
'3.12.0': ['2018-12-31', {
'feature': {
'652': 'Maps update and rewrite',
'891': 'Profile images',
'959': 'Performance',
'961': 'Check function for dates and circular dependencies',
'962': 'Configurable character limit for live searches'},
'fix': {
'970': "Insert and Continue doesn't work with place in Chrome"}}],
'3.11.1': ['2018-11-30', {
'fix': {
'964':
'Forms: tables with pager ignoring selection after '
'changing page'}}],
'3.11.0': ['2018-11-24', {
'feature': {
'956': 'Clustering for maps',
'949': 'Performance',
'935': 'Remove forms from types'}}],
'3.10.0': ['2018-11-09', {
'feature': {
'934': 'Import',
'951': 'Export: additional options',
'538': 'Move/delete types for multiple entities',
'954': 'Documentation and links in application'},
'fix': {
'942':
'Server 500 Error after wrong date input at Actor/Person'}}],
'3.9.0': ['2018-09-28', {
'feature': {
'867': 'Network visualization advanced'},
'fix': {
'939': 'Translation',
'941': 'Text in brackets of subtypes disappears when edited'}}],
'3.8.0': ['2018-08-26', {
'feature': {
'419': 'Export SQL',
'915': 'Export CSV'},
'fix': {
'926': 'Language display bug'}}],
'3.7.0': ['2018-07-22', {
'feature': {
'878': 'Enhanced admin functionality',
'913': 'Change logo function'},
'fix': {
'914': 'Network view in Development Version'}}],
'3.6.0': ['2018-06-09', {
'feature': {
'710': 'Value Types',
'902': 'Insert and continue for types',
'903': 'Check existing links function'}}],
'3.5.0': ['2018-05-12', {
'feature': {
'896': 'Legal notice text option',
'898': 'Option to send mail without login credentials',
'901': 'Display available disk space'}}],
'3.4.0': ['2018-04-17', {
'feature': {
'431': 'Sub-Units for Sites'}}],
'3.3.0': ['2018-03-20', {
'feature': {
'422': 'File upload'}}],
'3.2.0': ['2018-02-10', {
'feature': {
'732': 'Documentation and manual',
'888': 'User and activity view',
'879': 'Demo with MEDCON data'}}],
'3.1.0': ['2018-01-18', {
'feature': {
'863': 'BC dates and date validation',
'780': 'Color Schemes'}}],
'3.0.0': ['2017-12-31', {
'feature': {
'877': """Python/Flask Port
<p>
This version includes many new features and changes.
Some are listed below.
</p>
<p style="font-weight:bold">User Interface and Usability</p>
• Performance improvements<br>
• More possibilities to create and link entries in one go
<br>
• Merge of front and backend and cleaner user interface<br>
• Advanced display and editing of source translations<br>
• Acquisition: multiple recipients, donors and places<br>
• Advanced type index view<br>
• Restructured admin area<br>
• Admin functions to check and deal with orphaned data<br>
• Show more/less option for long descriptions
<p style="font-weight:bold">Software and Code</p>
• Switched main programming language to Python 3<br>
• Switched web application framework to Flask<br>
• Switched template engine to Jinja2<br>
• Update of third party JavaScript libraries
<p style="font-weight:bold">Security</p>
• bcrypt hash algorithm for sensitive data<br>
• CSRF protection for forms<br>
• Adaptions for using HTTPS<br>
• A show password option at forms"""}}],
'2.4.0': ['2017-03-27', {
'feature': {
'539': 'Network graph visualisation',
'819': 'Enable linebreaks for descriptions'}}],
'2.3.0': ['2016-12-17', {
'feature': {
'764': 'Improved hierarchy overview',
'702': 'Improved settings',
'707': 'Improved performance',
'494': 'Newsletter and registration mails',
'319': 'SMTP settings in admin interface',
'693': 'Event overview: show event dates if available',
'569': 'Previous and next buttons',
'768': 'Show info column in main lists'}}],
'2.2.0': ['2016-09-11', {
'feature': {
'547': 'Map: multiple points and areas for Places',
'719': 'Favicon support for current systems'}}],
'2.1.0': ['2016-08-18', {
'feature': {
'643': 'Performance improvements',
'700': 'Improved frontend',
'703': 'Add multiple actors, events and places to source'},
'fix': {
'696': 'Add texts to source'}}],
'2.0.0': ['2016-06-19', {
'feature': {
'428': 'Dynamic Types',
'687': 'Remove required restriction from type fields'},
'fix': {
'688': 'Translation errors',
'689': 'Show/Hide buttons'}}],
'1.6.0': ['2016-05-04', {
'feature': {
'340': 'Multi user capability',
'672': 'Add multiple sources for event/actor/place',
'679': 'Improved credits',
'683': 'Tooltips for form fields and profile'},
'fix': {
'674': 'Place error occurs during saving'}}],
'1.5.0': ['2016-04-01', {
'feature': {
'564': 'Add multiple entries for relation, member and involvement',
'586': 'Bookmarks',
'671': 'Redmine links for changelog'}}],
'1.4.0': ['2016-03-26', {
'feature': {
'528': 'jsTree views for hierarchies',
'566': 'Enhanced profile and site settings',
'580': 'Change menu and structure for easier access to types',
'654': 'New map layers',
'666': 'Added Credits'}}],
'1.3.0': ['2016-02-28', {
'feature': {
'462': 'Actor - add member to group from actor view',
'563': 'Actor - insert and continue for relations and members',
'600': 'Update of JavaScript libraries (for jsTree)',
'621': 'Revised and more compact schema'},
'fix': {
'601': 'Application is very slow on some views',
'638': 'Names with an apostrophe in a table not selectable'}}],
'1.2.0': ['2015-12-13', {
'feature': {
'499': 'Actor - place tab with map',
'511': 'Search - advanced features'},
'fix': {
'547': 'Prevent double submit',
'587': 'Support multiple email recipients'}}],
'1.1.0': ['2015-11-09', {
'feature': {
'529': 'Reference - Information Carrier',
'527': 'New user groups - manager and readonly',
'540': 'Help and FAQ site',
'555': 'Source - additional text type "Source Transliteration"',
'556': 'Actor - function for member not required anymore',
'560': 'Newsletter and show email option in profile'},
'fix': {
'551': 'Model - Updating type bug',
'544': 'Map - Form data lost after map search',
'533': 'Map - Markers outside of Map'}}],
'1.0.0': ['2015-10-11', {
'feature': {
'487': 'Event - Acquisition of places',
'508': 'Reference - Editions',
'504': 'Option to add comments for entities in texts tab',
'495': 'Layout - new OpenAtlas logo',
'412': 'Layout - improved layout and color scheme',
'525': 'Layout - overlays for tables in forms'}}],
'0.11.0': ['2015-09-11', {
'feature': {
'433': 'Search functions'}}],
'0.10.0': ['2015-09-01', {
'feature': {
'429': 'Actor - member of',
'472': 'Actor - activity for event relations',
'483': 'Actor - update for relations and involvements',
'486': 'Place - administrative units and historical places',
'389': 'Reference - bibliography',
'489':
'Layout - overwork, multiple possibilities to create and '
'link entities',
'421': 'CRM link checker'}}],
'0.9.0': ['2015-07-12', {
'feature': {
'392': 'Event',
'423': 'Map - advanced features',
'474': 'Layout - new tab based layout for views'}}],
'0.8.0': ['2015-06-18', {
'feature': {
'326': 'Map - search, show existing places',
'425': 'More types'}}],
'0.7.0': ['2015-05-29', {
'feature': {
'403': 'Physical things - places',
'353': 'Maps - with Leaflet (GIS)',
'420': 'First and last dates in list views'}}],
'0.6.0': ['2015-05-13', {
'feature': {
            '402': 'Begin, end, birth and death (precise and fuzzy)',
'346': 'CRM - link checker',
'412': 'Layout - New color scheme'},
'fix': {
'410': 'wrong domains for links'}}],
'0.5.0': ['2015-04-08', {
'feature': {
'377': 'Hierarchical Data',
'391': 'Time periods',
'365': 'Actor to actor relations',
'386': 'Filter for tables'}}],
'0.4.0': ['2015-02-23', {
'feature': {
'362':
'Document - Insert, update, delete, add texts, link with '
'actors',
'354': 'Alternative names',
'360': 'New color theme'},
'fix': {
'357': 'wrong range link in list view properties'}}],
'0.3.0': ['2015-02-10', {
'feature': {
'348': 'Actor - begin and end dates',
'307': 'Log user CRM data manipulation'},
'fix': {
'344': 'tablesorter IE 11 bug'}}],
'0.2.0': ['2015-01-18', {
'feature': {
'310': 'Actor - insert, update, delete, list, view',
'337': 'CRM - new OpenAtlas shortcuts'}}],
'0.1.0': ['2014-12-30', {
'feature': {
'318': 'Import definitions from CIDOC rdfs'}}],
'0.0.1': ['2014-11-05', {
'feature': {
'':
'Initial version based on the "Zend Base" project from '
'<a target="_blank" href="https://craws.net">craws.net</a>'}}]}
| craws/OpenAtlas | openatlas/util/changelog.py | Python | gpl-2.0 | 26,790 | 0.000037 |
import datetime
import os
from django.conf import settings
from olympia.amo.tests import TestCase
from olympia.addons.models import Addon
from olympia.devhub.cron import update_blog_posts
from olympia.devhub.tasks import convert_purified
from olympia.devhub.models import BlogPost
class TestRSS(TestCase):
def test_rss_cron(self):
url = os.path.join(
settings.ROOT, 'src', 'olympia', 'devhub', 'tests',
'rss_feeds', 'blog.xml')
settings.DEVELOPER_BLOG_URL = url
update_blog_posts()
assert BlogPost.objects.count() == 5
bp = BlogPost.objects.all()[0]
url = ("http://blog.mozilla.com/addons/2011/06/10/"
"update-in-time-for-thunderbird-5/")
assert bp.title == 'Test!'
assert bp.date_posted == datetime.date(2011, 6, 10)
assert bp.permalink == url
class TestPurify(TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestPurify, self).setUp()
self.addon = Addon.objects.get(pk=3615)
def test_no_html(self):
self.addon.the_reason = 'foo'
self.addon.save()
last = Addon.objects.get(pk=3615).modified
convert_purified([self.addon.pk])
addon = Addon.objects.get(pk=3615)
assert addon.modified == last
def test_has_html(self):
self.addon.the_reason = 'foo <script>foo</script>'
self.addon.save()
convert_purified([self.addon.pk])
addon = Addon.objects.get(pk=3615)
assert addon.the_reason.localized_string_clean
| harikishen/addons-server | src/olympia/devhub/tests/test_cron.py | Python | bsd-3-clause | 1,566 | 0 |
# Copyright (C) 2011, 2012 by Matteo Franchin
#
# This file is part of Pyrtist.
#
# Pyrtist is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# Pyrtist is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrtist. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import fnmatch
from logger import log_msg, set_log_context
from tree import DoxType, DoxProc, DoxInstance, DoxTree
from context import Context
import builder
class Dox(object):
def __init__(self):
self.file = None
self.tree = DoxTree()
self.context = Context()
def log(self, msg, show_hdr=False):
hdr = ""
        if show_hdr and self.file is not None:
            hdr = self.file.filename
            if self.file.nr_line is not None:
hdr += "(%d)" % (self.file.nr_line)
hdr += ": "
log_msg(hdr + msg)
def read_recursively(self, rootpath,
extensions = ["*.dox", "*.bxh", "*.box"]):
for dirpath, dirnames, filenames in os.walk(rootpath):
doxfiles = []
for extension in extensions:
doxfiles.extend(fnmatch.filter(filenames, extension))
if doxfiles:
doxfiles = list(set(doxfiles)) # Remove duplicates
for filename in doxfiles:
self.read_file(os.path.join(dirpath, filename))
def read_file(self, filename):
"""Read documentation content from the given file."""
set_log_context("File '%s'" % filename)
with open(filename, "r") as f:
text = f.read()
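        # Pipeline: raw text -> classified slices -> documentation blocks;
        # blocks are then tied to their targets and contexts and finally
        # merged into the global documentation tree.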
slices = builder.create_classified_slices_from_text(text)
blocks = builder.create_blocks_from_classified_slices(slices)
builder.associate_targets_to_blocks(slices)
context = self.context.create_context(sourcefile=filename,
section=None)
self.context = builder.associate_contexts_to_blocks(blocks, context)
builder.add_blocks_to_tree(self.tree, blocks)
if __name__ == "__main__":
import sys
dox = Dox()
dox.read_recursively(sys.argv[1])
tree = dox.tree
tree.process()
from rst import RSTWriter
docinfo = \
{"title": "Box Reference Manual",
"has_index": True,
"index_title": "Index of available object types",
"out_file": "out"}
writer = RSTWriter(dox.tree, docinfo=docinfo)
writer.save()
| mfnch/pyrtist | pyrtist/gui/dox/dox.py | Python | lgpl-2.1 | 2,729 | 0.009894 |
"""
.. function:: timeslidingwindow(timewindow, timecolumn) -> query results
Returns the query input results annotated with the window id as an extra column.
The following arguments can be passed as parameters:
    timewindow: A numeric value that specifies the time length of the
    window (in seconds).
    timecolumn: The index of the temporal column (starting from 0). For the
    moment, the input rows are assumed to be ordered by that column in
    ascending order.
Examples::
>>> table1('''
... "12.05.2010 00:00:00"
... "12.05.2010 00:01:00"
... "12.05.2010 00:02:00"
... "12.05.2010 00:03:00"
... "12.05.2010 00:04:00"
... ''')
>>> sql("timeslidingwindow timewindow:180 timecolumn:0 select * from table1")
wid | a
-------------------------
0 | 12.05.2010 00:00:00
0 | 12.05.2010 00:01:00
0 | 12.05.2010 00:02:00
0 | 12.05.2010 00:03:00
1 | 12.05.2010 00:01:00
1 | 12.05.2010 00:02:00
1 | 12.05.2010 00:03:00
1 | 12.05.2010 00:04:00
>>> table1('''
... "12.05.2010 00:00:00"
... "12.05.2010 00:01:00"
... "12.05.2010 00:01:00"
... "12.05.2010 00:02:00"
... "12.05.2010 00:03:00"
... "12.05.2010 00:04:00"
... "12.05.2010 00:05:00"
    ... ''')
>>> sql("timeslidingwindow timewindow:120 timecolumn:0 select * from table1")
wid | a
-------------------------
0 | 12.05.2010 00:00:00
0 | 12.05.2010 00:01:00
0 | 12.05.2010 00:01:00
0 | 12.05.2010 00:02:00
1 | 12.05.2010 00:01:00
1 | 12.05.2010 00:01:00
1 | 12.05.2010 00:02:00
1 | 12.05.2010 00:03:00
2 | 12.05.2010 00:02:00
2 | 12.05.2010 00:03:00
2 | 12.05.2010 00:04:00
3 | 12.05.2010 00:03:00
3 | 12.05.2010 00:04:00
3 | 12.05.2010 00:05:00
>>> table2('''
... "12/05/2010 00:00:00"
... "12/05/2010 00:01:00"
... "12/05/2010 00:02:00"
    ... ''')
>>> sql("timeslidingwindow timewindow:180 timecolumn:0 select * from table2")
wid | a
-------------------------
0 | 12/05/2010 00:00:00
0 | 12/05/2010 00:01:00
0 | 12/05/2010 00:02:00
"""
import setpath
import vtbase
import functions
from collections import deque
import time
from lib.dateutil import parser
### Classic stream iterator
registered = True
class TimeSlidingWindow(vtbase.VT):
def VTiter(self, *parsedArgs, **envars):
largs, dictargs = self.full_parse(parsedArgs)
if 'query' not in dictargs:
raise functions.OperatorError(__name__.rsplit('.')[-1], "No query argument ")
query = dictargs['query']
if 'timewindow' not in dictargs:
raise functions.OperatorError(__name__.rsplit('.')[-1], "No TimeWindow argument ")
else:
winlen = int(dictargs['timewindow'])
if 'timecolumn' not in dictargs:
raise functions.OperatorError(__name__.rsplit('.')[-1], "No timecolumn argument ")
else:
timecolumn = int(dictargs['timecolumn'])
cur = envars['db'].cursor()
c = cur.execute(query, parse=False)
try:
yield [('wid', 'integer')] + list(cur.getdescriptionsafe())
except StopIteration:
try:
raise
finally:
try:
c.close()
except:
pass
wid = 0
secs = 0
row = c.next()
firstTime = int(time.mktime(parser.parse(row[timecolumn], fuzzy=True).timetuple()))
head = {firstTime: [row]}
window = deque([])
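        # NOTE: `head` maps the current window's first timestamp to its
        # rows; `window` queues one {timestamp: rows} dict per later
        # timestamp group. When an incoming row falls outside
        # firstTime + winlen, the window is emitted and `head` slides
        # forward.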
while row:
prev = row
try:
row = c.next()
except StopIteration:
if wid == 0:
for k in head.keys():
for t in head[k]:
yield (wid,) + t
for rl in window:
for k in rl.keys():
for t in rl[k]:
yield (wid,) + t
break
secs = int(time.mktime(parser.parse(row[timecolumn], fuzzy=True).timetuple()))
if secs <= firstTime + winlen:
                # Rows sharing the previous row's timestamp join its heap
                # entry; compare the temporal column, not column 0.
                if prev[timecolumn] == row[timecolumn] and window:
old = window.pop()[secs]
old.append(row)
rowlist = {secs: old}
else:
rowlist = {secs: [row]}
window.append(rowlist)
else:
if wid == 0:
for k in head.keys():
for t in head[k]:
yield (wid,) + t
for rl in window:
for k in rl.keys():
for t in rl[k]:
yield (wid,) + t
while secs > firstTime + winlen and window:
try:
head = window.popleft()
firstTime = head.keys()[0]
except IndexError:
break
rowlist = {secs: [row]}
window.append(rowlist)
wid += 1
for k in head.keys():
for t in head[k]:
yield (wid,) + t
for rl in window:
for k in rl.keys():
for t in rl[k]:
yield (wid,) + t
def Source():
return vtbase.VTGenerator(TimeSlidingWindow)
if not ('.' in __name__):
"""
This is needed to be able to test the function, put it at the end of every
new function you create
"""
import sys
import setpath
from functions import *
testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
doctest.testmod()
| sosguns2002/interactive-mining | interactive-mining-3rdparty-madis/madis/src/functions/vtable/timeslidingwindow.py | Python | gpl-3.0 | 6,001 | 0.0025 |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsVectorFileWriter.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from builtins import next
from builtins import str
__author__ = 'Tim Sutton'
__date__ = '20/08/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsVectorLayer,
QgsFeature,
QgsField,
QgsGeometry,
QgsPointXY,
QgsCoordinateReferenceSystem,
QgsVectorFileWriter,
QgsFeatureRequest,
QgsProject,
QgsWkbTypes,
QgsRectangle,
QgsCoordinateTransform
)
from qgis.PyQt.QtCore import QDate, QTime, QDateTime, QVariant, QDir, QByteArray
import os
import tempfile
import osgeo.gdal # NOQA
from osgeo import gdal, ogr
from qgis.testing import start_app, unittest
from utilities import writeShape, compareWkt, unitTestDataPath
TEST_DATA_DIR = unitTestDataPath()
start_app()
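# NOTE: mirrors GDAL's GDAL_COMPUTE_VERSION C macro
# (major * 1000000 + minor * 10000 + rev * 100), comparable with the
# integer version number reported by gdal.VersionInfo('VERSION_NUM').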
def GDAL_COMPUTE_VERSION(maj, min, rev):
return ((maj) * 1000000 + (min) * 10000 + (rev) * 100)
class TestFieldValueConverter(QgsVectorFileWriter.FieldValueConverter):
def __init__(self, layer):
QgsVectorFileWriter.FieldValueConverter.__init__(self)
self.layer = layer
def fieldDefinition(self, field):
idx = self.layer.fields().indexFromName(field.name())
if idx == 0:
return self.layer.fields()[idx]
elif idx == 2:
return QgsField('conv_attr', QVariant.String)
return QgsField('unexpected_idx')
def convert(self, idx, value):
if idx == 0:
return value
elif idx == 2:
if value == 3:
return 'converted_val'
else:
return 'unexpected_val!'
return 'unexpected_idx'
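# NOTE: QgsVectorFileWriter drives a FieldValueConverter in two steps:
# fieldDefinition() is called per exported source field to build the
# destination schema, and convert(idx, value) per attribute value, idx
# being the field's index in the source layer (exercised by
# testValueConverter below with attributes=[0, 2]).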
class TestQgsVectorFileWriter(unittest.TestCase):
mMemoryLayer = None
def testWrite(self):
"""Check we can write a vector file."""
self.mMemoryLayer = QgsVectorLayer(
('Point?crs=epsg:4326&field=name:string(20)&'
'field=age:integer&field=size:double&index=yes'),
'test',
'memory')
        self.assertIsNotNone(self.mMemoryLayer, 'Layer not initialized')
myProvider = self.mMemoryLayer.dataProvider()
self.assertIsNotNone(myProvider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
ft.setAttributes(['Johny', 20, 0.3])
myResult, myFeatures = myProvider.addFeatures([ft])
self.assertTrue(myResult)
self.assertTrue(myFeatures)
writeShape(self.mMemoryLayer, 'writetest.shp')
def testWriteWithLongLongField(self):
ml = QgsVectorLayer('NoGeometry?crs=epsg:4326&field=fldlonglong:long',
'test2', 'memory')
provider = ml.dataProvider()
feat = QgsFeature()
feat.setAttributes([2262000000])
provider.addFeatures([feat])
filename = os.path.join(str(QDir.tempPath()), 'with_longlong_field')
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
rc, errmsg = QgsVectorFileWriter.writeAsVectorFormat(ml, filename, 'utf-8', crs, 'GPKG')
# open the resulting geopackage
vl = QgsVectorLayer(filename + '.gpkg', '', 'ogr')
self.assertTrue(vl.isValid())
# test values
idx = vl.fields().indexFromName('fldlonglong')
self.assertEqual(vl.getFeature(1).attributes()[idx], 2262000000)
del vl
os.unlink(filename + '.gpkg')
def testWriteWithBoolField(self):
# init connection string
dbconn = 'service=qgis_test'
if 'QGIS_PGTEST_DB' in os.environ:
dbconn = os.environ['QGIS_PGTEST_DB']
# create a vector layer
vl = QgsVectorLayer('{} table="qgis_test"."boolean_table" sql='.format(dbconn), "testbool", "postgres")
self.assertTrue(vl.isValid())
# check that 1 of its fields is a bool
fields = vl.fields()
self.assertEqual(fields.at(fields.indexFromName('fld1')).type(), QVariant.Bool)
# write a gpkg package with a bool field
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
filename = os.path.join(str(QDir.tempPath()), 'with_bool_field')
rc, errmsg = QgsVectorFileWriter.writeAsVectorFormat(vl,
filename,
'utf-8',
crs,
'GPKG')
self.assertEqual(rc, QgsVectorFileWriter.NoError)
# open the resulting geopackage
vl = QgsVectorLayer(filename + '.gpkg', '', 'ogr')
self.assertTrue(vl.isValid())
fields = vl.fields()
# test type of converted field
idx = fields.indexFromName('fld1')
self.assertEqual(fields.at(idx).type(), QVariant.Bool)
# test values
self.assertEqual(vl.getFeature(1).attributes()[idx], 1)
self.assertEqual(vl.getFeature(2).attributes()[idx], 0)
del vl
os.unlink(filename + '.gpkg')
def testDateTimeWriteShapefile(self):
"""Check writing date and time fields to an ESRI shapefile."""
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=id:int&'
'field=date_f:date&field=time_f:time&field=dt_f:datetime'),
'test',
'memory')
self.assertTrue(ml.isValid())
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
ft.setAttributes([1, QDate(2014, 3, 5), QTime(13, 45, 22), QDateTime(QDate(2014, 3, 5), QTime(13, 45, 22))])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
dest_file_name = os.path.join(str(QDir.tempPath()), 'datetime.shp')
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile')
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
fields = created_layer.dataProvider().fields()
self.assertEqual(fields.at(fields.indexFromName('date_f')).type(), QVariant.Date)
# shapefiles do not support time types, result should be string
self.assertEqual(fields.at(fields.indexFromName('time_f')).type(), QVariant.String)
# shapefiles do not support datetime types, result should be string
self.assertEqual(fields.at(fields.indexFromName('dt_f')).type(), QVariant.String)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
date_idx = created_layer.fields().lookupField('date_f')
self.assertIsInstance(f.attributes()[date_idx], QDate)
self.assertEqual(f.attributes()[date_idx], QDate(2014, 3, 5))
time_idx = created_layer.fields().lookupField('time_f')
# shapefiles do not support time types
self.assertIsInstance(f.attributes()[time_idx], str)
self.assertEqual(f.attributes()[time_idx], '13:45:22')
# shapefiles do not support datetime types
datetime_idx = created_layer.fields().lookupField('dt_f')
self.assertIsInstance(f.attributes()[datetime_idx], str)
self.assertEqual(f.attributes()[datetime_idx],
QDateTime(QDate(2014, 3, 5), QTime(13, 45, 22)).toString("yyyy/MM/dd hh:mm:ss.zzz"))
def testWriterWithExtent(self):
"""Check writing using extent filter."""
source_file = os.path.join(TEST_DATA_DIR, 'points.shp')
source_layer = QgsVectorLayer(source_file, 'Points', 'ogr')
self.assertTrue(source_layer.isValid())
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'ESRI Shapefile'
options.filterExtent = QgsRectangle(-111, 26, -96, 38)
dest_file_name = os.path.join(str(QDir.tempPath()), 'extent_no_transform.shp')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
source_layer,
dest_file_name,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
features = [f for f in created_layer.getFeatures()]
self.assertEqual(len(features), 5)
for f in features:
self.assertTrue(f.geometry().intersects(options.filterExtent))
def testWriterWithExtentAndReprojection(self):
"""Check writing using extent filter with reprojection."""
source_file = os.path.join(TEST_DATA_DIR, 'points.shp')
source_layer = QgsVectorLayer(source_file, 'Points', 'ogr')
self.assertTrue(source_layer.isValid())
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'ESRI Shapefile'
options.filterExtent = QgsRectangle(-12511460, 3045157, -10646621, 4683497)
options.ct = QgsCoordinateTransform(source_layer.crs(), QgsCoordinateReferenceSystem.fromEpsgId(3785), QgsProject.instance())
dest_file_name = os.path.join(str(QDir.tempPath()), 'extent_transform.shp')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
source_layer,
dest_file_name,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
features = [f for f in created_layer.getFeatures()]
self.assertEqual(len(features), 5)
for f in features:
self.assertTrue(f.geometry().intersects(options.filterExtent))
def testDateTimeWriteTabfile(self):
"""Check writing date and time fields to an MapInfo tabfile."""
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=id:int&'
'field=date_f:date&field=time_f:time&field=dt_f:datetime'),
'test',
'memory')
        self.assertIsNotNone(ml, 'Layer not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
ft.setAttributes([1, QDate(2014, 3, 5), QTime(13, 45, 22), QDateTime(QDate(2014, 3, 5), QTime(13, 45, 22))])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
dest_file_name = os.path.join(str(QDir.tempPath()), 'datetime.tab')
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'MapInfo File')
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
fields = created_layer.dataProvider().fields()
self.assertEqual(fields.at(fields.indexFromName('date_f')).type(), QVariant.Date)
self.assertEqual(fields.at(fields.indexFromName('time_f')).type(), QVariant.Time)
self.assertEqual(fields.at(fields.indexFromName('dt_f')).type(), QVariant.DateTime)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
date_idx = created_layer.fields().lookupField('date_f')
self.assertIsInstance(f.attributes()[date_idx], QDate)
self.assertEqual(f.attributes()[date_idx], QDate(2014, 3, 5))
time_idx = created_layer.fields().lookupField('time_f')
self.assertIsInstance(f.attributes()[time_idx], QTime)
self.assertEqual(f.attributes()[time_idx], QTime(13, 45, 22))
datetime_idx = created_layer.fields().lookupField('dt_f')
self.assertIsInstance(f.attributes()[datetime_idx], QDateTime)
self.assertEqual(f.attributes()[datetime_idx], QDateTime(QDate(2014, 3, 5), QTime(13, 45, 22)))
def testWriteShapefileWithZ(self):
"""Check writing geometries with Z dimension to an ESRI shapefile."""
# start by saving a memory layer and forcing z
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=id:int'),
'test',
'memory')
        self.assertIsNotNone(ml, 'Layer not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromWkt('PointZ (1 2 3)'))
ft.setAttributes([1])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
# check with both a standard PointZ and 25d style Point25D type
for t in [QgsWkbTypes.PointZ, QgsWkbTypes.Point25D]:
dest_file_name = os.path.join(str(QDir.tempPath()), 'point_{}.shp'.format(QgsWkbTypes.displayString(t)))
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile',
overrideGeometryType=t)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
f = next(created_layer.getFeatures(QgsFeatureRequest()))
g = f.geometry()
wkt = g.asWkt()
expWkt = 'PointZ (1 2 3)'
self.assertTrue(compareWkt(expWkt, wkt),
"saving geometry with Z failed: mismatch Expected:\n%s\nGot:\n%s\n" % (expWkt, wkt))
# also try saving out the shapefile version again, as an extra test
            # this tests that saving a layer with z WITHOUT explicitly telling the writer to keep z values
            # will still retain the z values
dest_file_name = os.path.join(str(QDir.tempPath()),
'point_{}_copy.shp'.format(QgsWkbTypes.displayString(t)))
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
created_layer,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile')
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer_from_shp = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
f = next(created_layer_from_shp.getFeatures(QgsFeatureRequest()))
g = f.geometry()
wkt = g.asWkt()
self.assertTrue(compareWkt(expWkt, wkt),
"saving geometry with Z failed: mismatch Expected:\n%s\nGot:\n%s\n" % (expWkt, wkt))
def testWriteShapefileWithMultiConversion(self):
"""Check writing geometries to an ESRI shapefile with conversion to multi."""
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=id:int'),
'test',
'memory')
        self.assertIsNotNone(ml, 'Layer not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromWkt('Point (1 2)'))
ft.setAttributes([1])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
dest_file_name = os.path.join(str(QDir.tempPath()), 'to_multi.shp')
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile',
forceMulti=True)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
f = next(created_layer.getFeatures(QgsFeatureRequest()))
g = f.geometry()
wkt = g.asWkt()
expWkt = 'MultiPoint ((1 2))'
self.assertTrue(compareWkt(expWkt, wkt),
"saving geometry with multi conversion failed: mismatch Expected:\n%s\nGot:\n%s\n" % (
expWkt, wkt))
def testWriteShapefileWithAttributeSubsets(self):
"""Tests writing subsets of attributes to files."""
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=id:int&field=field1:int&field=field2:int&field=field3:int'),
'test',
'memory')
        self.assertIsNotNone(ml, 'Layer not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromWkt('Point (1 2)'))
ft.setAttributes([1, 11, 12, 13])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
# first write out with all attributes
dest_file_name = os.path.join(str(QDir.tempPath()), 'all_attributes.shp')
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile',
attributes=[])
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
self.assertEqual(created_layer.fields().count(), 4)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
self.assertEqual(f['id'], 1)
self.assertEqual(f['field1'], 11)
self.assertEqual(f['field2'], 12)
self.assertEqual(f['field3'], 13)
# now test writing out only a subset of attributes
dest_file_name = os.path.join(str(QDir.tempPath()), 'subset_attributes.shp')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile',
attributes=[1, 3])
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
self.assertEqual(created_layer.fields().count(), 2)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
self.assertEqual(f['field1'], 11)
self.assertEqual(f['field3'], 13)
# finally test writing no attributes
dest_file_name = os.path.join(str(QDir.tempPath()), 'no_attributes.shp')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile',
skipAttributeCreation=True)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
# expect only a default 'FID' field for shapefiles
self.assertEqual(created_layer.fields().count(), 1)
self.assertEqual(created_layer.fields()[0].name(), 'FID')
# in this case we also check that the geometry exists, to make sure feature has been correctly written
# even without attributes
f = next(created_layer.getFeatures(QgsFeatureRequest()))
g = f.geometry()
wkt = g.asWkt()
expWkt = 'Point (1 2)'
self.assertTrue(compareWkt(expWkt, wkt),
"geometry not saved correctly when saving without attributes : mismatch Expected:\n%s\nGot:\n%s\n" % (
expWkt, wkt))
self.assertEqual(f['FID'], 0)
def testValueConverter(self):
"""Tests writing a layer with a field value converter."""
ml = QgsVectorLayer(
('Point?field=nonconv:int&field=ignored:string&field=converted:int'),
'test',
'memory')
        self.assertIsNotNone(ml, 'Layer not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
self.assertEqual(ml.fields().count(), 3)
ft = QgsFeature()
ft.setAttributes([1, 'ignored', 3])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
dest_file_name = os.path.join(str(QDir.tempPath()), 'value_converter.shp')
converter = TestFieldValueConverter(ml)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
QgsCoordinateReferenceSystem(),
'ESRI Shapefile',
attributes=[0, 2],
fieldValueConverter=converter)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
self.assertEqual(created_layer.fields().count(), 2)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
self.assertEqual(f['nonconv'], 1)
self.assertEqual(f['conv_attr'], 'converted_val')
def testInteger64WriteTabfile(self):
"""Check writing Integer64 fields to an MapInfo tabfile (which does not support that type)."""
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=int8:int8'),
'test',
'memory')
        self.assertIsNotNone(ml, 'Layer not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setAttributes([2123456789])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
dest_file_name = os.path.join(str(QDir.tempPath()), 'integer64.tab')
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'MapInfo File')
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
fields = created_layer.dataProvider().fields()
self.assertEqual(fields.at(fields.indexFromName('int8')).type(), QVariant.Double)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
int8_idx = created_layer.fields().lookupField('int8')
self.assertEqual(f.attributes()[int8_idx], 2123456789)
def testDefaultDatasetOptions(self):
""" Test retrieving default dataset options for a format """
# NOTE - feel free to adapt these if the defaults change!
options = QgsVectorFileWriter.defaultDatasetOptions('not a format')
self.assertEqual(options, [])
options = QgsVectorFileWriter.defaultDatasetOptions('ESRI Shapefile')
self.assertEqual(options, [])
options = QgsVectorFileWriter.defaultDatasetOptions('GML')
# just test a few
self.assertTrue('GML3_LONGSRS=YES' in options)
self.assertTrue('STRIP_PREFIX=NO' in options)
def testDefaultLayerOptions(self):
""" Test retrieving default layer options for a format """
# NOTE - feel free to adapt these if the defaults change!
options = QgsVectorFileWriter.defaultLayerOptions('not a format')
self.assertEqual(options, [])
options = QgsVectorFileWriter.defaultLayerOptions('ESRI Shapefile')
self.assertEqual(options, ['RESIZE=NO'])
options = QgsVectorFileWriter.defaultLayerOptions('GML')
self.assertEqual(options, [])
def testOverwriteLayer(self):
"""Tests writing a layer with a field value converter."""
ml = QgsVectorLayer('Point?field=firstfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([1])
provider.addFeatures([ft])
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
filename = '/vsimem/out.gpkg'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
ds = ogr.Open(filename, update=1)
lyr = ds.GetLayerByName('test')
self.assertIsNotNone(lyr)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 1)
ds.CreateLayer('another_layer')
del f
del lyr
del ds
caps = QgsVectorFileWriter.editionCapabilities(filename)
self.assertTrue((caps & QgsVectorFileWriter.CanAddNewLayer))
self.assertTrue((caps & QgsVectorFileWriter.CanAppendToExistingLayer))
self.assertTrue((caps & QgsVectorFileWriter.CanAddNewFieldsToExistingLayer))
self.assertTrue((caps & QgsVectorFileWriter.CanDeleteLayer))
self.assertTrue(QgsVectorFileWriter.targetLayerExists(filename, 'test'))
self.assertFalse(QgsVectorFileWriter.areThereNewFieldsToCreate(filename, 'test', ml, [0]))
# Test CreateOrOverwriteLayer
ml = QgsVectorLayer('Point?field=firstfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([2])
provider.addFeatures([ft])
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
options.actionOnExistingFile = QgsVectorFileWriter.CreateOrOverwriteLayer
filename = '/vsimem/out.gpkg'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
ds = ogr.Open(filename)
lyr = ds.GetLayerByName('test')
self.assertIsNotNone(lyr)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 2)
# another_layer should still exist
self.assertIsNotNone(ds.GetLayerByName('another_layer'))
del f
del lyr
del ds
# Test CreateOrOverwriteFile
ml = QgsVectorLayer('Point?field=firstfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([3])
provider.addFeatures([ft])
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
filename = '/vsimem/out.gpkg'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
ds = ogr.Open(filename)
lyr = ds.GetLayerByName('test')
self.assertIsNotNone(lyr)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 3)
# another_layer should no longer exist
self.assertIsNone(ds.GetLayerByName('another_layer'))
del f
del lyr
del ds
# Test AppendToLayerNoNewFields
ml = QgsVectorLayer('Point?field=firstfield:int&field=secondfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([4, -10])
provider.addFeatures([ft])
self.assertTrue(QgsVectorFileWriter.areThereNewFieldsToCreate(filename, 'test', ml, [0, 1]))
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
options.actionOnExistingFile = QgsVectorFileWriter.AppendToLayerNoNewFields
filename = '/vsimem/out.gpkg'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
ds = ogr.Open(filename)
lyr = ds.GetLayerByName('test')
self.assertEqual(lyr.GetLayerDefn().GetFieldCount(), 1)
self.assertIsNotNone(lyr)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 3)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 4)
del f
del lyr
del ds
# Test AppendToLayerAddFields
ml = QgsVectorLayer('Point?field=firstfield:int&field=secondfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([5, -1])
provider.addFeatures([ft])
self.assertTrue(QgsVectorFileWriter.areThereNewFieldsToCreate(filename, 'test', ml, [0, 1]))
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
options.actionOnExistingFile = QgsVectorFileWriter.AppendToLayerAddFields
filename = '/vsimem/out.gpkg'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
ds = ogr.Open(filename)
lyr = ds.GetLayerByName('test')
self.assertEqual(lyr.GetLayerDefn().GetFieldCount(), 2)
self.assertIsNotNone(lyr)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 3)
if hasattr(f, "IsFieldSetAndNotNull"):
# GDAL >= 2.2
self.assertFalse(f.IsFieldSetAndNotNull('secondfield'))
else:
self.assertFalse(f.IsFieldSet('secondfield'))
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 4)
if hasattr(f, "IsFieldSetAndNotNull"):
self.assertFalse(f.IsFieldSetAndNotNull('secondfield'))
else:
self.assertFalse(f.IsFieldSet('secondfield'))
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 5)
self.assertEqual(f['secondfield'], -1)
del f
del lyr
del ds
gdal.Unlink(filename)
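        # NOTE: the asserts above exercise all four actionOnExistingFile
        # modes: the default CreateOrOverwriteFile, CreateOrOverwriteLayer,
        # AppendToLayerNoNewFields and AppendToLayerAddFields.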
def testSupportedFiltersAndFormat(self):
# test with formats in recommended order
formats = QgsVectorFileWriter.supportedFiltersAndFormats(QgsVectorFileWriter.SortRecommended)
self.assertEqual(formats[0].filterString, 'GeoPackage (*.gpkg *.GPKG)')
self.assertEqual(formats[0].driverName, 'GPKG')
self.assertEqual(formats[0].globs, ['*.gpkg'])
self.assertEqual(formats[1].filterString, 'ESRI Shapefile (*.shp *.SHP)')
self.assertEqual(formats[1].driverName, 'ESRI Shapefile')
self.assertEqual(formats[1].globs, ['*.shp'])
self.assertTrue('ODS' in [f.driverName for f in formats])
self.assertTrue('PGDUMP' in [f.driverName for f in formats])
interlis_format = [f for f in formats if f.driverName == 'Interlis 2'][0]
self.assertEqual(interlis_format.globs, ['*.xtf', '*.xml', '*.ili'])
# alphabetical sorting
formats2 = QgsVectorFileWriter.supportedFiltersAndFormats(QgsVectorFileWriter.VectorFormatOptions())
self.assertTrue(formats2[0].driverName < formats2[1].driverName)
self.assertCountEqual([f.driverName for f in formats], [f.driverName for f in formats2])
self.assertNotEqual(formats2[0].driverName, 'GeoPackage')
# skip non-spatial
formats = QgsVectorFileWriter.supportedFiltersAndFormats(QgsVectorFileWriter.SkipNonSpatialFormats)
self.assertFalse('ODS' in [f.driverName for f in formats])
def testOgrDriverList(self):
# test with drivers in recommended order
drivers = QgsVectorFileWriter.ogrDriverList(QgsVectorFileWriter.SortRecommended)
self.assertEqual(drivers[0].longName, 'GeoPackage')
self.assertEqual(drivers[0].driverName, 'GPKG')
self.assertEqual(drivers[1].longName, 'ESRI Shapefile')
self.assertEqual(drivers[1].driverName, 'ESRI Shapefile')
self.assertTrue('ODS' in [f.driverName for f in drivers])
# ensure that XLSX comes before SQLite, because we should sort on longName, not driverName!
ms_xlsx_index = next(i for i, v in enumerate(drivers) if v.driverName == 'XLSX')
sqlite_index = next(i for i, v in enumerate(drivers) if v.driverName == 'SQLite')
self.assertLess(ms_xlsx_index, sqlite_index)
self.assertIn('[XLSX]', drivers[ms_xlsx_index].longName)
# alphabetical sorting
drivers2 = QgsVectorFileWriter.ogrDriverList(QgsVectorFileWriter.VectorFormatOptions())
self.assertTrue(drivers2[0].longName < drivers2[1].longName)
self.assertCountEqual([d.driverName for d in drivers], [d.driverName for d in drivers2])
self.assertNotEqual(drivers2[0].driverName, 'GPKG')
# skip non-spatial
formats = QgsVectorFileWriter.ogrDriverList(QgsVectorFileWriter.SkipNonSpatialFormats)
self.assertFalse('ODS' in [f.driverName for f in formats])
def testSupportedFormatExtensions(self):
formats = QgsVectorFileWriter.supportedFormatExtensions()
self.assertTrue('gpkg' in formats)
self.assertFalse('exe' in formats)
self.assertEqual(formats[0], 'gpkg')
self.assertEqual(formats[1], 'shp')
self.assertTrue('ods' in formats)
self.assertTrue('xtf' in formats)
self.assertTrue('ili' in formats)
for i in range(2, len(formats) - 1):
self.assertLess(formats[i].lower(), formats[i + 1].lower())
# alphabetical sorting
formats2 = QgsVectorFileWriter.supportedFormatExtensions(QgsVectorFileWriter.VectorFormatOptions())
self.assertTrue(formats2[0] < formats2[1])
self.assertCountEqual(formats, formats2)
self.assertNotEqual(formats2[0], 'gpkg')
for i in range(0, len(formats2) - 1):
self.assertLess(formats2[i].lower(), formats2[i + 1].lower())
formats = QgsVectorFileWriter.supportedFormatExtensions(QgsVectorFileWriter.SkipNonSpatialFormats)
self.assertFalse('ods' in formats)
def testFileFilterString(self):
formats = QgsVectorFileWriter.fileFilterString()
self.assertTrue('gpkg' in formats)
self.assertTrue('shp' in formats)
self.assertLess(formats.index('gpkg'), formats.index('shp'))
self.assertTrue('ods' in formats)
parts = formats.split(';;')
for i in range(2, len(parts) - 1):
self.assertLess(parts[i].lower(), parts[i + 1].lower())
# alphabetical sorting
formats2 = QgsVectorFileWriter.fileFilterString(QgsVectorFileWriter.VectorFormatOptions())
self.assertNotEqual(formats.index('gpkg'), formats2.index('gpkg'))
parts = formats2.split(';;')
for i in range(len(parts) - 1):
self.assertLess(parts[i].lower(), parts[i + 1].lower())
# hide non spatial
formats = QgsVectorFileWriter.fileFilterString(QgsVectorFileWriter.SkipNonSpatialFormats)
self.assertFalse('ods' in formats)
def testDriverForExtension(self):
self.assertEqual(QgsVectorFileWriter.driverForExtension('shp'), 'ESRI Shapefile')
self.assertEqual(QgsVectorFileWriter.driverForExtension('SHP'), 'ESRI Shapefile')
self.assertEqual(QgsVectorFileWriter.driverForExtension('sHp'), 'ESRI Shapefile')
self.assertEqual(QgsVectorFileWriter.driverForExtension('.shp'), 'ESRI Shapefile')
self.assertEqual(QgsVectorFileWriter.driverForExtension('tab'), 'MapInfo File')
self.assertEqual(QgsVectorFileWriter.driverForExtension('.GML'), 'GML')
self.assertEqual(QgsVectorFileWriter.driverForExtension('not a format'), '')
self.assertEqual(QgsVectorFileWriter.driverForExtension(''), '')
def testSupportsFeatureStyles(self):
self.assertFalse(QgsVectorFileWriter.supportsFeatureStyles('ESRI Shapefile'))
self.assertFalse(QgsVectorFileWriter.supportsFeatureStyles('not a driver'))
self.assertTrue(QgsVectorFileWriter.supportsFeatureStyles('DXF'))
self.assertTrue(QgsVectorFileWriter.supportsFeatureStyles('KML'))
self.assertTrue(QgsVectorFileWriter.supportsFeatureStyles('MapInfo File'))
self.assertTrue(QgsVectorFileWriter.supportsFeatureStyles('MapInfo MIF'))
def testOverwriteGPKG(self):
"""Test that overwriting the same origin GPKG file works only if the layername is different"""
# Prepare test data
ml = QgsVectorLayer('Point?field=firstfield:int&field=secondfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([4, -10])
provider.addFeatures([ft])
filehandle, filename = tempfile.mkstemp('.gpkg')
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Real test
vl = QgsVectorLayer("%s|layername=test" % filename, 'src_test', 'ogr')
self.assertTrue(vl.isValid())
self.assertEqual(vl.featureCount(), 1)
# This must fail
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
vl,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.ErrCreateDataSource)
self.assertEqual(error_message, 'Cannot overwrite a OGR layer in place')
options.layerName = 'test2'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
vl,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
def testCreateDGN(self):
ml = QgsVectorLayer('Point?crs=epsg:4326', 'test', 'memory')
provider = ml.dataProvider()
feat = QgsFeature()
feat.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
provider.addFeatures([feat])
filename = os.path.join(str(QDir.tempPath()), 'testCreateDGN.dgn')
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
rc, errmsg = QgsVectorFileWriter.writeAsVectorFormat(ml, filename, 'utf-8', crs, 'DGN')
# open the resulting file
vl = QgsVectorLayer(filename, '', 'ogr')
self.assertTrue(vl.isValid())
self.assertEqual(vl.featureCount(), 1)
del vl
# append
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'DGN'
options.layerName = 'test'
options.actionOnExistingFile = QgsVectorFileWriter.AppendToLayerNoNewFields
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# open the resulting file
vl = QgsVectorLayer(filename, '', 'ogr')
self.assertTrue(vl.isValid())
self.assertEqual(vl.featureCount(), 2)
del vl
os.unlink(filename)
def testAddZ(self):
"""Check adding z values to non z input."""
input = QgsVectorLayer(
'Point?crs=epsg:4326&field=name:string(20)',
'test',
'memory')
self.assertTrue(input.isValid(), 'Provider not initialized')
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
myResult, myFeatures = input.dataProvider().addFeatures([ft])
self.assertTrue(myResult)
self.assertTrue(myFeatures)
dest_file_name = os.path.join(str(QDir.tempPath()), 'add_z.geojson')
options = QgsVectorFileWriter.SaveVectorOptions()
options.overrideGeometryType = QgsWkbTypes.PointZ
options.driverName = 'GeoJSON'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
input,
dest_file_name,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer(dest_file_name, 'test', 'ogr')
self.assertTrue(created_layer.isValid())
f = next(created_layer.getFeatures(QgsFeatureRequest()))
self.assertEqual(f.geometry().asWkt(), 'PointZ (10 10 0)')
def testDropZ(self):
"""Check dropping z values input."""
input = QgsVectorLayer(
'PointZ?crs=epsg:4326&field=name:string(20)',
'test',
'memory')
self.assertTrue(input.isValid(), 'Provider not initialized')
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromWkt('PointM(10 10 2)'))
myResult, myFeatures = input.dataProvider().addFeatures([ft])
self.assertTrue(myResult)
self.assertTrue(myFeatures)
dest_file_name = os.path.join(str(QDir.tempPath()), 'drop_z.geojson')
options = QgsVectorFileWriter.SaveVectorOptions()
options.overrideGeometryType = QgsWkbTypes.PointM
options.driverName = 'GeoJSON'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
input,
dest_file_name,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer(dest_file_name, 'test', 'ogr')
self.assertTrue(created_layer.isValid())
f = next(created_layer.getFeatures(QgsFeatureRequest()))
self.assertEqual(f.geometry().asWkt(), 'Point (10 10)')
    @unittest.skipIf(int(gdal.VersionInfo('VERSION_NUM')) < GDAL_COMPUTE_VERSION(2, 4, 0), 'GDAL 2.4 required')
def testWriteWithStringListField(self):
"""
Test writing with a string list field
:return:
"""
tmpfile = os.path.join(self.basetestpath, 'newstringlistfield.gml')
ds = ogr.GetDriverByName('GML').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPoint)
lyr.CreateField(ogr.FieldDefn('strfield', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('intfield', ogr.OFTInteger))
lyr.CreateField(ogr.FieldDefn('strlistfield', ogr.OFTStringList))
ds = None
vl = QgsVectorLayer(tmpfile)
self.assertTrue(vl.isValid())
# write a gml dataset with a string list field
filename = os.path.join(str(QDir.tempPath()), 'with_stringlist_field.gml')
rc, errmsg = QgsVectorFileWriter.writeAsVectorFormat(vl,
filename,
'utf-8',
vl.crs(),
'GML')
self.assertEqual(rc, QgsVectorFileWriter.NoError)
# open the resulting gml
vl = QgsVectorLayer(filename, '', 'ogr')
self.assertTrue(vl.isValid())
fields = vl.fields()
# test type of converted field
idx = fields.indexFromName('strlistfield')
self.assertEqual(fields.at(idx).type(), QVariant.List)
self.assertEqual(fields.at(idx).subType(), QVariant.String)
del vl
os.unlink(filename)
def testWriteWithBinaryField(self):
"""
Test writing with a binary field
:return:
"""
basetestpath = tempfile.mkdtemp()
tmpfile = os.path.join(basetestpath, 'binaryfield.sqlite')
ds = ogr.GetDriverByName('SQLite').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPoint, options=['FID=fid'])
lyr.CreateField(ogr.FieldDefn('strfield', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('intfield', ogr.OFTInteger))
lyr.CreateField(ogr.FieldDefn('binfield', ogr.OFTBinary))
lyr.CreateField(ogr.FieldDefn('binfield2', ogr.OFTBinary))
f = None
ds = None
vl = QgsVectorLayer(tmpfile)
self.assertTrue(vl.isValid())
# check that 1 of its fields is a bool
fields = vl.fields()
self.assertEqual(fields.at(fields.indexFromName('binfield')).type(), QVariant.ByteArray)
dp = vl.dataProvider()
f = QgsFeature(fields)
bin_1 = b'xxx'
bin_2 = b'yyy'
bin_val1 = QByteArray(bin_1)
bin_val2 = QByteArray(bin_2)
f.setAttributes([1, 'str', 100, bin_val1, bin_val2])
self.assertTrue(dp.addFeature(f))
# write a gpkg package with a binary field
filename = os.path.join(str(QDir.tempPath()), 'with_bin_field')
rc, errmsg = QgsVectorFileWriter.writeAsVectorFormat(vl,
filename,
'utf-8',
vl.crs(),
'GPKG')
self.assertEqual(rc, QgsVectorFileWriter.NoError)
# open the resulting geopackage
vl = QgsVectorLayer(filename + '.gpkg', '', 'ogr')
self.assertTrue(vl.isValid())
fields = vl.fields()
# test type of converted field
idx = fields.indexFromName('binfield')
self.assertEqual(fields.at(idx).type(), QVariant.ByteArray)
idx2 = fields.indexFromName('binfield2')
self.assertEqual(fields.at(idx2).type(), QVariant.ByteArray)
# test values
self.assertEqual(vl.getFeature(1).attributes()[idx], bin_val1)
self.assertEqual(vl.getFeature(1).attributes()[idx2], bin_val2)
del vl
os.unlink(filename + '.gpkg')
def testWriteKMLAxisOrderIssueGDAL3(self):
"""Check axis order issue when writing KML with EPSG:4326."""
if not ogr.GetDriverByName('KML'):
return
vl = QgsVectorLayer(
'PointZ?crs=epsg:4326&field=name:string(20)',
'test',
'memory')
self.assertTrue(vl.isValid(), 'Provider not initialized')
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromWkt('Point(2 49)'))
myResult, myFeatures = vl.dataProvider().addFeatures([ft])
self.assertTrue(myResult)
self.assertTrue(myFeatures)
dest_file_name = os.path.join(str(QDir.tempPath()), 'testWriteKMLAxisOrderIssueGDAL3.kml')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
vl,
dest_file_name,
'utf-8',
vl.crs(),
'KML')
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer(dest_file_name, 'test', 'ogr')
self.assertTrue(created_layer.isValid())
f = next(created_layer.getFeatures(QgsFeatureRequest()))
self.assertEqual(f.geometry().asWkt(), 'PointZ (2 49 0)')
if __name__ == '__main__':
unittest.main()
| ahuarte47/QGIS | tests/src/python/test_qgsvectorfilewriter.py | Python | gpl-2.0 | 49,644 | 0.002397 |
import sys
import ctypes
def popcount(N):
if sys.platform.startswith('linux'):
libc = ctypes.cdll.LoadLibrary('libc.so.6')
return libc.__sched_cpucount(ctypes.sizeof(ctypes.c_long), (ctypes.c_long * 1)(N))
elif sys.platform == 'darwin':
libc = ctypes.cdll.LoadLibrary('libSystem.dylib')
return libc.__popcountdi2(N)
    else:
        raise NotImplementedError('popcount: unsupported platform')
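# Portable fallback (editor's addition, not called by main() below):
# counting set bits via bin() avoids the libc dependency entirely.
def popcount_portable(N):
    # e.g. popcount_portable(0b1011) == 3
    return bin(N).count('1')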
def main():
N = int(input())
mod = 10 ** 9 + 7
A = [[int(x) for x in input().split()] for _ in range(N)]
dp = [0] * (1 << N)
dp[0] = 1
for state in range(1 << N):
dp[state] %= mod
i = popcount(state)
for j in range(N):
if (state >> j & 1) == 0 and A[i][j]:
dp[state | (1 << j)] += dp[state]
print(dp[-1])
if __name__ == '__main__':
main()
| knuu/competitive-programming | atcoder/dp/edu_dp_o.py | Python | mit | 818 | 0.002445 |
import teca.utils as tecautils
import teca.ConfigHandler as tecaconf
import unittest
class TestFileFilter(unittest.TestCase):
def setUp(self):
self.conf = tecaconf.ConfigHandler(
"tests/test_data/configuration.json",
{"starting_path": "tests/test_data/images"}
)
self.files_list = [
"foo.doc",
"yukinon.jpg",
"cuteflushadoingflushathings.webm"
]
def test_dothefiltering(self):
self.assertTrue("foo.doc" not in
tecautils.filterImages(self.files_list,
self.conf))
self.assertTrue("yukinon.jpg" in
tecautils.filterImages(self.files_list,
self.conf))
def test_nofiles(self):
self.assertEqual(0, len(tecautils.filterImages([], self.conf)))
| alfateam123/Teca | tests/test_utils.py | Python | mit | 902 | 0.001109 |
from typing import List
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val: int = 0, left: "TreeNode" = None, right: "TreeNode" = None):
self.val = val
self.left = left
self.right = right
@classmethod
def serialize(cls, root: "TreeNode") -> str:
"""Encodes a tree to a single string."""
buffer = []
def _serialize(root: "TreeNode"):
if root is None:
buffer.append("#")
return
buffer.append(str(root.val))
_serialize(root.left)
_serialize(root.right)
_serialize(root)
return ",".join(buffer)
@classmethod
def deserialize(cls, data: str) -> "TreeNode":
"""Decodes your encoded data to tree."""
buffer = data.split(",")
def _deserialize(buffer: List[str]):
if len(buffer) == 0:
return None
val = buffer.pop(0)
if val == "#" or val == "":
return None
node = TreeNode(int(val))
node.left = _deserialize(buffer)
node.right = _deserialize(buffer)
return node
return _deserialize(buffer)
if __name__ == "__main__":
tests = [
"#",
"1,#,#",
"1,2,#,#,#",
"1,#,2,#,#",
"1,2,#,#,3,#,#",
"1,2,#,#,3,4,5,#,#,#,#",
]
for t in tests:
actual = TreeNode.serialize(TreeNode.deserialize(t))
print("serialize(deserialize) ->", actual)
assert t == TreeNode.serialize(TreeNode.deserialize(t))
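# Editor's note: the pre-order traversal with '#' sentinels above encodes the
# tree unambiguously, so serialize(deserialize(s)) == s for well-formed s.
# Each token is consumed exactly once, though buffer.pop(0) is linear per
# call; a collections.deque with popleft() would make decoding O(n) overall.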
| l33tdaima/l33tdaima | local_packages/binary_tree.py | Python | mit | 1,599 | 0.001251 |
# Copyright (C) 2017 FireEye, Inc. All Rights Reserved.
import contextlib
import envi
import viv_utils
class ApiMonitor(viv_utils.emulator_drivers.Monitor):
"""
The ApiMonitor observes emulation and cleans up API function returns.
"""
def __init__(self, vw, function_index):
viv_utils.emulator_drivers.Monitor.__init__(self, vw)
self.function_index = function_index
def apicall(self, emu, op, pc, api, argv):
# overridden from Monitor
self.d("apicall: %s %s %s %s %s", emu, op, pc, api, argv)
def prehook(self, emu, op, startpc):
# overridden from Monitor
pass
def posthook(self, emu, op, endpc):
# overridden from Monitor
if op.mnem == "ret":
try:
self._check_return(emu, op)
except Exception as e:
self.d(str(e))
def _check_return(self, emu, op):
"""
Ensure that the target of the return is within the allowed set of functions.
        Do nothing if the return address is valid. If it is invalid,
        _fix_return adjusts the program counter and stack pointer when a valid
        return address is found on the stack, or raises an Exception otherwise.
"""
function_start = self.function_index[op.va]
return_addresses = self._get_return_vas(emu, function_start)
if op.opers:
# adjust stack in case of `ret imm16` instruction
emu.setStackCounter(emu.getStackCounter() - op.opers[0].imm)
return_address = self.getStackValue(emu, -4)
if return_address not in return_addresses:
self._logger.debug(
"Return address 0x%08X is invalid, expected one of: %s",
return_address,
", ".join(map(hex, return_addresses)),
)
self._fix_return(emu, return_address, return_addresses)
# TODO return, handle Exception
else:
self._logger.debug("Return address 0x%08X is valid, returning", return_address)
# TODO return?
def _get_return_vas(self, emu, function_start):
"""
Get the list of valid addresses to which a function should return.
"""
return_vas = []
callers = self._vw.getCallers(function_start)
for caller in callers:
call_op = emu.parseOpcode(caller)
return_va = call_op.va + call_op.size
return_vas.append(return_va)
return return_vas
def _fix_return(self, emu, return_address, return_addresses):
"""
Find a valid return address from return_addresses on the stack. Adjust the stack accordingly
or raise an Exception if no valid address is found within the search boundaries.
Modify program counter and stack pointer, so the emulator does not return to a garbage address.
"""
self.dumpStack(emu)
NUM_ADDRESSES = 4
pointer_size = emu.getPointerSize()
STACK_SEARCH_WINDOW = pointer_size * NUM_ADDRESSES
esp = emu.getStackCounter()
for offset in range(0, STACK_SEARCH_WINDOW, pointer_size):
ret_va_candidate = self.getStackValue(emu, offset)
if ret_va_candidate in return_addresses:
emu.setProgramCounter(ret_va_candidate)
emu.setStackCounter(esp + offset + pointer_size)
self._logger.debug("Returning to 0x%08X, adjusted stack:", ret_va_candidate)
self.dumpStack(emu)
return
self.dumpStack(emu)
raise Exception("No valid return address found...")
def dumpStack(self, emu):
"""
Convenience debugging routine for showing
state current state of the stack.
"""
esp = emu.getStackCounter()
stack_str = ""
for i in range(16, -16, -4):
if i == 0:
sp = "<= SP"
else:
sp = "%02x" % (-i)
stack_str = "%s\n0x%08x - 0x%08x %s" % (stack_str, (esp - i), self.getStackValue(emu, -i), sp)
self.d(stack_str)
def dumpState(self, emu):
self.i("eip: 0x%x", emu.getRegisterByName("eip"))
self.i("esp: 0x%x", emu.getRegisterByName("esp"))
self.i("eax: 0x%x", emu.getRegisterByName("eax"))
self.i("ebx: 0x%x", emu.getRegisterByName("ebx"))
self.i("ecx: 0x%x", emu.getRegisterByName("ecx"))
self.i("edx: 0x%x", emu.getRegisterByName("edx"))
self.dumpStack(emu)
def pointerSize(emu):
"""
Convenience method whose name might be more readable
than fetching emu.imem_psize.
Returns the size of a pointer in bytes for the given emulator.
:rtype: int
"""
return emu.imem_psize
def popStack(emu):
"""
Remove the element at the top of the stack.
:rtype: int
"""
v = emu.readMemoryFormat(emu.getStackCounter(), "<P")[0]
emu.setStackCounter(emu.getStackCounter() + pointerSize(emu))
return v
class GetProcessHeapHook(viv_utils.emulator_drivers.Hook):
"""
    Hook and handle calls to GetProcessHeap, returning a fake, non-zero heap handle.
"""
def hook(self, callname, emu, callconv, api, argv):
if callname == "kernel32.GetProcessHeap":
            # return a dummy heap handle rather than emulating a real heap
callconv.execCallReturn(emu, 42, len(argv))
return True
raise viv_utils.emulator_drivers.UnsupportedFunction()
def round(i, size):
"""
Round `i` to the nearest greater-or-equal-to multiple of `size`.
:type i: int
:type size: int
:rtype: int
"""
if i % size == 0:
return i
return i + (size - (i % size))
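# Illustrative values (editor's addition):
#   round(0x0FFF, 0x1000) -> 0x1000
#   round(0x1000, 0x1000) -> 0x1000
#   round(0x1001, 0x1000) -> 0x2000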
class RtlAllocateHeapHook(viv_utils.emulator_drivers.Hook):
"""
Hook calls to RtlAllocateHeap, allocate memory in a "heap"
section, and return pointers to this memory.
The base heap address is 0x96960000.
The max allocation size is 10 MB.
"""
def __init__(self, *args, **kwargs):
super(RtlAllocateHeapHook, self).__init__(*args, **kwargs)
self._heap_addr = 0x96960000
MAX_ALLOCATION_SIZE = 10 * 1024 * 1024
def _allocate_mem(self, emu, size):
size = round(size, 0x1000)
if size > self.MAX_ALLOCATION_SIZE:
size = self.MAX_ALLOCATION_SIZE
va = self._heap_addr
self.d("RtlAllocateHeap: mapping %s bytes at %s", hex(size), hex(va))
emu.addMemoryMap(va, envi.memory.MM_RWX, "[heap allocation]", b"\x00" * (size + 4))
emu.writeMemory(va, b"\x00" * size)
self._heap_addr += size
return va
def hook(self, callname, driver, callconv, api, argv):
# works for kernel32.HeapAlloc
if callname == "ntdll.RtlAllocateHeap":
emu = driver
hheap, flags, size = argv
va = self._allocate_mem(emu, size)
callconv.execCallReturn(emu, va, len(argv))
return True
raise viv_utils.emulator_drivers.UnsupportedFunction()
class AllocateHeap(RtlAllocateHeapHook):
"""
Hook calls to AllocateHeap and handle them like calls to RtlAllocateHeapHook.
"""
def __init__(self, *args, **kwargs):
super(AllocateHeap, self).__init__(*args, **kwargs)
def hook(self, callname, driver, callconv, api, argv):
if (
callname == "kernel32.LocalAlloc"
or callname == "kernel32.GlobalAlloc"
or callname == "kernel32.VirtualAlloc"
):
size = argv[1]
elif callname == "kernel32.VirtualAllocEx":
size = argv[2]
else:
raise viv_utils.emulator_drivers.UnsupportedFunction()
va = self._allocate_mem(driver, size)
callconv.execCallReturn(driver, va, len(argv))
return True
class MallocHeap(RtlAllocateHeapHook):
"""
Hook calls to malloc and handle them like calls to RtlAllocateHeapHook.
"""
def __init__(self, *args, **kwargs):
super(MallocHeap, self).__init__(*args, **kwargs)
def hook(self, callname, driver, callconv, api, argv):
if callname == "msvcrt.malloc" or callname == "msvcrt.calloc":
size = argv[0]
va = self._allocate_mem(driver, size)
callconv.execCallReturn(driver, va, len(argv))
return True
raise viv_utils.emulator_drivers.UnsupportedFunction()
class MemcpyHook(viv_utils.emulator_drivers.Hook):
"""
Hook and handle calls to memcpy and memmove.
"""
MAX_COPY_SIZE = 1024 * 1024 * 32 # don't attempt to copy more than 32MB, or something is wrong
def __init__(self, *args, **kwargs):
super(MemcpyHook, self).__init__(*args, **kwargs)
def hook(self, callname, driver, callconv, api, argv):
if callname == "msvcrt.memcpy" or callname == "msvcrt.memmove":
emu = driver
dst, src, count = argv
if count > self.MAX_COPY_SIZE:
self.d("unusually large memcpy, truncating to 32MB: 0x%x", count)
count = self.MAX_COPY_SIZE
data = emu.readMemory(src, count)
emu.writeMemory(dst, data)
callconv.execCallReturn(emu, 0x0, len(argv))
return True
raise viv_utils.emulator_drivers.UnsupportedFunction()
def readStringAtRva(emu, rva, maxsize=None):
"""
Borrowed from vivisect/PE/__init__.py
:param emu: emulator
:param rva: virtual address of string
:param maxsize: maxsize of string
:return: the read string
"""
ret = bytearray()
while True:
if maxsize and maxsize <= len(ret):
break
x = emu.readMemory(rva, 1)
if x == b"\x00" or x is None:
break
ret += x
rva += 1
return bytes(ret)
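# Note (editor): the result never contains the NUL terminator -- reading
# stops *at* the first b"\x00" -- so len() of the returned bytes is the
# C-style string length. The strlen/strnlen hooks below rely on this.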
class StrlenHook(viv_utils.emulator_drivers.Hook):
"""
Hook and handle calls to strlen
"""
def __init__(self, *args, **kwargs):
super(StrlenHook, self).__init__(*args, **kwargs)
def hook(self, callname, driver, callconv, api, argv):
if callname and callname.lower() in ["msvcrt.strlen", "kernel32.lstrlena"]:
emu = driver
string_va = argv[0]
s = readStringAtRva(emu, string_va, 256)
callconv.execCallReturn(emu, len(s), len(argv))
return True
raise viv_utils.emulator_drivers.UnsupportedFunction()
class StrnlenHook(viv_utils.emulator_drivers.Hook):
"""
Hook and handle calls to strnlen.
"""
MAX_COPY_SIZE = 1024 * 1024 * 32
def __init__(self, *args, **kwargs):
super(StrnlenHook, self).__init__(*args, **kwargs)
def hook(self, callname, driver, callconv, api, argv):
if callname == "msvcrt.strnlen":
emu = driver
string_va, maxlen = argv
if maxlen > self.MAX_COPY_SIZE:
self.d("unusually large strnlen, truncating to 32MB: 0x%x", maxlen)
maxlen = self.MAX_COPY_SIZE
s = readStringAtRva(emu, string_va, maxsize=maxlen)
            # readStringAtRva stops at (and excludes) the first NUL, so the
            # length of the returned bytes is the strnlen result
            slen = len(s)
callconv.execCallReturn(emu, slen, len(argv))
return True
raise viv_utils.emulator_drivers.UnsupportedFunction()
class StrncmpHook(viv_utils.emulator_drivers.Hook):
"""
Hook and handle calls to strncmp.
"""
MAX_COPY_SIZE = 1024 * 1024 * 32
def __init__(self, *args, **kwargs):
super(StrncmpHook, self).__init__(*args, **kwargs)
def hook(self, callname, driver, callconv, api, argv):
if callname == "msvcrt.strncmp":
emu = driver
s1va, s2va, num = argv
if num > self.MAX_COPY_SIZE:
self.d("unusually large strnlen, truncating to 32MB: 0x%x", num)
num = self.MAX_COPY_SIZE
s1 = readStringAtRva(emu, s1va, maxsize=num)
s2 = readStringAtRva(emu, s2va, maxsize=num)
s1 = s1.partition(b"\x00")[0]
s2 = s2.partition(b"\x00")[0]
def cmp(a, b):
return (a > b) - (a < b)
result = cmp(s1, s2)
callconv.execCallReturn(emu, result, len(argv))
return True
raise viv_utils.emulator_drivers.UnsupportedFunction()
class MemchrHook(viv_utils.emulator_drivers.Hook):
"""
Hook and handle calls to memchr
"""
def __init__(self, *args, **kwargs):
super(MemchrHook, self).__init__(*args, **kwargs)
def hook(self, callname, driver, callconv, api, argv):
if callname == "msvcrt.memchr":
emu = driver
ptr, value, num = argv
value = bytes([value])
memory = emu.readMemory(ptr, num)
try:
idx = memory.index(value)
callconv.execCallReturn(emu, ptr + idx, len(argv))
except ValueError: # substring not found
callconv.execCallReturn(emu, 0, len(argv))
return True
raise viv_utils.emulator_drivers.UnsupportedFunction()
class ExitProcessHook(viv_utils.emulator_drivers.Hook):
"""
Hook calls to ExitProcess and stop emulation when these are hit.
"""
def __init__(self, *args, **kwargs):
super(ExitProcessHook, self).__init__(*args, **kwargs)
def hook(self, callname, driver, callconv, api, argv):
if callname == "kernel32.ExitProcess":
raise viv_utils.emulator_drivers.StopEmulation()
class CriticalSectionHooks(viv_utils.emulator_drivers.Hook):
"""
Hook calls to:
- InitializeCriticalSection
"""
def hook(self, callname, emu, callconv, api, argv):
if callname == "kernel32.InitializeCriticalSection":
(hsection,) = argv
            emu.writeMemory(hsection, b"csec")
callconv.execCallReturn(emu, 0, len(argv))
return True
DEFAULT_HOOKS = [
GetProcessHeapHook(),
RtlAllocateHeapHook(),
AllocateHeap(),
MallocHeap(),
ExitProcessHook(),
MemcpyHook(),
StrlenHook(),
MemchrHook(),
StrnlenHook(),
StrncmpHook(),
CriticalSectionHooks(),
]
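# Dispatch model (editor's note): a driver offers each API call to these
# hooks in order; a hook that does not handle the call raises
# UnsupportedFunction so the next hook (or the emulator default) can try.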
@contextlib.contextmanager
def defaultHooks(driver):
"""
Install and remove the default set of hooks to handle common functions.
intended usage:
with defaultHooks(driver):
driver.runFunction()
...
"""
try:
for hook in DEFAULT_HOOKS:
driver.add_hook(hook)
yield
finally:
for hook in DEFAULT_HOOKS:
driver.remove_hook(hook)
| fireeye/flare-floss | floss/api_hooks.py | Python | apache-2.0 | 14,484 | 0.001105 |
# -*- coding: iso-8859-1 -*-
#
# Bicycle Repair Man - the Python Refactoring Browser
# Copyright (C) 2001-2006 Phil Dawes <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
# Some of this code is taken from Pythius -
# Copyright (GPL) 2001 Jurgen Hermann <[email protected]>
import os
def containsAny(str, set):
""" Check whether 'str' contains ANY of the chars in 'set'
"""
return 1 in [c in str for c in set]
def getPathOfModuleOrPackage(dotted_name, pathlist):
""" Get the filesystem path for a module or a package.
Return the file system path to a file for a module,
and to a directory for a package. Return None if
the name is not found, or is a builtin or extension module.
"""
import imp
# split off top-most name
parts = dotted_name.split('.', 1)
if len(parts) > 1:
# we have a dotted path, import top-level package
try:
file, pathname, description = imp.find_module(parts[0], pathlist)
if file: file.close()
except ImportError:
return None
# check if it's indeed a package
if description[2] == imp.PKG_DIRECTORY:
# recursively handle the remaining name parts
pathname = getPathOfModuleOrPackage(parts[1], [pathname])
else:
pathname = None
else:
# plain name
try:
file, pathname, description = imp.find_module(dotted_name, pathlist)
if file: file.close()
            if description[2] not in [imp.PY_SOURCE, imp.PKG_DIRECTORY]:
pathname = None
except ImportError:
pathname = None
return pathname
def getFilesForName(name):
""" Get a list of module files for a filename, a module or package name,
or a directory.
"""
import imp
if not os.path.exists(name):
# check for glob chars
if containsAny(name, "*?[]"):
import glob
files = glob.glob(name)
list = []
for file in files:
list.extend(getFilesForName(file))
return list
# try to find module or package
name = getPathOfModuleOrPackage(name,[])
if not name:
return[]
if os.path.isdir(name):
# find all python files in directory
list = []
os.path.walk(name, _visit_pyfiles, list)
return list
elif os.path.exists(name) and not name.startswith("."):
# a single file
return [name]
return []
def _visit_pyfiles(list, dirname, names):
""" Helper for getFilesForName().
"""
# get extension for python source files
if not globals().has_key('_py_ext'):
import imp
global _py_ext
_py_ext = [triple[0]for triple in imp.get_suffixes()if triple[2] == imp.PY_SOURCE][0]
# don't recurse into CVS or Subversion directories
    if 'CVS' in names:
        names.remove('CVS')
    if '.svn' in names:
        names.remove('.svn')
names_copy = [] + names
for n in names_copy:
        if os.path.isdir(os.path.join(dirname, n)) and \
           not os.path.exists(os.path.join(dirname, n, "__init__.py")):
names.remove(n)
# add all *.py files to list
list.extend(
[os.path.join(dirname, file)
for file in names
if os.path.splitext(file)[1] == _py_ext and not file.startswith(".")])
# returns the directory which holds the first package of the package
# hierarchy under which 'filename' belongs
def getRootDirectory(filename):
if os.path.isdir(filename):
dir = filename
else:
dir = os.path.dirname(filename)
while dir != "" and \
os.path.exists(os.path.join(dir, "__init__.py")):
dir = os.path.dirname(dir)
return dir
# Returns the topmost package directory name of the package hierarchy
# under which 'filename' belongs
# **** NOT THE SAME AS THE ROOT DIRECTORY OF THE PACKAGE ***
def getPackageBaseDirectory(filename):
if os.path.isdir(filename):
dir = filename
else:
dir = os.path.dirname(filename)
if not os.path.exists(os.path.join(dir, "__init__.py")):
# parent dir is not a package
return dir
while dir != "" and \
os.path.exists(os.path.join(os.path.dirname(dir), "__init__.py")):
dir = os.path.dirname(dir)
return dir
def filenameToModulePath(fname):
directoriesPreceedingRoot = getRootDirectory(fname)
import os
# strip off directories preceeding root package directory
if directoriesPreceedingRoot != "" and directoriesPreceedingRoot != ".":
mpath = fname.replace(directoriesPreceedingRoot, "")
else:
if fname.startswith("."+os.sep): # if starts with './', lob it off
fname = fname[len("."+os.sep):]
mpath = fname
if(mpath[0] == os.path.normpath("/")):
mpath = mpath[1:]
mpath, ext = os.path.splitext(mpath)
mpath = mpath.replace(os.path.normpath("/"), ".")
return mpath
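# NOTE (editor): the definition below shadows the filenameToModulePath above;
# only this second, package-walking implementation is actually in effect.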
def filenameToModulePath(filename):
filename = os.path.abspath(filename)
package = ""
dot = ""
dir,modname = os.path.split(filename)
while dir != ""and \
os.path.exists(os.path.join(dir, "__init__.py")):
dir, dirname = os.path.split(dir)
package = dirname+dot+package
dot = "."
return package + dot + modname[:-3]
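# Worked example (editor's addition, hypothetical paths): for
# /src/pkg/sub/mod.py where pkg/ and sub/ each contain an __init__.py but
# /src does not, the loop accumulates package == "pkg.sub" and the function
# returns "pkg.sub.mod".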
| lebauce/artub | bike/parsing/pathutils.py | Python | gpl-2.0 | 5,507 | 0.005084 |
# -*- coding: utf-8 -*-
"""
:copyright: 2005-2008 by The PIDA Project
:license: GPL 2 or later (see README/COPYING/LICENSE)
"""
# stdlib
import os.path
# gtk
import gtk, gobject
# PIDA Imports
# core
from kiwi.ui.objectlist import Column
from pida.core.service import Service
from pida.core.features import FeaturesConfig
from pida.core.events import EventsConfig
from pida.core.actions import (ActionsConfig, TYPE_NORMAL)
from pida.core.options import OptionsConfig
from pida.ui.views import PidaView, WindowConfig
from pida.services.language import DOCTYPES
from pida.core.indexer import Result
from pygtkhelpers.gthreads import gcall
import time
# locale
from pida.core.locale import Locale
locale = Locale('')
_ = locale.gettext
class QItem(object):
name = ''
path = ''
class QOpenView(PidaView):
key = 'qopen.view'
gladefile = 'qopen'
label_text = _('Quick Open')
def create_ui(self):
self._history = gtk.ListStore(gobject.TYPE_STRING)
self.filter.set_model(self._history)
self.filter.set_text_column(0)
self.last_entered = 0
self.olist.set_columns(
[
Column('basename', title=_('Name')),
Column('relpath', title=_('Path')),
]
)
self.olist.set_selection_mode(gtk.SELECTION_MULTIPLE)
self.filter.child.connect("changed", self.on_filter_changed)
self.filter.child.connect("activate", self.on_filter_activate)
self.filter.child.connect("key-press-event", self.on_filter_keypress)
#self.toplevel.connect_after("map", self.on_show)
self.filter.connect_after("map", self.on_show)
def set_filter(self, text, time_check=None):
if time_check and self.last_entered > time_check:
return False
self._history.insert(0, (text,))
self.olist.clear()
tokens = text.split()
if not len(tokens):
return
ftypes = []
fnames = []
fall = []
filters = self.svc.boss.get_service('filemanager').\
features['file_hidden_check']
for tok in tokens:
if not tok:
continue
if tok[0] == "#" and len(tok) > 1:
for lang in DOCTYPES.get_fuzzy_list(tok[1:]):
ftypes.append(lang.internal)
elif tok[0] == "!" and len(tok) > 1:
fnames.append(tok[1:])
else:
fall.append(tok)
def do_filter(item):
            if len(self.olist) > 200:
                return Result(abort=True)
if not len(item.basename) or not len(item.relpath):
return
if "/." in item.relpath or item.relpath[0] == ".":
return
for chk in filters:
if not chk(item.basename, item.relpath, ''):
return
if item.is_dir:
return
if all((x in item.relpath for x in fall)) and \
all((x in item.basename for x in fnames)):
if len(ftypes):
if item.doctype in ftypes:
return Result(accept=True)
else:
return Result(accept=True)
project = self.svc.boss.cmd('project', 'get_current_project')
if not project:
return
for result in project.indexer.query(do_filter):
self.olist.append(result)
return False
def on_show(self, *args):
gcall(self.filter.child.grab_focus)
def on_olist__key_press_event(self, widget, event):
if event.keyval == gtk.keysyms.Escape and self.pane.get_params().detached:
self.can_be_closed()
def on_filter_keypress(self, widget, event):
if event.keyval == gtk.keysyms.Tab and len(self.olist):
gcall(self.olist.grab_focus)
if event.keyval == gtk.keysyms.Escape and self.pane.get_params().detached:
self.can_be_closed()
def on_filter_activate(self, *args):
if len(self.olist):
self.svc.open(self.olist[0])
def on_filter_changed(self, *args):
self.last_entered = time.time()
gobject.timeout_add(self.svc.opt('start_delay'),
self.set_filter, self.filter.child.props.text,
self.last_entered)
def on_olist__row_activated(self, widget, item):
self.svc.open(item)
def on_button_open__clicked(self, button):
for item in self.olist.get_selected_rows():
self.svc.open(item)
if self.pane.get_params().detached:
self.on_button_close__clicked(button)
def can_be_closed(self):
self.svc.boss.cmd('window', 'remove_view', view=self)
def on_button_close__clicked(self, button):
self.svc.boss.cmd('window', 'remove_view', view=self)
class QopenEventsConfig(EventsConfig):
def create(self):
#self.publish('something')
pass
def subscribe_all_foreign(self):
#self.subscribe_foreign('buffer', 'document-changed',
# self.on_document_changed)
pass
def on_document_changed(self, document):
pass
class QopenWindowConfig(WindowConfig):
key = QOpenView.key
label_text = QOpenView.label_text
class QopenFeaturesConfig(FeaturesConfig):
def subscribe_all_foreign(self):
self.subscribe_foreign('window', 'window-config',
QopenWindowConfig)
class QopenOptionsConfig(OptionsConfig):
def create_options(self):
self.create_option(
'start_delay',
_('Start after'),
int, # type of variable, like int, str, bool, ..
800,
_('Start search after n milliseconds'),
)
class QopenActionsConfig(ActionsConfig):
def create_actions(self):
QopenWindowConfig.action = self.create_action(
'qopen_show',
TYPE_NORMAL,
_('Open in project'),
_('Open file in project.'),
gtk.STOCK_OPEN,
self.on_qopen_show,
'' # default shortcut or '' to enable shortcut for action
)
def on_qopen_show(self, action):
self.svc.show_qopen()
class QuickOpen(Service):
#features_config = QopenFeaturesConfig
actions_config = QopenActionsConfig
options_config = QopenOptionsConfig
#events_config = QopenEventsConfig
label = "Quick Open"
def pre_start(self):
self._view = None
pass
def start(self):
pass
def stop(self):
pass
def show_qopen(self):
if not self._view:
self._view = QOpenView(self)
if not self.boss.cmd('window', 'is_added', view=self._view):
self.boss.cmd('window', 'add_detached_view',
paned='Buffer', view=self._view,
)
else:
self.boss.cmd('window', 'present_view', view=self._view)
def open(self, item):
project = self.boss.cmd('project', 'get_current_project')
if not project:
return
path = os.path.join(project.source_directory, item.relpath)
if item.is_dir:
self.boss.cmd('filemanager', 'browse', new_path=path)
self.boss.cmd('filemanager', 'present_view')
else:
self.boss.cmd('buffer', 'open_file', file_name=path)
# Required Service attribute for service loading
Service = QuickOpen
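# Filter syntax handled by QOpenView.set_filter (editor's summary):
#   plain token -> substring match against the file's relative path
#   !name       -> substring match against the basename only
#   #lang       -> fuzzy document-type filter (e.g. "#py" for Python files)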
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| fermat618/pida | pida-plugins/quickopen/quickopen.py | Python | gpl-2.0 | 7,589 | 0.003031 |
__all__ = ('WebSocketResponse', 'MsgType')
import asyncio
import warnings
from . import hdrs
from .errors import HttpProcessingError, ClientDisconnectedError
from .websocket import do_handshake, Message, WebSocketError
from .websocket_client import MsgType, closedMessage
from .web_exceptions import (
HTTPBadRequest, HTTPMethodNotAllowed, HTTPInternalServerError)
from aiopy.required.aiohttp.web_reqrep import StreamResponse
THRESHOLD_CONNLOST_ACCESS = 5
class WebSocketResponse(StreamResponse):
def __init__(self, *,
timeout=10.0, autoclose=True, autoping=True, protocols=()):
super().__init__(status=101)
self._protocols = protocols
self._protocol = None
self._writer = None
self._reader = None
self._closed = False
self._closing = False
self._conn_lost = 0
self._close_code = None
self._loop = None
self._waiting = False
self._exception = None
self._timeout = timeout
self._autoclose = autoclose
self._autoping = autoping
def start(self, request):
# make pre-check to don't hide it by do_handshake() exceptions
resp_impl = self._start_pre_check(request)
if resp_impl is not None:
return resp_impl
try:
status, headers, parser, writer, protocol = do_handshake(
request.method, request.headers, request.transport,
self._protocols)
except HttpProcessingError as err:
if err.code == 405:
raise HTTPMethodNotAllowed(
request.method, [hdrs.METH_GET], body=b'')
elif err.code == 400:
raise HTTPBadRequest(text=err.message, headers=err.headers)
else: # pragma: no cover
raise HTTPInternalServerError() from err
if self.status != status:
self.set_status(status)
for k, v in headers:
self.headers[k] = v
self.force_close()
resp_impl = super().start(request)
self._reader = request._reader.set_parser(parser)
self._writer = writer
self._protocol = protocol
self._loop = request.app.loop
return resp_impl
def can_start(self, request):
if self._writer is not None:
raise RuntimeError('Already started')
try:
_, _, _, _, protocol = do_handshake(
request.method, request.headers, request.transport,
self._protocols)
except HttpProcessingError:
return False, None
else:
return True, protocol
@property
def closed(self):
return self._closed
@property
def close_code(self):
return self._close_code
@property
def protocol(self):
return self._protocol
def exception(self):
return self._exception
    def ping(self, message=b''):
if self._writer is None:
raise RuntimeError('Call .start() first')
if self._closed:
raise RuntimeError('websocket connection is closing')
self._writer.ping(message)
    def pong(self, message=b''):
# unsolicited pong
if self._writer is None:
raise RuntimeError('Call .start() first')
if self._closed:
raise RuntimeError('websocket connection is closing')
self._writer.pong(message)
def send_str(self, data):
if self._writer is None:
raise RuntimeError('Call .start() first')
if self._closed:
raise RuntimeError('websocket connection is closing')
if not isinstance(data, str):
raise TypeError('data argument must be str (%r)' % type(data))
self._writer.send(data, binary=False)
def send_bytes(self, data):
if self._writer is None:
raise RuntimeError('Call .start() first')
if self._closed:
raise RuntimeError('websocket connection is closing')
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError('data argument must be byte-ish (%r)' %
type(data))
self._writer.send(data, binary=True)
@asyncio.coroutine
def wait_closed(self): # pragma: no cover
warnings.warn(
'wait_closed() coroutine is deprecated. use close() instead',
DeprecationWarning)
return (yield from self.close())
@asyncio.coroutine
def write_eof(self):
if self._eof_sent:
return
if self._resp_impl is None:
raise RuntimeError("Response has not been started")
yield from self.close()
self._eof_sent = True
@asyncio.coroutine
def close(self, *, code=1000, message=b''):
if self._writer is None:
raise RuntimeError('Call .start() first')
if not self._closed:
self._closed = True
try:
self._writer.close(code, message)
except (asyncio.CancelledError, asyncio.TimeoutError):
self._close_code = 1006
raise
except Exception as exc:
self._close_code = 1006
self._exception = exc
return True
if self._closing:
return True
while True:
try:
msg = yield from asyncio.wait_for(
self._reader.read(),
timeout=self._timeout, loop=self._loop)
except asyncio.CancelledError:
self._close_code = 1006
raise
except Exception as exc:
self._close_code = 1006
self._exception = exc
return True
if msg.tp == MsgType.close:
self._close_code = msg.data
return True
else:
return False
@asyncio.coroutine
def receive(self):
if self._reader is None:
raise RuntimeError('Call .start() first')
if self._waiting:
raise RuntimeError('Concurrent call to receive() is not allowed')
self._waiting = True
try:
while True:
if self._closed:
self._conn_lost += 1
if self._conn_lost >= THRESHOLD_CONNLOST_ACCESS:
raise RuntimeError('WebSocket connection is closed.')
return closedMessage
try:
msg = yield from self._reader.read()
except (asyncio.CancelledError, asyncio.TimeoutError):
raise
except WebSocketError as exc:
self._close_code = exc.code
yield from self.close(code=exc.code)
return Message(MsgType.error, exc, None)
except ClientDisconnectedError:
self._closed = True
self._close_code = 1006
return Message(MsgType.close, None, None)
except Exception as exc:
self._exception = exc
self._closing = True
self._close_code = 1006
yield from self.close()
return Message(MsgType.error, exc, None)
if msg.tp == MsgType.close:
self._closing = True
self._close_code = msg.data
if not self._closed and self._autoclose:
yield from self.close()
return msg
elif not self._closed:
if msg.tp == MsgType.ping and self._autoping:
self._writer.pong(msg.data)
elif msg.tp == MsgType.pong and self._autoping:
continue
else:
return msg
finally:
self._waiting = False
@asyncio.coroutine
def receive_msg(self): # pragma: no cover
warnings.warn(
'receive_msg() coroutine is deprecated. use receive() instead',
DeprecationWarning)
return (yield from self.receive())
@asyncio.coroutine
def receive_str(self):
msg = yield from self.receive()
if msg.tp != MsgType.text:
raise TypeError(
"Received message {}:{!r} is not str".format(msg.tp, msg.data))
return msg.data
@asyncio.coroutine
def receive_bytes(self):
msg = yield from self.receive()
if msg.tp != MsgType.binary:
raise TypeError(
"Received message {}:{!r} is not bytes".format(msg.tp,
msg.data))
return msg.data
def write(self, data):
raise RuntimeError("Cannot call .write() for websocket")
| lfblogs/aiopy | aiopy/required/aiohttp/web_ws.py | Python | gpl-3.0 | 8,998 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from CTFd.plugins import (
bypass_csrf_protection,
get_admin_plugin_menu_bar,
get_user_page_menu_bar,
override_template,
register_admin_plugin_menu_bar,
register_admin_plugin_script,
register_admin_plugin_stylesheet,
register_plugin_asset,
register_plugin_assets_directory,
register_plugin_script,
register_user_page_menu_bar,
)
from tests.helpers import (
create_ctfd,
destroy_ctfd,
gen_challenge,
login_as_user,
setup_ctfd,
)
def test_register_plugin_asset():
"""Test that plugin asset registration works"""
app = create_ctfd(setup=False)
register_plugin_asset(app, asset_path="/plugins/__init__.py")
app = setup_ctfd(app)
with app.app_context():
with app.test_client() as client:
r = client.get("/plugins/__init__.py")
assert len(r.get_data(as_text=True)) > 0
assert r.status_code == 200
destroy_ctfd(app)
def test_register_plugin_assets_directory():
"""Test that plugin asset directory registration works"""
app = create_ctfd(setup=False)
register_plugin_assets_directory(app, base_path="/plugins/")
app = setup_ctfd(app)
with app.app_context():
with app.test_client() as client:
r = client.get("/plugins/__init__.py")
assert len(r.get_data(as_text=True)) > 0
assert r.status_code == 200
r = client.get("/plugins/challenges/__init__.py")
assert len(r.get_data(as_text=True)) > 0
assert r.status_code == 200
destroy_ctfd(app)
def test_override_template():
"""Does override_template work properly for regular themes when used from a plugin"""
app = create_ctfd()
with app.app_context():
override_template("login.html", "LOGIN OVERRIDE")
with app.test_client() as client:
r = client.get("/login")
assert r.status_code == 200
output = r.get_data(as_text=True)
assert "LOGIN OVERRIDE" in output
destroy_ctfd(app)
def test_admin_override_template():
"""Does override_template work properly for the admin panel when used from a plugin"""
app = create_ctfd()
with app.app_context():
override_template("admin/users/user.html", "ADMIN USER OVERRIDE")
client = login_as_user(app, name="admin", password="password")
r = client.get("/admin/users/1")
assert r.status_code == 200
output = r.get_data(as_text=True)
assert "ADMIN USER OVERRIDE" in output
destroy_ctfd(app)
def test_register_plugin_script():
"""Test that register_plugin_script adds script paths to the core theme when used from a plugin"""
app = create_ctfd()
with app.app_context():
register_plugin_script("/fake/script/path.js")
register_plugin_script("http://examplectf.com/fake/script/path.js")
with app.test_client() as client:
r = client.get("/")
output = r.get_data(as_text=True)
assert "/fake/script/path.js" in output
assert "http://examplectf.com/fake/script/path.js" in output
destroy_ctfd(app)
def test_register_plugin_stylesheet():
"""Test that register_plugin_stylesheet adds stylesheet paths to the core theme when used from a plugin"""
app = create_ctfd()
with app.app_context():
register_plugin_script("/fake/stylesheet/path.css")
register_plugin_script("http://examplectf.com/fake/stylesheet/path.css")
with app.test_client() as client:
r = client.get("/")
output = r.get_data(as_text=True)
assert "/fake/stylesheet/path.css" in output
assert "http://examplectf.com/fake/stylesheet/path.css" in output
destroy_ctfd(app)
def test_register_admin_plugin_script():
"""Test that register_admin_plugin_script adds script paths to the admin theme when used from a plugin"""
app = create_ctfd()
with app.app_context():
register_admin_plugin_script("/fake/script/path.js")
register_admin_plugin_script("http://examplectf.com/fake/script/path.js")
with login_as_user(app, name="admin") as client:
r = client.get("/admin/statistics")
output = r.get_data(as_text=True)
assert "/fake/script/path.js" in output
assert "http://examplectf.com/fake/script/path.js" in output
destroy_ctfd(app)
def test_register_admin_plugin_stylesheet():
"""Test that register_admin_plugin_stylesheet adds stylesheet paths to the admin theme when used from a plugin"""
app = create_ctfd()
with app.app_context():
register_admin_plugin_stylesheet("/fake/stylesheet/path.css")
register_admin_plugin_stylesheet(
"http://examplectf.com/fake/stylesheet/path.css"
)
with login_as_user(app, name="admin") as client:
r = client.get("/admin/statistics")
output = r.get_data(as_text=True)
assert "/fake/stylesheet/path.css" in output
assert "http://examplectf.com/fake/stylesheet/path.css" in output
destroy_ctfd(app)
def test_register_admin_plugin_menu_bar():
"""
Test that register_admin_plugin_menu_bar() properly inserts into HTML and get_admin_plugin_menu_bar()
returns the proper list.
"""
app = create_ctfd()
with app.app_context():
register_admin_plugin_menu_bar(
title="test_admin_plugin_name", route="/test_plugin"
)
client = login_as_user(app, name="admin", password="password")
r = client.get("/admin/statistics")
output = r.get_data(as_text=True)
assert "/test_plugin" in output
assert "test_admin_plugin_name" in output
menu_item = get_admin_plugin_menu_bar()[0]
assert menu_item.title == "test_admin_plugin_name"
assert menu_item.route == "/test_plugin"
destroy_ctfd(app)
def test_register_user_page_menu_bar():
"""
Test that the register_user_page_menu_bar() properly inserts into HTML and get_user_page_menu_bar() returns the
proper list.
"""
app = create_ctfd()
with app.app_context():
register_user_page_menu_bar(
title="test_user_menu_link", route="/test_user_href"
)
with app.test_client() as client:
r = client.get("/")
output = r.get_data(as_text=True)
assert "/test_user_href" in output
assert "test_user_menu_link" in output
with app.test_request_context():
menu_item = get_user_page_menu_bar()[0]
assert menu_item.title == "test_user_menu_link"
assert menu_item.route == "/test_user_href"
destroy_ctfd(app)
def test_bypass_csrf_protection():
"""
Test that the bypass_csrf_protection decorator functions properly
"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.post("/login")
output = r.get_data(as_text=True)
assert r.status_code == 403
def bypass_csrf_protection_test_route():
return "Success", 200
# Hijack an existing route to avoid any kind of hacks to create a test route
app.view_functions["auth.login"] = bypass_csrf_protection(
bypass_csrf_protection_test_route
)
with app.test_client() as client:
r = client.post("/login")
output = r.get_data(as_text=True)
assert r.status_code == 200
assert output == "Success"
destroy_ctfd(app)
def test_challenges_model_access_plugin_class():
"""
Test that the Challenges model can access its plugin class
"""
app = create_ctfd()
with app.app_context():
from CTFd.plugins.challenges import get_chal_class
chal = gen_challenge(app.db)
assert chal.plugin_class == get_chal_class("standard")
destroy_ctfd(app)
| isislab/CTFd | tests/test_plugin_utils.py | Python | apache-2.0 | 7,989 | 0.001377 |
# vim:ts=4:et
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
from struct import unpack
import os.path
from math import pi, sqrt
import bpy
from bpy_extras.object_utils import object_data_add
from mathutils import Vector,Matrix,Quaternion
from bpy_extras.io_utils import ImportHelper
from bpy.props import BoolProperty, FloatProperty, StringProperty, EnumProperty
from bpy.props import FloatVectorProperty, PointerProperty
from .mu import MuEnum, Mu, MuColliderMesh, MuColliderSphere, MuColliderCapsule
from .mu import MuColliderBox, MuColliderWheel
from .shader import make_shader
from .material import make_material
from . import collider, properties
EXCLUDED_OBJECTS = ['flare', 'busted', 'flag']
def create_uvs(mu, uvs, mesh, name):
uvlay = mesh.uv_textures.new(name)
uvloop = mesh.uv_layers[name]
for i, uvl in enumerate(uvloop.data):
v = mesh.loops[i].vertex_index
uvl.uv = uvs[v]
def create_mesh(mu, mumesh, name):
mesh = bpy.data.meshes.new(name)
faces = []
for sm in mumesh.submeshes:
faces.extend(sm)
mesh.from_pydata(mumesh.verts, [], faces)
if mumesh.uvs:
create_uvs(mu, mumesh.uvs, mesh, name + ".UV")
if mumesh.uv2s:
create_uvs(mu, mumesh.uv2s, mesh, name + ".UV2")
return mesh
def create_mesh_object(name, mesh, transform):
obj = bpy.data.objects.new(name, mesh)
obj.rotation_mode = 'QUATERNION'
if transform:
obj.location = Vector(transform.localPosition)
obj.rotation_quaternion = Quaternion(transform.localRotation)
obj.scale = Vector(transform.localScale)
else:
obj.location = Vector((0, 0, 0))
obj.rotation_quaternion = Quaternion((1,0,0,0))
obj.scale = Vector((1,1,1))
bpy.context.scene.objects.link(obj)
return obj
def copy_spring(dst, src):
dst.spring = src.spring
dst.damper = src.damper
dst.targetPosition = src.targetPosition
def copy_friction(dst, src):
dst.extremumSlip = src.extremumSlip
dst.extremumValue = src.extremumValue
dst.asymptoteSlip = src.asymptoteSlip
    dst.asymptoteValue = src.asymptoteValue
dst.stiffness = src.stiffness
def create_light(mu, mulight, transform):
ltype = ('SPOT', 'SUN', 'POINT', 'AREA')[mulight.type]
light = bpy.data.lamps.new(transform.name, ltype)
light.color = mulight.color[:3]
light.distance = mulight.range
light.energy = mulight.intensity
if ltype == 'SPOT' and hasattr(mulight, "spotAngle"):
light.spot_size = mulight.spotAngle * pi / 180
obj = bpy.data.objects.new(transform.name, light)
obj.rotation_mode = 'QUATERNION'
obj.location = Vector(transform.localPosition)
# Blender points spotlights along local -Z, unity along local +Z
# which is Blender's +Y, so rotate 90 degrees around local X to
# go from Unity to Blender
rot = Quaternion((0.5**0.5,0.5**0.5,0,0))
obj.rotation_quaternion = rot * Quaternion(transform.localRotation)
obj.scale = Vector(transform.localScale)
properties.SetPropMask(obj.muproperties.cullingMask, mulight.cullingMask)
bpy.context.scene.objects.link(obj)
return obj
property_map = {
"m_LocalPosition.x": ("obj", "location", 0, 1),
"m_LocalPosition.y": ("obj", "location", 2, 1),
"m_LocalPosition.z": ("obj", "location", 1, 1),
"m_LocalRotation.x": ("obj", "rotation_quaternion", 1, -1),
"m_LocalRotation.y": ("obj", "rotation_quaternion", 3, -1),
"m_LocalRotation.z": ("obj", "rotation_quaternion", 2, -1),
"m_LocalRotation.w": ("obj", "rotation_quaternion", 0, 1),
"m_LocalScale.x": ("obj", "scale", 0, 1),
"m_LocalScale.y": ("obj", "scale", 2, 1),
"m_LocalScale.z": ("obj", "scale", 1, 1),
"m_Intensity": ("data", "energy", 0, 1),
}
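# Editor's note on the (index, multiplier) columns above: Unity is Y-up and
# left-handed while Blender is Z-up and right-handed, so Y and Z components
# are swapped, and the quaternion's vector part (x, y, z) is negated to
# convert the rotation's handedness; w and the position terms keep +1.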
def create_fcurve(action, curve, propmap):
dp, ind, mult = propmap
fps = bpy.context.scene.render.fps
fc = action.fcurves.new(data_path = dp, index = ind)
fc.keyframe_points.add(len(curve.keys))
for i, key in enumerate(curve.keys):
x,y = key.time * fps, key.value * mult
fc.keyframe_points[i].co = x, y
fc.keyframe_points[i].handle_left_type = 'FREE'
fc.keyframe_points[i].handle_right_type = 'FREE'
if i > 0:
dist = (key.time - curve.keys[i - 1].time) / 3
dx, dy = dist * fps, key.tangent[0] * dist * mult
else:
dx, dy = 10, 0.0
fc.keyframe_points[i].handle_left = x - dx, y - dy
if i < len(curve.keys) - 1:
dist = (curve.keys[i + 1].time - key.time) / 3
dx, dy = dist * fps, key.tangent[1] * dist * mult
else:
dx, dy = 10, 0.0
fc.keyframe_points[i].handle_right = x + dx, y + dy
return True
def create_action(mu, path, clip):
#print(clip.name)
actions = {}
for curve in clip.curves:
if not curve.path:
mu_path = path
else:
mu_path = "/".join([path, curve.path])
if mu_path not in mu.objects:
print("Unknown path: %s" % (mu_path))
continue
obj = mu.objects[mu_path]
if curve.property not in property_map:
print("%s: Unknown property: %s" % (mu_path, curve.property))
continue
propmap = property_map[curve.property]
subpath, propmap = propmap[0], propmap[1:]
if subpath != "obj":
obj = getattr (obj, subpath)
name = ".".join([clip.name, curve.path, subpath])
if name not in actions:
actions[name] = bpy.data.actions.new(name), obj
act, obj = actions[name]
if not create_fcurve(act, curve, propmap):
continue
for name in actions:
act, obj = actions[name]
if not obj.animation_data:
obj.animation_data_create()
track = obj.animation_data.nla_tracks.new()
track.name = clip.name
track.strips.new(act.name, 1.0, act)
def create_collider(mu, muobj):
col = muobj.collider
name = muobj.transform.name
if type(col) == MuColliderMesh:
name = name + ".collider"
mesh = create_mesh(mu, col.mesh, name)
else:
mesh = bpy.data.meshes.new(name)
obj = create_mesh_object(name, mesh, None)
obj.muproperties.isTrigger = False
if type(col) != MuColliderWheel:
obj.muproperties.isTrigger = col.isTrigger
if type(col) == MuColliderMesh:
obj.muproperties.collider = 'MU_COL_MESH'
elif type(col) == MuColliderSphere:
obj.muproperties.radius = col.radius
obj.muproperties.center = col.center
obj.muproperties.collider = 'MU_COL_SPHERE'
elif type(col) == MuColliderCapsule:
obj.muproperties.radius = col.radius
obj.muproperties.height = col.height
obj.muproperties.direction = properties.dir_map[col.direction]
obj.muproperties.center = col.center
obj.muproperties.collider = 'MU_COL_CAPSULE'
elif type(col) == MuColliderBox:
obj.muproperties.size = col.size
obj.muproperties.center = col.center
obj.muproperties.collider = 'MU_COL_BOX'
elif type(col) == MuColliderWheel:
obj.muproperties.radius = col.radius
obj.muproperties.suspensionDistance = col.suspensionDistance
obj.muproperties.center = col.center
obj.muproperties.mass = col.mass
copy_spring(obj.muproperties.suspensionSpring, col.suspensionSpring)
copy_friction(obj.muproperties.forwardFriction, col.forwardFriction)
copy_friction(obj.muproperties.sideFriction, col.sidewaysFriction)
obj.muproperties.collider = 'MU_COL_WHEEL'
if type(col) != MuColliderMesh:
collider.build_collider(obj)
return obj
def create_object(mu, muobj, parent, create_colliders, parents):
def isExcludedObject(muobj):
for obj in EXCLUDED_OBJECTS:
if obj in muobj.transform.name.lower():
return True
return False
obj = None
mesh = None
if isExcludedObject(muobj):
return None
if hasattr(muobj, "shared_mesh"):
mesh = create_mesh(mu, muobj.shared_mesh, muobj.transform.name)
for poly in mesh.polygons:
poly.use_smooth = True
obj = create_mesh_object(muobj.transform.name, mesh, muobj.transform)
elif hasattr(muobj, "skinned_mesh_renderer"):
smr = muobj.skinned_mesh_renderer
mesh = create_mesh(mu, smr.mesh, muobj.transform.name)
for poly in mesh.polygons:
poly.use_smooth = True
obj = create_mesh_object(muobj.transform.name, mesh, muobj.transform)
mumat = mu.materials[smr.materials[0]]
mesh.materials.append(mumat.material)
if hasattr(muobj, "renderer"):
if mesh:
mumat = mu.materials[muobj.renderer.materials[0]]
mesh.materials.append(mumat.material)
if not obj:
if hasattr(muobj, "light"):
obj = create_light(mu, muobj.light, muobj.transform)
if not obj:
obj = create_mesh_object(muobj.transform.name, None, muobj.transform)
parents.append(muobj.transform.name)
path = "/".join(parents)
mu.objects[path] = obj
if hasattr(muobj, "tag_and_layer"):
obj.muproperties.tag = muobj.tag_and_layer.tag
obj.muproperties.layer = muobj.tag_and_layer.layer
if create_colliders and hasattr(muobj, "collider"):
cobj = create_collider(mu, muobj)
cobj.parent = obj
obj.parent = parent
for child in muobj.children:
create_object(mu, child, obj, create_colliders, parents)
if hasattr(muobj, "animation"):
for clip in muobj.animation.clips:
create_action(mu, path, clip)
parents.remove(muobj.transform.name)
return obj
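# Note on convert_bump: KSP stores normal maps swizzled, with the X component
# in the alpha channel, so the converter below rebuilds a viewable normal map
# by moving the alpha/blue bytes into the red/green slots. The commented-out
# line inside the loop shows the exact Z reconstruction via
# sqrt(1 - x**2 - z**2); the shipped code simply saturates the third channel
# to 255 as a cheaper approximation.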
def convert_bump(pixels, width, height):
outp = list(pixels)
for y in range(1, height - 1):
for x in range(1, width - 1):
index = (y * width + x) * 4
p = pixels[index:index + 4]
nx = (p[3]-128) / 127.
nz = (p[2]-128) / 127.
#n = [p[3],p[2],int(sqrt(1-nx**2-nz**2)*127 + 128),255]
n = [p[3],p[2],255,255]
outp[index:index + 4] = n
return outp
def load_mbm(mbmpath):
mbmfile = open(mbmpath, "rb")
header = mbmfile.read(20)
magic, width, height, bump, bpp = unpack("<5i", header)
    if magic != 0x50534b03: # "\x03KSP" as little endian
        raise ValueError("%s is not an MBM file (bad magic 0x%08x)" % (mbmpath, magic))
if bpp == 32:
pixels = mbmfile.read(width * height * 4)
elif bpp == 24:
pixels = [0, 0, 0, 255] * width * height
for i in range(width * height):
p = mbmfile.read(3)
l = i * 4
pixels[l:l+3] = list(p)
    else:
        raise ValueError("unsupported MBM bit depth: %d" % bpp)
    mbmfile.close()
    if bump:
        pixels = convert_bump(pixels, width, height)
    return width, height, pixels
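# Note on load_dds: Blender reads DDS rows in the opposite vertical order
# from what the .mu UV data expects, so the rows are swapped in place to
# flip the image; textures named like "foo_n.dds" are additionally treated
# as swizzled normal maps and run through convert_bump.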
def load_dds(dds_image):
pixels = list(dds_image.pixels[:])
rowlen = dds_image.size[0] * 4
height = dds_image.size[1]
for y in range(int(height/2)):
ind1 = y * rowlen
ind2 = (height - 1 - y) * rowlen
t = pixels[ind1 : ind1 + rowlen]
pixels[ind1:ind1+rowlen] = pixels[ind2:ind2+rowlen]
pixels[ind2:ind2+rowlen] = t
if dds_image.name[-6:-4] == "_n":
pixels = convert_bump(pixels, dds_image.size[0], height)
dds_image.pixels = pixels[:]
def load_image(name, path):
img_path = os.path.join(path, name)
if any(name == os.path.basename(packed_img.filepath) \
for packed_img in bpy.data.images):
# Add the directory name between the file name and the extension
basename, ext = os.path.splitext(name)
img_path = basename + os.path.split(path)[-1] + ext
if name[-4:].lower() in [".png", ".tga"]:
img = bpy.data.images.load(os.path.join(path, name))
elif name[-4:].lower() == ".dds":
img = bpy.data.images.load(os.path.join(path, name))
load_dds(img)
elif name[-4:].lower() == ".mbm":
w,h, pixels = load_mbm(os.path.join(path, name))
img = bpy.data.images.new(name, w, h)
img.pixels[:] = map(lambda x: x / 255.0, pixels)
# Pack image and change filepath to avoid texture overriding
img.pack(True)
img.filepath = img_path
def create_textures(mu, path):
# Note: DDS textures are previously converted to .png in exporter
# so here the extension saved in .mu is not the good one
extensions = [".png" ,".dds", ".mbm", ".tga"]
#texture info is in the top level object
for tex in mu.textures:
base = os.path.splitext(tex.name)[0]
for e in extensions:
name = base + e
texture_path = os.path.join(path, name)
if os.path.exists(texture_path):
load_image(name, path)
tx = bpy.data.textures.new(tex.name, 'IMAGE')
tx.use_preview_alpha = True
tx.image = bpy.data.images[name]
break
pass
def add_texture(mu, mat, mattex):
i, s, o = mattex.index, mattex.scale, mattex.offset
mat.texture_slots.add()
ts = mat.texture_slots[0]
ts.texture = bpy.data.textures[mu.textures[i].name]
ts.use_map_alpha = True
ts.texture_coords = 'UV'
ts.scale = s + (1,)
ts.offset = o + (0,)
def create_materials(mu, use_classic=False):
#material info is in the top level object
for mumat in mu.materials:
if(use_classic):
mumat.material = make_material(mumat, mu)
else:
mumat.material = make_shader(mumat, mu)
def import_mu(self, context, filepath, create_colliders, use_classic_material=False):
operator = self
undo = bpy.context.user_preferences.edit.use_global_undo
bpy.context.user_preferences.edit.use_global_undo = False
for obj in bpy.context.scene.objects:
obj.select = False
mu = Mu()
if not mu.read(filepath):
bpy.context.user_preferences.edit.use_global_undo = undo
operator.report({'ERROR'},
"Unrecognized format: %s %d" % (mu.magic, mu.version))
return {'CANCELLED'}
create_textures(mu, os.path.dirname(filepath))
create_materials(mu, use_classic_material)
mu.objects = {}
obj = create_object(mu, mu.obj, None, create_colliders, [])
bpy.context.scene.objects.active = obj
obj.select = True
bpy.context.user_preferences.edit.use_global_undo = undo
return {'FINISHED'}
class ImportMu(bpy.types.Operator, ImportHelper):
'''Load a KSP Mu (.mu) File'''
bl_idname = "import_object.ksp_mu"
bl_label = "Import Mu"
bl_description = """Import a KSP .mu model."""
bl_options = {'REGISTER', 'UNDO'}
filename_ext = ".mu"
filter_glob = StringProperty(default="*.mu", options={'HIDDEN'})
create_colliders = BoolProperty(name="Create Colliders",
description="Disable to import only visual and hierarchy elements",
default=True)
def execute(self, context):
keywords = self.as_keywords (ignore=("filter_glob",))
return import_mu(self, context, **keywords)
| sketchfab/io_object_mu | import_mu.py | Python | gpl-2.0 | 15,783 | 0.003485 |
# A very basic settings file that allows Sphinx to build
# the docs (this is because autodoc is used).
import os
import sys
sys.path.insert(0, os.getcwd())
sys.path.insert(0, os.path.join(os.getcwd(), os.pardir))
SITE_ID = 303
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {"default": {
"NAME": ":memory:",
"ENGINE": "django.db.backends.sqlite3",
"USER": '',
"PASSWORD": '',
"PORT": '',
}}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'scaffold',
)
SECRET_KEY = "NULL"
SCAFFOLD_EXTENDING_APP_NAME = "scaffold"
SCAFFOLD_EXTENDING_MODEL_PATH = "scaffold.models.BaseSection"
| mazelife/django-scaffold | docs/settings.py | Python | bsd-3-clause | 698 | 0 |
# -*- coding: utf-8 -*-
import os
import subprocess
import ujson
from django.conf import settings
from django.test import TestCase, override_settings
from django.http import HttpResponse
from typing import Any, Dict, List
from zproject.settings import DEPLOY_ROOT
from zerver.lib.integrations import INTEGRATIONS
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import HostRequestMock
from zerver.lib.test_runner import slow
from zerver.lib.utils import split_by
from zerver.models import Realm, get_realm
from zerver.views.documentation import (
add_api_uri_context,
)
class DocPageTest(ZulipTestCase):
def get_doc(self, url: str, subdomain: str) -> HttpResponse:
if url[0:23] == "/integrations/doc-html/":
return self.client_get(url, subdomain=subdomain, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
return self.client_get(url, subdomain=subdomain)
def print_msg_if_error(self, response: HttpResponse) -> None: # nocoverage
if response.status_code != 200 and response.get('Content-Type') == 'application/json':
content = ujson.loads(response.content)
print()
print("======================================================================")
print("ERROR: {}".format(content.get('msg')))
print()
def _test(self, url: str, expected_content: str, extra_strings: List[str]=[],
landing_missing_strings: List[str]=[], landing_page: bool=True,
doc_html_str: bool=False) -> None:
# Test the URL on the "zephyr" subdomain
result = self.get_doc(url, subdomain="zephyr")
self.print_msg_if_error(result)
self.assertEqual(result.status_code, 200)
self.assertIn(expected_content, str(result.content))
for s in extra_strings:
self.assertIn(s, str(result.content))
if not doc_html_str:
self.assert_in_success_response(['<meta name="robots" content="noindex,nofollow">'], result)
# Test the URL on the root subdomain
result = self.get_doc(url, subdomain="")
self.print_msg_if_error(result)
self.assertEqual(result.status_code, 200)
self.assertIn(expected_content, str(result.content))
if not doc_html_str:
self.assert_in_success_response(['<meta name="robots" content="noindex,nofollow">'], result)
for s in extra_strings:
self.assertIn(s, str(result.content))
if not landing_page:
return
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
# Test the URL on the root subdomain with the landing page setting
result = self.get_doc(url, subdomain="")
self.print_msg_if_error(result)
self.assertEqual(result.status_code, 200)
self.assertIn(expected_content, str(result.content))
for s in extra_strings:
self.assertIn(s, str(result.content))
for s in landing_missing_strings:
self.assertNotIn(s, str(result.content))
if not doc_html_str:
self.assert_in_success_response(['<meta name="description" content="Zulip combines'], result)
self.assert_not_in_success_response(['<meta name="robots" content="noindex,nofollow">'], result)
# Test the URL on the "zephyr" subdomain with the landing page setting
result = self.get_doc(url, subdomain="zephyr")
self.print_msg_if_error(result)
self.assertEqual(result.status_code, 200)
self.assertIn(expected_content, str(result.content))
for s in extra_strings:
self.assertIn(s, str(result.content))
if not doc_html_str:
self.assert_in_success_response(['<meta name="robots" content="noindex,nofollow">'], result)
@slow("Tests dozens of endpoints")
def test_api_doc_endpoints(self) -> None:
current_dir = os.path.dirname(os.path.abspath(__file__))
api_docs_dir = os.path.join(current_dir, '..', '..', 'templates/zerver/api/')
files = os.listdir(api_docs_dir)
def _filter_func(fp: str) -> bool:
ignored_files = ['sidebar_index.md', 'index.md', 'missing.md']
return fp.endswith('.md') and fp not in ignored_files
files = list(filter(_filter_func, files))
for f in files:
endpoint = '/api/{}'.format(os.path.splitext(f)[0])
self._test(endpoint, '', doc_html_str=True)
@slow("Tests dozens of endpoints, including generating lots of emails")
def test_doc_endpoints(self) -> None:
self._test('/api/', 'The Zulip API')
self._test('/api/api-keys', 'be careful with it')
self._test('/api/installation-instructions', 'No download required!')
self._test('/api/send-message', 'steal away your hearts')
self._test('/api/render-message', '**foo**')
self._test('/api/get-all-streams', 'include_public')
self._test('/api/get-stream-id', 'The name of the stream to retrieve the ID for.')
self._test('/api/get-subscribed-streams', 'Get all streams that the user is subscribed to.')
self._test('/api/get-all-users', 'client_gravatar')
self._test('/api/register-queue', 'apply_markdown')
self._test('/api/get-events-from-queue', 'dont_block')
self._test('/api/delete-queue', 'Delete a previously registered queue')
self._test('/api/update-message', 'propagate_mode')
self._test('/api/get-profile', 'takes no arguments')
self._test('/api/add-subscriptions', 'authorization_errors_fatal')
self._test('/api/create-user', 'zuliprc-admin')
self._test('/api/remove-subscriptions', 'not_subscribed')
self._test('/team/', 'industry veterans')
self._test('/history/', 'Cambridge, Massachusetts')
# Test the i18n version of one of these pages.
self._test('/en/history/', 'Cambridge, Massachusetts')
self._test('/apps/', 'Apps for every platform.')
self._test('/features/', 'Beautiful messaging')
self._test('/hello/', 'productive team chat', landing_missing_strings=["Login"])
self._test('/why-zulip/', 'Why Zulip?')
self._test('/for/open-source/', 'for open source projects')
self._test('/for/companies/', 'in a company')
self._test('/for/working-groups-and-communities/', 'standards bodies')
self._test('/for/mystery-hunt/', 'four SIPB alums')
self._test('/security/', 'TLS encryption')
self._test('/atlassian/', 'HipChat')
self._test('/devlogin/', 'Normal users', landing_page=False)
self._test('/devtools/', 'Useful development URLs')
self._test('/errors/404/', 'Page not found')
self._test('/errors/5xx/', 'Internal server error')
self._test('/emails/', 'manually generate most of the emails by clicking')
result = self.client_get('/integrations/doc-html/nonexistent_integration', follow=True,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(result.status_code, 404)
result = self.client_get('/new-user/')
self.assertEqual(result.status_code, 301)
self.assertIn('hello', result['Location'])
result = self.client_get('/static/favicon.ico')
self.assertEqual(result.status_code, 200)
@slow("Tests dozens of endpoints, including all our integrations docs")
def test_integration_doc_endpoints(self) -> None:
self._test('/integrations/',
'native integrations.',
extra_strings=[
'And hundreds more through',
'Hubot',
'Zapier',
'IFTTT'
])
for integration in INTEGRATIONS.keys():
url = '/integrations/doc-html/{}'.format(integration)
self._test(url, '', doc_html_str=True)
def test_integration_pages_open_graph_metadata(self) -> None:
url = '/integrations/doc/github'
title = '<meta property="og:title" content="Connect GitHub to Zulip">'
description = '<meta property="og:description" content="Zulip comes with over'
self._test(url, title, doc_html_str=True)
self._test(url, description, doc_html_str=True)
# Test category pages
url = '/integrations/communication'
title = '<meta property="og:title" content="Connect your Communication tools to Zulip">'
description = '<meta property="og:description" content="Zulip comes with over'
self._test(url, title, doc_html_str=True)
self._test(url, description, doc_html_str=True)
def test_email_integration(self) -> None:
self._test('/integrations/doc-html/email',
'support+abcdefg@testserver', doc_html_str=True)
with self.settings(EMAIL_GATEWAY_PATTERN=''):
result = self.get_doc('integrations/doc-html/email', subdomain='zulip')
self.assertNotIn('support+abcdefg@testserver', str(result.content))
# if EMAIL_GATEWAY_PATTERN is empty, the main /integrations page should
# be rendered instead
self._test('/integrations/', 'native integrations.')
def test_doc_html_str_non_ajax_call(self) -> None:
# We don't need to test all the pages for 404
        for integration in list(INTEGRATIONS.keys())[:5]:
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
url = '/en/integrations/doc-html/{}'.format(integration)
result = self.client_get(url, subdomain="", follow=True)
self.assertEqual(result.status_code, 404)
result = self.client_get(url, subdomain="zephyr", follow=True)
self.assertEqual(result.status_code, 404)
url = '/en/integrations/doc-html/{}'.format(integration)
result = self.client_get(url, subdomain="", follow=True)
self.assertEqual(result.status_code, 404)
result = self.client_get(url, subdomain="zephyr", follow=True)
self.assertEqual(result.status_code, 404)
result = self.client_get('/integrations/doc-html/nonexistent_integration', follow=True)
self.assertEqual(result.status_code, 404)
def test_electron_detection(self) -> None:
result = self.client_get("/accounts/password/reset/")
self.assertTrue('data-platform="website"' in result.content.decode("utf-8"))
result = self.client_get("/accounts/password/reset/",
HTTP_USER_AGENT="ZulipElectron/1.0.0")
self.assertTrue('data-platform="ZulipElectron"' in result.content.decode("utf-8"))
class HelpTest(ZulipTestCase):
def test_help_settings_links(self) -> None:
result = self.client_get('/help/change-the-time-format')
self.assertEqual(result.status_code, 200)
self.assertIn('Go to <a href="/#settings/display-settings">Display settings</a>', str(result.content))
# Check that the sidebar was rendered properly.
self.assertIn('Getting started with Zulip', str(result.content))
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
result = self.client_get('/help/change-the-time-format', subdomain="")
self.assertEqual(result.status_code, 200)
self.assertIn('<strong>Display settings</strong>', str(result.content))
self.assertNotIn('/#settings', str(result.content))
def test_help_relative_links_for_gear(self) -> None:
result = self.client_get('/help/analytics')
self.assertIn('<a href="/stats">Statistics</a>', str(result.content))
self.assertEqual(result.status_code, 200)
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
result = self.client_get('/help/analytics', subdomain="")
self.assertEqual(result.status_code, 200)
self.assertIn('<strong>Statistics</strong>', str(result.content))
self.assertNotIn('/stats', str(result.content))
def test_help_relative_links_for_stream(self) -> None:
result = self.client_get('/help/message-a-stream-by-email')
self.assertIn('<a href="/#streams/subscribed">Your streams</a>', str(result.content))
self.assertEqual(result.status_code, 200)
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
result = self.client_get('/help/message-a-stream-by-email', subdomain="")
self.assertEqual(result.status_code, 200)
self.assertIn('<strong>Manage streams</strong>', str(result.content))
self.assertNotIn('/#streams', str(result.content))
class IntegrationTest(TestCase):
def test_check_if_every_integration_has_logo_that_exists(self) -> None:
for integration in INTEGRATIONS.values():
self.assertTrue(os.path.isfile(os.path.join(DEPLOY_ROOT, integration.logo)))
def test_api_url_view_subdomains_base(self) -> None:
context = dict() # type: Dict[str, Any]
add_api_uri_context(context, HostRequestMock())
self.assertEqual(context["api_url_scheme_relative"], "testserver/api")
self.assertEqual(context["api_url"], "http://testserver/api")
self.assertTrue(context["html_settings_links"])
@override_settings(ROOT_DOMAIN_LANDING_PAGE=True)
def test_api_url_view_subdomains_homepage_base(self) -> None:
context = dict() # type: Dict[str, Any]
add_api_uri_context(context, HostRequestMock())
self.assertEqual(context["api_url_scheme_relative"], "yourZulipDomain.testserver/api")
self.assertEqual(context["api_url"], "http://yourZulipDomain.testserver/api")
self.assertFalse(context["html_settings_links"])
def test_api_url_view_subdomains_full(self) -> None:
context = dict() # type: Dict[str, Any]
request = HostRequestMock(host="mysubdomain.testserver")
add_api_uri_context(context, request)
self.assertEqual(context["api_url_scheme_relative"], "mysubdomain.testserver/api")
self.assertEqual(context["api_url"], "http://mysubdomain.testserver/api")
self.assertTrue(context["html_settings_links"])
def test_html_settings_links(self) -> None:
context = dict() # type: Dict[str, Any]
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
add_api_uri_context(context, HostRequestMock())
self.assertEqual(
context['settings_html'],
'Zulip settings page')
self.assertEqual(
context['subscriptions_html'],
'streams page')
context = dict()
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
add_api_uri_context(context, HostRequestMock(host="mysubdomain.testserver"))
self.assertEqual(
context['settings_html'],
'<a href="/#settings">Zulip settings page</a>')
self.assertEqual(
context['subscriptions_html'],
'<a target="_blank" href="/#streams">streams page</a>')
context = dict()
add_api_uri_context(context, HostRequestMock())
self.assertEqual(
context['settings_html'],
'<a href="/#settings">Zulip settings page</a>')
self.assertEqual(
context['subscriptions_html'],
'<a target="_blank" href="/#streams">streams page</a>')
class AboutPageTest(ZulipTestCase):
def setUp(self) -> None:
""" Manual installation which did not execute `tools/provision`
would not have the `static/generated/github-contributors.json` fixture
file.
"""
# This block has unreliable test coverage due to the implicit
# caching here, so we exclude it from coverage.
if not os.path.exists(settings.CONTRIBUTORS_DATA):
# Copy the fixture file in `zerver/tests/fixtures` to `static/generated`
update_script = os.path.join(os.path.dirname(__file__),
'../../tools/update-authors-json') # nocoverage
subprocess.check_call([update_script, '--use-fixture']) # nocoverage
def test_endpoint(self) -> None:
""" We can't check the contributors list since it is rendered client-side """
result = self.client_get('/team/')
self.assert_in_success_response(['Our amazing community'], result)
def test_split_by(self) -> None:
"""Utility function primarily used in authors page"""
flat_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
expected_result = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
self.assertEqual(split_by(flat_list, 3, None), expected_result)
class ConfigErrorTest(ZulipTestCase):
@override_settings(GOOGLE_OAUTH2_CLIENT_ID=None)
def test_google(self) -> None:
result = self.client_get("/accounts/login/google/")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/config-error/google')
result = self.client_get(result.url)
self.assert_in_success_response(["google_oauth2_client_id"], result)
self.assert_in_success_response(["google_oauth2_client_secret"], result)
self.assert_in_success_response(["zproject/dev-secrets.conf"], result)
self.assert_not_in_success_response(["GOOGLE_OAUTH2_CLIENT_ID"], result)
self.assert_not_in_success_response(["zproject/dev_settings.py"], result)
self.assert_not_in_success_response(["/etc/zulip/settings.py"], result)
self.assert_not_in_success_response(["/etc/zulip/zulip-secrets.conf"], result)
@override_settings(GOOGLE_OAUTH2_CLIENT_ID=None)
@override_settings(DEVELOPMENT=False)
def test_google_production_error(self) -> None:
result = self.client_get("/accounts/login/google/")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/config-error/google')
result = self.client_get(result.url)
self.assert_in_success_response(["GOOGLE_OAUTH2_CLIENT_ID"], result)
self.assert_in_success_response(["/etc/zulip/settings.py"], result)
self.assert_in_success_response(["google_oauth2_client_secret"], result)
self.assert_in_success_response(["/etc/zulip/zulip-secrets.conf"], result)
self.assert_not_in_success_response(["google_oauth2_client_id"], result)
self.assert_not_in_success_response(["zproject/dev_settings.py"], result)
self.assert_not_in_success_response(["zproject/dev-secrets.conf"], result)
@override_settings(SOCIAL_AUTH_GITHUB_KEY=None)
def test_github(self) -> None:
result = self.client_get("/accounts/login/social/github")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/config-error/github')
result = self.client_get(result.url)
self.assert_in_success_response(["social_auth_github_key"], result)
self.assert_in_success_response(["social_auth_github_secret"], result)
self.assert_in_success_response(["zproject/dev-secrets.conf"], result)
self.assert_not_in_success_response(["SOCIAL_AUTH_GITHUB_KEY"], result)
self.assert_not_in_success_response(["zproject/dev_settings.py"], result)
self.assert_not_in_success_response(["/etc/zulip/settings.py"], result)
self.assert_not_in_success_response(["/etc/zulip/zulip-secrets.conf"], result)
@override_settings(SOCIAL_AUTH_GITHUB_KEY=None)
@override_settings(DEVELOPMENT=False)
def test_github_production_error(self) -> None:
"""Test the !DEVELOPMENT code path of config-error."""
result = self.client_get("/accounts/login/social/github")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/config-error/github')
result = self.client_get(result.url)
self.assert_in_success_response(["SOCIAL_AUTH_GITHUB_KEY"], result)
self.assert_in_success_response(["/etc/zulip/settings.py"], result)
self.assert_in_success_response(["social_auth_github_secret"], result)
self.assert_in_success_response(["/etc/zulip/zulip-secrets.conf"], result)
self.assert_not_in_success_response(["social_auth_github_key"], result)
self.assert_not_in_success_response(["zproject/dev_settings.py"], result)
self.assert_not_in_success_response(["zproject/dev-secrets.conf"], result)
def test_smtp_error(self) -> None:
result = self.client_get("/config-error/smtp")
self.assertEqual(result.status_code, 200)
self.assert_in_success_response(["email configuration"], result)
def test_dev_direct_production_error(self) -> None:
result = self.client_get("/config-error/dev")
self.assertEqual(result.status_code, 200)
self.assert_in_success_response(["DevAuthBackend"], result)
class PlansPageTest(ZulipTestCase):
def test_plans_auth(self) -> None:
# Test root domain
result = self.client_get("/plans/", subdomain="")
self.assert_in_success_response(["Sign up now"], result)
        # Test non-existent domain
result = self.client_get("/plans/", subdomain="moo")
self.assertEqual(result.status_code, 404)
self.assert_in_response("does not exist", result)
# Test valid domain, no login
realm = get_realm("zulip")
realm.plan_type = Realm.STANDARD_FREE
realm.save(update_fields=["plan_type"])
result = self.client_get("/plans/", subdomain="zulip")
self.assertEqual(result.status_code, 302)
self.assertEqual(result["Location"], "/accounts/login/?next=plans")
# Test valid domain, with login
self.login(self.example_email('hamlet'))
result = self.client_get("/plans/", subdomain="zulip")
self.assert_in_success_response(["Current plan"], result)
# Test root domain, with login on different domain
result = self.client_get("/plans/", subdomain="")
# TODO: works in manual testing, but I suspect something is funny in
# the test environment
# self.assert_in_success_response(["Sign up now"], result)
def test_CTA_text_by_plan_type(self) -> None:
sign_up_now = "Sign up now"
buy_standard = "Buy Standard"
current_plan = "Current plan"
# Root domain
result = self.client_get("/plans/", subdomain="")
self.assert_in_success_response([sign_up_now, buy_standard], result)
self.assert_not_in_success_response([current_plan], result)
realm = get_realm("zulip")
realm.plan_type = Realm.SELF_HOSTED
realm.save(update_fields=["plan_type"])
result = self.client_get("/plans/", subdomain="zulip")
self.assertEqual(result.status_code, 302)
self.assertEqual(result["Location"], "https://zulipchat.com/plans")
self.login(self.example_email("iago"))
# SELF_HOSTED should hide the local plans page, even if logged in
result = self.client_get("/plans/", subdomain="zulip")
self.assertEqual(result.status_code, 302)
self.assertEqual(result["Location"], "https://zulipchat.com/plans")
realm.plan_type = Realm.LIMITED
realm.save(update_fields=["plan_type"])
result = self.client_get("/plans/", subdomain="zulip")
self.assert_in_success_response([current_plan, buy_standard], result)
self.assert_not_in_success_response([sign_up_now], result)
realm.plan_type = Realm.STANDARD_FREE
realm.save(update_fields=["plan_type"])
result = self.client_get("/plans/", subdomain="zulip")
self.assert_in_success_response([current_plan], result)
self.assert_not_in_success_response([sign_up_now, buy_standard], result)
realm.plan_type = Realm.STANDARD
realm.save(update_fields=["plan_type"])
result = self.client_get("/plans/", subdomain="zulip")
self.assert_in_success_response([current_plan], result)
self.assert_not_in_success_response([sign_up_now, buy_standard], result)
| rishig/zulip | zerver/tests/test_docs.py | Python | apache-2.0 | 24,048 | 0.002828 |
import sqlite3
import os
from mitmproxy.io import protobuf
class DBHandler:
"""
This class is wrapping up connection to SQLITE DB.
"""
def __init__(self, db_path, mode='load'):
if mode == 'write':
if os.path.isfile(db_path):
os.remove(db_path)
self.db_path = db_path
self._con = sqlite3.connect(self.db_path)
self._c = self._con.cursor()
self._create_db()
def _create_db(self):
with self._con:
self._con.execute('CREATE TABLE IF NOT EXISTS FLOWS('
'id INTEGER PRIMARY KEY,'
'pbuf_blob BLOB)')
def store(self, flows):
blobs = []
for flow in flows:
blobs.append((protobuf.dumps(flow),))
with self._con:
self._con.executemany('INSERT INTO FLOWS (pbuf_blob) values (?)', blobs)
def load(self):
flows = []
self._c.execute('SELECT pbuf_blob FROM FLOWS')
for row in self._c.fetchall():
flows.append((protobuf.loads(row[0])))
return flows
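# Minimal usage sketch (illustrative; the file path is an assumption and the
# flows would in practice come from elsewhere, e.g. mitmproxy's io.FlowReader):
#
#   writer = DBHandler("/tmp/flows.sqlite", mode="write")
#   writer.store(flows)                      # one protobuf blob per flow
#   same_flows = DBHandler("/tmp/flows.sqlite").load()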
| ujjwal96/mitmproxy | mitmproxy/io/db.py | Python | mit | 1,107 | 0.000903 |
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from electrum.i18n import _
from electrum.util import base_units
import os
from label_dialog import LabelDialog
Builder.load_string('''
#:import os os
<WalletDialog@Popup>:
title: _('Wallets')
id: popup
path: ''
BoxLayout:
orientation: 'vertical'
FileChooserListView:
id: wallet_selector
dirselect: False
filter_dirs: True
filter: '*.*'
path: os.path.dirname(app.wallet.storage.path)
size_hint_y: 0.6
Widget
size_hint_y: 0.1
GridLayout:
cols: 2
size_hint_y: 0.1
Button:
size_hint: 0.1, None
height: '48dp'
text: _('Cancel')
on_release:
popup.dismiss()
Button:
id: open_button
size_hint: 0.1, None
height: '48dp'
text: _('Open') if wallet_selector.selection else _('New Wallet')
on_release:
popup.dismiss()
root.new_wallet(app, wallet_selector.path)
''')
class WalletDialog(Factory.Popup):
def new_wallet(self, app, dirname):
def cb(text):
if text:
app.load_wallet_by_name(os.path.join(dirname, text))
if self.ids.wallet_selector.selection:
app.load_wallet_by_name(self.ids.wallet_selector.selection[0])
else:
d = LabelDialog(_('Enter wallet name'), '', cb)
d.open()
| valesi/electrum | gui/kivy/uix/dialogs/wallets.py | Python | gpl-3.0 | 1,677 | 0.001193 |
import smtplib
import base64
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email import encoders
import flogger_settings
import os
import datetime
def email_msg(sender, receiver, msg, date, settings):
# print "Send take off msg"
if settings.FLOGGER_TAKEOFF_EMAIL != "y" and settings.FLOGGER_TAKEOFF_EMAIL != "Y":
# Don't send take off email msg
return
# body = "Msg from %s. %s taken off @ %s" % (settings.APRS_USER, msg, date)
body = "%s. %s taken off @ %s" % (settings.APRS_USER, msg, date)
print body
msg = MIMEMultipart()
msg.attach(MIMEText(body, 'plain'))
fromaddr = sender
toaddr = receiver
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = body
server = smtplib.SMTP(settings.FLOGGER_SMTP_SERVER_URL, settings.FLOGGER_SMTP_SERVER_PORT)
text = msg.as_string()
# print "Msg string is: ", text
try:
server.sendmail(fromaddr, toaddr, text)
except Exception as e:
print "Send email_msg failed, reason: ", e
server.quit()
return
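# Hypothetical call site (addresses, aircraft ID and timestamp are purely
# illustrative; the last argument is the flogger_settings module imported
# above):
#
#   email_msg("[email protected]", "[email protected]",
#             "G-ABCD", datetime.datetime.now(), flogger_settings)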
| tobiz/OGN-Flight-Logger_V3 | flogger_email_msg.py | Python | gpl-3.0 | 1,212 | 0.012376 |
import torch
from .Criterion import Criterion
from .utils import recursiveResizeAs, recursiveFill, recursiveAdd
class ParallelCriterion(Criterion):
def __init__(self, repeatTarget=False):
super(ParallelCriterion, self).__init__()
self.criterions = []
self.weights = []
self.gradInput = []
self.repeatTarget = repeatTarget
def add(self, criterion, weight=1):
self.criterions.append(criterion)
self.weights.append(weight)
return self
def updateOutput(self, input, target):
self.output = 0
for i, criterion in enumerate(self.criterions):
current_target = target if self.repeatTarget else target[i]
self.output += self.weights[i] * criterion.updateOutput(input[i], current_target)
return self.output
def updateGradInput(self, input, target):
self.gradInput = recursiveResizeAs(self.gradInput, input)[0]
recursiveFill(self.gradInput, 0)
for i, criterion in enumerate(self.criterions):
current_target = target if self.repeatTarget else target[i]
recursiveAdd(self.gradInput[i], self.weights[i], criterion.updateGradInput(input[i], current_target))
return self.gradInput
def type(self, type=None, tensorCache=None):
self.gradInput = []
return super(ParallelCriterion, self).type(type, tensorCache)
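# Minimal usage sketch (illustrative; the criterion choices and the 0.5
# weight are assumptions). `input` and `target` are sequences with one entry
# per added criterion, unless repeatTarget=True, in which case the same
# target is handed to every criterion:
#
#   pc = ParallelCriterion()
#   pc.add(MSECriterion()).add(AbsCriterion(), weight=0.5)
#   loss = pc.updateOutput((x1, x2), (t1, t2))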
| RPGOne/Skynet | pytorch-master/torch/legacy/nn/ParallelCriterion.py | Python | bsd-3-clause | 1,404 | 0.001425 |
#!/usr/bin/env python
import sys
def convert_str(infile, outfile):
f = open(infile, 'r')
lines = f.readlines()
f.close()
f = open(outfile, 'w')
f.writelines(['"%s\\n"\n' % i.rstrip() for i in lines])
f.close()
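# Example of the transformation (illustrative): a shader source line such as
#   uniform mat4 mvp;
# becomes the C string literal
#   "uniform mat4 mvp;\n"
# so the generated .inc file can be #included directly into C/C++ code.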
def main():
convert_str('fountain.vert', 'fountain.vert.inc')
convert_str('fountain.frag', 'fountain.frag.inc')
if __name__ == '__main__':
main()
| fountainment/FountainEngineImproved | fountain/render/convert_shader.py | Python | mit | 396 | 0.007576 |
# Copyright 2008-2018 Davide Alberani <[email protected]>
# 2008-2018 H. Turgut Uyar <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module provides the classes (and the instances) that are used to parse
the results of a search for a given company.
For example, when searching for the name "Columbia Pictures", the parsed page
would be:
http://www.imdb.com/find?q=Columbia+Pictures&s=co
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from imdb.utils import analyze_company_name
from .piculet import Path, Rule, Rules, reducers
from .searchMovieParser import DOMHTMLSearchMovieParser
from .utils import analyze_imdbid
class DOMHTMLSearchCompanyParser(DOMHTMLSearchMovieParser):
"""A parser for the company search page."""
rules = [
Rule(
key='data',
extractor=Rules(
foreach='//td[@class="result_text"]',
rules=[
Rule(
key='link',
extractor=Path('./a/@href', reduce=reducers.first)
),
Rule(
key='name',
extractor=Path('./a/text()')
),
Rule(
key='notes',
extractor=Path('./text()')
)
],
transform=lambda x: (
analyze_imdbid(x.get('link')),
analyze_company_name(x.get('name') + x.get('notes', ''), stripNotes=True)
)
)
)
]
_OBJECTS = {
'search_company_parser': ((DOMHTMLSearchCompanyParser,), {'kind': 'company'})
}
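# Usage sketch via the public API (illustrative; the company name is just an
# example). IMDbPY routes company searches through the parser above:
#
#   from imdb import IMDb
#   ia = IMDb()
#   for company in ia.search_company('Columbia Pictures'):
#       print(company.companyID, company['name'])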
| Vagab0nd/SiCKRAGE | lib3/imdb/parser/http/searchCompanyParser.py | Python | gpl-3.0 | 2,406 | 0.001247 |
# -*- coding: utf-8 -*-
__version__ = '$Id: 7e07cc8b51fa2cdfb23c34c8652adf4a94003dc8 $'
import family
# The Wikimedia i18n family
class Family(family.Family):
def __init__(self):
family.Family.__init__(self)
self.name = 'i18n'
self.langs = {
'i18n': 'translatewiki.net',
}
self.namespaces[4] = {
'_default': [u'Project'],
}
self.namespaces[5] = {
'_default': [u'Project talk'],
}
self.namespaces[6] = {
'_default': [u'File'],
}
self.namespaces[7] = {
'_default': [u'File talk'],
}
self.namespaces[90] = {
'_default': [u'Thread'],
}
self.namespaces[91] = {
'_default': [u'Thread talk'],
}
self.namespaces[92] = {
'_default': [u'Summary'],
}
self.namespaces[93] = {
'_default': [u'Summary talk'],
}
self.namespaces[100] = {
'_default': [u'Portal'],
}
self.namespaces[101] = {
'_default': [u'Portal talk'],
}
self.namespaces[202] = {
'_default': [u'Property'],
}
self.namespaces[203] = {
'_default': [u'Property talk'],
}
self.namespaces[206] = {
'_default': [u'Form'],
}
self.namespaces[207] = {
'_default': [u'Form talk'],
}
self.namespaces[208] = {
'_default': [u'Concept'],
}
self.namespaces[209] = {
'_default': [u'Concept talk'],
}
self.namespaces[420] = {
'_default': [u'Layer'],
}
self.namespaces[421] = {
'_default': [u'Layer talk'],
}
self.namespaces[1102] = {
'_default': [u'Translating'],
}
self.namespaces[1103] = {
'_default': [u'Translating talk'],
}
self.namespaces[1198] = {
'_default': [u'Translations'],
}
self.namespaces[1199] = {
'_default': [u'Translations talk'],
}
self.namespaces[1200] = {
'_default': [u'Voctrain'],
}
self.namespaces[1201] = {
'_default': [u'Voctrain talk'],
}
self.namespaces[1202] = {
'_default': [u'FreeCol'],
}
self.namespaces[1203] = {
'_default': [u'FreeCol talk'],
}
self.namespaces[1204] = {
'_default': [u'Nocc'],
}
self.namespaces[1205] = {
'_default': [u'Nocc talk'],
}
self.namespaces[1206] = {
'_default': [u'Wikimedia'],
}
self.namespaces[1207] = {
'_default': [u'Wikimedia talk'],
}
self.namespaces[1208] = {
'_default': [u'StatusNet'],
}
self.namespaces[1209] = {
'_default': [u'StatusNet talk'],
}
self.namespaces[1210] = {
'_default': [u'Mantis'],
}
self.namespaces[1211] = {
'_default': [u'Mantis talk'],
}
self.namespaces[1212] = {
'_default': [u'Mwlib'],
}
self.namespaces[1213] = {
'_default': [u'Mwlib talk'],
}
self.namespaces[1214] = {
'_default': [u'Commonist'],
}
self.namespaces[1215] = {
'_default': [u'Commonist talk'],
}
self.namespaces[1216] = {
'_default': [u'OpenLayers'],
}
self.namespaces[1217] = {
'_default': [u'OpenLayers talk'],
}
self.namespaces[1218] = {
'_default': [u'FUDforum'],
}
self.namespaces[1219] = {
'_default': [u'FUDforum talk'],
}
self.namespaces[1220] = {
'_default': [u'Okawix'],
}
self.namespaces[1221] = {
'_default': [u'Okawix talk'],
}
self.namespaces[1222] = {
'_default': [u'Osm'],
}
self.namespaces[1223] = {
'_default': [u'Osm talk'],
}
self.namespaces[1224] = {
'_default': [u'WikiReader'],
}
self.namespaces[1225] = {
'_default': [u'WikiReader talk'],
}
self.namespaces[1226] = {
'_default': [u'Shapado'],
}
self.namespaces[1227] = {
'_default': [u'Shapado talk'],
}
self.namespaces[1228] = {
'_default': [u'iHRIS'],
}
self.namespaces[1229] = {
'_default': [u'iHRIS talk'],
}
self.namespaces[1230] = {
'_default': [u'Mifos'],
}
self.namespaces[1231] = {
'_default': [u'Mifos talk'],
}
self.namespaces[1232] = {
'_default': [u'Wikia'],
}
self.namespaces[1233] = {
'_default': [u'Wikia talk'],
}
self.namespaces[1234] = {
'_default': [u'OpenImages'],
}
self.namespaces[1235] = {
'_default': [u'OpenImages talk'],
}
self.namespaces[1236] = {
'_default': [u'Europeana'],
}
self.namespaces[1237] = {
'_default': [u'Europeana talk'],
}
self.namespaces[1238] = {
'_default': [u'Pywikipedia'],
}
self.namespaces[1239] = {
'_default': [u'Pywikipedia talk'],
}
self.namespaces[1240] = {
'_default': [u'Toolserver'],
}
self.namespaces[1241] = {
'_default': [u'Toolserver talk'],
}
self.namespaces[1242] = {
'_default': [u'EOL'],
}
self.namespaces[1243] = {
'_default': [u'EOL talk'],
}
self.namespaces[1244] = {
'_default': [u'Kiwix'],
}
self.namespaces[1245] = {
'_default': [u'Kiwix talk'],
}
self.namespaces[1246] = {
'_default': [u'Mozilla'],
}
self.namespaces[1247] = {
'_default': [u'Mozilla talk'],
}
self.namespaces[1248] = {
'_default': [u'FrontlineSMS'],
}
self.namespaces[1249] = {
'_default': [u'FrontlineSMS talk'],
}
self.namespaces[1250] = {
'_default': [u'EtherpadLite'],
}
self.namespaces[1251] = {
'_default': [u'EtherpadLite talk'],
}
self.namespaces[1252] = {
'_default': [u'Vicuna'],
}
self.namespaces[1253] = {
'_default': [u'Vicuna talk'],
}
def version(self, code):
return "1.21alpha"
| races1986/SafeLanguage | CEM/families/i18n_family.py | Python | epl-1.0 | 6,800 | 0.000294 |
import unittest
import time
import pprint
import logging
import scanner.logSetup as logSetup
import pyximport
print("Have Cython")
pyximport.install()
import dbPhashApi
class TestCompareDatabaseInterface(unittest.TestCase):
def __init__(self, *args, **kwargs):
logSetup.initLogging()
super().__init__(*args, **kwargs)
def setUp(self):
# We set up and tear down the tree a few times to validate the dropTree function
self.log = logging.getLogger("Main.TestCompareDatabaseInterface")
self.tree = dbPhashApi.PhashDbApi()
self.tree.forceReload()
def dist_check(self, distance, dbid, phash):
qtime1 = time.time()
have1 = self.tree.getWithinDistance_db(phash, distance=distance)
qtime2 = time.time()
qtime3 = time.time()
have2 = self.tree.getIdsWithinDistance(phash, distance=distance)
qtime4 = time.time()
# print(dbid, have1)
if have1 != have2:
self.log.error("Mismatch!")
for line in pprint.pformat(have1).split("\n"):
self.log.error(line)
for line in pprint.pformat(have2).split("\n"):
self.log.error(line)
self.assertTrue(dbid in have1)
self.assertTrue(dbid in have2)
self.assertEqual(have1, have2)
self.log.info('Dist %s %s, %s', distance, qtime2-qtime1, qtime4-qtime3)
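	# test_0 cross-checks the two query paths over a random sample of rows:
	# every hash must find itself, and the SQL-side scan must agree with the
	# in-memory BK-tree at every search radius from 1 to 8 bits.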
def test_0(self):
rand_r = self.tree.getRandomPhashRows(0.001)
self.log.info("Have %s items to test with", len(rand_r))
stepno = 0
for dbid, phash in rand_r:
self.dist_check(1, dbid, phash)
self.dist_check(2, dbid, phash)
self.dist_check(3, dbid, phash)
self.dist_check(4, dbid, phash)
self.dist_check(5, dbid, phash)
self.dist_check(6, dbid, phash)
self.dist_check(7, dbid, phash)
self.dist_check(8, dbid, phash)
stepno += 1
self.log.info("On step %s of %s", stepno, len(rand_r))
| fake-name/IntraArchiveDeduplicator | Tests/Test_db_BKTree_Compare.py | Python | bsd-3-clause | 1,765 | 0.026062 |
# -*- coding: utf-8 -*-
"""
test/test_api
~~~~~~~~~
Tests of the top-level SPDYPy API. These will be relatively sparse for the
moment.
"""
# Nasty little path hack.
import sys
sys.path.append('.')
class TestAPI(object):
"""
Tests for the top-level spdypy API.
"""
def test_can_import_spdypy_on_py_33(self):
import spdypy
assert True
| Lukasa/spdypy | test/test_api.py | Python | mit | 368 | 0 |
# -*- encoding: utf-8 -*-
# Copyright © 2012 New Dream Network, LLC (DreamHost)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import keystonemiddleware.audit as audit_middleware
from oslo_config import cfg
import oslo_middleware.cors as cors_middleware
import pecan
from ironic.api import config
from ironic.api.controllers import base
from ironic.api import hooks
from ironic.api import middleware
from ironic.api.middleware import auth_token
from ironic.common import exception
from ironic.conf import CONF
class IronicCORS(cors_middleware.CORS):
"""Ironic-specific CORS class
We're adding the Ironic-specific version headers to the list of simple
headers in order that a request bearing those headers might be accepted by
the Ironic REST API.
"""
simple_headers = cors_middleware.CORS.simple_headers + [
'X-Auth-Token',
base.Version.max_string,
base.Version.min_string,
base.Version.string
]
def get_pecan_config():
# Set up the pecan configuration
filename = config.__file__.replace('.pyc', '.py')
return pecan.configuration.conf_from_file(filename)
def setup_app(pecan_config=None, extra_hooks=None):
    # Resolve the pecan config before it is used to build the hooks list;
    # otherwise a None default would crash on attribute access below.
    if not pecan_config:
        pecan_config = get_pecan_config()
    app_hooks = [hooks.ConfigHook(),
                 hooks.DBHook(),
                 hooks.ContextHook(pecan_config.app.acl_public_routes),
                 hooks.RPCHook(),
                 hooks.NoExceptionTracebackHook(),
                 hooks.PublicUrlHook()]
    if extra_hooks:
        app_hooks.extend(extra_hooks)
    pecan.configuration.set_config(dict(pecan_config), overwrite=True)
app = pecan.make_app(
pecan_config.app.root,
debug=CONF.pecan_debug,
static_root=pecan_config.app.static_root if CONF.pecan_debug else None,
force_canonical=getattr(pecan_config.app, 'force_canonical', True),
hooks=app_hooks,
wrap_app=middleware.ParsableErrorMiddleware,
)
if CONF.audit.enabled:
try:
app = audit_middleware.AuditMiddleware(
app,
audit_map_file=CONF.audit.audit_map_file,
ignore_req_list=CONF.audit.ignore_req_list
)
except (EnvironmentError, OSError,
audit_middleware.PycadfAuditApiConfigError) as e:
raise exception.InputFileError(
file_name=CONF.audit.audit_map_file,
reason=e
)
if CONF.auth_strategy == "keystone":
app = auth_token.AuthTokenMiddleware(
app, dict(cfg.CONF),
public_api_routes=pecan_config.app.acl_public_routes)
# Create a CORS wrapper, and attach ironic-specific defaults that must be
# included in all CORS responses.
app = IronicCORS(app, CONF)
cors_middleware.set_defaults(
allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'],
expose_headers=[base.Version.max_string, base.Version.min_string,
base.Version.string]
)
return app
class VersionSelectorApplication(object):
def __init__(self):
pc = get_pecan_config()
self.v1 = setup_app(pecan_config=pc)
def __call__(self, environ, start_response):
return self.v1(environ, start_response)
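# Deployment sketch (illustrative; the exact WSGI module path is an
# assumption). A WSGI server is pointed at an instance of
# VersionSelectorApplication, e.g.:
#
#   application = VersionSelectorApplication()
#   # gunicorn <wsgi_module>:application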
| SauloAislan/ironic | ironic/api/app.py | Python | apache-2.0 | 3,844 | 0 |
# -*- coding: utf-8 -*-
"""
pen: terminal notes
"""
__title__ = 'pen'
__author__ = 'cwoebker'
__version__ = '0.4.2'
__license__ = 'BSD'
__copyright__ = '© 2013-2018 Cecil Wöbker'
| cwoebker/pen | pen/__init__.py | Python | bsd-3-clause | 182 | 0 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Post.posts'
db.add_column(u'posts_post', 'posts',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Post.posts'
db.delete_column(u'posts_post', 'posts')
models = {
u'posts.author': {
'Meta': {'object_name': 'Author'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'posts.post': {
'Meta': {'object_name': 'Post'},
'author_original': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['posts.Author']"}),
'contents': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_checked_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'posted_at': ('django.db.models.fields.DateTimeField', [], {}),
'posts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'tickets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['posts.Ticket']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'views': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'posts.postchild': {
'Meta': {'object_name': 'PostChild'},
'author_original': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['posts.Author']"}),
'contents': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['posts.Post']"}),
'posted_at': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'posts.ticket': {
'Meta': {'object_name': 'Ticket'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '6'})
}
}
complete_apps = ['posts'] | elena/dev-parse | posts/migrations/0002_auto__add_field_post_posts.py | Python | bsd-2-clause | 3,048 | 0.007874 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Bruno Calogero <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_interface_policy_leaf_policy_group
short_description: Add Fabric Interface Policy Leaf Policy Groups on Cisco ACI fabrics.
description:
- Add Fabric Interface Policy Leaf Policy Groups on Cisco ACI fabrics.
- More information from the internal APIC class I(infra:AccBndlGrp), I(infra:AccPortGrp) at
U(https://developer.cisco.com/site/aci/docs/apis/apic-mim-ref/).
author:
- Bruno Calogero (@brunocalogero)
version_added: '2.5'
notes:
- When using the module please select the appropriate link_aggregation_type (lag_type).
C(link) for Port Channel(PC), C(node) for Virtual Port Channel(VPC) and C(leaf) for Leaf Access Port Policy Group.
options:
policy_group:
description:
- Name of the leaf policy group to be added/deleted.
aliases: [ name, policy_group_name ]
description:
description:
- Description for the leaf policy group to be created.
aliases: [ descr ]
lag_type:
description:
- Selector for the type of leaf policy group we want to create.
aliases: [ lag_type_name ]
link_level_policy:
description:
- Choice of link_level_policy to be used as part of the leaf policy group to be created.
aliases: [ link_level_policy_name ]
cdp_policy:
description:
- Choice of cdp_policy to be used as part of the leaf policy group to be created.
aliases: [ cdp_policy_name ]
mcp_policy:
description:
- Choice of mcp_policy to be used as part of the leaf policy group to be created.
aliases: [ mcp_policy_name ]
lldp_policy:
description:
- Choice of lldp_policy to be used as part of the leaf policy group to be created.
aliases: [ lldp_policy_name ]
stp_interface_policy:
description:
- Choice of stp_interface_policy to be used as part of the leaf policy group to be created.
aliases: [ stp_interface_policy_name ]
egress_data_plane_policing_policy:
description:
- Choice of egress_data_plane_policing_policy to be used as part of the leaf policy group to be created.
aliases: [ egress_data_plane_policing_policy_name ]
ingress_data_plane_policing_policy:
description:
- Choice of ingress_data_plane_policing_policy to be used as part of the leaf policy group to be created.
aliases: [ ingress_data_plane_policing_policy_name ]
priority_flow_control_policy:
description:
- Choice of priority_flow_control_policy to be used as part of the leaf policy group to be created.
aliases: [ priority_flow_control_policy_name ]
fibre_channel_interface_policy:
description:
- Choice of fibre_channel_interface_policy to be used as part of the leaf policy group to be created.
aliases: [ fibre_channel_interface_policy_name ]
slow_drain_policy:
description:
- Choice of slow_drain_policy to be used as part of the leaf policy group to be created.
aliases: [ slow_drain_policy_name ]
port_channel_policy:
description:
- Choice of port_channel_policy to be used as part of the leaf policy group to be created.
aliases: [ port_channel_policy_name ]
monitoring_policy:
description:
- Choice of monitoring_policy to be used as part of the leaf policy group to be created.
aliases: [ monitoring_policy_name ]
storm_control_interface_policy:
description:
- Choice of storm_control_interface_policy to be used as part of the leaf policy group to be created.
aliases: [ storm_control_interface_policy_name ]
l2_interface_policy:
description:
- Choice of l2_interface_policy to be used as part of the leaf policy group to be created.
aliases: [ l2_interface_policy_name ]
port_security_policy:
description:
- Choice of port_security_policy to be used as part of the leaf policy group to be created.
aliases: [ port_security_policy_name ]
aep:
description:
- Choice of attached_entity_profile (AEP) to be used as part of the leaf policy group to be created.
aliases: [ aep_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: creating a Port Channel (PC) Interface Policy Group
aci_interface_policy_leaf_policy_group:
hostname: apic
username: yourusername
password: yourpassword
policy_group: policygroupname
description: policygroupname description
lag_type: link
link_level_policy: whateverlinklevelpolicy
fibre_channel_interface_policy: whateverfcpolicy
state: present
- name: creating a Virtual Port Channel (VPC) Interface Policy Group (no description)
aci_interface_policy_leaf_policy_group:
hostname: apic
username: yourusername
password: yourpassword
policy_group: policygroupname
lag_type: node
link_level_policy: whateverlinklevelpolicy
fibre_channel_interface_policy: whateverfcpolicy
state: present
- name: creating a Leaf Access Port Policy Group (no description)
aci_interface_policy_leaf_policy_group:
hostname: apic
username: yourusername
password: yourpassword
policy_group: policygroupname
lag_type: leaf
link_level_policy: whateverlinklevelpolicy
fibre_channel_interface_policy: whateverfcpolicy
state: present
- name: deleting an Interface policy Leaf Policy Group
aci_interface_policy_leaf_policy_group:
hostname: apic
username: yourusername
password: yourpassword
policy_group: policygroupname
lag_type: type_name
state: absent
'''
RETURN = ''' # '''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update({
'policy_group': dict(type='str', aliases=['name', 'policy_group_name']),
'description': dict(type='str', aliases=['descr']),
# NOTE: Since this module needs to include both infra:AccBndlGrp (for PC andVPC) and infra:AccPortGrp (for leaf access port policy group):
# NOTE: I'll allow the user to make the choice here (link(PC), node(VPC), leaf(leaf-access port policy group))
'lag_type': dict(type='str', aliases=['lag_type_name']),
'link_level_policy': dict(type='str', aliases=['link_level_policy_name']),
'cdp_policy': dict(type='str', aliases=['cdp_policy_name']),
'mcp_policy': dict(type='str', aliases=['mcp_policy_name']),
'lldp_policy': dict(type='str', aliases=['lldp_policy_name']),
'stp_interface_policy': dict(type='str', aliases=['stp_interface_policy_name']),
'egress_data_plane_policing_policy': dict(type='str', aliases=['egress_data_plane_policing_policy_name']),
'ingress_data_plane_policing_policy': dict(type='str', aliases=['ingress_data_plane_policing_policy_name']),
'priority_flow_control_policy': dict(type='str', aliases=['priority_flow_control_policy_name']),
'fibre_channel_interface_policy': dict(type='str', aliases=['fibre_channel_interface_policy_name']),
'slow_drain_policy': dict(type='str', aliases=['slow_drain_policy_name']),
'port_channel_policy': dict(type='str', aliases=['port_channel_policy_name']),
'monitoring_policy': dict(type='str', aliases=['monitoring_policy_name']),
'storm_control_interface_policy': dict(type='str', aliases=['storm_control_interface_policy_name']),
'l2_interface_policy': dict(type='str', aliases=['l2_interface_policy_name']),
'port_security_policy': dict(type='str', aliases=['port_security_policy_name']),
'aep': dict(type='str', aliases=['aep_name']),
'state': dict(type='str', default='present', choices=['absent', 'present', 'query'])
})
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['policy_group', 'lag_type']],
['state', 'present', ['policy_group', 'lag_type']]
]
)
policy_group = module.params['policy_group']
description = module.params['description']
lag_type = module.params['lag_type']
link_level_policy = module.params['link_level_policy']
cdp_policy = module.params['cdp_policy']
mcp_policy = module.params['mcp_policy']
lldp_policy = module.params['lldp_policy']
stp_interface_policy = module.params['stp_interface_policy']
egress_data_plane_policing_policy = module.params['egress_data_plane_policing_policy']
ingress_data_plane_policing_policy = module.params['ingress_data_plane_policing_policy']
priority_flow_control_policy = module.params['priority_flow_control_policy']
fibre_channel_interface_policy = module.params['fibre_channel_interface_policy']
slow_drain_policy = module.params['slow_drain_policy']
port_channel_policy = module.params['port_channel_policy']
monitoring_policy = module.params['monitoring_policy']
storm_control_interface_policy = module.params['storm_control_interface_policy']
l2_interface_policy = module.params['l2_interface_policy']
port_security_policy = module.params['port_security_policy']
aep = module.params['aep']
state = module.params['state']
aci_class_name = ''
dn_name = ''
class_config_dict = {}
if lag_type == 'leaf':
aci_class_name = 'infraAccPortGrp'
dn_name = 'accportgrp'
class_config_dict = dict(
name=policy_group,
descr=description,
dn='uni/infra/funcprof/{0}-{1}'.format(dn_name, policy_group)
)
elif lag_type == 'link' or lag_type == 'node':
aci_class_name = 'infraAccBndlGrp'
dn_name = 'accbundle'
class_config_dict = dict(
name=policy_group,
descr=description,
lagT=lag_type,
dn='uni/infra/funcprof/{0}-{1}'.format(dn_name, policy_group)
)
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class=aci_class_name,
aci_rn='infra/funcprof/{0}-{1}'.format(dn_name, policy_group),
filter_target='eq({0}.name, "{1}")'.format(aci_class_name, policy_group),
module_object=policy_group
),
child_classes=[
'infraRsMonIfInfraPol', 'infraRsLldpIfPol', 'infraRsFcIfPol',
'infraRsLacpPol', 'infraRsL2PortSecurityPol', 'infraRsHIfPol',
'infraRsQosPfcIfPol', 'infraRsStpIfPol', 'infraRsQosIngressDppIfPol',
'infraRsStormctrlIfPol', 'infraRsQosEgressDppIfPol', 'infraRsQosSdIfPol',
'infraRsAttEntP', 'infraRsMcpIfPol', 'infraRsCdpIfPol', 'infraRsL2IfPol'
]
)
aci.get_existing()
if state == 'present':
# Filter out module params with null values
aci.payload(
aci_class=aci_class_name,
class_config=class_config_dict,
child_configs=[
dict(
infraRsMonIfInfraPol=dict(
attributes=dict(
tnMonInfraPolName=monitoring_policy
)
)
),
dict(
infraRsLldpIfPol=dict(
attributes=dict(
tnLldpIfPolName=lldp_policy
)
)
),
dict(
infraRsFcIfPol=dict(
attributes=dict(
tnFcIfPolName=fibre_channel_interface_policy
)
)
),
dict(
infraRsLacpPol=dict(
attributes=dict(
tnLacpLagPolName=port_channel_policy
)
)
),
dict(
infraRsL2PortSecurityPol=dict(
attributes=dict(
tnL2PortSecurityPolName=port_security_policy
)
)
),
dict(
infraRsHIfPol=dict(
attributes=dict(
tnFabricHIfPolName=link_level_policy
)
)
),
dict(
infraRsQosPfcIfPol=dict(
attributes=dict(
tnQosPfcIfPolName=priority_flow_control_policy
)
)
),
dict(
infraRsStpIfPol=dict(
attributes=dict(
tnStpIfPolName=stp_interface_policy
)
)
),
dict(
infraRsQosIngressDppIfPol=dict(
attributes=dict(
tnQosDppPolName=ingress_data_plane_policing_policy
)
)
),
dict(
infraRsStormctrlIfPol=dict(
attributes=dict(
tnStormctrlIfPolName=storm_control_interface_policy
)
)
),
dict(
infraRsQosEgressDppIfPol=dict(
attributes=dict(
tnQosDppPolName=egress_data_plane_policing_policy
)
)
),
dict(
infraRsQosSdIfPol=dict(
attributes=dict(
tnQosSdIfPolName=slow_drain_policy
)
)
),
dict(
infraRsMcpIfPol=dict(
attributes=dict(
tnMcpIfPolName=mcp_policy
)
)
),
dict(
infraRsCdpIfPol=dict(
attributes=dict(
tnCdpIfPolName=cdp_policy
)
)
),
dict(
infraRsL2IfPol=dict(
attributes=dict(
tnL2IfPolName=l2_interface_policy
)
)
),
dict(
infraRsAttEntP=dict(
attributes=dict(
tDn='uni/infra/attentp-{0}'.format(aep)
)
)
)
],
)
# Generate config diff which will be used as POST request body
aci.get_diff(aci_class=aci_class_name)
# Submit changes if module not in check_mode and the proposed is different than existing
aci.post_config()
elif state == 'absent':
aci.delete_config()
module.exit_json(**aci.result)
if __name__ == "__main__":
main()
| tima/ansible | lib/ansible/modules/network/aci/aci_interface_policy_leaf_policy_group.py | Python | gpl-3.0 | 15,589 | 0.003207 |
# -*- coding: utf-8 -*-
"""
################################################################################
# #
# media_editing #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program provides media editing utilities. #
# #
# copyright (C) 2018 Will Breaden Madden, [email protected] #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
"""
| wdbm/media_editing | media_editing.py | Python | gpl-3.0 | 2,382 | 0.010915 |
import os
import enums
class dm_server:
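    """Represents a monitored server: pings its IP address and derives a
    coarse health state in __setState (1 = mostly reachable, 3 = mostly
    unreachable)."""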
def __init__ (self, Name, Ip, statistics, Os = None):
self.__name = Name
self.__ip = Ip
self.__services = []
self.__statistics = statistics
if Os:
self.__os = Os
else:
self.__os = None
def addService (self, Service):
self.__services.append(Service)
def check (self):
# for i in self.__services:
# i.check()
self.__test()
    def __test(self):
        # os.system() returns the command's exit status: 0 means the ping
        # succeeded, non-zero means it failed. Count successes so that
        # __pingProb is the fraction of pings answered, matching the
        # thresholds in __setState where ~1.0 means healthy.
        successes = 0
        for i in range(10):
            if os.system("ping -c 1 " + self.__ip) == 0:
                successes += 1
        self.__pingProb = successes / 10.0
        self.__setState()
def __setState(self):
if self.__pingProb <= 1 and self.__pingProb >= 0.8:
self.__state = 1
elif self.__pingProb <= 0.79 and self.__pingProb >= 0.41:
self.__state = 2
elif self.__pingProb <= 0.4 and self.__pingProb >= 0:
self.__state = 3
def __checkOs(self):
pass
def __checkVersionMac(self):
pass
def __checkVersionLinux(self):
pass
def __checkVersionWin(self):
pass
def getOs(self):
return self.__os
def getVersion(self):
pass
def getList(self):
return self.__services
def getState(self):
return self.__state
def getName(self):
return self.__name
def getIp(self):
return self.__ip
    def getStatistic(self):
        # the attribute is stored as __statistics in __init__
        return self.__statistics
| PC-fit-Christian-Rupp/serverstatepage | code/testroutine/dm_server.py | Python | mit | 1,222 | 0.047463 |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import math
import IECore
import Gaffer
import GafferUI
QtCore = GafferUI._qtImport( "QtCore" )
QtGui = GafferUI._qtImport( "QtGui" )
## The Slider class allows a user to specify a number of positions on a scale of 0.0 at one end
# of the Widget and 1.0 at the other. Positions off the ends of the widget are mapped
# to negative numbers and numbers greater than 1.0 respectively. Derived classes may
# provide alternative interpretations for the scale and clamp values as appropriate. In
# particular see the NumericSlider which allows the specification of the values at either
# end of the scale along with hard minimum and maximum values.
class Slider( GafferUI.Widget ) :
PositionChangedReason = IECore.Enum.create( "Invalid", "SetPositions", "Click", "IndexAdded", "IndexRemoved", "DragBegin", "DragMove", "DragEnd", "Increment" )
def __init__( self, position=None, positions=None, **kw ) :
GafferUI.Widget.__init__( self, _Widget(), **kw )
assert( ( position is None ) or ( positions is None ) )
if positions is not None :
self.__positions = positions
else :
self.__positions = [ 0.5 if position is None else position ]
self.__selectedIndex = None
self.__sizeEditable = False
self.__minimumSize = 1
self._entered = False
self.__enterConnection = self.enterSignal().connect( Gaffer.WeakMethod( self.__enter ) )
self.__leaveConnection = self.leaveSignal().connect( Gaffer.WeakMethod( self.__leave ) )
self.__mouseMoveConnection = self.mouseMoveSignal().connect( Gaffer.WeakMethod( self.__mouseMove ) )
self.__buttonPressConnection = self.buttonPressSignal().connect( Gaffer.WeakMethod( self.__buttonPress ) )
self.__dragBeginConnection = self.dragBeginSignal().connect( Gaffer.WeakMethod( self.__dragBegin ) )
self.__dragEnterConnection = self.dragEnterSignal().connect( Gaffer.WeakMethod( self.__dragEnter ) )
self.__dragMoveConnection = self.dragMoveSignal().connect( Gaffer.WeakMethod( self.__dragMove ) )
self.__dragEndConnection = self.dragEndSignal().connect( Gaffer.WeakMethod( self.__dragEnd ) )
self.__keyPressConnection = self.keyPressSignal().connect( Gaffer.WeakMethod( self.__keyPress ) )
## Convenience function to call setPositions( [ position ] )
def setPosition( self, p ) :
self.setPositions( [ p ] )
## Convenience function returning getPositions()[0] if there
# is only one position, and raising ValueError if not.
def getPosition( self ) :
if len( self.__positions ) != 1 :
raise ValueError
return self.__positions[0]
def setPositions( self, positions ) :
self._setPositionsInternal( positions, self.PositionChangedReason.SetPositions )
def getPositions( self ) :
return self.__positions
## A signal emitted whenever a position has been changed. Slots should
# have the signature slot( Slider, PositionChangedReason ).
def positionChangedSignal( self ) :
signal = getattr( self, "_positionChangedSignal", None )
if signal is None :
signal = Gaffer.Signal2()
self._positionChangedSignal = signal
return signal
## Returns True if a user would expect the specified sequence
# of changes to be merged into one undoable event.
@classmethod
def changesShouldBeMerged( cls, firstReason, secondReason ) :
if type( firstReason ) != type( secondReason ) :
return False
return ( firstReason, secondReason ) in (
# click and drag
( cls.PositionChangedReason.Click, cls.PositionChangedReason.DragBegin ),
( cls.PositionChangedReason.DragBegin, cls.PositionChangedReason.DragMove ),
( cls.PositionChangedReason.DragMove, cls.PositionChangedReason.DragMove ),
( cls.PositionChangedReason.DragMove, cls.PositionChangedReason.DragEnd ),
# increment
( cls.PositionChangedReason.Increment, cls.PositionChangedReason.Increment ),
)
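	## A signal emitted whenever a position has been removed. Slots
	# should have the signature slot( Slider, index ).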
def indexRemovedSignal( self ) :
signal = getattr( self, "_indexRemovedSignal", None )
if signal is None :
signal = GafferUI.WidgetEventSignal()
self._indexRemovedSignal = signal
return signal
def setSelectedIndex( self, index ) :
if self.__selectedIndex == index :
return
if index is not None :
if not len( self.__positions ) or index < 0 or index >= len( self.__positions ) :
raise IndexError
self.__selectedIndex = index
self._qtWidget().update()
signal = getattr( self, "_selectedIndexChangedSignal", None )
if signal is not None :
signal( self )
## May return None to indicate that no index is selected.
def getSelectedIndex( self ) :
return self.__selectedIndex
def selectedIndexChangedSignal( self ) :
signal = getattr( self, "_selectedIndexChangedSignal", None )
if signal is None :
signal = GafferUI.WidgetSignal()
self._selectedIndexChangedSignal = signal
return signal
## Determines whether or not positions may be added/removed
def setSizeEditable( self, editable ) :
self.__sizeEditable = editable
def getSizeEditable( self ) :
return self.__sizeEditable
## Sets a size after which no more positions can
# be removed.
def setMinimumSize( self, minimumSize ) :
self.__minimumSize = minimumSize
def getMinimumSize( self ) :
return self.__minimumSize
## May be overridden by derived classes if necessary, but
# implementations must call the base class implementation
# after performing their own work, as the base class is
# responsible for emitting positionChangedSignal().
def _setPositionsInternal( self, positions, reason ) :
dragBeginOrEnd = reason in ( self.PositionChangedReason.DragBegin, self.PositionChangedReason.DragEnd )
if positions == self.__positions and not dragBeginOrEnd :
# early out if the positions haven't changed, but not if the
# reason is either end of a drag - we always signal those so
# that they will always come in matching pairs.
return
self.__positions = positions
self._qtWidget().update()
self.__emitPositionChanged( reason )
## \todo Colours should come from some unified style somewhere
def _drawBackground( self, painter ) :
size = self.size()
pen = QtGui.QPen( QtGui.QColor( 0, 0, 0 ) )
pen.setWidth( 1 )
painter.setPen( pen )
painter.drawLine( 0, size.y / 2, size.x, size.y / 2 )
def _drawPosition( self, painter, position, highlighted, opacity=1 ) :
size = self.size()
pen = QtGui.QPen( QtGui.QColor( 0, 0, 0, 255 * opacity ) )
pen.setWidth( 1 )
painter.setPen( pen )
## \todo These colours need to come from the style, once we've
# unified the Gadget and Widget styling.
if highlighted :
brush = QtGui.QBrush( QtGui.QColor( 119, 156, 255, 255 * opacity ) )
else :
brush = QtGui.QBrush( QtGui.QColor( 128, 128, 128, 255 * opacity ) )
painter.setBrush( brush )
if position < 0 :
painter.drawPolygon(
QtCore.QPoint( 8, 4 ),
QtCore.QPoint( 8, size.y - 4 ),
QtCore.QPoint( 2, size.y / 2 ),
)
elif position > 1 :
painter.drawPolygon(
QtCore.QPoint( size.x - 8, 4 ),
QtCore.QPoint( size.x - 8, size.y - 4 ),
QtCore.QPoint( size.x - 2, size.y / 2 ),
)
else :
painter.drawEllipse( QtCore.QPoint( position * size.x, size.y / 2 ), size.y / 4, size.y / 4 )
def _indexUnderMouse( self ) :
size = self.size()
mousePosition = GafferUI.Widget.mousePosition( relativeTo = self ).x / float( size.x )
result = None
for i, p in enumerate( self.__positions ) :
# clamp position inside 0-1 range so we can select
# handles representing points outside the widget.
p = max( min( p, 1.0 ), 0.0 )
dist = math.fabs( mousePosition - p )
if result is None or dist < minDist :
result = i
minDist = dist
if not self.getSizeEditable() :
# when the size isn't editable, we consider the closest
# position to be under the mouse, this makes it easy
# to just click anywhere to move the closest point.
return result
else :
# but when the size is editable, we consider points to
# be under the mouse when they genuinely are beneath it,
# so that clicks elsewhere can add points.
pixelDist = minDist * size.x
if pixelDist < size.y / 2.0 :
return result
else :
return None
def __enter( self, widget ) :
self._entered = True
self._qtWidget().update()
def __leave( self, widget ) :
self._entered = False
self._qtWidget().update()
def __mouseMove( self, widget, event ) :
self._qtWidget().update()
def __buttonPress( self, widget, event ) :
if event.buttons != GafferUI.ButtonEvent.Buttons.Left :
return
index = self._indexUnderMouse()
if index is not None :
self.setSelectedIndex( index )
if len( self.getPositions() ) == 1 :
self.__setPositionInternal( index, event.line.p0.x, self.PositionChangedReason.Click )
elif self.getSizeEditable() :
positions = self.getPositions()[:]
positions.append( float( event.line.p0.x ) / self.size().x )
self._setPositionsInternal( positions, self.PositionChangedReason.IndexAdded )
self.setSelectedIndex( len( positions ) - 1 )
return True
def __dragBegin( self, widget, event ) :
if event.buttons == GafferUI.ButtonEvent.Buttons.Left and self.getSelectedIndex() is not None :
return IECore.NullObject.defaultNullObject()
return None
def __dragEnter( self, widget, event ) :
if event.sourceWidget is self :
self.__setPositionInternal( self.getSelectedIndex(), event.line.p0.x, self.PositionChangedReason.DragBegin )
return True
return False
def __dragMove( self, widget, event ) :
self.__setPositionInternal( self.getSelectedIndex(), event.line.p0.x, self.PositionChangedReason.DragMove )
def __dragEnd( self, widget, event ) :
self.__setPositionInternal( self.getSelectedIndex(), event.line.p0.x, self.PositionChangedReason.DragEnd )
def __keyPress( self, widget, event ) :
if self.getSelectedIndex() is None :
return False
if event.key in ( "Left", "Right", "Up", "Down" ) :
x = self.getPositions()[self.getSelectedIndex()] * self.size().x
x += 1 if event.key in ( "Right", "Up" ) else - 1
self.__setPositionInternal( self.getSelectedIndex(), x, self.PositionChangedReason.Increment )
return True
elif event.key in ( "Backspace", "Delete" ) :
index = self.getSelectedIndex()
if index is not None and self.getSizeEditable() and len( self.getPositions() ) > self.getMinimumSize() :
del self.__positions[index]
signal = getattr( self, "_indexRemovedSignal", None )
if signal is not None :
signal( self, index )
self.__emitPositionChanged( self.PositionChangedReason.IndexRemoved )
self._qtWidget().update()
return True
return False
def __setPositionInternal( self, index, widgetX, reason ) :
positions = self.getPositions()[:]
positions[index] = float( widgetX ) / self.size().x
self._setPositionsInternal( positions, reason )
def __emitPositionChanged( self, reason ) :
signal = getattr( self, "_positionChangedSignal", None )
if signal is not None :
signal( self, reason )
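
## Illustrative usage only - a minimal sketch assuming a running GafferUI
# application (the callback name is hypothetical) :
#
#   slider = Slider( positions = [ 0.25, 0.5, 0.75 ] )
#   slider.setSizeEditable( True )
#   def positionsChanged( slider, reason ) :
#       print slider.getPositions(), reason
#   connection = slider.positionChangedSignal().connect( positionsChanged )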
class _Widget( QtGui.QWidget ) :
def __init__( self, parent=None ) :
QtGui.QWidget.__init__( self, parent )
self.setSizePolicy( QtGui.QSizePolicy( QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum ) )
self.setFocusPolicy( QtCore.Qt.ClickFocus )
def sizeHint( self ) :
return QtCore.QSize( 150, 18 )
def paintEvent( self, event ) :
owner = GafferUI.Widget._owner( self )
painter = QtGui.QPainter( self )
painter.setRenderHint( QtGui.QPainter.Antialiasing )
owner._drawBackground( painter )
indexUnderMouse = owner._indexUnderMouse()
for index, position in enumerate( owner.getPositions() ) :
owner._drawPosition(
painter,
position,
highlighted = index == indexUnderMouse or index == owner.getSelectedIndex()
)
if indexUnderMouse is None and owner.getSizeEditable() and owner._entered :
mousePosition = GafferUI.Widget.mousePosition( relativeTo = owner ).x / float( owner.size().x )
owner._drawPosition(
painter,
mousePosition,
highlighted = True,
opacity = 0.5
)
def event( self, event ) :
if event.type() == event.ShortcutOverride :
if event.key() in ( QtCore.Qt.Key_Delete, QtCore.Qt.Key_Backspace ) :
event.accept()
return True
return QtGui.QWidget.event( self, event )
| DoubleNegativeVisualEffects/gaffer | python/GafferUI/Slider.py | Python | bsd-3-clause | 14,206 | 0.062509 |
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# ModRana config files handling
#----------------------------------------------------------------------------
# Copyright 2012, Martin Kolman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
import os
import shutil
from configobj import ConfigObj
import logging
log = logging.getLogger("core.config")
CONFIGS = ["map_config.conf", "user_config.conf"]
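# Configuration files expected in the modRana profile folder; the defaults
# that seed them live under data/default_configuration_files.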
class Configs(object):
def __init__(self, modrana):
self.modrana = modrana
self.paths = modrana.paths
self.userConfig = {}
self.mapConfig = {}
# check if config files exist
self.checkConfigFilesExist()
def checkConfigFilesExist(self):
"""
assure that configuration files are available in the profile folder
- provided the default configuration files exist and that the profile folder
exists and is writable
"""
profilePath = self.modrana.paths.getProfilePath()
for config in CONFIGS:
configPath = os.path.join(profilePath, config)
if not os.path.exists(configPath):
try:
source = os.path.join("data/default_configuration_files", config)
log.info(" ** copying default configuration file to profile folder")
log.info(" ** from: %s", source)
log.info(" ** to: %s", configPath)
shutil.copy(source, configPath)
log.info(" ** default config file copying DONE")
except Exception:
log.exception("copying default configuration file to profile folder failed")
def upgradeConfigFiles(self):
"""
upgrade config files, if needed
"""
upgradeCount = 0
profilePath = self.modrana.paths.getProfilePath()
log.info("upgrading modRana configuration files in %s", profilePath)
# first check the configs actually exist
self.checkConfigFilesExist()
for config in CONFIGS:
# load default config
defaultConfigPath = os.path.join("data/default_configuration_files", config)
installedConfigPath = os.path.join(profilePath, config)
try:
defaultRev = int(ConfigObj(defaultConfigPath).get("revision", 0))
installedRev = int(ConfigObj(installedConfigPath).get("revision", 0))
if defaultRev > installedRev: # is installed config is outdated ?
log.info('config file %s is outdated, upgrading', config)
# rename installed config as the user might have modified it
newName = "%s_old_revision_%d" % (config, installedRev)
newPath = os.path.join(profilePath, newName)
shutil.move(installedConfigPath, newPath)
log.info('old config file renamed to %s' % newName)
# install the (newer) default config
shutil.copy(defaultConfigPath, profilePath)
# update upgrade counter
upgradeCount += 1
except Exception:
log.exception("upgrading config file: %s failed", config)
if upgradeCount:
log.info("%d configuration files upgraded", upgradeCount)
else:
log.info("no configuration files needed upgrade")
def loadAll(self):
"""
load all configuration files
"""
self.loadMapConfig()
self.loadUserConfig()
def getUserConfig(self):
return self.userConfig
def loadUserConfig(self):
"""load the user oriented configuration file."""
path = os.path.join(self.modrana.paths.getProfilePath(), "user_config.conf")
try:
config = ConfigObj(path)
if 'enabled' in config:
if config['enabled'] == 'True':
self.userConfig = config
except Exception:
msg = "loading user_config.conf failed, check the syntax\n" \
"and if the config file is present in the modRana profile directory"
log.exception(msg)
def getMapConfig(self):
"""
get the "raw" map config
"""
return self.mapConfig
def loadMapConfig(self):
"""
load the map configuration file
"""
configVariables = {
'label': 'label',
'url': 'tiles',
'max_zoom': 'maxZoom',
'min_zoom': 'minZoom',
'type': 'type',
'folder_prefix': 'folderPrefix',
'coordinates': 'coordinates',
}
def allNeededIn(needed, layerDict):
"""
check if all required values are filled in
"""
# TODO: optimize this ?
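            # (an equivalent one-liner: return all(key in layerDict for key in needed))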
for key in needed:
if key in layerDict:
continue
else:
return False
return True
mapConfigPath = os.path.join(self.modrana.paths.getProfilePath(), 'map_config.conf')
# check if the map configuration file is installed
if not os.path.exists(mapConfigPath):
# nothing in profile folder -> try to use the default config
log.info("no config in profile folder, using default map layer configuration file")
mapConfigPath = os.path.join("data/default_configuration_files", 'map_config.conf')
if not os.path.exists(mapConfigPath):
# no map layer config available
log.info("map layer configuration file not available")
return False
try:
self.mapConfig = ConfigObj(mapConfigPath)
except Exception:
log.exception("loading map_config.conf failed")
return False
return True
def getUserAgent(self):
"""return the default modRana User-Agent"""
#debugging:
# return "Mozilla/5.0 (compatible; MSIE 5.5; Linux)"
#TODO: setting from configuration file, CLI & interface
return "modRana flexible GPS navigation system (compatible; Linux)" | ryfx/modrana | core/configs.py | Python | gpl-3.0 | 6,907 | 0.00304 |
# -*- coding: utf-8 -*-
import sys
import csv
from itertools import izip
# https://pypi.python.org/pypi/unicodecsv
# http://semver.org/
VERSION = (0, 9, 4)
__version__ = ".".join(map(str, VERSION))
pass_throughs = [
'register_dialect',
'unregister_dialect',
'get_dialect',
'list_dialects',
'field_size_limit',
'Dialect',
'excel',
'excel_tab',
'Sniffer',
'QUOTE_ALL',
'QUOTE_MINIMAL',
'QUOTE_NONNUMERIC',
'QUOTE_NONE',
'Error'
]
__all__ = [
'reader',
'writer',
'DictReader',
'DictWriter',
] + pass_throughs
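# Re-export the attributes listed in pass_throughs unchanged from the
# stdlib csv module.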
for prop in pass_throughs:
globals()[prop] = getattr(csv, prop)
def _stringify(s, encoding, errors):
if s is None:
return ''
if isinstance(s, unicode):
return s.encode(encoding, errors)
elif isinstance(s, (int, float)):
pass # let csv.QUOTE_NONNUMERIC do its thing.
elif not isinstance(s, str):
s = str(s)
return s
def _stringify_list(l, encoding, errors='strict'):
try:
return [_stringify(s, encoding, errors) for s in iter(l)]
except TypeError, e:
raise csv.Error(str(e))
def _unicodify(s, encoding):
if s is None:
return None
if isinstance(s, (unicode, int, float)):
return s
elif isinstance(s, str):
return s.decode(encoding)
return s
class UnicodeWriter(object):
"""
>>> import unicodecsv
>>> from cStringIO import StringIO
>>> f = StringIO()
>>> w = unicodecsv.writer(f, encoding='utf-8')
>>> w.writerow((u'é', u'ñ'))
>>> f.seek(0)
>>> r = unicodecsv.reader(f, encoding='utf-8')
>>> row = r.next()
>>> row[0] == u'é'
True
>>> row[1] == u'ñ'
True
"""
def __init__(self, f, dialect=csv.excel, encoding='utf-8', errors='strict',
*args, **kwds):
self.encoding = encoding
self.writer = csv.writer(f, dialect, *args, **kwds)
self.encoding_errors = errors
def writerow(self, row):
self.writer.writerow(
_stringify_list(row, self.encoding, self.encoding_errors))
def writerows(self, rows):
for row in rows:
self.writerow(row)
@property
def dialect(self):
return self.writer.dialect
writer = UnicodeWriter
class UnicodeReader(object):
def __init__(self, f, dialect=None, encoding='utf-8', errors='strict',
**kwds):
format_params = ['delimiter', 'doublequote', 'escapechar',
'lineterminator', 'quotechar', 'quoting', 'skipinitialspace']
if dialect is None:
if not any([kwd_name in format_params for kwd_name in kwds.keys()]):
dialect = csv.excel
self.reader = csv.reader(f, dialect, **kwds)
self.encoding = encoding
self.encoding_errors = errors
def next(self):
row = self.reader.next()
encoding = self.encoding
encoding_errors = self.encoding_errors
float_ = float
unicode_ = unicode
try:
val = [(value if isinstance(value, float_) else unicode_(value, encoding, encoding_errors))
for value in row]
except UnicodeDecodeError as e:
# attempt a different encoding...
encoding = 'ISO-8859-1'
val = [(value if isinstance(value, float_) else unicode_(value, encoding, encoding_errors))
for value in row]
return val
def __iter__(self):
return self
@property
def dialect(self):
return self.reader.dialect
@property
def line_num(self):
return self.reader.line_num
reader = UnicodeReader
class DictWriter(csv.DictWriter):
"""
>>> from cStringIO import StringIO
>>> f = StringIO()
>>> w = DictWriter(f, ['a', u'ñ', 'b'], restval=u'î')
>>> w.writerow({'a':'1', u'ñ':'2'})
>>> w.writerow({'a':'1', u'ñ':'2', 'b':u'ø'})
>>> w.writerow({'a':u'é', u'ñ':'2'})
>>> f.seek(0)
>>> r = DictReader(f, fieldnames=['a', u'ñ'], restkey='r')
>>> r.next() == {'a': u'1', u'ñ':'2', 'r': [u'î']}
True
>>> r.next() == {'a': u'1', u'ñ':'2', 'r': [u'\xc3\xb8']}
True
>>> r.next() == {'a': u'\xc3\xa9', u'ñ':'2', 'r': [u'\xc3\xae']}
True
"""
def __init__(self, csvfile, fieldnames, restval='', extrasaction='raise', dialect='excel', encoding='utf-8', errors='strict', *args, **kwds):
self.encoding = encoding
csv.DictWriter.__init__(
self, csvfile, fieldnames, restval, extrasaction, dialect, *args, **kwds)
self.writer = UnicodeWriter(
csvfile, dialect, encoding=encoding, errors=errors, *args, **kwds)
self.encoding_errors = errors
def writeheader(self):
fieldnames = _stringify_list(
self.fieldnames, self.encoding, self.encoding_errors)
header = dict(zip(self.fieldnames, self.fieldnames))
self.writerow(header)
class DictReader(csv.DictReader):
"""
>>> from cStringIO import StringIO
>>> f = StringIO()
>>> w = DictWriter(f, fieldnames=['name', 'place'])
>>> w.writerow({'name': 'Cary Grant', 'place': 'hollywood'})
>>> w.writerow({'name': 'Nathan Brillstone', 'place': u'øLand'})
>>> w.writerow({'name': u'Willam ø. Unicoder', 'place': u'éSpandland'})
>>> f.seek(0)
>>> r = DictReader(f, fieldnames=['name', 'place'])
>>> print r.next() == {'name': 'Cary Grant', 'place': 'hollywood'}
True
>>> print r.next() == {'name': 'Nathan Brillstone', 'place': u'øLand'}
True
>>> print r.next() == {'name': u'Willam ø. Unicoder', 'place': u'éSpandland'}
True
"""
def __init__(self, csvfile, fieldnames=None, restkey=None, restval=None,
dialect='excel', encoding='utf-8', errors='strict', *args,
**kwds):
if fieldnames is not None:
fieldnames = _stringify_list(fieldnames, encoding)
csv.DictReader.__init__(
self, csvfile, fieldnames, restkey, restval, dialect, *args, **kwds)
self.reader = UnicodeReader(csvfile, dialect, encoding=encoding,
errors=errors, *args, **kwds)
if fieldnames is None and not hasattr(csv.DictReader, 'fieldnames'):
# Python 2.5 fieldnames workaround.
# (http://bugs.python.org/issue3436)
reader = UnicodeReader(
csvfile, dialect, encoding=encoding, *args, **kwds)
self.fieldnames = _stringify_list(reader.next(), reader.encoding)
self.unicode_fieldnames = [_unicodify(f, encoding) for f in
self.fieldnames]
self.unicode_restkey = _unicodify(restkey, encoding)
def next(self):
row = csv.DictReader.next(self)
result = dict((uni_key, row[str_key]) for (str_key, uni_key) in
izip(self.fieldnames, self.unicode_fieldnames))
rest = row.get(self.restkey)
if rest:
result[self.unicode_restkey] = rest
return result
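
# Illustrative usage only (mirrors the doctests above; Python 2):
#
#   from cStringIO import StringIO
#   f = StringIO()
#   writer(f, encoding='utf-8').writerow([u'é', 42])
#   f.seek(0)
#   print reader(f, encoding='utf-8').next()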
| archives-new-zealand/archwayimportgenerator | libs/unicodecsv.py | Python | gpl-3.0 | 7,077 | 0.001559 |
import pytest
from tests.functional.ucare_cli.helpers import arg_namespace
from pyuploadcare.ucare_cli.commands.update_webhook import update_webhook
@pytest.mark.vcr
def test_update_webhooks(capsys, uploadcare):
update_webhook(
arg_namespace(
"update_webhook 865715 --deactivate "
"--target_url=https://webhook.site/updated"
),
uploadcare,
)
captured = capsys.readouterr()
assert '"is_active": false' in captured.out
| uploadcare/pyuploadcare | tests/functional/ucare_cli/test_update_webhook.py | Python | mit | 484 | 0 |
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class LinkTokenCreateRequestUpdate(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'account_selection_enabled': (bool,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'account_selection_enabled': 'account_selection_enabled', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""LinkTokenCreateRequestUpdate - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
account_selection_enabled (bool): If `true`, enables [update mode with Account Select](https://plaid.com/docs/link/update-mode/#using-update-mode-to-request-new-accounts).. [optional] if omitted the server will use the default value of False # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| plaid/plaid-python | plaid/model/link_token_create_request_update.py | Python | mit | 6,717 | 0.000447 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# generated by wxGlade 0.6.8 (standalone edition) on Tue Jan 14 10:41:03 2014
#
import wx
import wx.grid
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class MyFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrame.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.statusbar = self.CreateStatusBar(5, wx.ST_SIZEGRIP)
self.SplitterWindow = wx.SplitterWindow(self, wx.ID_ANY, style=wx.SP_3D | wx.SP_BORDER)
self.window_1_pane_1 = wx.ScrolledWindow(self.SplitterWindow, wx.ID_ANY, style=wx.SIMPLE_BORDER | wx.TAB_TRAVERSAL)
self.pnlSettingBar = wx.Panel(self.window_1_pane_1, wx.ID_ANY)
self.btnHideBar = wx.Button(self.pnlSettingBar, wx.ID_ANY, "Hide")
self.btnEnumPorts = wx.Button(self.pnlSettingBar, wx.ID_ANY, "EnumPorts")
self.label_1 = wx.StaticText(self.pnlSettingBar, wx.ID_ANY, "Port")
self.cmbPort = wx.ComboBox(self.pnlSettingBar, wx.ID_ANY, choices=[], style=wx.CB_DROPDOWN)
self.label_2 = wx.StaticText(self.pnlSettingBar, wx.ID_ANY, "Baud Rate")
self.cmbBaudRate = wx.ComboBox(self.pnlSettingBar, wx.ID_ANY, choices=["300", "600", "1200", "1800", "2400", "4800", "9600", "19200", "38400", "57600", "115200", "230400", "460800", "500000", "576000", "921600", "1000000", "1152000", "1500000", "2000000", "2500000", "3000000", "3500000", "4000000"], style=wx.CB_DROPDOWN)
self.label_3 = wx.StaticText(self.pnlSettingBar, wx.ID_ANY, "Data Bits")
self.choiceDataBits = wx.Choice(self.pnlSettingBar, wx.ID_ANY, choices=["5", "6", "7", "8"])
self.label_4 = wx.StaticText(self.pnlSettingBar, wx.ID_ANY, "Parity")
self.choiceParity = wx.Choice(self.pnlSettingBar, wx.ID_ANY, choices=["None", "Even", "Odd", "Mark", "Space"])
self.label_5 = wx.StaticText(self.pnlSettingBar, wx.ID_ANY, "Stop Bits")
self.choiceStopBits = wx.Choice(self.pnlSettingBar, wx.ID_ANY, choices=["1", "1.5", "2"])
self.chkboxrtscts = wx.CheckBox(self.pnlSettingBar, wx.ID_ANY, "RTS/CTS")
self.chkboxxonxoff = wx.CheckBox(self.pnlSettingBar, wx.ID_ANY, "Xon/Xoff")
self.sizer_6_staticbox = wx.StaticBox(self.pnlSettingBar, wx.ID_ANY, "HandShake")
self.btnOpen = wx.Button(self.pnlSettingBar, wx.ID_ANY, "Open")
self.btnClear = wx.Button(self.pnlSettingBar, wx.ID_ANY, "Clear Screen")
self.window_1_pane_2 = wx.Panel(self.SplitterWindow, wx.ID_ANY)
self.pnlGrid = wx.ScrolledWindow(self.window_1_pane_2, wx.ID_ANY, style=wx.SIMPLE_BORDER | wx.TAB_TRAVERSAL)
self.grid_csv = wx.grid.Grid(self.pnlGrid, wx.ID_ANY, size=(1, 1))
self.button_1 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send1")
self.button_2 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send2")
self.button_3 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send3")
self.button_4 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send4")
self.button_5 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send5")
self.button_6 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send6")
self.button_7 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send7")
self.button_8 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send8")
self.button_9 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send9")
self.button_10 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send10")
self.button_11 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 11")
self.button_12 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 12")
self.button_13 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 13")
self.button_14 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 14")
self.button_15 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 15")
self.button_16 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 16")
self.button_17 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 17")
self.button_18 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 18")
self.button_19 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 19")
self.button_20 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 20")
self.button_21 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 21")
self.button_22 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 22")
self.button_23 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 23")
self.button_24 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 24")
self.button_25 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 25")
self.button_26 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 26")
self.button_27 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 27")
self.button_28 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 28")
self.button_29 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 29")
self.button_30 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 30")
self.button_31 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 31")
self.button_32 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 32")
self.button_33 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 33")
self.button_34 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 34")
self.button_35 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 35")
self.button_36 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 36")
self.button_37 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 37")
self.button_38 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 38")
self.button_39 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 39")
self.button_40 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 40")
self.button_41 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 41")
self.button_42 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 42")
self.button_43 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 43")
self.button_44 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 44")
self.button_45 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 45")
self.button_46 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 46")
self.button_47 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 47")
self.button_48 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 48")
self.button_49 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 49")
self.button_50 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 50")
self.txtctlMain = wx.TextCtrl(self.window_1_pane_2, wx.ID_ANY, "", style=wx.TE_MULTILINE | wx.TE_RICH | wx.TE_RICH2 | wx.TE_AUTO_URL | wx.TE_LINEWRAP | wx.TE_WORDWRAP)
self.pnlTransmitHex = wx.Panel(self.window_1_pane_2, wx.ID_ANY)
self.label_6 = wx.StaticText(self.pnlTransmitHex, wx.ID_ANY, "Transmit Hex")
self.btnTransmitHex = wx.Button(self.pnlTransmitHex, wx.ID_ANY, "Transmit")
self.txtTransmitHex = wx.TextCtrl(self.pnlTransmitHex, wx.ID_ANY, "", style=wx.TE_MULTILINE | wx.TE_RICH | wx.TE_RICH2 | wx.TE_AUTO_URL | wx.TE_LINEWRAP | wx.TE_WORDWRAP)
self.__set_properties()
self.__do_layout()
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyFrame.__set_properties
self.SetTitle("MyTerm")
self.SetSize((834, 603))
self.statusbar.SetStatusWidths([-28, -10, -10, 55, 105])
# statusbar fields
statusbar_fields = ["", "Rx:0", "Tx:0", "Rx:Ascii", "Local echo:Off"]
for i in range(len(statusbar_fields)):
self.statusbar.SetStatusText(statusbar_fields[i], i)
self.cmbBaudRate.SetSelection(7)
self.choiceDataBits.SetSelection(3)
self.choiceParity.SetSelection(0)
self.choiceStopBits.SetSelection(0)
self.btnOpen.SetMinSize((-1, 30))
self.btnClear.SetMinSize((-1, 30))
self.pnlSettingBar.SetMinSize((158, -1))
self.window_1_pane_1.SetScrollRate(1, 1)
self.grid_csv.CreateGrid(50, 9)
self.grid_csv.SetRowLabelSize(25)
self.grid_csv.SetColLabelSize(21)
self.button_1.SetMinSize((-1, 20))
self.button_2.SetMinSize((-1, 20))
self.button_3.SetMinSize((-1, 20))
self.button_4.SetMinSize((-1, 20))
self.button_5.SetMinSize((-1, 20))
self.button_6.SetMinSize((-1, 20))
self.button_7.SetMinSize((-1, 20))
self.button_8.SetMinSize((-1, 20))
self.button_9.SetMinSize((-1, 20))
self.button_10.SetMinSize((-1, 20))
self.button_11.SetMinSize((-1, 20))
self.button_12.SetMinSize((-1, 20))
self.button_13.SetMinSize((-1, 20))
self.button_14.SetMinSize((-1, 20))
self.button_15.SetMinSize((-1, 20))
self.button_16.SetMinSize((-1, 20))
self.button_17.SetMinSize((-1, 20))
self.button_18.SetMinSize((-1, 20))
self.button_19.SetMinSize((-1, 20))
self.button_20.SetMinSize((-1, 20))
self.button_21.SetMinSize((-1, 20))
self.button_22.SetMinSize((-1, 20))
self.button_23.SetMinSize((-1, 20))
self.button_24.SetMinSize((-1, 20))
self.button_25.SetMinSize((-1, 20))
self.button_26.SetMinSize((-1, 20))
self.button_27.SetMinSize((-1, 20))
self.button_28.SetMinSize((-1, 20))
self.button_29.SetMinSize((-1, 20))
self.button_30.SetMinSize((-1, 20))
self.button_31.SetMinSize((-1, 20))
self.button_32.SetMinSize((-1, 20))
self.button_33.SetMinSize((-1, 20))
self.button_34.SetMinSize((-1, 20))
self.button_35.SetMinSize((-1, 20))
self.button_36.SetMinSize((-1, 20))
self.button_37.SetMinSize((-1, 20))
self.button_38.SetMinSize((-1, 20))
self.button_39.SetMinSize((-1, 20))
self.button_40.SetMinSize((-1, 20))
self.button_41.SetMinSize((-1, 20))
self.button_42.SetMinSize((-1, 20))
self.button_43.SetMinSize((-1, 20))
self.button_44.SetMinSize((-1, 20))
self.button_45.SetMinSize((-1, 20))
self.button_46.SetMinSize((-1, 20))
self.button_47.SetMinSize((-1, 20))
self.button_48.SetMinSize((-1, 20))
self.button_49.SetMinSize((-1, 20))
self.button_50.SetMinSize((-1, 20))
self.pnlGrid.SetMinSize((-1, 225))
self.pnlGrid.SetScrollRate(10, 20)
self.txtctlMain.SetFont(wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL, 0, "Consolas"))
self.pnlTransmitHex.SetMinSize((-1, 80))
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyFrame.__do_layout
sizer_1 = wx.BoxSizer(wx.HORIZONTAL)
sizer_5 = wx.BoxSizer(wx.VERTICAL)
sizer_7 = wx.BoxSizer(wx.VERTICAL)
sizer_8 = wx.BoxSizer(wx.HORIZONTAL)
sizer_7_copy = wx.BoxSizer(wx.HORIZONTAL)
sizer_8_copy = wx.BoxSizer(wx.VERTICAL)
sizer_9 = wx.BoxSizer(wx.HORIZONTAL)
sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
sizer_3 = wx.BoxSizer(wx.VERTICAL)
self.sizer_6_staticbox.Lower()
sizer_6 = wx.StaticBoxSizer(self.sizer_6_staticbox, wx.HORIZONTAL)
grid_sizer_1 = wx.GridSizer(6, 2, 0, 0)
sizer_4 = wx.BoxSizer(wx.HORIZONTAL)
sizer_4.Add(self.btnHideBar, 1, wx.ALL | wx.EXPAND, 1)
sizer_4.Add(self.btnEnumPorts, 1, wx.ALL | wx.EXPAND, 1)
sizer_3.Add(sizer_4, 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.label_1, 0, wx.ALL, 1)
grid_sizer_1.Add(self.cmbPort, 0, wx.ALL | wx.EXPAND, 1)
grid_sizer_1.Add(self.label_2, 0, wx.ALL, 1)
grid_sizer_1.Add(self.cmbBaudRate, 0, wx.ALL | wx.EXPAND, 1)
grid_sizer_1.Add(self.label_3, 0, wx.ALL, 1)
grid_sizer_1.Add(self.choiceDataBits, 0, wx.ALL | wx.EXPAND, 1)
grid_sizer_1.Add(self.label_4, 0, wx.ALL, 1)
grid_sizer_1.Add(self.choiceParity, 0, wx.ALL | wx.EXPAND, 1)
grid_sizer_1.Add(self.label_5, 0, wx.ALL, 1)
grid_sizer_1.Add(self.choiceStopBits, 0, wx.ALL | wx.EXPAND, 1)
sizer_3.Add(grid_sizer_1, 0, wx.ALL | wx.EXPAND, 1)
sizer_6.Add(self.chkboxrtscts, 1, wx.ALL | wx.EXPAND, 1)
sizer_6.Add(self.chkboxxonxoff, 1, wx.ALL | wx.EXPAND, 1)
sizer_3.Add(sizer_6, 0, wx.LEFT | wx.RIGHT | wx.EXPAND, 2)
sizer_3.Add(self.btnOpen, 0, wx.ALL | wx.EXPAND, 5)
sizer_3.Add(self.btnClear, 0, wx.ALL | wx.EXPAND, 5)
self.pnlSettingBar.SetSizer(sizer_3)
sizer_2.Add(self.pnlSettingBar, 1, wx.EXPAND, 0)
self.window_1_pane_1.SetSizer(sizer_2)
sizer_9.Add(self.grid_csv, 1, wx.EXPAND, 0)
sizer_7_copy.Add(sizer_9, 1, wx.EXPAND, 0)
sizer_8_copy.Add((20, 20), 0, 0, 0)
sizer_8_copy.Add(self.button_1, 0, 0, 0)
sizer_8_copy.Add(self.button_2, 0, 0, 0)
sizer_8_copy.Add(self.button_3, 0, 0, 0)
sizer_8_copy.Add(self.button_4, 0, 0, 0)
sizer_8_copy.Add(self.button_5, 0, 0, 0)
sizer_8_copy.Add(self.button_6, 0, 0, 0)
sizer_8_copy.Add(self.button_7, 0, 0, 0)
sizer_8_copy.Add(self.button_8, 0, 0, 0)
sizer_8_copy.Add(self.button_9, 0, 0, 0)
sizer_8_copy.Add(self.button_10, 0, 0, 0)
sizer_8_copy.Add(self.button_11, 0, 0, 0)
sizer_8_copy.Add(self.button_12, 0, 0, 0)
sizer_8_copy.Add(self.button_13, 0, 0, 0)
sizer_8_copy.Add(self.button_14, 0, 0, 0)
sizer_8_copy.Add(self.button_15, 0, 0, 0)
sizer_8_copy.Add(self.button_16, 0, 0, 0)
sizer_8_copy.Add(self.button_17, 0, 0, 0)
sizer_8_copy.Add(self.button_18, 0, 0, 0)
sizer_8_copy.Add(self.button_19, 0, 0, 0)
sizer_8_copy.Add(self.button_20, 0, 0, 0)
sizer_8_copy.Add(self.button_21, 0, 0, 0)
sizer_8_copy.Add(self.button_22, 0, 0, 0)
sizer_8_copy.Add(self.button_23, 0, 0, 0)
sizer_8_copy.Add(self.button_24, 0, 0, 0)
sizer_8_copy.Add(self.button_25, 0, 0, 0)
sizer_8_copy.Add(self.button_26, 0, 0, 0)
sizer_8_copy.Add(self.button_27, 0, 0, 0)
sizer_8_copy.Add(self.button_28, 0, 0, 0)
sizer_8_copy.Add(self.button_29, 0, 0, 0)
sizer_8_copy.Add(self.button_30, 0, 0, 0)
sizer_8_copy.Add(self.button_31, 0, 0, 0)
sizer_8_copy.Add(self.button_32, 0, 0, 0)
sizer_8_copy.Add(self.button_33, 0, 0, 0)
sizer_8_copy.Add(self.button_34, 0, 0, 0)
sizer_8_copy.Add(self.button_35, 0, 0, 0)
sizer_8_copy.Add(self.button_36, 0, 0, 0)
sizer_8_copy.Add(self.button_37, 0, 0, 0)
sizer_8_copy.Add(self.button_38, 0, 0, 0)
sizer_8_copy.Add(self.button_39, 0, 0, 0)
sizer_8_copy.Add(self.button_40, 0, 0, 0)
sizer_8_copy.Add(self.button_41, 0, 0, 0)
sizer_8_copy.Add(self.button_42, 0, 0, 0)
sizer_8_copy.Add(self.button_43, 0, 0, 0)
sizer_8_copy.Add(self.button_44, 0, 0, 0)
sizer_8_copy.Add(self.button_45, 0, 0, 0)
sizer_8_copy.Add(self.button_46, 0, 0, 0)
sizer_8_copy.Add(self.button_47, 0, 0, 0)
sizer_8_copy.Add(self.button_48, 0, 0, 0)
sizer_8_copy.Add(self.button_49, 0, 0, 0)
sizer_8_copy.Add(self.button_50, 0, 0, 0)
sizer_7_copy.Add(sizer_8_copy, 0, 0, 0)
self.pnlGrid.SetSizer(sizer_7_copy)
sizer_5.Add(self.pnlGrid, 0, wx.EXPAND, 0)
sizer_5.Add(self.txtctlMain, 1, wx.EXPAND, 0)
sizer_8.Add(self.label_6, 0, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_8.Add((50, 20), 1, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_8.Add(self.btnTransmitHex, 0, wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL, 2)
sizer_8.Add((10, 20), 0, 0, 0)
sizer_7.Add(sizer_8, 0, wx.EXPAND, 0)
sizer_7.Add(self.txtTransmitHex, 1, wx.EXPAND, 0)
self.pnlTransmitHex.SetSizer(sizer_7)
sizer_5.Add(self.pnlTransmitHex, 0, wx.EXPAND, 0)
self.window_1_pane_2.SetSizer(sizer_5)
self.SplitterWindow.SplitVertically(self.window_1_pane_1, self.window_1_pane_2, 16)
sizer_1.Add(self.SplitterWindow, 1, wx.EXPAND, 0)
self.SetSizer(sizer_1)
self.Layout()
self.Centre()
# end wxGlade
# end of class MyFrame
class MyApp(wx.App):
def OnInit(self):
wx.InitAllImageHandlers()
mainFrame = MyFrame(None, wx.ID_ANY, "")
self.SetTopWindow(mainFrame)
mainFrame.Show()
return 1
# end of class MyApp
if __name__ == "__main__":
app = MyApp(0)
app.MainLoop() | gamesun/MyTerm-for-WangH | GUI.py | Python | bsd-3-clause | 16,544 | 0.001572 |
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from computes.models import Compute
from networks.forms import AddNetPool
from vrtManager.network import wvmNetwork, wvmNetworks
from vrtManager.network import network_size
from libvirt import libvirtError
def networks(request, compute_id):
"""
:param request:
:return:
"""
if not request.user.is_authenticated():
return HttpResponseRedirect(reverse('index'))
if not request.user.is_superuser:
return HttpResponseRedirect(reverse('index'))
error_messages = []
compute = get_object_or_404(Compute, pk=compute_id)
try:
conn = wvmNetworks(compute.hostname,
compute.login,
compute.password,
compute.type)
networks = conn.get_networks_info()
if request.method == 'POST':
if 'create' in request.POST:
form = AddNetPool(request.POST)
if form.is_valid():
data = form.cleaned_data
if data['name'] in networks:
msg = _("Pool name already in use")
error_messages.append(msg)
if data['forward'] == 'bridge' and data['bridge_name'] == '':
error_messages.append('Please enter bridge name')
try:
gateway, netmask, dhcp = network_size(data['subnet'], data['dhcp'])
except:
error_msg = _("Input subnet pool error")
error_messages.append(error_msg)
if not error_messages:
conn.create_network(data['name'], data['forward'], gateway, netmask,
dhcp, data['bridge_name'], data['openvswitch'], data['fixed'])
return HttpResponseRedirect(reverse('network', args=[compute_id, data['name']]))
else:
for msg_err in form.errors.values():
error_messages.append(msg_err.as_text())
conn.close()
except libvirtError as lib_err:
error_messages.append(lib_err)
return render(request, 'networks.html', locals())
def network(request, compute_id, pool):
"""
:param request:
:return:
"""
if not request.user.is_authenticated():
return HttpResponseRedirect(reverse('index'))
if not request.user.is_superuser:
return HttpResponseRedirect(reverse('index'))
error_messages = []
compute = get_object_or_404(Compute, pk=compute_id)
try:
conn = wvmNetwork(compute.hostname,
compute.login,
compute.password,
compute.type,
pool)
networks = conn.get_networks()
state = conn.is_active()
device = conn.get_bridge_device()
autostart = conn.get_autostart()
ipv4_forward = conn.get_ipv4_forward()
ipv4_dhcp_range_start = conn.get_ipv4_dhcp_range_start()
ipv4_dhcp_range_end = conn.get_ipv4_dhcp_range_end()
ipv4_network = conn.get_ipv4_network()
fixed_address = conn.get_mac_ipaddr()
except libvirtError as lib_err:
error_messages.append(lib_err)
if request.method == 'POST':
if 'start' in request.POST:
try:
conn.start()
return HttpResponseRedirect(request.get_full_path())
except libvirtError as lib_err:
error_messages.append(lib_err.message)
if 'stop' in request.POST:
try:
conn.stop()
return HttpResponseRedirect(request.get_full_path())
except libvirtError as lib_err:
error_messages.append(lib_err.message)
if 'delete' in request.POST:
try:
conn.delete()
return HttpResponseRedirect(reverse('networks', args=[compute_id]))
except libvirtError as lib_err:
error_messages.append(lib_err.message)
if 'set_autostart' in request.POST:
try:
conn.set_autostart(1)
return HttpResponseRedirect(request.get_full_path())
except libvirtError as lib_err:
error_messages.append(lib_err.message)
if 'unset_autostart' in request.POST:
try:
conn.set_autostart(0)
return HttpResponseRedirect(request.get_full_path())
except libvirtError as lib_err:
error_messages.append(lib_err.message)
conn.close()
return render(request, 'network.html', locals())
| harry-ops/opencloud | webvirtcloud/networks/views.py | Python | gpl-2.0 | 4,905 | 0.001427 |
import numpy as np
from itertools import combinations, imap
from timer import show_progress
from connectedness import is_connected
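# Empirically studies the probability that a random k-subset of
# {0, ..., N-1} is "connected" (as defined by connectedness.is_connected):
# brute() counts exactly over all 3-subsets, mu() Monte-Carlo samples,
# and bootstrap() repeats mu() to report a mean and a crude spread.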
def brute_main():
for N in show_progress(xrange(4,1000)):
print N,brute(N)
def brute(N):
return sum(is_connected(s)
for s in combinations(xrange(N),3))
def mu(N,k=3):
data = [is_connected(np.random.choice(N, k, replace=False))
for _ in xrange(10**3)]
return np.mean(data)
def bootstrap(N,k=3):
data = [mu(N,k) for _ in xrange(10)]
return np.mean(data), (max(data)-min(data))/2
def boot_main():
for N in show_progress(xrange(10,10**3)):
print ",".join(map(str,[N]+list(bootstrap(N,k=int(N**.5)))))
if __name__=="__main__":
boot_main()
| gabegaster/connectedness | experiment.py | Python | mit | 748 | 0.022727 |
import numpy as np
import pystencils as ps
from pystencils_walberla import CodeGeneration, generate_sweep
with CodeGeneration() as ctx:
# ----- Stencil 2D - created by specifying weights in nested list --------------------------
src, dst = ps.fields("src, src_tmp: [2D]", layout='fzyx')
stencil = [[1.11, 2.22, 3.33],
[4.44, 5.55, 6.66],
[7.77, 8.88, 9.99]]
assignments = ps.assignment_from_stencil(stencil, src, dst, normalization_factor=1 / np.sum(stencil))
generate_sweep(ctx, 'JacobiKernel2D', assignments, field_swaps=[(src, dst)])
# ----- Stencil 3D - created by using kernel_decorator with assignments in '@=' format -----
src, dst = ps.fields("src, src_tmp: [3D]", layout='fzyx')
@ps.kernel
def kernel_func():
dst[0, 0, 0] @= (3 * src[1, 0, 0] + 4 * src[-1, 0, 0]
+ 5 * src[0, 1, 0] + 6 * src[0, -1, 0]
+ 7 * src[0, 0, 1] + 8 * src[0, 0, -1]) / 33
generate_sweep(ctx, 'JacobiKernel3D', kernel_func, field_swaps=[(src, dst)])
| lssfau/walberla | tests/field/codegen/JacobiKernel.py | Python | gpl-3.0 | 1,066 | 0.00469 |
# -*- coding: utf-8 -*-
# Difficulty: ★
# Without using extra storage space, determine whether an integer is a
# palindrome number. For example:
# -22 -> false
# 1221 -> true
# 1221221 -> true
# 1234321 -> true
# 234 -> false
# Things to note:
# - negative numbers must be handled
# - no extra storage is allowed, e.g. converting the number to a string
# - reversing the digits would have to deal with overflow
# Reference answer: https://github.com/barretlee/daily-algorithms/blob/master/answers/7.md
import math
def Palindrome_Number(num):
    # Strategy: repeatedly compare the most-significant and
    # least-significant digits, then strip both ends off.
    if num < 0:
        return False
    # Count how many digits the number has
    LenOfNum = 0
    while 10**LenOfNum <= num:
        LenOfNum = LenOfNum + 1
    if LenOfNum == 1:
        return True
    while LenOfNum >= 2:
        a = num % 10                      # last digit
        b = num // 10**(LenOfNum - 1)     # first digit
        if a == b:
            num = num % (10**(LenOfNum - 1))  # drop the first digit
            num = num // 10                   # drop the last digit
            LenOfNum -= 2
        else:
            return False
    return True
print(Palindrome_Number(10))
print(Palindrome_Number(1))
print(Palindrome_Number(-22))
print(Palindrome_Number(1221))
print(Palindrome_Number(1221221))
print(Palindrome_Number(1234321))
print(Palindrome_Number(234)) | pengkobe/leetcode | questions/Palindrome_Number.py | Python | gpl-3.0 | 1,166 | 0.014315 |
import json
import sure # noqa # pylint: disable=unused-import
import moto.server as server
def test_es_list():
backend = server.create_backend_app("es")
test_client = backend.test_client()
resp = test_client.get("/2015-01-01/domain")
resp.status_code.should.equal(200)
json.loads(resp.data).should.equals({"DomainNames": []})
| spulec/moto | tests/test_es/test_server.py | Python | apache-2.0 | 352 | 0 |
#
# Copyright (C) 2014 Jonathan Finlay <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
The patient visit module
========================
Implements the classes:
* Visit: Main visit module
* ConsultingRoom: Consultings room module
"""
from openerp.osv import osv, fields
class Visit(osv.osv):
"""
The visit module
"""
_name = 'visit'
_description = 'The visit module'
_states = [
('draft', 'Draft'),
('confirmed', 'Confirmed'),
('canceled', 'Canceled'),
('assisted', 'Assisted'),
]
def _default_room(self, cr, uid, id, context=None):
consulroom_obj = self.pool.get('consulting.room')
room = consulroom_obj.search(cr, uid, [('default', '=', '1')])
if room:
return room[0]
return 1
def check_duration(self, cr, uid, id, context=None):
"""
Check the consistency of the visit duration
:param cr:
:param uid:
:param id:
:param context:
:return:
"""
return {}
def onchange_consulting_room(self, cr, uid, id, consulting_room, context=None):
"""
:param cr:
:param uid:
:param id:
:param starts:
:param consulting_room:
:param context:
:return:
"""
if consulting_room:
consulroom_obj = self.pool.get('consulting.room')
duration = consulroom_obj.browse(cr, uid, consulting_room, context=context)[0].duration
else:
duration = 0.0
vals = {
'value': {
'duration': duration,
}
}
return vals
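    # Illustrative usage (assumed OpenERP 7 form view, not part of this module):
    # the onchange above would typically be wired to the room field like
    #   <field name="consultingroom_id"
    #          on_change="onchange_consulting_room(consultingroom_id)"/>
    # so that selecting a room pre-fills the visit duration.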
_columns = {
'name': fields.char('Identifier'),
'starts': fields.datetime('Start date'),
'duration': fields.float('Duration',
help='Duration in minutes'),
'patient_id': fields.many2one('patient', 'Patient'),
'consultingroom_id': fields.many2one('consulting.room',
'Consulting room'),
'state': fields.selection(_states, 'State')
}
_defaults = {
'consultingroom_id': _default_room,
}
class ConsultingRoom(osv.osv):
""" Consulting rooms """
_name = 'consulting.room'
_description = 'Consulting rooms configuration module'
_columns = {
'name': fields.char('Name'),
'duration': fields.float('Standard duration',
help='Visit standard duration time in minutes'),
'price': fields.float('Price',
help='Standard consultation fee'),
'address': fields.text('Address'),
'default': fields.boolean('Default', help='Set as default consulting room'),
}
| jonathanf/infosalud | visit/visit.py | Python | agpl-3.0 | 3,450 | 0.002319 |
#!/usr/bin/env python
#-*- coding: ascii -*-
from __future__ import print_function
import sys
import platform
def copy_to_dst(src_name, dst_dir):
print("Copy %s to %s" % (src_name, dst_dir))
import shutil
shutil.copy(src_name, dst_dir)
#cfg, address_model=32/64, version_type=debug/release;
def getEnvInfo(address_model, version_type) :
import os
env = os.environ
plat = sys.platform
if 0 == plat.find("linux"):
plat = "linux"
print("\nplatform="+plat)
# print("\nplatform="+env)
cfg = ""
arch = ""
## if "" == cfg:
if "win32" == plat:
if "VS140COMNTOOLS" in env:
cfg = "vc15"
print("platform1.0 : " + cfg)
elif "VS120COMNTOOLS" in env:
cfg = "vc12"
print("platform1.1 : " + cfg)
elif "VS110COMNTOOLS" in env:
cfg = "vc11"
print("platform1 : " + cfg)
elif "VS100COMNTOOLS" in env:
cfg = "vc10"
print("platform2 : " + cfg)
elif "VS90COMNTOOLS" in env:
cfg = "vc9"
print("platform3 : " + cfg)
elif "VS80COMNTOOLS" in env:
cfg = "vc8"
print("platform4 : " + cfg)
## elif os.path.exists("C:\MinGW\bin\gcc.exe"):
## print("platform5 : " + cfg)
## cfg = "mingw"
else:
print("Unsupported vin32 develop!\n")
elif "linux" == plat:
cfg = "gcc"
print("platform6 : " + cfg)
elif "cygwin" == plat:
cfg = "gcc"
print("platform7 : " + cfg)
else:
print("Unsupported platform!\n")
sys.exit(1)
print("platform8 : " + cfg)
if "vc15" == cfg :
generator = "Visual Studio 14 2015"
compiler_name = "vc"
compiler_version = 14
elif "vc12" == cfg :
generator = "Visual Studio 12"
compiler_name = "vc"
compiler_version = 12
elif "vc11" == cfg :
generator = "Visual Studio 11"
compiler_name = "vc"
compiler_version = 11
elif "vc10" == cfg:
generator = "Visual Studio 10"
compiler_name = "vc"
compiler_version = 10
elif "vc9" == cfg:
generator = "Visual Studio 9 2008"
compiler_name = "vc"
compiler_version = 9
elif "vc8" == cfg:
generator = "Visual Studio 8 2005"
compiler_name = "vc"
compiler_version = 8
elif "mingw" == cfg:
generator = "MinGW Makefiles"
compiler_name = "gcc"
compiler_version = 0
elif "gcc" == cfg:
generator = "Unix Makefiles"
compiler_name = "gcc"
compiler_version = 0
else:
print("Wrong compiler configuration\n")
sys.exit(1)
# prepare file suffix
if "win32" == plat:
bat_suffix = "bat"
dll_suffix = "dll"
exe_suffix = "exe"
elif "linux" == plat:
bat_suffix = "sh"
dll_suffix = "so"
exe_suffix = ""
elif "cygwin" == plat:
bat_suffix = "sh"
dll_suffix = "dll"
exe_suffix = "exe"
    # set env for boost
## try:
## boost_root = os.environ["BOOST_ROOT"]
## os.environ["BOOST_LIBRARYDIR"] = "%s/lib_%s%d_m%s_%s/lib" % (boost_root, compiler_name, compiler_version, address_model, version_type) # not update?????
## print("boost_lib_path="+os.environ["BOOST_LIBRARYDIR"])
## except KeyError:
## print("Please set the environment variable BOOST_ROOT")
## sys.exit(1)
if "win32" == plat:
machine = platform.machine()
print("arch="+machine)
if(machine == "x86_64" or machine == "AMD64"):
generator = generator + " Win64"
# if "win32" == plat:
# if (CMAKE_CL_64 or CMAKE_GENERATOR MATCHES Win64)
# generator = generator + " Win64"
return (plat, compiler_name, compiler_version, generator, bat_suffix, dll_suffix, exe_suffix)
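# Illustrative usage (hypothetical call site; note that address_model and
# version_type are accepted for call-site compatibility but not inspected here):
#
#   plat, cc, cc_ver, gen, bat_sfx, dll_sfx, exe_sfx = getEnvInfo(64, "release")
#   print("generator=" + gen + ", dll suffix=" + dll_sfx)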
| bigvat/vat | common.py | Python | apache-2.0 | 3,323 | 0.040024 |
import functools
import httplib as http
import itertools
from operator import itemgetter
from dateutil.parser import parse as parse_date
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.utils import timezone
from flask import request, redirect
import pytz
from framework.database import get_or_http_error, autoload
from framework.exceptions import HTTPError
from framework.status import push_status_message
from osf.utils.sanitize import strip_html
from osf.utils.permissions import ADMIN
from osf.utils.functional import rapply
from osf.models import NodeLog, RegistrationSchema, DraftRegistration, Sanction
from website.exceptions import NodeStateError
from website.project.decorators import (
must_be_valid_project,
must_have_permission,
http_error_if_disk_saving_mode
)
from website import language, settings
from website.ember_osf_web.decorators import ember_flag_is_active
from website.prereg import utils as prereg_utils
from website.project import utils as project_utils
from website.project.metadata.schemas import LATEST_SCHEMA_VERSION, METASCHEMA_ORDERING
from website.project.metadata.utils import serialize_meta_schema, serialize_draft_registration
from website.project.utils import serialize_node
get_schema_or_fail = lambda query: get_or_http_error(RegistrationSchema, query)
autoload_draft = functools.partial(autoload, DraftRegistration, 'draft_id', 'draft')
def must_be_branched_from_node(func):
@autoload_draft
@must_be_valid_project
@functools.wraps(func)
def wrapper(*args, **kwargs):
node = kwargs['node']
draft = kwargs['draft']
if draft.deleted:
raise HTTPError(http.GONE)
if not draft.branched_from._id == node._id:
raise HTTPError(
http.BAD_REQUEST,
data={
'message_short': 'Not a draft of this node',
'message_long': 'This draft registration is not created from the given node.'
}
)
return func(*args, **kwargs)
return wrapper
def validate_embargo_end_date(end_date_string, node):
"""
Our reviewers have a window of time in which to review a draft reg. submission.
If an embargo end_date that is within that window is at risk of causing
validation errors down the line if the draft is approved and registered.
The draft registration approval window is always greater than the time span
for disallowed embargo end dates.
:raises: HTTPError if end_date is less than the approval window or greater than the
max embargo end date
"""
end_date = parse_date(end_date_string, ignoretz=True).replace(tzinfo=pytz.utc)
today = timezone.now()
if (end_date - today) <= settings.DRAFT_REGISTRATION_APPROVAL_PERIOD:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Invalid embargo end date',
'message_long': 'Embargo end date for this submission must be at least {0} days in the future.'.format(settings.DRAFT_REGISTRATION_APPROVAL_PERIOD)
})
elif not node._is_embargo_date_valid(end_date):
max_end_date = today + settings.DRAFT_REGISTRATION_APPROVAL_PERIOD
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Invalid embargo end date',
            'message_long': 'Embargo end date must be on or before {0}.'.format(max_end_date.isoformat())
})
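# Worked example (illustrative numbers): with a 10-day approval period and
# today = 2018-01-01, an end date of 2018-01-08 fails the first check because it
# falls inside the approval window, while 2018-01-15 passes it and is then
# validated against the node's own maximum embargo end date.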
def validate_registration_choice(registration_choice):
if registration_choice not in ('embargo', 'immediate'):
raise HTTPError(
http.BAD_REQUEST,
data={
'message_short': "Invalid 'registrationChoice'",
'message_long': "Values for 'registrationChoice' must be either 'embargo' or 'immediate'."
}
)
def check_draft_state(draft):
registered_and_deleted = draft.registered_node and draft.registered_node.is_deleted
if draft.registered_node and not registered_and_deleted:
raise HTTPError(http.FORBIDDEN, data={
'message_short': 'This draft has already been registered',
'message_long': 'This draft has already been registered and cannot be modified.'
})
if draft.is_pending_review:
raise HTTPError(http.FORBIDDEN, data={
'message_short': 'This draft is pending review',
'message_long': 'This draft is pending review and cannot be modified.'
})
if draft.requires_approval and draft.is_approved and (not registered_and_deleted):
raise HTTPError(http.FORBIDDEN, data={
'message_short': 'This draft has already been approved',
'message_long': 'This draft has already been approved and cannot be modified.'
})
@must_have_permission(ADMIN)
@must_be_branched_from_node
def submit_draft_for_review(auth, node, draft, *args, **kwargs):
"""Submit for approvals and/or notifications
:return: serialized registration
:rtype: dict
:raises: HTTPError if embargo end date is invalid
"""
data = request.get_json()
meta = {}
registration_choice = data.get('registrationChoice', 'immediate')
validate_registration_choice(registration_choice)
if registration_choice == 'embargo':
# Initiate embargo
end_date_string = data['embargoEndDate']
validate_embargo_end_date(end_date_string, node)
meta['embargo_end_date'] = end_date_string
meta['registration_choice'] = registration_choice
if draft.registered_node and not draft.registered_node.is_deleted:
raise HTTPError(http.BAD_REQUEST, data=dict(message_long='This draft has already been registered, if you wish to '
'register it again or submit it for review please create '
'a new draft.'))
# Don't allow resubmission unless submission was rejected
if draft.approval and draft.approval.state != Sanction.REJECTED:
raise HTTPError(http.CONFLICT, data=dict(message_long='Cannot resubmit previously submitted draft.'))
draft.submit_for_review(
initiated_by=auth.user,
meta=meta,
save=True
)
if prereg_utils.get_prereg_schema() == draft.registration_schema:
node.add_log(
action=NodeLog.PREREG_REGISTRATION_INITIATED,
params={'node': node._primary_key},
auth=auth,
save=False
)
node.save()
push_status_message(language.AFTER_SUBMIT_FOR_REVIEW,
kind='info',
trust=False)
return {
'status': 'initiated',
'urls': {
'registrations': node.web_url_for('node_registrations')
}
}, http.ACCEPTED
@must_have_permission(ADMIN)
@must_be_branched_from_node
def draft_before_register_page(auth, node, draft, *args, **kwargs):
"""Allow the user to select an embargo period and confirm registration
:return: serialized Node + DraftRegistration
:rtype: dict
"""
ret = serialize_node(node, auth, primary=True)
ret['draft'] = serialize_draft_registration(draft, auth)
return ret
@must_have_permission(ADMIN)
@must_be_branched_from_node
@http_error_if_disk_saving_mode
def register_draft_registration(auth, node, draft, *args, **kwargs):
"""Initiate a registration from a draft registration
:return: success message; url to registrations page
:rtype: dict
"""
data = request.get_json()
registration_choice = data.get('registrationChoice', 'immediate')
validate_registration_choice(registration_choice)
# Don't allow resubmission unless submission was rejected
if draft.approval and draft.approval.state != Sanction.REJECTED:
raise HTTPError(http.CONFLICT, data=dict(message_long='Cannot resubmit previously submitted draft.'))
register = draft.register(auth)
draft.save()
if registration_choice == 'embargo':
# Initiate embargo
embargo_end_date = parse_date(data['embargoEndDate'], ignoretz=True).replace(tzinfo=pytz.utc)
try:
register.embargo_registration(auth.user, embargo_end_date)
except ValidationError as err:
raise HTTPError(http.BAD_REQUEST, data=dict(message_long=err.message))
else:
try:
register.require_approval(auth.user)
except NodeStateError as err:
raise HTTPError(http.BAD_REQUEST, data=dict(message_long=err.message))
register.save()
push_status_message(language.AFTER_REGISTER_ARCHIVING,
kind='info',
trust=False)
return {
'status': 'initiated',
'urls': {
'registrations': node.web_url_for('node_registrations')
}
}, http.ACCEPTED
@must_have_permission(ADMIN)
@must_be_branched_from_node
def get_draft_registration(auth, node, draft, *args, **kwargs):
"""Return a single draft registration
:return: serialized draft registration
:rtype: dict
"""
return serialize_draft_registration(draft, auth), http.OK
@must_have_permission(ADMIN)
@must_be_valid_project
def get_draft_registrations(auth, node, *args, **kwargs):
"""List draft registrations for a node
:return: serialized draft registrations
:rtype: dict
"""
    # drafts are sorted on the serialized 'updated' key, e.g. '2016-08-03T14:24:12Z'
    count = int(request.args.get('count', 100))
drafts = itertools.islice(node.draft_registrations_active, 0, count)
serialized_drafts = [serialize_draft_registration(d, auth) for d in drafts]
sorted_serialized_drafts = sorted(serialized_drafts, key=itemgetter('updated'), reverse=True)
return {
'drafts': sorted_serialized_drafts
}, http.OK
@must_have_permission(ADMIN)
@must_be_valid_project
@ember_flag_is_active('ember_create_draft_registration_page')
def new_draft_registration(auth, node, *args, **kwargs):
"""Create a new draft registration for the node
:return: Redirect to the new draft's edit page
:rtype: flask.redirect
:raises: HTTPError
"""
if node.is_registration:
raise HTTPError(http.FORBIDDEN, data={
'message_short': "Can't create draft",
'message_long': 'Creating draft registrations on registered projects is not allowed.'
})
data = request.values
schema_name = data.get('schema_name')
if not schema_name:
raise HTTPError(
http.BAD_REQUEST,
data={
'message_short': 'Must specify a schema_name',
'message_long': 'Please specify a schema_name'
}
)
schema_version = data.get('schema_version', 2)
meta_schema = get_schema_or_fail(Q(name=schema_name, schema_version=int(schema_version)))
draft = DraftRegistration.create_from_node(
node,
user=auth.user,
schema=meta_schema,
data={}
)
return redirect(node.web_url_for('edit_draft_registration_page', draft_id=draft._id))
@must_have_permission(ADMIN)
@ember_flag_is_active('ember_edit_draft_registration_page')
@must_be_branched_from_node
def edit_draft_registration_page(auth, node, draft, **kwargs):
"""Draft registration editor
:return: serialized DraftRegistration
:rtype: dict
"""
check_draft_state(draft)
ret = project_utils.serialize_node(node, auth, primary=True)
ret['draft'] = serialize_draft_registration(draft, auth)
return ret
@must_have_permission(ADMIN)
@must_be_branched_from_node
def update_draft_registration(auth, node, draft, *args, **kwargs):
"""Update an existing draft registration
:return: serialized draft registration
:rtype: dict
:raises: HTTPError
"""
check_draft_state(draft)
data = request.get_json()
schema_data = data.get('schema_data', {})
schema_data = rapply(schema_data, strip_html)
schema_name = data.get('schema_name')
schema_version = data.get('schema_version', 1)
if schema_name:
meta_schema = get_schema_or_fail(Q(name=schema_name, schema_version=schema_version))
existing_schema = draft.registration_schema
if (existing_schema.name, existing_schema.schema_version) != (meta_schema.name, meta_schema.schema_version):
draft.registration_schema = meta_schema
draft.update_metadata(schema_data)
draft.save()
return serialize_draft_registration(draft, auth), http.OK
@must_have_permission(ADMIN)
@must_be_branched_from_node
def delete_draft_registration(auth, node, draft, *args, **kwargs):
"""Permanently delete a draft registration
:return: None
:rtype: NoneType
"""
if draft.registered_node and not draft.registered_node.is_deleted:
raise HTTPError(
http.FORBIDDEN,
data={
'message_short': 'Can\'t delete draft',
'message_long': 'This draft has already been registered and cannot be deleted.'
}
)
draft.deleted = timezone.now()
draft.save(update_fields=['deleted'])
return None, http.NO_CONTENT
def get_metaschemas(*args, **kwargs):
"""
List metaschemas with which a draft registration may be created. Only fetch the newest version for each schema.
:return: serialized metaschemas
:rtype: dict
"""
    count = int(request.args.get('count', 100))
include = request.args.get('include', 'latest')
meta_schemas = RegistrationSchema.objects.filter(active=True)
if include == 'latest':
        meta_schemas = meta_schemas.filter(schema_version=LATEST_SCHEMA_VERSION)
meta_schemas = sorted(meta_schemas, key=lambda x: METASCHEMA_ORDERING.index(x.name))
return {
'meta_schemas': [
serialize_meta_schema(ms) for ms in meta_schemas[:count]
]
}, http.OK
| erinspace/osf.io | website/project/views/drafts.py | Python | apache-2.0 | 13,819 | 0.003329 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clients', '0017_auto_20151025_1240'),
('clients', '0015_auto_20151025_1209'),
]
operations = [
]
| deafhhs/adapt | clients/migrations/0018_merge.py | Python | mit | 297 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls.defaults import patterns
from django.conf.urls.defaults import url
from openstack_dashboard.dashboards.admin.projects.views \
import CreateProjectView
from openstack_dashboard.dashboards.admin.projects.views import CreateUserView
from openstack_dashboard.dashboards.admin.projects.views import IndexView
from openstack_dashboard.dashboards.admin.projects.views \
import ProjectUsageView
from openstack_dashboard.dashboards.admin.projects.views \
import UpdateProjectView
urlpatterns = patterns('',
url(r'^$', IndexView.as_view(), name='index'),
url(r'^create$', CreateProjectView.as_view(), name='create'),
url(r'^(?P<tenant_id>[^/]+)/update/$',
UpdateProjectView.as_view(), name='update'),
url(r'^(?P<tenant_id>[^/]+)/usage/$',
ProjectUsageView.as_view(), name='usage'),
url(r'^(?P<tenant_id>[^/]+)/create_user/$',
CreateUserView.as_view(), name='create_user'),
)
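# Illustrative example (hypothetical tenant id): these named routes are resolved
# elsewhere via reverse(), e.g.
#   reverse('horizon:admin:projects:update', args=('f0fd46a8',))
#   -> '/admin/projects/f0fd46a8/update/'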
| rackerlabs/horizon | openstack_dashboard/dashboards/admin/projects/urls.py | Python | apache-2.0 | 1,755 | 0.00057 |
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
cells_group = cfg.OptGroup('cells',
title='Cells Options',
help="""
Cells options allow you to use cells functionality in openstack
deployment.
""")
cells_opts = [
cfg.StrOpt('topic',
default='cells',
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
Configurable RPC topics provide little value and can result in a wide variety
of errors. They should not be used.
""",
help="""
Topic.
This is the message queue topic that cells nodes listen on. It is
used when the cells service is started up to configure the queue,
and whenever an RPC call to the scheduler is made.
Possible values:
* cells: This is the recommended and the default value.
"""),
cfg.BoolOpt('enable',
default=False,
help="""
Enable cell functionality.
When this functionality is enabled, it lets you scale an OpenStack
Compute cloud in a more distributed fashion without having to use
complicated technologies like database and message queue clustering.
Cells are configured as a tree. The top-level cell should have a host
that runs a nova-api service, but no nova-compute services. Each
child cell should run all of the typical nova-* services in a regular
Compute cloud except for nova-api. You can think of cells as a normal
Compute deployment in that each cell has its own database server and
message queue broker.
Related options:
* name: A unique cell name must be given when this functionality
is enabled.
* cell_type: Cell type should be defined for all cells.
"""),
cfg.StrOpt('name',
default='nova',
help="""
Name of the current cell.
This value must be unique for each cell. The name of a cell is used as
its id; leaving this option unset or setting the same name for
two or more cells may cause unexpected behaviour.
Related options:
* enabled: This option is meaningful only when cells service
is enabled
"""),
cfg.ListOpt('capabilities',
default=['hypervisor=xenserver;kvm', 'os=linux;windows'],
help="""
Cell capabilities.
List of arbitrary key=value pairs defining capabilities of the
current cell to be sent to the parent cells. These capabilities
are intended to be used in cells scheduler filters/weighers.
Possible values:
* key=value pairs list, for example:
``hypervisor=xenserver;kvm,os=linux;windows``
"""),
cfg.IntOpt('call_timeout',
default=60,
min=0,
help="""
Call timeout.
Cell messaging module waits for response(s) to be put into the
eventlet queue. This option defines the seconds waited for
response from a call to a cell.
Possible values:
* An integer, corresponding to the interval time in seconds.
"""),
# TODO(sfinucan): Add min parameter
cfg.FloatOpt('reserve_percent',
default=10.0,
help="""
Reserve percentage.
Percentage of cell capacity to hold in reserve, so the minimum
amount of free resource is considered to be:
min_free = total * (reserve_percent / 100.0)
This option affects both memory and disk utilization.
The primary purpose of this reserve is to ensure some space is
available for users who want to resize their instance to be larger.
Note that currently once the capacity expands into this reserve
space this option is ignored.
Possible values:
* An integer or float, corresponding to the percentage of cell capacity to
be held in reserve.
"""),
cfg.StrOpt('cell_type',
default='compute',
choices=('api', 'compute'),
help="""
Type of cell.
When cells feature is enabled the hosts in the OpenStack Compute
cloud are partitioned into groups. Cells are configured as a tree.
The top-level cell's cell_type must be set to ``api``. All other
cells are defined as a ``compute cell`` by default.
Related option:
* quota_driver: Disable quota checking for the child cells.
(nova.quota.NoopQuotaDriver)
"""),
# TODO(sfinucan): Add min parameter
cfg.IntOpt('mute_child_interval',
default=300,
help="""
Mute child interval.
Number of seconds without a capability and capacity update after
which a child cell is treated as a mute cell. The mute child cell
is then weighted so that it is strongly recommended to be skipped.
Possible values:
* An integer, corresponding to the interval time in seconds.
"""),
# TODO(sfinucan): Add min parameter
cfg.IntOpt('bandwidth_update_interval',
default=600,
help="""
Bandwidth update interval.
Seconds between bandwidth usage cache updates for cells.
Possible values:
* An integer, corresponding to the interval time in seconds.
"""),
# TODO(sfinucan): Add min parameter
cfg.IntOpt('instance_update_sync_database_limit',
default=100,
help="""
Instance update sync database limit.
Number of instances to pull from the database at one time for
a sync. If there are more instances to update the results will
be paged through.
Possible values:
* An integer, corresponding to a number of instances.
"""),
]
mute_weigher_opts = [
# TODO(sfinucan): Add max parameter
cfg.FloatOpt('mute_weight_multiplier',
default=-10000.0,
help="""
Mute weight multiplier.
Multiplier used to weigh mute children. Mute children cells are
recommended to be skipped so their weight is multiplied by this
negative value.
Possible values:
* Negative numeric number
"""),
]
ram_weigher_opts = [
# TODO(sfinucan): Add min parameter
cfg.FloatOpt('ram_weight_multiplier',
default=10.0,
help="""
Ram weight multiplier.
Multiplier used for weighing ram. Negative numbers indicate that
Compute should stack VMs on one host instead of spreading out new
VMs to more hosts in the cell.
Possible values:
* Numeric multiplier
"""),
]
weigher_opts = [
# TODO(sfinucan): Add min parameter
cfg.FloatOpt('offset_weight_multiplier',
default=1.0,
help="""
Offset weight multiplier
Multiplier used to weigh offset weigher. Cells with higher
weight_offsets in the DB will be preferred. The weight_offset
is a property of a cell stored in the database. It can be used
by a deployer to have scheduling decisions favor or disfavor
cells based on the setting.
Possible values:
* Numeric multiplier
"""),
]
cell_manager_opts = [
# TODO(sfinucan): Add min parameter
cfg.IntOpt('instance_updated_at_threshold',
default=3600,
help="""
Instance updated at threshold
Number of seconds after an instance was updated or deleted to
continue to update cells. This option lets the cells manager
attempt to sync only instances that have been updated recently,
i.e., a threshold of 3600 means to only update instances that
have been modified in the last hour.
Possible values:
* Threshold in seconds
Related options:
* This value is used with the ``instance_update_num_instances``
value in a periodic task run.
"""),
# TODO(sfinucan): Add min parameter
cfg.IntOpt("instance_update_num_instances",
default=1,
help="""
Instance update num instances
On every run of the periodic task, the nova cells manager will attempt
to sync this number of instances among those updated within the
``instance_updated_at_threshold`` window. When the manager gets the
list of instances, it shuffles them so that multiple nova-cells
services do not attempt to sync the same instances in lockstep.
Possible values:
* Positive integer number
Related options:
* This value is used with the ``instance_updated_at_threshold``
value in a periodic task run.
""")
]
cell_messaging_opts = [
# TODO(sfinucan): Add min parameter
cfg.IntOpt('max_hop_count',
default=10,
help="""
Maximum hop count
When processing a targeted message, if the local cell is not the
target, a route is defined between neighbouring cells. And the
message is processed across the whole routing path. This option
defines the maximum hop counts until reaching the target.
Possible values:
* Positive integer value
"""),
cfg.StrOpt('scheduler',
default='nova.cells.scheduler.CellsScheduler',
help="""
Cells scheduler.
The class of the driver used by the cells scheduler. This should be
the full Python path to the class to be used. If nothing is specified
in this option, the CellsScheduler is used.
""")
]
cell_rpc_driver_opts = [
cfg.StrOpt('rpc_driver_queue_base',
default='cells.intercell',
help="""
RPC driver queue base.
When sending a message to another cell by JSON-ifying the message
and making an RPC cast to 'process_message', a base queue is used.
This option defines the base queue name to be used when communicating
between cells. Various topics by message type will be appended to this.
Possible values:
* The base queue name to be used when communicating between cells.
""")
]
cell_scheduler_opts = [
cfg.ListOpt('scheduler_filter_classes',
default=['nova.cells.filters.all_filters'],
help="""
Scheduler filter classes.
Filter classes the cells scheduler should use. An entry of
"nova.cells.filters.all_filters" maps to all cells filters
included with nova. As of the Mitaka release the following
filter classes are available:
Different cell filter: A scheduler hint of 'different_cell'
with a value of a full cell name may be specified to route
a build away from a particular cell.
Image properties filter: Image metadata named
'hypervisor_version_requires' with a version specification
may be specified to ensure the build goes to a cell which
has hypervisors of the required version. If either the version
requirement on the image or the hypervisor capability of the
cell is not present, this filter returns without filtering out
the cells.
Target cell filter: A scheduler hint of 'target_cell' with a
value of a full cell name may be specified to route a build to
a particular cell. No error handling is done as there's no way
to know whether the full cell path is valid.
As an admin user, you can also add a filter that directs builds
to a particular cell.
"""),
cfg.ListOpt('scheduler_weight_classes',
default=['nova.cells.weights.all_weighers'],
help="""
Scheduler weight classes.
Weigher classes the cells scheduler should use. An entry of
"nova.cells.weights.all_weighers" maps to all cell weighers
included with nova. As of the Mitaka release the following
weight classes are available:
mute_child: Downgrades the likelihood of child cells being
chosen for scheduling requests, which haven't sent capacity
or capability updates in a while. Options include
mute_weight_multiplier (multiplier for mute children; value
should be negative).
ram_by_instance_type: Select cells with the most RAM capacity
for the instance type being requested. Because higher weights
win, Compute returns the number of available units for the
instance type requested. The ram_weight_multiplier option defaults
to 10.0 that adds to the weight by a factor of 10. Use a negative
number to stack VMs on one host instead of spreading out new VMs
to more hosts in the cell.
weight_offset: Allows modifying the database to weight a particular
cell. The highest weight will be the first cell to be scheduled for
launching an instance. When the weight_offset of a cell is set to 0,
it is unlikely to be picked but it could be picked if other cells
have a lower weight, for example if they are full. When the weight_offset
is set to a very high value (for example, '999999999999999'), it is
likely to be picked if no other cell has a higher weight.
"""),
# TODO(sfinucan): Add min parameter
cfg.IntOpt('scheduler_retries',
default=10,
help="""
Scheduler retries.
How many retries when no cells are available. Specifies how many
times the scheduler tries to launch a new instance when no cells
are available.
Possible values:
* Positive integer value
Related options:
* This value is used with the ``scheduler_retry_delay`` value
while retrying to find a suitable cell.
"""),
# TODO(sfinucan): Add min parameter
cfg.IntOpt('scheduler_retry_delay',
default=2,
help="""
Scheduler retry delay.
Specifies the delay (in seconds) between scheduling retries when no
cell can be found to place the new instance on. When the instance
could not be scheduled to a cell after ``scheduler_retries`` in
combination with ``scheduler_retry_delay``, then the scheduling
of the instance failed.
Possible values:
* Time in seconds.
Related options:
* This value is used with the ``scheduler_retries`` value
while retrying to find a suitable cell.
""")
]
cell_state_manager_opts = [
# TODO(sfinucan): Add min parameter
cfg.IntOpt('db_check_interval',
default=60,
help="""
DB check interval.
Cell state manager updates cell status for all cells from the DB
only after this particular interval time is passed. Otherwise cached
statuses are used. If this value is 0 or negative, all cell statuses
are updated from the DB whenever a state is needed.
Possible values:
* Interval time, in seconds.
"""),
cfg.StrOpt('cells_config',
help="""
Optional cells configuration.
Configuration file from which to read cells configuration. If given,
overrides reading cells from the database.
Cells store all inter-cell communication data, including user names
and passwords, in the database. Because the cells data is not updated
very frequently, use this option to specify a JSON file to store
cells data. With this configuration, the database is no longer
consulted when reloading the cells data. The file must have columns
present in the Cell model (excluding common database fields and the
id column). You must specify the queue connection information through
a transport_url field, instead of username, password, and so on.
The transport_url has the following form:
rabbit://USERNAME:PASSWORD@HOSTNAME:PORT/VIRTUAL_HOST
Possible values:
The scheme can be either qpid or rabbit, the following sample shows
this optional configuration:
{
"parent": {
"name": "parent",
"api_url": "http://api.example.com:8774",
"transport_url": "rabbit://rabbit.example.com",
"weight_offset": 0.0,
"weight_scale": 1.0,
"is_parent": true
},
"cell1": {
"name": "cell1",
"api_url": "http://api.example.com:8774",
"transport_url": "rabbit://rabbit1.example.com",
"weight_offset": 0.0,
"weight_scale": 1.0,
"is_parent": false
},
"cell2": {
"name": "cell2",
"api_url": "http://api.example.com:8774",
"transport_url": "rabbit://rabbit2.example.com",
"weight_offset": 0.0,
"weight_scale": 1.0,
"is_parent": false
}
}
""")
]
ALL_CELLS_OPTS = (cells_opts +
mute_weigher_opts +
ram_weigher_opts +
weigher_opts +
cell_manager_opts +
cell_messaging_opts +
cell_rpc_driver_opts +
cell_scheduler_opts +
cell_state_manager_opts)
def register_opts(conf):
conf.register_group(cells_group)
conf.register_opts(ALL_CELLS_OPTS, group=cells_group)
def list_opts():
return {cells_group: ALL_CELLS_OPTS}
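# Illustrative usage (sketch; nova itself registers these options on the global
# cfg.CONF rather than a private ConfigOpts instance):
#
#   conf = cfg.ConfigOpts()
#   register_opts(conf)
#   conf(args=[])            # parse an empty argv so defaults are resolved
#   print(conf.cells.name)   # -> 'nova'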
| hanlind/nova | nova/conf/cells.py | Python | apache-2.0 | 15,948 | 0.003386 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Shuang Wang <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: aws_codecommit
version_added: "2.8"
short_description: Manage repositories in AWS CodeCommit
description:
- Supports creation and deletion of CodeCommit repositories.
- See U(https://aws.amazon.com/codecommit/) for more information about CodeCommit.
author: Shuang Wang (@ptux)
requirements:
- botocore
- boto3
- python >= 2.6
options:
name:
description:
- name of repository.
required: true
comment:
description:
- description or comment of repository.
required: false
state:
description:
- Specifies the state of repository.
required: true
choices: [ 'present', 'absent' ]
extends_documentation_fragment:
- aws
- ec2
'''
RETURN = '''
repository_metadata:
description: "Information about the repository."
returned: always
type: complex
contains:
account_id:
description: "The ID of the AWS account associated with the repository."
returned: when state is present
type: str
sample: "268342293637"
arn:
description: "The Amazon Resource Name (ARN) of the repository."
returned: when state is present
type: str
sample: "arn:aws:codecommit:ap-northeast-1:268342293637:username"
clone_url_http:
description: "The URL to use for cloning the repository over HTTPS."
returned: when state is present
type: str
sample: "https://git-codecommit.ap-northeast-1.amazonaws.com/v1/repos/reponame"
clone_url_ssh:
description: "The URL to use for cloning the repository over SSH."
returned: when state is present
type: str
sample: "ssh://git-codecommit.ap-northeast-1.amazonaws.com/v1/repos/reponame"
creation_date:
description: "The date and time the repository was created, in timestamp format."
returned: when state is present
type: str
sample: "2018-10-16T13:21:41.261000+09:00"
last_modified_date:
description: "The date and time the repository was last modified, in timestamp format."
returned: when state is present
type: str
sample: "2018-10-16T13:21:41.261000+09:00"
repository_description:
description: "A comment or description about the repository."
returned: when state is present
type: str
sample: "test from ptux"
repository_id:
description: "The ID of the repository that was created or deleted"
returned: always
type: str
sample: "e62a5c54-i879-497b-b62f-9f99e4ebfk8e"
repository_name:
description: "The repository's name."
returned: when state is present
type: str
sample: "reponame"
response_metadata:
description: "Information about the response."
returned: always
type: complex
contains:
http_headers:
description: "http headers of http response"
returned: always
type: dict
http_status_code:
description: "http status code of http response"
returned: always
type: str
sample: "200"
request_id:
description: "http request id"
returned: always
type: str
sample: "fb49cfca-d0fa-11e8-85cb-b3cc4b5045ef"
retry_attempts:
description: "numbers of retry attempts"
returned: always
type: str
sample: "0"
'''
EXAMPLES = '''
# Create a new repository
- aws_codecommit:
name: repo
state: present
# Delete a repository
- aws_codecommit:
name: repo
state: absent
'''
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
class CodeCommit(object):
def __init__(self, module=None):
self._module = module
self._client = self._module.client('codecommit')
self._check_mode = self._module.check_mode
def process(self):
result = dict(changed=False)
if self._module.params['state'] == 'present' and not self._repository_exists():
if not self._module.check_mode:
result = self._create_repository()
result['changed'] = True
if self._module.params['state'] == 'absent' and self._repository_exists():
if not self._module.check_mode:
result = self._delete_repository()
result['changed'] = True
return result
def _repository_exists(self):
try:
paginator = self._client.get_paginator('list_repositories')
for page in paginator.paginate():
repositories = page['repositories']
for item in repositories:
if self._module.params['name'] in item.values():
return True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self._module.fail_json_aws(e, msg="couldn't get repository")
return False
def _create_repository(self):
try:
result = self._client.create_repository(
repositoryName=self._module.params['name'],
repositoryDescription=self._module.params['comment']
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self._module.fail_json_aws(e, msg="couldn't create repository")
return result
def _delete_repository(self):
try:
result = self._client.delete_repository(
repositoryName=self._module.params['name']
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self._module.fail_json_aws(e, msg="couldn't delete repository")
return result
def main():
argument_spec = dict(
name=dict(required=True),
state=dict(choices=['present', 'absent'], required=True),
comment=dict(default='')
)
ansible_aws_module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True
)
aws_codecommit = CodeCommit(module=ansible_aws_module)
result = aws_codecommit.process()
ansible_aws_module.exit_json(**camel_dict_to_snake_dict(result))
if __name__ == '__main__':
main()
| kvar/ansible | lib/ansible/modules/cloud/amazon/aws_codecommit.py | Python | gpl-3.0 | 6,565 | 0.00198 |
#Copyright 2009 Humanitarian International Services Group
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
'''
Created July, 2009
BadRequestException and subclasses, all subclass UtakaException with an httpStatus of 400
@author: Andrew
'''
from utaka.src.exceptions.UtakaException import UtakaException
import utaka.src.Config as Config
#400
class BadRequestException(UtakaException):
def __init__(self, args):
UtakaException.__init__(self, args, 400)
class AmbiguousGrantByEmailAddress(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'The e-mail address you provided is associated with more than one account.',
'Code' : 'BadRequest'})
class BadDigestException(BadRequestException):
def __init__(self, expectedDigest, calculatedDigest):
BadRequestException.__init__(self,
{'Message' : 'The Content-MD5 you specified did not match what we received',
'ExpectedDigest' : expectedDigest,
'CalculatedDigest' : calculatedDigest,
'Code' : 'BadDigest'})
class CredentialsNotSupported(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'This request does not support credentials',
'Code' : 'CredentialsNotSupported'})
class EntityTooSmallException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'Your proposed upload is smaller than the minimum allowed object size',
'Code' : 'EntityTooSmall'})
class EntityTooLargeException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'Your proposed upload exceeds the maximum allowed object size',
'Code' : 'EntityTooLarge'})
class ExpiredTokenException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'The provided token has expired.',
'Code' : 'ExpiredToken'})
class IncompleteBodyException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'You did not provide the number of bytes specified by the Content-Length HTTP Header',
'Code' : 'IncompleteBody'})
class IncorrectNumberOfFilesInPostRequestException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'POST requires exactly one file upload per request',
'Code' : 'IncorrectNumberOfFilesInPostRequest'})
class InlineDataTooLargeException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'Inline data exceeds the maximum allowed size',
'Code' : 'InlineDataTooLarge'})
class InvalidArgumentException(BadRequestException):
def __init__(self, argValue, argName, msg='Invalid Argument'):
BadRequestException.__init__(self,
{'Message' : msg,
'Code' : 'InvalidArgument',
'ArgumentValue' : argValue,
'ArgumentName' : argName})
class InvalidArgumentAuthorizationException(InvalidArgumentException):
def __init__(self, argValue):
headerPrefix = str(Config.get('authentication', 'prefix'))
InvalidArgumentException.__init__(self, argValue, 'Authorization', ("Authorization header is invalid. Expected " + headerPrefix + " AccessKeyId:signature"))
class InvalidArgumentAuthorizationSpacingException(InvalidArgumentException):
def __init__(self, argValue):
InvalidArgumentException.__init__(self, argValue, 'Authorization', "Authorization header is invalid -- one and only one ' '(space) required")
class InvalidArgumentMetadataDirectiveException(InvalidArgumentException):
def __init__(self, argValue):
InvalidArgumentException.__init__(self, argValue, 'MetadataDirective', 'A specified metadata directive value must be either REPLACE or COPY.')
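# Illustrative usage (hypothetical request value): raising one of these from
# request-handling code yields an HTTP 400 whose body carries the dict above,
# e.g.
#   raise InvalidArgumentMetadataDirectiveException('MOVE')
# produces Code=InvalidArgument with ArgumentName='MetadataDirective'.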
class InvalidArgumentQueryStringConflictException(InvalidArgumentException):
def __init__(self, conflictParamA, conflictParamB):
InvalidArgumentException.__init__(self, conflictParamA, 'ResourceType', "Conflicting query string parameters: %s and %s" % (str(conflictParamA), str(conflictParamB)))
class InvalidBucketNameException(BadRequestException):
def __init__(self, bucketName):
BadRequestException.__init__(self,
{'Message' : 'The specified bucket is not valid',
'Code' : 'InvalidBucketName',
'BucketName' : bucketName})
class InvalidDigestException(BadRequestException):
def __init__(self, contentMD5):
BadRequestException.__init__(self,
{'Message' : 'The Content-MD5 you specified is not valid',
'Code' : 'InvalidDigest',
'Content-MD5' : contentMD5})
class InvalidLocationConstraintException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'The specified location constraint is not valid',
'Code' : 'InvalidLocationConstraint'})
class InvalidPolicyDocumentException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'The content of the form does not meet the conditions specified in the policy document',
'Code' : 'InvalidPolicyDocument'})
class InvalidSOAPRequestException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'The SOAP request body is invalid',
'Code' : 'InvalidSOAPRequest'})
class InvalidStorageClassException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'The storage class you specified is not valid',
'Code' : 'InvalidStorageClass'})
class InvalidTargetBucketForLoggingException(BadRequestException):
def __init__(self, targetBucket):
BadRequestException.__init__(self,
{'Message' : 'The target bucket for logging does not exist, is not owned by you, or does not have the appropriate grants for the log-delivery group.',
'Code' : 'InvalidTargetBucketForLogging',
'TargetBucket' : targetBucket})
class InvalidTokenException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'The provided token is malformed or otherwise invalid',
'Code' : 'InvalidTokenException'})
class InvalidURIException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : "Couldn't parse the specified URI.",
'Code' : 'InvalidURI'})
class KeyTooLongException(BadRequestException):
	def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'Your key is too long',
'Code' : 'KeyTooLong'})
class MalformedACLErrorException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' :'The XML you provided was not well-formed or did not validate against our published schema',
'Code' : 'MalformedACL'})
class MalformedPOSTRequestException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'The body of your POST request is not well-formed multipart/form-data.',
'Code' : 'MalformedPOSTRequest'})
class MalformedXMLException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'The XML you provided was not well-formed or did not validate against our published schema',
'Code' : 'MalformedXML'})
class MaxMessageLengthExceededException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'Your request was too big',
'Code' : 'MaxMessageLengthExceeded'})
class MaxPostPreDataLengthExceededErrorException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'Your POST request fields preceding the upload file were too large.',
'Code' : 'MaxPostPreDataLengthExceededError'})
class MetadataTooLargeException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
            {'Message' : 'Your metadata headers exceed the maximum allowed metadata size.',
'Code' : 'MetadataTooLarge'})
class MissingRequestBodyErrorException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'Request body is empty',
'Code' : 'MissingRequestBodyError'})
class MissingSecurityElementException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'The SOAP 1.1 request is missing a security element',
'Code' : 'MissingSecurityElement'})
class MissingSecurityHeaderException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'Your request was missing a required header',
'Code' : 'MissingSecurityHeader'})
class MissingHeaderException(BadRequestException):
def __init__(self, header, headerDescription):
BadRequestException.__init__(self,
{'Message' : 'Your request was missing a required header',
'Code' : 'MissingHeader',
'Header' : header,
'HeaderDescription' : headerDescription})
class NoLoggingStatusForKeyException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'There is no such thing as a logging status sub-resource for a key',
'Code' : 'NoLoggingStatusForKey'})
class RequestIsNotMultiPartContentException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'Bucket POST must be of the enclosure-type multipart/form-data.',
'Code' : 'RequestIsNotMultiPartContent'})
class RequestTimeoutException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'Your socket connection to the server was not read from or written to within the timeout period',
'Code' : 'RequestTimeout'})
class RequestTorrentOfBucketErrorException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'Requesting the torrent file of a bucket is not permitted',
'Code' : 'RequestTorrentOfBucketError'})
class TokenRefreshRequiredException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'The provided token must be refreshed',
'Code' : 'TokenRefreshRequired'})
class TooManyBucketsException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'You have attempted to create more buckets than allowed',
'Code' : 'TooManyBuckets'})
class UnexpectedContentException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'This request does not support content',
'Code' : 'UnexpectedContent'})
class UnresolvableGrantByEmailAddressException(BadRequestException):
def __init__(self, email):
BadRequestException.__init__(self,
{'Message' : 'The e-mail address you provided does not match any account on record',
'Code' : 'UnresolvableGrantByEmailAddress',
'E-mail' : email})
class UserKeyMustBeSpecifiedException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'The bucket POST must contain the specified field name. If it is specified, please check the order of the fields.',
'Code' : 'UserKeyMustBeSpecified'})
class UseridNotValidException(BadRequestException):
def __init__(self, userid):
BadRequestException.__init__(self,
{'Message' : 'Userid should be a positive integer greater than 2.',
'Code' : 'UseridNotValid',
'Userid' : userid})
class UseridNotFoundException(BadRequestException):
def __init__(self, userid):
BadRequestException.__init__(self,
{'Code' : 'UseridNotFound',
'Description' : 'The userid you provided was not found',
'Userid' : userid}) | mattmillr/utaka | src/exceptions/BadRequestException.py | Python | apache-2.0 | 11,951 | 0.038407 |
# Script Name : main.py
# Author : Shy Ruparel
# Created : September 8 2015
# Pulls in data from "data.csv" which is 2 columns wide
# Uses a base image as the background
# Uses the data - school name, and venue address -
# and prints onto the base image
# and saves every image as a .PNG
from PIL import Image, ImageDraw,ImageFont
import csv
# Main image from base.jpg
im = Image.open('base.jpg').convert('RGBA')
W, H = im.size
MaxSize = 200
maxFontW = W * .90
# Text writing onto image
with open('data.csv', 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in spamreader:
im = Image.open('base.jpg').convert('RGBA')
venueSize = MaxSize
addressSize = MaxSize/2
# Grab name and address
venueName = row[0].decode('utf-8')
addressDetails = row[1].decode('utf-8')
# Set font and size
venue = ImageFont.truetype('fonts/Outage.ttf', venueSize)
address = ImageFont.truetype('fonts/Lato.ttf', addressSize)
draw = ImageDraw.Draw(im)
# Find size of text
wVenue, hVenue = draw.textsize(venueName,font=venue)
# Make size smaller until width is less than size of maxFontW
while (wVenue > maxFontW):
venueSize = venueSize - 10
venue = ImageFont.truetype('fonts/Outage.ttf', venueSize)
wVenue, hVenue = draw.textsize(venueName,font=venue)
wAddress, hAddress = draw.textsize(addressDetails,font=address)
while (wAddress > maxFontW):
addressSize = addressSize - 10
            address = ImageFont.truetype('fonts/Lato.ttf', addressSize)
wAddress, hAddress = draw.textsize(addressDetails,font=address)
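        # Example with illustrative numbers: on a 3000 px wide base image,
        # maxFontW is 2700 px; text measuring 3100 px at the current size is
        # re-measured at 10 pt smaller until it fits inside maxFontW.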
# Put text onto the image
draw.text(((W-wVenue)/2,(H-hVenue)/2 + 100), venueName,font=venue, fill="white")
draw.text(((W-wAddress)/2,((H-hAddress)/2)+hVenue+125), addressDetails,font=address, fill="white")
# Save out the image
filename = 'output/' + venueName.strip() + '.png'
filename = filename.replace (" ", "_")
print filename
im.save(filename,'PNG') | niespodd/flyer_generator | main.py | Python | mit | 2,191 | 0.006846 |
""" Convert Excel document (.xls) delivered from MAF to a ped.txt file.
"""
import argparse
import csv
import xlrd
def main(xls_file, out_file, sheet_name):
with xlrd.open_workbook(xls_file) as workbook:
worksheet = workbook.sheet_by_name(sheet_name)
with open(out_file, 'w') as fh:
c = csv.writer(fh, delimiter='\t')
for row in range(worksheet.nrows):
c.writerow(worksheet.row_values(row))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('xls_file')
parser.add_argument('out_file')
parser.add_argument('--sheet_name', default='HaploView_ped_0')
args = parser.parse_args()
main(args.xls_file, args.out_file, args.sheet_name)
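# Example invocation (hypothetical file names):
#   python xls2ped.py maf_delivery.xls samples.ped --sheet_name HaploView_ped_0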
| SciLifeLab/scilifelab | scripts/xls2ped.py | Python | mit | 751 | 0 |
"""Support for Pilight binary sensors."""
import datetime
import logging
import voluptuous as vol
from homeassistant.components import pilight
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorDevice
from homeassistant.const import (
CONF_DISARM_AFTER_TRIGGER,
CONF_NAME,
CONF_PAYLOAD,
CONF_PAYLOAD_OFF,
CONF_PAYLOAD_ON,
)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.event import track_point_in_time
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
CONF_VARIABLE = "variable"
CONF_RESET_DELAY_SEC = "reset_delay_sec"
DEFAULT_NAME = "Pilight Binary Sensor"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_VARIABLE): cv.string,
vol.Required(CONF_PAYLOAD): vol.Schema(dict),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PAYLOAD_ON, default="on"): vol.Any(
cv.positive_int, cv.small_float, cv.string
),
vol.Optional(CONF_PAYLOAD_OFF, default="off"): vol.Any(
cv.positive_int, cv.small_float, cv.string
),
vol.Optional(CONF_DISARM_AFTER_TRIGGER, default=False): cv.boolean,
vol.Optional(CONF_RESET_DELAY_SEC, default=30): cv.positive_int,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up Pilight Binary Sensor."""
disarm = config.get(CONF_DISARM_AFTER_TRIGGER)
if disarm:
add_entities(
[
PilightTriggerSensor(
hass=hass,
name=config.get(CONF_NAME),
variable=config.get(CONF_VARIABLE),
payload=config.get(CONF_PAYLOAD),
on_value=config.get(CONF_PAYLOAD_ON),
off_value=config.get(CONF_PAYLOAD_OFF),
rst_dly_sec=config.get(CONF_RESET_DELAY_SEC),
)
]
)
else:
add_entities(
[
PilightBinarySensor(
hass=hass,
name=config.get(CONF_NAME),
variable=config.get(CONF_VARIABLE),
payload=config.get(CONF_PAYLOAD),
on_value=config.get(CONF_PAYLOAD_ON),
off_value=config.get(CONF_PAYLOAD_OFF),
)
]
)
class PilightBinarySensor(BinarySensorDevice):
"""Representation of a binary sensor that can be updated using Pilight."""
def __init__(self, hass, name, variable, payload, on_value, off_value):
"""Initialize the sensor."""
self._state = False
self._hass = hass
self._name = name
self._variable = variable
self._payload = payload
self._on_value = on_value
self._off_value = off_value
hass.bus.listen(pilight.EVENT, self._handle_code)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return True if the binary sensor is on."""
return self._state
def _handle_code(self, call):
"""Handle received code by the pilight-daemon.
If the code matches the defined payload
of this sensor the sensor state is changed accordingly.
"""
        # Check if received code matches defined payload
# True if payload is contained in received code dict
payload_ok = True
for key in self._payload:
if key not in call.data:
payload_ok = False
continue
if self._payload[key] != call.data[key]:
payload_ok = False
# Read out variable if payload ok
if payload_ok:
if self._variable not in call.data:
return
value = call.data[self._variable]
self._state = value == self._on_value
self.schedule_update_ha_state()
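    # Illustrative example (hypothetical pilight event): with
    #   payload: {"protocol": "ev1527", "id": 23783}  and  variable: "state",
    # a received event {'protocol': 'ev1527', 'id': 23783, 'state': 'on'} matches
    # every payload key, so call.data['state'] is compared against the configured
    # payload_on value ('on' by default) to set the sensor state.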
class PilightTriggerSensor(BinarySensorDevice):
"""Representation of a binary sensor that can be updated using Pilight."""
def __init__(
self, hass, name, variable, payload, on_value, off_value, rst_dly_sec=30
):
"""Initialize the sensor."""
self._state = False
self._hass = hass
self._name = name
self._variable = variable
self._payload = payload
self._on_value = on_value
self._off_value = off_value
self._reset_delay_sec = rst_dly_sec
self._delay_after = None
hass.bus.listen(pilight.EVENT, self._handle_code)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return True if the binary sensor is on."""
return self._state
def _reset_state(self, call):
self._state = False
self._delay_after = None
self.schedule_update_ha_state()
def _handle_code(self, call):
"""Handle received code by the pilight-daemon.
If the code matches the defined payload
of this sensor the sensor state is changed accordingly.
"""
        # Check whether the received code matches the defined payload:
        # payload_ok is True if the payload dict is contained in the
        # received code dict.
payload_ok = True
for key in self._payload:
if key not in call.data:
payload_ok = False
continue
if self._payload[key] != call.data[key]:
payload_ok = False
# Read out variable if payload ok
if payload_ok:
if self._variable not in call.data:
return
value = call.data[self._variable]
self._state = value == self._on_value
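            # Arm a one-shot reset timer: _reset_state flips the sensor
            # back off after rst_dly_sec seconds; while a timer is pending
            # no new one is scheduled.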
if self._delay_after is None:
self._delay_after = dt_util.utcnow() + datetime.timedelta(
seconds=self._reset_delay_sec
)
track_point_in_time(self._hass, self._reset_state, self._delay_after)
self.schedule_update_ha_state()
| leppa/home-assistant | homeassistant/components/pilight/binary_sensor.py | Python | apache-2.0 | 6,144 | 0.000488 |
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import textwrap, os, shlex, subprocess, glob, shutil
from distutils import sysconfig
from multiprocessing import cpu_count
from PyQt4.pyqtconfig import QtGuiModuleMakefile
from setup import Command, islinux, isbsd, isosx, SRC, iswindows
from setup.build_environment import (chmlib_inc_dirs,
podofo_inc, podofo_lib, podofo_error, pyqt, OSX_SDK, NMAKE, QMAKE,
msvc, MT, win_inc, win_lib, win_ddk, magick_inc_dirs, magick_lib_dirs,
magick_libs, chmlib_lib_dirs, sqlite_inc_dirs, icu_inc_dirs,
icu_lib_dirs, win_ddk_lib_dirs, ft_libs, ft_lib_dirs, ft_inc_dirs,
zlib_libs, zlib_lib_dirs, zlib_inc_dirs, is64bit, qt_private_inc)
MT  # referenced here only so the import is not flagged as unused
isunix = islinux or isosx or isbsd
make = 'make' if isunix else NMAKE
class Extension(object):
def absolutize(self, paths):
return list(set([x if os.path.isabs(x) else os.path.join(SRC, x.replace('/',
os.sep)) for x in paths]))
def __init__(self, name, sources, **kwargs):
self.name = name
self.needs_cxx = bool([1 for x in sources if os.path.splitext(x)[1] in
('.cpp', '.c++', '.cxx')])
self.sources = self.absolutize(sources)
self.headers = self.absolutize(kwargs.get('headers', []))
self.sip_files = self.absolutize(kwargs.get('sip_files', []))
self.inc_dirs = self.absolutize(kwargs.get('inc_dirs', []))
self.lib_dirs = self.absolutize(kwargs.get('lib_dirs', []))
self.extra_objs = self.absolutize(kwargs.get('extra_objs', []))
self.error = kwargs.get('error', None)
self.libraries = kwargs.get('libraries', [])
self.cflags = kwargs.get('cflags', [])
self.ldflags = kwargs.get('ldflags', [])
self.optional = kwargs.get('optional', False)
self.needs_ddk = kwargs.get('needs_ddk', False)
of = kwargs.get('optimize_level', None)
if of is None:
of = '/Ox' if iswindows else '-O3'
else:
flag = '/O%d' if iswindows else '-O%d'
of = flag % of
self.cflags.insert(0, of)
def preflight(self, obj_dir, compiler, linker, builder, cflags, ldflags):
pass
reflow_sources = glob.glob(os.path.join(SRC, 'calibre', 'ebooks', 'pdf', '*.cpp'))
reflow_headers = glob.glob(os.path.join(SRC, 'calibre', 'ebooks', 'pdf', '*.h'))
icu_libs = ['icudata', 'icui18n', 'icuuc', 'icuio']
icu_cflags = []
if iswindows:
icu_libs = ['icudt', 'icuin', 'icuuc', 'icuio']
if isosx:
icu_libs = ['icucore']
icu_cflags = ['-DU_DISABLE_RENAMING'] # Needed to use system libicucore.dylib
extensions = [
Extension('hunspell',
['hunspell/'+x for x in
'affentry.cxx affixmgr.cxx csutil.cxx dictmgr.cxx filemgr.cxx hashmgr.cxx hunspell.cxx phonet.cxx replist.cxx suggestmgr.cxx'.split()
] + ['calibre/utils/spell/hunspell_wrapper.cpp',],
inc_dirs=['hunspell'],
cflags='/DHUNSPELL_STATIC /D_CRT_SECURE_NO_WARNINGS /DUNICODE /D_UNICODE'.split() if iswindows else ['-DHUNSPELL_STATIC'],
optimize_level=2,
),
Extension('_regex',
['regex/_regex.c', 'regex/_regex_unicode.c'],
headers=['regex/_regex.h'],
optimize_level=2,
),
Extension('speedup',
['calibre/utils/speedup.c'],
),
Extension('_patiencediff_c',
['calibre/gui2/tweak_book/diff/_patiencediff_c.c'],
),
Extension('icu',
['calibre/utils/icu.c'],
headers=['calibre/utils/icu_calibre_utils.h'],
libraries=icu_libs,
lib_dirs=icu_lib_dirs,
inc_dirs=icu_inc_dirs,
cflags=icu_cflags
),
Extension('sqlite_custom',
['calibre/library/sqlite_custom.c'],
inc_dirs=sqlite_inc_dirs
),
Extension('chmlib',
['calibre/utils/chm/swig_chm.c'],
libraries=['ChmLib' if iswindows else 'chm'],
inc_dirs=chmlib_inc_dirs,
lib_dirs=chmlib_lib_dirs,
cflags=["-DSWIG_COBJECT_TYPES"]),
Extension('chm_extra',
['calibre/utils/chm/extra.c'],
libraries=['ChmLib' if iswindows else 'chm'],
inc_dirs=chmlib_inc_dirs,
lib_dirs=chmlib_lib_dirs,
cflags=["-D__PYTHON__"]),
Extension('magick',
['calibre/utils/magick/magick.c'],
headers=['calibre/utils/magick/magick_constants.h'],
libraries=magick_libs,
lib_dirs=magick_lib_dirs,
inc_dirs=magick_inc_dirs,
cflags=['-DMAGICKCORE_QUANTUM_DEPTH=16', '-DMAGICKCORE_HDRI_ENABLE=0']
),
Extension('lzx',
['calibre/utils/lzx/lzxmodule.c',
'calibre/utils/lzx/compressor.c',
'calibre/utils/lzx/lzxd.c',
'calibre/utils/lzx/lzc.c',
'calibre/utils/lzx/lzxc.c'],
headers=['calibre/utils/lzx/msstdint.h',
'calibre/utils/lzx/lzc.h',
'calibre/utils/lzx/lzxmodule.h',
'calibre/utils/lzx/system.h',
'calibre/utils/lzx/lzxc.h',
'calibre/utils/lzx/lzxd.h',
'calibre/utils/lzx/mspack.h'],
inc_dirs=['calibre/utils/lzx']),
Extension('freetype',
['calibre/utils/fonts/freetype.cpp'],
inc_dirs=ft_inc_dirs,
libraries=ft_libs,
lib_dirs=ft_lib_dirs),
Extension('woff',
['calibre/utils/fonts/woff/main.c',
'calibre/utils/fonts/woff/woff.c'],
headers=[
'calibre/utils/fonts/woff/woff.h',
'calibre/utils/fonts/woff/woff-private.h'],
libraries=zlib_libs,
lib_dirs=zlib_lib_dirs,
inc_dirs=zlib_inc_dirs,
),
Extension('msdes',
['calibre/utils/msdes/msdesmodule.c',
'calibre/utils/msdes/des.c'],
headers=['calibre/utils/msdes/spr.h',
'calibre/utils/msdes/d3des.h'],
inc_dirs=['calibre/utils/msdes']),
Extension('cPalmdoc',
['calibre/ebooks/compression/palmdoc.c']),
Extension('bzzdec',
['calibre/ebooks/djvu/bzzdecoder.c'],
inc_dirs=(['calibre/utils/chm'] if iswindows else []) # For stdint.h
),
Extension('matcher',
['calibre/utils/matcher.c'],
headers=['calibre/utils/icu_calibre_utils.h'],
libraries=icu_libs,
lib_dirs=icu_lib_dirs,
cflags=icu_cflags,
inc_dirs=icu_inc_dirs
),
Extension('podofo',
[
'calibre/utils/podofo/utils.cpp',
'calibre/utils/podofo/output.cpp',
'calibre/utils/podofo/doc.cpp',
'calibre/utils/podofo/outline.cpp',
'calibre/utils/podofo/podofo.cpp',
],
headers=[
'calibre/utils/podofo/global.h',
],
libraries=['podofo'],
lib_dirs=[podofo_lib],
inc_dirs=[podofo_inc, os.path.dirname(podofo_inc)],
error=podofo_error),
Extension('pictureflow',
['calibre/gui2/pictureflow/pictureflow.cpp'],
inc_dirs=['calibre/gui2/pictureflow'],
headers=['calibre/gui2/pictureflow/pictureflow.h'],
sip_files=['calibre/gui2/pictureflow/pictureflow.sip']
),
Extension('progress_indicator',
['calibre/gui2/progress_indicator/QProgressIndicator.cpp'],
inc_dirs=['calibre/gui2/progress_indicator'],
headers=['calibre/gui2/progress_indicator/QProgressIndicator.h'],
sip_files=['calibre/gui2/progress_indicator/QProgressIndicator.sip']
),
Extension('qt_hack',
['calibre/ebooks/pdf/render/qt_hack.cpp'],
inc_dirs=qt_private_inc + ['calibre/ebooks/pdf/render', 'qt-harfbuzz/src'],
headers=['calibre/ebooks/pdf/render/qt_hack.h'],
sip_files=['calibre/ebooks/pdf/render/qt_hack.sip']
),
Extension('unrar',
['unrar/%s.cpp'%(x.partition('.')[0]) for x in '''
rar.o strlist.o strfn.o pathfn.o savepos.o smallfn.o global.o file.o
filefn.o filcreat.o archive.o arcread.o unicode.o system.o
isnt.o crypt.o crc.o rawread.o encname.o resource.o match.o
timefn.o rdwrfn.o consio.o options.o ulinks.o errhnd.o rarvm.o
secpassword.o rijndael.o getbits.o sha1.o extinfo.o extract.o
volume.o list.o find.o unpack.o cmddata.o filestr.o scantree.o
'''.split()] + ['calibre/utils/unrar.cpp'],
inc_dirs=['unrar'],
cflags=[('/' if iswindows else '-') + x for x in (
'DSILENT', 'DRARDLL', 'DUNRAR')] + (
[] if iswindows else ['-D_FILE_OFFSET_BITS=64',
'-D_LARGEFILE_SOURCE']),
optimize_level=2,
libraries=['User32', 'Advapi32', 'kernel32', 'Shell32'] if iswindows else []
),
]
if iswindows:
extensions.extend([
Extension('winutil',
['calibre/utils/windows/winutil.c'],
libraries=['shell32', 'setupapi', 'wininet'],
cflags=['/X']
),
Extension('wpd',
[
'calibre/devices/mtp/windows/utils.cpp',
'calibre/devices/mtp/windows/device_enumeration.cpp',
'calibre/devices/mtp/windows/content_enumeration.cpp',
'calibre/devices/mtp/windows/device.cpp',
'calibre/devices/mtp/windows/wpd.cpp',
],
headers=[
'calibre/devices/mtp/windows/global.h',
],
libraries=['ole32', 'oleaut32', 'portabledeviceguids', 'user32'],
# needs_ddk=True,
cflags=['/X']
),
Extension('winfonts',
['calibre/utils/fonts/winfonts.cpp'],
libraries=['Gdi32', 'User32'],
cflags=['/X']
),
])
if isosx:
extensions.append(Extension('usbobserver',
['calibre/devices/usbobserver/usbobserver.c'],
ldflags=['-framework', 'CoreServices', '-framework', 'IOKit'])
)
if islinux or isosx:
extensions.append(Extension('libusb',
['calibre/devices/libusb/libusb.c'],
libraries=['usb-1.0']
))
extensions.append(Extension('libmtp',
[
'calibre/devices/mtp/unix/devices.c',
'calibre/devices/mtp/unix/libmtp.c'
],
headers=[
'calibre/devices/mtp/unix/devices.h',
'calibre/devices/mtp/unix/upstream/music-players.h',
'calibre/devices/mtp/unix/upstream/device-flags.h',
],
libraries=['mtp']
))
if isunix:
cc = os.environ.get('CC', 'gcc')
cxx = os.environ.get('CXX', 'g++')
debug = ''
# debug = '-ggdb'
cflags = os.environ.get('OVERRIDE_CFLAGS',
'-Wall -DNDEBUG %s -fno-strict-aliasing -pipe' % debug)
cflags = shlex.split(cflags) + ['-fPIC']
ldflags = os.environ.get('OVERRIDE_LDFLAGS', '-Wall')
ldflags = shlex.split(ldflags)
cflags += shlex.split(os.environ.get('CFLAGS', ''))
ldflags += shlex.split(os.environ.get('LDFLAGS', ''))
if islinux:
cflags.append('-pthread')
ldflags.append('-shared')
cflags.append('-I'+sysconfig.get_python_inc())
ldflags.append('-lpython'+sysconfig.get_python_version())
if isbsd:
cflags.append('-pthread')
ldflags.append('-shared')
cflags.append('-I'+sysconfig.get_python_inc())
ldflags.append('-lpython'+sysconfig.get_python_version())
if isosx:
x, p = ('i386', 'x86_64')
archs = ['-arch', x, '-arch', p, '-isysroot',
OSX_SDK]
cflags.append('-D_OSX')
cflags.extend(archs)
ldflags.extend(archs)
ldflags.extend('-bundle -undefined dynamic_lookup'.split())
cflags.extend(['-fno-common', '-dynamic'])
cflags.append('-I'+sysconfig.get_python_inc())
if iswindows:
cc = cxx = msvc.cc
cflags = '/c /nologo /MD /W3 /EHsc /DNDEBUG'.split()
ldflags = '/DLL /nologo /INCREMENTAL:NO /NODEFAULTLIB:libcmt.lib'.split()
#cflags = '/c /nologo /Ox /MD /W3 /EHsc /Zi'.split()
#ldflags = '/DLL /nologo /INCREMENTAL:NO /DEBUG'.split()
if is64bit:
cflags.append('/GS-')
for p in win_inc:
cflags.append('-I'+p)
for p in win_lib:
ldflags.append('/LIBPATH:'+p)
cflags.append('-I%s'%sysconfig.get_python_inc())
ldflags.append('/LIBPATH:'+os.path.join(sysconfig.PREFIX, 'libs'))
class Build(Command):
short_description = 'Build calibre C/C++ extension modules'
description = textwrap.dedent('''\
calibre depends on several python extensions written in C/C++.
This command will compile them. You can influence the compile
        process via several environment variables, listed below:
        CC - C Compiler, defaults to gcc
CXX - C++ Compiler, defaults to g++
CFLAGS - Extra compiler flags
LDFLAGS - Extra linker flags
POPPLER_INC_DIR - poppler header files
POPPLER_LIB_DIR - poppler-qt4 library
PODOFO_INC_DIR - podofo header files
PODOFO_LIB_DIR - podofo library files
QMAKE - Path to qmake
VS90COMNTOOLS - Location of Microsoft Visual Studio 9 Tools (windows only)
''')
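    # Example invocations (assumed; run from the root of a calibre source
    # checkout):
    #   python setup.py build                  # build every extension
    #   python setup.py build --only=podofo    # build a single extension
    #   CC=clang CFLAGS=-g python setup.py build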
def add_options(self, parser):
choices = [e.name for e in extensions]+['all', 'style']
parser.add_option('-1', '--only', choices=choices, default='all',
help=('Build only the named extension. Available: '+
                    ', '.join(choices)+'. Default: %default'))
parser.add_option('--no-compile', default=False, action='store_true',
help='Skip compiling all C/C++ extensions.')
def run(self, opts):
if opts.no_compile:
self.info('--no-compile specified, skipping compilation')
return
self.obj_dir = os.path.join(os.path.dirname(SRC), 'build', 'objects')
if not os.path.exists(self.obj_dir):
os.makedirs(self.obj_dir)
if opts.only in {'all', 'style'}:
self.build_style(self.j(self.SRC, 'calibre', 'plugins'))
for ext in extensions:
if opts.only != 'all' and opts.only != ext.name:
continue
if ext.error is not None:
if ext.optional:
self.warn(ext.error)
continue
else:
raise Exception(ext.error)
dest = self.dest(ext)
if not os.path.exists(self.d(dest)):
os.makedirs(self.d(dest))
self.info('\n####### Building extension', ext.name, '#'*7)
self.build(ext, dest)
def dest(self, ext):
ex = '.pyd' if iswindows else '.so'
return os.path.join(SRC, 'calibre', 'plugins', ext.name)+ex
def inc_dirs_to_cflags(self, dirs):
return ['-I'+x for x in dirs]
def lib_dirs_to_ldflags(self, dirs):
pref = '/LIBPATH:' if iswindows else '-L'
return [pref+x for x in dirs]
def libraries_to_ldflags(self, dirs):
pref = '' if iswindows else '-l'
suff = '.lib' if iswindows else ''
return [pref+x+suff for x in dirs]
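    # For example (hypothetical inputs):
    #   inc_dirs_to_cflags(['/opt/include']) -> ['-I/opt/include']
    #   lib_dirs_to_ldflags(['/opt/lib'])    -> ['-L/opt/lib'] on unix,
    #                                           ['/LIBPATH:/opt/lib'] on windows
    #   libraries_to_ldflags(['podofo'])     -> ['-lpodofo'] on unix,
    #                                           ['podofo.lib'] on windows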
def build(self, ext, dest):
if ext.sip_files:
return self.build_pyqt_extension(ext, dest)
compiler = cxx if ext.needs_cxx else cc
linker = msvc.linker if iswindows else compiler
objects = []
obj_dir = self.j(self.obj_dir, ext.name)
ext.preflight(obj_dir, compiler, linker, self, cflags, ldflags)
einc = self.inc_dirs_to_cflags(ext.inc_dirs)
if ext.needs_ddk:
ddk_flags = ['-I'+x for x in win_ddk]
cflags.extend(ddk_flags)
ldflags.extend(['/LIBPATH:'+x for x in win_ddk_lib_dirs])
if not os.path.exists(obj_dir):
os.makedirs(obj_dir)
for src in ext.sources:
obj = self.j(obj_dir, os.path.splitext(self.b(src))[0]+'.o')
objects.append(obj)
if self.newer(obj, [src]+ext.headers):
inf = '/Tp' if src.endswith('.cpp') or src.endswith('.cxx') else '/Tc'
sinc = [inf+src] if iswindows else ['-c', src]
oinc = ['/Fo'+obj] if iswindows else ['-o', obj]
cmd = [compiler] + cflags + ext.cflags + einc + sinc + oinc
self.info(' '.join(cmd))
self.check_call(cmd)
dest = self.dest(ext)
elib = self.lib_dirs_to_ldflags(ext.lib_dirs)
xlib = self.libraries_to_ldflags(ext.libraries)
if self.newer(dest, objects+ext.extra_objs):
print 'Linking', ext.name
cmd = [linker]
if iswindows:
cmd += ldflags + ext.ldflags + elib + xlib + \
['/EXPORT:init'+ext.name] + objects + ext.extra_objs + ['/OUT:'+dest]
else:
cmd += objects + ext.extra_objs + ['-o', dest] + ldflags + ext.ldflags + elib + xlib
self.info('\n\n', ' '.join(cmd), '\n\n')
self.check_call(cmd)
if iswindows:
            # manifest = dest+'.manifest'
            # cmd = [MT, '-manifest', manifest, '-outputresource:%s;2'%dest]
            # self.info(*cmd)
            # self.check_call(cmd)
            # os.remove(manifest)
for x in ('.exp', '.lib'):
x = os.path.splitext(dest)[0]+x
if os.path.exists(x):
os.remove(x)
def check_call(self, *args, **kwargs):
"""print cmdline if an error occured
If something is missing (qmake e.g.) you get a non-informative error
self.check_call(qmc + [ext.name+'.pro'])
so you would have to look a the source to see the actual command.
"""
try:
subprocess.check_call(*args, **kwargs)
except:
cmdline = ' '.join(['"%s"' % (arg) if ' ' in arg else arg for arg in args[0]])
print "Error while executing: %s\n" % (cmdline)
raise
def build_style(self, dest):
self.info('\n####### Building calibre style', '#'*7)
sdir = self.j(self.SRC, 'qtcurve')
def path(x):
x=self.j(sdir, x)
return ('"%s"'%x).replace(os.sep, '/')
headers = [
"common/colorutils.h",
"common/common.h",
"common/config_file.h",
"style/blurhelper.h",
"style/fixx11h.h",
"style/pixmaps.h",
"style/qtcurve.h",
"style/shortcuthandler.h",
"style/utils.h",
"style/windowmanager.h",
]
sources = [
"common/colorutils.c",
"common/common.c",
"common/config_file.c",
"style/blurhelper.cpp",
"style/qtcurve.cpp",
"style/shortcuthandler.cpp",
"style/utils.cpp",
"style/windowmanager.cpp",
]
if not iswindows and not isosx:
headers.append("style/shadowhelper.h")
sources.append('style/shadowhelper.cpp')
pro = textwrap.dedent('''
TEMPLATE = lib
CONFIG += qt plugin release
CONFIG -= embed_manifest_dll
VERSION = 1.0.0
DESTDIR = .
TARGET = calibre
QT *= svg
INCLUDEPATH *= {conf} {inc}
win32-msvc*:DEFINES *= _CRT_SECURE_NO_WARNINGS
# Force C++ language
*g++*:QMAKE_CFLAGS *= -x c++
*msvc*:QMAKE_CFLAGS *= -TP
*msvc*:QMAKE_CXXFLAGS += /MP
''').format(conf=path(''), inc=path('common'))
if isosx:
pro += '\nCONFIG += x86 x86_64\n'
else:
pro += '\nunix:QT *= dbus\n'
for x in headers:
pro += 'HEADERS += %s\n'%path(x)
for x in sources:
pro += 'SOURCES += %s\n'%path(x)
odir = self.j(self.d(self.SRC), 'build', 'qtcurve')
if not os.path.exists(odir):
os.makedirs(odir)
ocwd = os.getcwdu()
os.chdir(odir)
try:
if not os.path.exists('qtcurve.pro') or (open('qtcurve.pro',
'rb').read() != pro):
with open('qtcurve.pro', 'wb') as f:
f.write(pro)
qmc = [QMAKE, '-o', 'Makefile']
if iswindows:
qmc += ['-spec', 'win32-msvc2008']
self.check_call(qmc + ['qtcurve.pro'])
self.check_call([make]+([] if iswindows else ['-j%d'%(cpu_count()
or 1)]))
src = (glob.glob('*.so') + glob.glob('release/*.dll') +
glob.glob('*.dylib'))
ext = 'pyd' if iswindows else 'so'
if not os.path.exists(dest):
os.makedirs(dest)
shutil.copy2(src[0], self.j(dest, 'calibre_style.'+ext))
finally:
os.chdir(ocwd)
def build_qt_objects(self, ext):
obj_pat = 'release\\*.obj' if iswindows else '*.o'
objects = glob.glob(obj_pat)
if not objects or self.newer(objects, ext.sources+ext.headers):
archs = 'x86 x86_64'
pro = textwrap.dedent('''\
TARGET = %s
TEMPLATE = lib
HEADERS = %s
SOURCES = %s
VERSION = 1.0.0
CONFIG += %s
''')%(ext.name, ' '.join(ext.headers), ' '.join(ext.sources), archs)
if ext.inc_dirs:
idir = ' '.join(ext.inc_dirs)
pro += 'INCLUDEPATH = %s\n'%idir
pro = pro.replace('\\', '\\\\')
open(ext.name+'.pro', 'wb').write(pro)
qmc = [QMAKE, '-o', 'Makefile']
if iswindows:
qmc += ['-spec', 'win32-msvc2008']
self.check_call(qmc + [ext.name+'.pro'])
self.check_call([make, '-f', 'Makefile'])
objects = glob.glob(obj_pat)
return list(map(self.a, objects))
def build_pyqt_extension(self, ext, dest):
pyqt_dir = self.j(self.d(self.SRC), 'build', 'pyqt')
src_dir = self.j(pyqt_dir, ext.name)
qt_dir = self.j(src_dir, 'qt')
if not self.e(qt_dir):
os.makedirs(qt_dir)
cwd = os.getcwd()
try:
os.chdir(qt_dir)
qt_objects = self.build_qt_objects(ext)
finally:
os.chdir(cwd)
sip_files = ext.sip_files
ext.sip_files = []
sipf = sip_files[0]
sbf = self.j(src_dir, self.b(sipf)+'.sbf')
if self.newer(sbf, [sipf]+ext.headers):
exe = '.exe' if iswindows else ''
cmd = [pyqt.sip_bin+exe, '-w', '-c', src_dir, '-b', sbf, '-I'+
pyqt.pyqt_sip_dir] + shlex.split(pyqt.pyqt_sip_flags) + [sipf]
self.info(' '.join(cmd))
self.check_call(cmd)
module = self.j(src_dir, self.b(dest))
if self.newer(dest, [sbf]+qt_objects):
mf = self.j(src_dir, 'Makefile')
makefile = QtGuiModuleMakefile(configuration=pyqt, build_file=sbf,
makefile=mf, universal=OSX_SDK, qt=1)
makefile.extra_lflags = qt_objects
makefile.extra_include_dirs = ext.inc_dirs
makefile.generate()
self.check_call([make, '-f', mf], cwd=src_dir)
shutil.copy2(module, dest)
def clean(self):
for ext in extensions:
dest = self.dest(ext)
for x in (dest, dest+'.manifest'):
if os.path.exists(x):
os.remove(x)
build_dir = self.j(self.d(self.SRC), 'build')
if os.path.exists(build_dir):
shutil.rmtree(build_dir)
| insomnia-lab/calibre | setup/extensions.py | Python | gpl-3.0 | 24,319 | 0.005222 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for rappor.xml.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def CheckChange(input_api, output_api):
"""Checks that rappor.xml is pretty-printed and well-formatted."""
for f in input_api.AffectedTextFiles():
p = f.AbsoluteLocalPath()
if (input_api.basename(p) == 'rappor.xml'
and input_api.os_path.dirname(p) == input_api.PresubmitLocalPath()):
cwd = input_api.os_path.dirname(p)
exit_code = input_api.subprocess.call(
['python', 'pretty_print.py', '--presubmit'], cwd=cwd)
if exit_code != 0:
return [output_api.PresubmitError(
'rappor.xml is not formatted correctly; run pretty_print.py '
'to fix')]
return []
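# A failing check can usually be fixed by running the pretty printer
# directly (assumed invocation, from this directory):
#   python pretty_print.py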
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
| guorendong/iridium-browser-ubuntu | tools/metrics/rappor/PRESUBMIT.py | Python | bsd-3-clause | 1,156 | 0.007785 |