code (string, 3-1.05M) | repo_name (string, 5-104) | path (string, 4-251) | language (1 class) | license (15 classes) | size (int64, 3-1.05M)
---|---|---|---|---|---|
#! /usr/bin/env python
#
# __init__.py ---
#
# Filename: __init__.py
# Description:
# Author: Werther Zhang
# Maintainer:
# Created: Sat Oct 21 20:23:38 2017 (+0800)
#
# Change Log:
#
#
from filemanager import FileManager
| pengzhangdev/slackbot | slackbot/plugins/component/filemanager/__init__.py | Python | mit | 224 |
from datetime import datetime
import requests
from django.db import models
from django.db.models.signals import post_save
from django.contrib.auth import user_logged_in
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.conf import settings
from foundry.models import Member
# Get imports and errors right for json/simplejson
try:
import simplejson as json
except ImportError:
import json
try:
JSONDecodeError = json.JSONDecodeError
except AttributeError:
JSONDecodeError = ValueError
# Janrain client id and secret
JANRAIN_URL = settings.JANRAIN_URL
STATUS_SYNCED = 1
STATUS_DIRTY = 2
STATUS_CONFLICT = 4
base_payload = {'client_id': settings.JANRAIN_CLIENT_ID,
'client_secret': settings.JANRAIN_CLIENT_SECRET,
'type_name': 'user',
}
# Mapping user and profile fields to Janrain equivalents. Each tuple is in the
# form (jmbo_name, janrain_name).
field_mappings = (
('first_name', 'givenName'),
('last_name', 'familyName'),
('email', 'email'),
('username', 'displayName'),
)
# TODO: Provide a way for the field mappings to be overridden from settings.py
def map_user_to_janrain(user):
"""
Given a user object, provide a dictionary of mapped fields and values to
give to Janrain to update the user.
"""
maps = {x[0]:x[1] for x in field_mappings}
attributes = {}
user_fields = ('first_name', 'last_name', 'username', 'email')
for field in user_fields:
value = getattr(user, field, None)
if value:
attributes[maps[field]] = value
return attributes
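# Illustrative mapping (hypothetical user object, values made up): a user with
# first_name='Jane', last_name='Doe', username='jdoe', email='jane@example.com'
# would be mapped to
#   {'givenName': 'Jane', 'familyName': 'Doe',
#    'displayName': 'jdoe', 'email': 'jane@example.com'}
# Fields that are empty on the user are skipped by the `if value` check above.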
#TODO: This is for reference only. Remove.
"""
first_name = models.CharField(_('first name'), max_length=30, blank=True)
username = models.CharField(_('username'), max_length=30, unique=True,
help_text=_('Required. 30 characters or fewer. Letters, numbers and '
'@/./+/-/_ characters'))
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('e-mail address'), blank=True)
last_login = models.DateTimeField(_('last login'), default=timezone.now)
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
from prefs:
last_login
date_joined
image
date_taken
view_count
crop_from
effect
facebook_id
twitter_username
address
city
zipcode
province
dob
gender
about_me
mobile_number
receive_sms
receive_email
country
is_profile_complete
"""
@receiver(user_logged_in)
def on_user_logged_in(sender, **kwargs):
"""
Receiver for when the user logs in. It's placed here on recommendation of
the Django docs.
"""
print 'user logged in'
user = kwargs['user']
# Get or create the user's Janrain profile
janrain_profile, created = JanrainProfile.objects.get_or_create(user=user)
if not janrain_profile:
janrain_profile = JanrainProfile(user=user)
# TODO: The following parts may be slow, since the calls are synchronous.
# Consider queueing.
# Try to find a user on Janrain to tie to our own user.
# If no corresponding Janrain user exists, create one.
# TODO: For now, we don't try to tie these up. We just create a user if we
# don't have a uuid.
payload = base_payload.copy()
if janrain_profile.janrain_uuid:
# Just update the last logged in time on Janrain. TODO
pass
else:
# Create a Janrain user.
user_attributes = map_user_to_janrain(user)
        payload['attributes'] = json.dumps(user_attributes)
response = requests.post("%s/entity.create" % JANRAIN_URL, data=payload)
# TODO: Handle response codes other than 200
struct = json.loads(response.content)
if struct['stat'] == 'ok':
janrain_profile.janrain_uuid = struct['uuid']
janrain_profile.save()
# Populate the profile of the logged-in user, if possible.
@receiver(post_save, sender=Member)
def on_user_profile_saved(sender, **kwargs):
"""
Receiver for when the user profile is saved. Push to Janrain.
"""
# On initial profile save during registration, just return.
janrain_profiles = JanrainProfile.objects.filter(user=kwargs['instance'])
if len(janrain_profiles) == 0:
print 'Initial profile save'
return
janrain_profile = janrain_profiles[0]
# The dirty status is set on profile form save. This prevents spurious
# overwrites and allows us to retrigger an update if this fails.
if janrain_profile.status == STATUS_DIRTY:
payload = base_payload.copy()
payload['uuid'] = janrain_profile.janrain_uuid
user_attributes = map_user_to_janrain(janrain_profile.user)
payload['value'] = json.dumps(user_attributes)
response = requests.post("%s/entity.update" % JANRAIN_URL, data=payload)
# TODO: Handle response codes other than 200
struct = json.loads(response.content)
if struct['stat'] == 'ok':
janrain_profile.status = STATUS_SYNCED
janrain_profile.last_synced = datetime.now()
#TODO: last synced time set
janrain_profile.save()
class JanrainProfile(models.Model):
user = models.ForeignKey(
User,
unique=True,
)
janrain_uuid = models.CharField(
max_length=128,
blank=True,
null=True,
)
last_synced = models.DateTimeField(
auto_now_add=True,
null=True,
)
# Status indicator. Might be refactored to flags.
status = models.PositiveIntegerField(
default=0,
)
def __unicode__(self):
return self.user.username
| praekelt/jmbo-janrain | janrain/models.py | Python | bsd-3-clause | 5,684 |
from setuptools import setup, find_packages
import os
HERE = os.path.abspath(os.path.dirname(__file__))
def get_long_description():
dirs = [HERE]
if os.getenv("TRAVIS"):
dirs.append(os.getenv("TRAVIS_BUILD_DIR"))
long_description = ""
for d in dirs:
rst_readme = os.path.join(d, "README.rst")
if not os.path.exists(rst_readme):
continue
with open(rst_readme) as fp:
long_description = fp.read()
return long_description
return long_description
long_description = get_long_description()
version = "0.2.18"
setup(
name="funcserver",
version=version,
description="Simple and opiniated way to build APIs in Python",
long_description=long_description,
keywords="funcserver",
author="Deep Compute, LLC",
author_email="[email protected]",
url="https://github.com/deep-compute/funcserver",
download_url="https://github.com/deep-compute/funcserver/tarball/%s" % version,
license="MIT License",
install_requires=[
"statsd==3.2.2",
"requests==2.20.0",
"tornado==5.0.1",
"msgpack-python==0.5.6",
"basescript==0.2.2",
],
package_dir={"funcserver": "funcserver"},
packages=find_packages("."),
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 2.7",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
],
)
| deep-compute/funcserver | setup.py | Python | mit | 1,512 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-14 16:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0005_auto_20170614_1607'),
('paypal', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='PaymentRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('paymentId', models.CharField(max_length=50, unique=True, verbose_name='Paypal Payment ID')),
('payerId', models.CharField(blank=True, max_length=50, null=True, verbose_name='Paypal Payer ID')),
('status', models.CharField(blank=True, max_length=30, null=True, verbose_name='Current status')),
('creationDate', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('modifiedDate', models.DateTimeField(auto_now=True, verbose_name='Last updated')),
],
),
migrations.RemoveField(
model_name='ipncartitem',
name='ipn',
),
migrations.RemoveField(
model_name='ipncartitem',
name='revenueItem',
),
migrations.RemoveField(
model_name='ipnmessage',
name='finalRegistration',
),
migrations.RemoveField(
model_name='ipnmessage',
name='paypalInvoice',
),
migrations.RemoveField(
model_name='ipnmessage',
name='priorTransaction',
),
migrations.RemoveField(
model_name='ipnmessage',
name='registration',
),
migrations.RemoveField(
model_name='paynowformmodel',
name='cancellationPage',
),
migrations.DeleteModel(
name='Invoice',
),
migrations.DeleteModel(
name='IPNCartItem',
),
migrations.DeleteModel(
name='IPNMessage',
),
migrations.AddField(
model_name='paymentrecord',
name='invoice',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='paypalpayments', to='core.Invoice', verbose_name='Invoice'),
),
]
| django-danceschool/django-danceschool | danceschool/payments/paypal/migrations/0002_auto_20170614_1607.py | Python | bsd-3-clause | 2,443 |
"""Support for Brunt Blind Engine covers."""
from __future__ import annotations
from collections.abc import MutableMapping
import logging
from typing import Any
from aiohttp.client_exceptions import ClientResponseError
from brunt import BruntClientAsync, Thing
from homeassistant.components.cover import (
ATTR_POSITION,
DEVICE_CLASS_SHADE,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
CoverEntity,
)
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import (
ATTR_REQUEST_POSITION,
ATTRIBUTION,
CLOSED_POSITION,
DATA_BAPI,
DATA_COOR,
DOMAIN,
FAST_INTERVAL,
OPEN_POSITION,
REGULAR_INTERVAL,
)
_LOGGER = logging.getLogger(__name__)
COVER_FEATURES = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_SET_POSITION
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Component setup, run import config flow for each entry in config."""
_LOGGER.warning(
"Loading brunt via platform config is deprecated; The configuration has been migrated to a config entry and can be safely removed from configuration.yaml"
)
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=config
)
)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the brunt platform."""
bapi: BruntClientAsync = hass.data[DOMAIN][entry.entry_id][DATA_BAPI]
coordinator: DataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id][DATA_COOR]
async_add_entities(
BruntDevice(coordinator, serial, thing, bapi, entry.entry_id)
for serial, thing in coordinator.data.items()
)
class BruntDevice(CoordinatorEntity, CoverEntity):
"""
Representation of a Brunt cover device.
Contains the common logic for all Brunt devices.
"""
def __init__(
self,
coordinator: DataUpdateCoordinator,
serial: str,
thing: Thing,
bapi: BruntClientAsync,
entry_id: str,
) -> None:
"""Init the Brunt device."""
super().__init__(coordinator)
self._attr_unique_id = serial
self._bapi = bapi
self._thing = thing
self._entry_id = entry_id
self._remove_update_listener = None
self._attr_name = self._thing.name
self._attr_device_class = DEVICE_CLASS_SHADE
self._attr_supported_features = COVER_FEATURES
self._attr_attribution = ATTRIBUTION
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, self._attr_unique_id)},
name=self._attr_name,
via_device=(DOMAIN, self._entry_id),
manufacturer="Brunt",
sw_version=self._thing.fw_version,
model=self._thing.model,
)
async def async_added_to_hass(self) -> None:
"""When entity is added to hass."""
await super().async_added_to_hass()
self.async_on_remove(
self.coordinator.async_add_listener(self._brunt_update_listener)
)
@property
def current_cover_position(self) -> int | None:
"""
Return current position of cover.
None is unknown, 0 is closed, 100 is fully open.
"""
return self.coordinator.data[self.unique_id].current_position
@property
def request_cover_position(self) -> int | None:
"""
        Return the requested position of the cover.
        The request position is the position sent in the last request
        to Brunt; at times it differs from the current position by one.
        None is unknown, 0 is closed, 100 is fully open.
"""
return self.coordinator.data[self.unique_id].request_position
@property
def move_state(self) -> int | None:
"""
Return current moving state of cover.
None is unknown, 0 when stopped, 1 when opening, 2 when closing
"""
return self.coordinator.data[self.unique_id].move_state
@property
def is_opening(self) -> bool:
"""Return if the cover is opening or not."""
return self.move_state == 1
@property
def is_closing(self) -> bool:
"""Return if the cover is closing or not."""
return self.move_state == 2
@property
def extra_state_attributes(self) -> MutableMapping[str, Any]:
"""Return the detailed device state attributes."""
return {
ATTR_REQUEST_POSITION: self.request_cover_position,
}
@property
def is_closed(self) -> bool:
"""Return true if cover is closed, else False."""
return self.current_cover_position == CLOSED_POSITION
async def async_open_cover(self, **kwargs: Any) -> None:
"""Set the cover to the open position."""
await self._async_update_cover(OPEN_POSITION)
async def async_close_cover(self, **kwargs: Any) -> None:
"""Set the cover to the closed position."""
await self._async_update_cover(CLOSED_POSITION)
async def async_set_cover_position(self, **kwargs: Any) -> None:
"""Set the cover to a specific position."""
await self._async_update_cover(int(kwargs[ATTR_POSITION]))
async def _async_update_cover(self, position: int) -> None:
"""Set the cover to the new position and wait for the update to be reflected."""
try:
await self._bapi.async_change_request_position(
position, thing_uri=self._thing.thing_uri
)
except ClientResponseError as exc:
raise HomeAssistantError(
f"Unable to reposition {self._thing.name}"
) from exc
self.coordinator.update_interval = FAST_INTERVAL
await self.coordinator.async_request_refresh()
@callback
def _brunt_update_listener(self) -> None:
"""Update the update interval after each refresh."""
if (
self.request_cover_position
== self._bapi.last_requested_positions[self._thing.thing_uri]
and self.move_state == 0
):
self.coordinator.update_interval = REGULAR_INTERVAL
else:
self.coordinator.update_interval = FAST_INTERVAL
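# Polling note (summary of the listener above): after a position change the
# coordinator polls at FAST_INTERVAL until the reported request position
# matches the last requested position and move_state is 0, at which point the
# listener drops the interval back to REGULAR_INTERVAL.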
| jawilson/home-assistant | homeassistant/components/brunt/cover.py | Python | apache-2.0 | 6,797 |
from rig.place_and_route import Cores, SDRAM
import struct
from nengo_spinnaker.builder.ports import InputPort
from nengo_spinnaker.builder.netlist import netlistspec
from nengo_spinnaker.netlist import Vertex
from nengo_spinnaker.regions import Region
from nengo_spinnaker.regions.filters import make_filter_regions
from nengo_spinnaker.regions import utils as region_utils
from nengo_spinnaker.utils.application import get_application
class SDPTransmitter(object):
"""An operator which receives multicast packets, performs filtering and
transmits the filtered vector as an SDP packet.
"""
def __init__(self, size_in):
self.size_in = size_in
self._vertex = None
self._sys_region = None
self._filter_region = None
self._routing_region = None
def make_vertices(self, model, *args, **kwargs):
"""Create vertices that will simulate the SDPTransmitter."""
# Build the system region
self._sys_region = SystemRegion(model.machine_timestep,
self.size_in, 1)
# Build the filter regions
in_sigs = model.get_signals_to_object(self)[InputPort.standard]
self._filter_region, self._routing_region = make_filter_regions(
in_sigs, model.dt, True, model.keyspaces.filter_routing_tag)
# Get the resources
resources = {
Cores: 1,
SDRAM: region_utils.sizeof_regions(
[self._sys_region, self._filter_region, self._routing_region],
None
)
}
# Create the vertex
self._vertex = Vertex(get_application("tx"), resources)
# Return the netlist specification
return netlistspec((self._vertex, ), # Tuple is required
load_function=self.load_to_machine)
def get_signal_constraints(self):
"""Return a set of constraints on which signal parameters may share the
same keyspace.
Returns
-------
{id(SignalParameters): {id(SignalParameters), ...}}
A (moderately unpleasant) dictionary of which signal parameters
cannot share a routing identifier.
"""
return self._routing_region.get_signal_constraints()
def load_to_machine(self, netlist, controller):
"""Load data to the machine."""
# Prepare the filter routing region
self._routing_region.build_routes(minimise=True)
# Get the memory
sys_mem, filter_mem, routing_mem = \
region_utils.create_app_ptr_and_region_files(
netlist.vertices_memory[self._vertex],
[self._sys_region,
self._filter_region,
self._routing_region],
None
)
# Write the regions into memory
self._sys_region.write_region_to_file(sys_mem)
self._filter_region.write_subregion_to_file(filter_mem)
self._routing_region.write_subregion_to_file(routing_mem)
class SystemRegion(Region):
"""System region for SDP Tx."""
def __init__(self, machine_timestep, size_in, delay):
self.machine_timestep = machine_timestep
self.size_in = size_in
self.transmission_delay = delay
def sizeof(self, *args, **kwargs):
return 12
def write_region_to_file(self, fp, *args, **kwargs):
"""Write the region to file."""
fp.write(struct.pack("<3I", self.size_in, self.machine_timestep,
self.transmission_delay))
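# Worked example of the region layout (values are illustrative only):
# SystemRegion(machine_timestep=1000, size_in=16, delay=1) writes
# struct.pack("<3I", 16, 1000, 1) -- three little-endian uint32 words,
# matching the 12 bytes reported by sizeof().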
| project-rig/nengo_spinnaker | nengo_spinnaker/operators/sdp_transmitter.py | Python | mit | 3,555 |
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 18 10:21:22 2016
@author: Wasit
"""
import numpy as np
import cv2
import os
cmax=800
rmax=600
dataset_path=os.path.join('dataset')
r=5
cmap=np.array([
( 0 , 255, 255, ),
( 64 , 177, 255, ),
( 94 , 210, 94 , ),
( 40 , 39 , 214, ),
( 150 , 0 , 255, ),
( 0 , 255, 0 , ),
( 194, 119, 227, ),
( 255, 0 , 0 , ),
( 34 , 189, 188, ),
( 207, 190, 23 , ),])
def callback(event,x,y,flags,param):
#print "event: %s"%event
#print "(x, y), (flag param): (%d %d) (%d %s)"% (x, y,flags,param)
#print cl.shape
if flags==cv2.EVENT_FLAG_LBUTTON + cv2.EVENT_FLAG_CTRLKEY:
for i in xrange(-r,r):
for j in xrange(-r,r):
if np.sqrt( i**2 + j**2 )<r:
cl[y+j,x+i]=cc
ol[y+j,x+i] = cmap[cc-1]
if flags==cv2.EVENT_FLAG_LBUTTON + cv2.EVENT_FLAG_ALTKEY:
r2=2*r
for i in xrange(-r2,r2):
for j in xrange(-r2,r2):
if np.sqrt( i**2 + j**2 )<r2:
cl[y+j,x+i]=0
ol[y+j,x+i] = im[y+j,x+i]
#cv2.imshow('class',cl)
if event==cv2.EVENT_LBUTTONDOWN:
for i in xrange(-r,r):
for j in xrange(-r,r):
if np.sqrt( i**2 + j**2 )<r:
cl[y+j,x+i]=cc
ol[y+j,x+i] = cmap[cc-1]
cv2.imshow(window_name,ol)
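# Painting controls handled by the callback above: Ctrl + left-drag paints the
# current class `cc` in a disc of radius r, Alt + left-drag erases (class 0)
# with radius 2*r, and a plain left click paints a single disc of radius r.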
def reload(flist,index):
oim=cv2.imread(flist[index])#BGR
#height, width, depth = im.shape
#im=cv2.resize(im, (width/2, height/2),interpolation = cv2.INTER_CUBIC)
im=cv2.resize(oim, (cmax,rmax),interpolation = cv2.INTER_CUBIC)
ol=im.copy()
ocl=cv2.imread(flist[index][:-3]+'png',0)
#print cl.shape
if ocl is None:# not found
cl=np.zeros( (im.shape[0],im.shape[1]),dtype=np.uint8 )
else:
cl=cv2.resize(ocl, (cmax,rmax),interpolation = cv2.INTER_NEAREST )
for x in xrange(cl.shape[1]):
for y in xrange(cl.shape[0]):
if cl[y,x]:
ol[y,x] = cmap[cl[y,x]-1]
return flist[index],im,cl,ol
if __name__=='__main__':
import os, fnmatch
fname=fnmatch.filter(os.listdir(dataset_path), '*.jpg')
flist=[os.path.join(dataset_path,i) for i in fname]
index=0
filename,im,cl,ol=reload(flist, index)
window_name="algae_marker"
cv2.imshow(window_name, ol)
#cv2.imshow('class',255*cl)
cv2.setMouseCallback(window_name,callback)
#Upkey : 2490368
#DownKey : 2621440
#LeftKey : 2424832
#RightKey: 2555904
key=''
cc=1
while key!= 27:
key=cv2.waitKey(25)
if key==48:
cc=10
print "class: %d"%cc
elif 48<key<=57:
cc=key-48
print "class: %d"%cc
elif key == 2424832:
index=(index+len(flist)-1)%len(flist)
filename,im,cl,ol=reload(flist, index)
cv2.imshow(window_name, ol)
print filename
elif key ==2555904:
index=(index+len(flist)+1)%len(flist)
filename,im,cl,ol=reload(flist, index)
cv2.imshow(window_name, ol)
print filename
elif key == ord(' '):
f=filename[:-3]+'png'
cv2.imwrite(f,cl)
print '--saved to %s'%f
cv2.destroyAllWindows() | wasit7/algae2 | shrimp/beta/marker/algae_mark.py | Python | gpl-2.0 | 3,445 |
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pipeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from magenta.common import testing_lib
from magenta.pipelines import pipeline
from magenta.pipelines import statistics
import tensorflow as tf
MockStringProto = testing_lib.MockStringProto # pylint: disable=invalid-name
class MockPipeline(pipeline.Pipeline):
def __init__(self):
super(MockPipeline, self).__init__(
input_type=str,
output_type={'dataset_1': MockStringProto,
'dataset_2': MockStringProto})
def transform(self, input_object):
return {
'dataset_1': [
MockStringProto(input_object + '_A'),
MockStringProto(input_object + '_B')],
'dataset_2': [MockStringProto(input_object + '_C')]}
class PipelineTest(tf.test.TestCase):
def testFileIteratorRecursive(self):
target_files = [
('0.ext', b'hello world'),
('a/1.ext', b'123456'),
('a/2.ext', b'abcd'),
('b/c/3.ext', b'9999'),
('b/z/3.ext', b'qwerty'),
('d/4.ext', b'mary had a little lamb'),
('d/e/5.ext', b'zzzzzzzz'),
('d/e/f/g/6.ext', b'yyyyyyyyyyy')]
extra_files = [
('stuff.txt', b'some stuff'),
('a/q/r/file', b'more stuff')]
root_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
for path, contents in target_files + extra_files:
abs_path = os.path.join(root_dir, path)
tf.gfile.MakeDirs(os.path.dirname(abs_path))
tf.gfile.GFile(abs_path, mode='w').write(contents)
file_iterator = pipeline.file_iterator(root_dir, 'ext', recurse=True)
self.assertEqual(set(contents for _, contents in target_files),
set(file_iterator))
def testFileIteratorNotRecursive(self):
target_files = [
('0.ext', b'hello world'),
('1.ext', b'hi')]
extra_files = [
('a/1.ext', b'123456'),
('a/2.ext', b'abcd'),
('b/c/3.ext', b'9999'),
('d/e/5.ext', b'zzzzzzzz'),
('d/e/f/g/6.ext', b'yyyyyyyyyyy'),
('stuff.txt', b'some stuff'),
('a/q/r/file', b'more stuff')]
root_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
for path, contents in target_files + extra_files:
abs_path = os.path.join(root_dir, path)
tf.gfile.MakeDirs(os.path.dirname(abs_path))
tf.gfile.GFile(abs_path, mode='w').write(contents)
file_iterator = pipeline.file_iterator(root_dir, 'ext', recurse=False)
self.assertEqual(set(contents for _, contents in target_files),
set(file_iterator))
def testTFRecordIterator(self):
tfrecord_file = os.path.join(
tf.resource_loader.get_data_files_path(),
'../testdata/tfrecord_iterator_test.tfrecord')
self.assertEqual(
[MockStringProto(string)
for string in [b'hello world', b'12345', b'success']],
list(pipeline.tf_record_iterator(tfrecord_file, MockStringProto)))
def testRunPipelineSerial(self):
strings = ['abcdefg', 'helloworld!', 'qwerty']
root_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
pipeline.run_pipeline_serial(
MockPipeline(), iter(strings), root_dir)
dataset_1_dir = os.path.join(root_dir, 'dataset_1.tfrecord')
dataset_2_dir = os.path.join(root_dir, 'dataset_2.tfrecord')
self.assertTrue(tf.gfile.Exists(dataset_1_dir))
self.assertTrue(tf.gfile.Exists(dataset_2_dir))
dataset_1_reader = tf.python_io.tf_record_iterator(dataset_1_dir)
self.assertEqual(
set([('serialized:%s_A' % s).encode('utf-8') for s in strings] +
[('serialized:%s_B' % s).encode('utf-8') for s in strings]),
set(dataset_1_reader))
dataset_2_reader = tf.python_io.tf_record_iterator(dataset_2_dir)
self.assertEqual(
set(('serialized:%s_C' % s).encode('utf-8') for s in strings),
set(dataset_2_reader))
def testPipelineIterator(self):
strings = ['abcdefg', 'helloworld!', 'qwerty']
result = pipeline.load_pipeline(MockPipeline(), iter(strings))
self.assertEqual(
set([MockStringProto(s + '_A') for s in strings] +
[MockStringProto(s + '_B') for s in strings]),
set(result['dataset_1']))
self.assertEqual(
set(MockStringProto(s + '_C') for s in strings),
set(result['dataset_2']))
def testPipelineKey(self):
    # This happens if PipelineKey() is used on a pipeline without a dictionary
# output, or the key is not in the output_type dict.
pipeline_inst = MockPipeline()
pipeline_key = pipeline_inst['dataset_1']
self.assertTrue(isinstance(pipeline_key, pipeline.PipelineKey))
self.assertEqual(pipeline_key.key, 'dataset_1')
self.assertEqual(pipeline_key.unit, pipeline_inst)
self.assertEqual(pipeline_key.output_type, MockStringProto)
with self.assertRaises(KeyError):
_ = pipeline_inst['abc']
class TestPipeline(pipeline.Pipeline):
def __init__(self):
super(TestPipeline, self).__init__(str, str)
def transform(self, input_object):
pass
pipeline_inst = TestPipeline()
with self.assertRaises(KeyError):
_ = pipeline_inst['abc']
with self.assertRaises(ValueError):
_ = pipeline.PipelineKey(1234, 'abc')
def testInvalidTypeSignatureError(self):
class PipelineShell(pipeline.Pipeline):
def transform(self, input_object):
pass
_ = PipelineShell(str, str)
_ = PipelineShell({'name': str}, {'name': str})
good_type = str
for bad_type in [123, {1: str}, {'name': 123},
{'name': str, 'name2': 123}, [str, int]]:
with self.assertRaises(pipeline.InvalidTypeSignatureError):
PipelineShell(bad_type, good_type)
with self.assertRaises(pipeline.InvalidTypeSignatureError):
PipelineShell(good_type, bad_type)
def testPipelineGivenName(self):
class TestPipeline123(pipeline.Pipeline):
def __init__(self):
super(TestPipeline123, self).__init__(str, str, 'TestName')
self.stats = []
def transform(self, input_object):
self._set_stats([statistics.Counter('counter_1', 5),
statistics.Counter('counter_2', 10)])
return []
pipe = TestPipeline123()
self.assertEqual(pipe.name, 'TestName')
pipe.transform('hello')
stats = pipe.get_stats()
self.assertEqual(
set((stat.name, stat.count) for stat in stats),
set([('TestName_counter_1', 5), ('TestName_counter_2', 10)]))
def testPipelineDefaultName(self):
class TestPipeline123(pipeline.Pipeline):
def __init__(self):
super(TestPipeline123, self).__init__(str, str)
self.stats = []
def transform(self, input_object):
self._set_stats([statistics.Counter('counter_1', 5),
statistics.Counter('counter_2', 10)])
return []
pipe = TestPipeline123()
self.assertEqual(pipe.name, 'TestPipeline123')
pipe.transform('hello')
stats = pipe.get_stats()
self.assertEqual(
set((stat.name, stat.count) for stat in stats),
set([('TestPipeline123_counter_1', 5),
('TestPipeline123_counter_2', 10)]))
def testInvalidStatisticsError(self):
class TestPipeline1(pipeline.Pipeline):
def __init__(self):
super(TestPipeline1, self).__init__(object, object)
self.stats = []
def transform(self, input_object):
self._set_stats([statistics.Counter('counter_1', 5), 12345])
return []
class TestPipeline2(pipeline.Pipeline):
def __init__(self):
super(TestPipeline2, self).__init__(object, object)
self.stats = []
def transform(self, input_object):
self._set_stats(statistics.Counter('counter_1', 5))
return [input_object]
tp1 = TestPipeline1()
with self.assertRaises(pipeline.InvalidStatisticsError):
tp1.transform('hello')
tp2 = TestPipeline2()
with self.assertRaises(pipeline.InvalidStatisticsError):
tp2.transform('hello')
if __name__ == '__main__':
tf.test.main()
| jesseengel/magenta | magenta/pipelines/pipeline_test.py | Python | apache-2.0 | 8,715 |
class Solution:
# @param A a list of integers
# @param m an integer, length of A
# @param B a list of integers
# @param n an integer, length of B
# @return nothing
def merge(self, A, m, B, n):
A[:] = sorted(A[:m] + B[:n])
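# Illustrative usage (hypothetical driver, not part of the judge harness):
#   A = [1, 3, 5, 0, 0]          # first m slots hold data, the rest is padding
#   Solution().merge(A, 3, [2, 4], 2)
#   # A is now [1, 2, 3, 4, 5]; the slice assignment mutates A in place.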
| rahul-ramadas/leetcode | merge-sorted-array/Solution.6603525.py | Python | mit | 266 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import argparse
import glob
import itertools
import json
import os
import six
import string
import subprocess
import tempfile
import uuid
import numpy as np
ARROW_HOME = os.path.abspath(__file__).rsplit("/", 2)[0]
# Control for flakiness
np.random.seed(12345)
def load_version_from_pom():
import xml.etree.ElementTree as ET
tree = ET.parse(os.path.join(ARROW_HOME, 'java', 'pom.xml'))
version_tag = list(tree.getroot().findall('{http://maven.apache.org/POM/4.0.0}version'))[0]
return version_tag.text
def guid():
return uuid.uuid4().hex
# from pandas
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
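# Example only -- output is random: rands(7) might return something like
# 'aB3kZ9q', i.e. seven characters drawn from ASCII letters and digits.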
def str_from_bytes(x):
if six.PY2:
return x
else:
return x.decode('utf-8')
# from the merge_arrow_pr.py script
def run_cmd(cmd):
if isinstance(cmd, six.string_types):
cmd = cmd.split(' ')
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
# this avoids hiding the stdout / stderr of failed processes
print('Command failed: %s' % ' '.join(cmd))
print('With output:')
print('--------------')
print(str_from_bytes(e.output))
print('--------------')
raise e
return str_from_bytes(output)
# ----------------------------------------------------------------------
# Data generation
class DataType(object):
def __init__(self, name, nullable=True):
self.name = name
self.nullable = nullable
def get_json(self):
return OrderedDict([
('name', self.name),
('type', self._get_type()),
('nullable', self.nullable),
('children', self._get_children()),
('typeLayout', self._get_type_layout())
])
def _make_is_valid(self, size):
if self.nullable:
return np.random.randint(0, 2, size=size)
else:
return np.ones(size)
class Column(object):
def __init__(self, name, count):
self.name = name
self.count = count
def _get_children(self):
return []
def _get_buffers(self):
return []
def get_json(self):
entries = [
('name', self.name),
('count', self.count)
]
buffers = self._get_buffers()
entries.extend(buffers)
children = self._get_children()
if len(children) > 0:
entries.append(('children', children))
return OrderedDict(entries)
class PrimitiveType(DataType):
def _get_children(self):
return []
def _get_type_layout(self):
return OrderedDict([
('vectors',
[OrderedDict([('type', 'VALIDITY'),
('typeBitWidth', 1)]),
OrderedDict([('type', 'DATA'),
('typeBitWidth', self.bit_width)])])])
class PrimitiveColumn(Column):
def __init__(self, name, count, is_valid, values):
Column.__init__(self, name, count)
self.is_valid = is_valid
self.values = values
def _get_buffers(self):
return [
('VALIDITY', [int(v) for v in self.is_valid]),
('DATA', list(self.values))
]
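# Shape of the JSON emitted for a primitive column by get_json() (illustrative
# field name and values):
#   {"name": "f0", "count": 3, "VALIDITY": [1, 0, 1], "DATA": [7, 0, 9]}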
TEST_INT_MIN = - 2**31 + 1
TEST_INT_MAX = 2**31 - 1
class IntegerType(PrimitiveType):
def __init__(self, name, is_signed, bit_width, nullable=True):
PrimitiveType.__init__(self, name, nullable=nullable)
self.is_signed = is_signed
self.bit_width = bit_width
@property
def numpy_type(self):
return ('int' if self.is_signed else 'uint') + str(self.bit_width)
def _get_type(self):
return OrderedDict([
('name', 'int'),
('isSigned', self.is_signed),
('bitWidth', self.bit_width)
])
def generate_column(self, size):
iinfo = np.iinfo(self.numpy_type)
values = [int(x) for x in
np.random.randint(max(iinfo.min, TEST_INT_MIN),
min(iinfo.max, TEST_INT_MAX),
size=size)]
is_valid = self._make_is_valid(size)
return PrimitiveColumn(self.name, size, is_valid, values)
class FloatingPointType(PrimitiveType):
def __init__(self, name, bit_width, nullable=True):
PrimitiveType.__init__(self, name, nullable=nullable)
self.bit_width = bit_width
self.precision = {
16: 'HALF',
32: 'SINGLE',
64: 'DOUBLE'
}[self.bit_width]
@property
def numpy_type(self):
return 'float' + str(self.bit_width)
def _get_type(self):
return OrderedDict([
('name', 'floatingpoint'),
('precision', self.precision)
])
def generate_column(self, size):
values = np.random.randn(size) * 1000
values = np.round(values, 3)
is_valid = self._make_is_valid(size)
return PrimitiveColumn(self.name, size, is_valid, values)
class BooleanType(PrimitiveType):
bit_width = 1
def _get_type(self):
return OrderedDict([('name', 'bool')])
@property
def numpy_type(self):
return 'bool'
def generate_column(self, size):
values = list(map(bool, np.random.randint(0, 2, size=size)))
is_valid = self._make_is_valid(size)
return PrimitiveColumn(self.name, size, is_valid, values)
class BinaryType(PrimitiveType):
@property
def numpy_type(self):
return object
@property
def column_class(self):
return BinaryColumn
def _get_type(self):
return OrderedDict([('name', 'binary')])
def _get_type_layout(self):
return OrderedDict([
('vectors',
[OrderedDict([('type', 'VALIDITY'),
('typeBitWidth', 1)]),
OrderedDict([('type', 'OFFSET'),
('typeBitWidth', 32)]),
OrderedDict([('type', 'DATA'),
('typeBitWidth', 8)])])])
def generate_column(self, size):
K = 7
is_valid = self._make_is_valid(size)
values = []
for i in range(size):
if is_valid[i]:
draw = (np.random.randint(0, 255, size=K)
.astype(np.uint8)
.tostring())
values.append(draw)
else:
values.append("")
return self.column_class(self.name, size, is_valid, values)
class StringType(BinaryType):
@property
def column_class(self):
return StringColumn
def _get_type(self):
return OrderedDict([('name', 'utf8')])
def generate_column(self, size):
K = 7
is_valid = self._make_is_valid(size)
values = []
for i in range(size):
if is_valid[i]:
values.append(rands(K))
else:
values.append("")
return self.column_class(self.name, size, is_valid, values)
class JSONSchema(object):
def __init__(self, fields):
self.fields = fields
def get_json(self):
return OrderedDict([
('fields', [field.get_json() for field in self.fields])
])
class BinaryColumn(PrimitiveColumn):
def _encode_value(self, x):
return ''.join('{:02x}'.format(c).upper() for c in x)
def _get_buffers(self):
offset = 0
offsets = [0]
data = []
for i, v in enumerate(self.values):
if self.is_valid[i]:
offset += len(v)
else:
v = ""
offsets.append(offset)
data.append(self._encode_value(v))
return [
('VALIDITY', [int(x) for x in self.is_valid]),
('OFFSET', offsets),
('DATA', data)
]
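# Encoding note: BinaryColumn hex-encodes each value for the JSON 'DATA' array,
# two uppercase hex digits per byte (e.g. the byte 0xAB becomes 'AB');
# StringColumn below overrides _encode_value to pass strings through unchanged.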
class StringColumn(BinaryColumn):
def _encode_value(self, x):
return x
class ListType(DataType):
def __init__(self, name, value_type, nullable=True):
DataType.__init__(self, name, nullable=nullable)
self.value_type = value_type
def _get_type(self):
return OrderedDict([
('name', 'list')
])
def _get_children(self):
return [self.value_type.get_json()]
def _get_type_layout(self):
return OrderedDict([
('vectors',
[OrderedDict([('type', 'VALIDITY'),
('typeBitWidth', 1)]),
OrderedDict([('type', 'OFFSET'),
('typeBitWidth', 32)])])])
def generate_column(self, size):
MAX_LIST_SIZE = 4
is_valid = self._make_is_valid(size)
list_sizes = np.random.randint(0, MAX_LIST_SIZE + 1, size=size)
offsets = [0]
offset = 0
for i in range(size):
if is_valid[i]:
offset += int(list_sizes[i])
offsets.append(offset)
# The offset now is the total number of elements in the child array
values = self.value_type.generate_column(offset)
return ListColumn(self.name, size, is_valid, offsets, values)
class ListColumn(Column):
def __init__(self, name, count, is_valid, offsets, values):
Column.__init__(self, name, count)
self.is_valid = is_valid
self.offsets = offsets
self.values = values
def _get_buffers(self):
return [
('VALIDITY', [int(v) for v in self.is_valid]),
('OFFSET', list(self.offsets))
]
def _get_children(self):
return [self.values.get_json()]
class StructType(DataType):
def __init__(self, name, field_types, nullable=True):
DataType.__init__(self, name, nullable=nullable)
self.field_types = field_types
def _get_type(self):
return OrderedDict([
('name', 'struct')
])
def _get_children(self):
return [type_.get_json() for type_ in self.field_types]
def _get_type_layout(self):
return OrderedDict([
('vectors',
[OrderedDict([('type', 'VALIDITY'),
('typeBitWidth', 1)])])])
def generate_column(self, size):
is_valid = self._make_is_valid(size)
field_values = [type_.generate_column(size)
for type_ in self.field_types]
return StructColumn(self.name, size, is_valid, field_values)
class StructColumn(Column):
def __init__(self, name, count, is_valid, field_values):
Column.__init__(self, name, count)
self.is_valid = is_valid
self.field_values = field_values
def _get_buffers(self):
return [
('VALIDITY', [int(v) for v in self.is_valid])
]
def _get_children(self):
return [field.get_json() for field in self.field_values]
class JSONRecordBatch(object):
def __init__(self, count, columns):
self.count = count
self.columns = columns
def get_json(self):
return OrderedDict([
('count', self.count),
('columns', [col.get_json() for col in self.columns])
])
class JSONFile(object):
def __init__(self, schema, batches):
self.schema = schema
self.batches = batches
def get_json(self):
return OrderedDict([
('schema', self.schema.get_json()),
('batches', [batch.get_json() for batch in self.batches])
])
def write(self, path):
with open(path, 'wb') as f:
f.write(json.dumps(self.get_json(), indent=2).encode('utf-8'))
def get_field(name, type_, nullable=True):
if type_ == 'binary':
return BinaryType(name, nullable=nullable)
elif type_ == 'utf8':
return StringType(name, nullable=nullable)
dtype = np.dtype(type_)
if dtype.kind in ('i', 'u'):
return IntegerType(name, dtype.kind == 'i', dtype.itemsize * 8,
nullable=nullable)
elif dtype.kind == 'f':
return FloatingPointType(name, dtype.itemsize * 8,
nullable=nullable)
elif dtype.kind == 'b':
return BooleanType(name, nullable=nullable)
else:
raise TypeError(dtype)
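# Illustrative calls (field names are arbitrary):
#   get_field('f_int', 'int32')           -> signed 32-bit IntegerType
#   get_field('f_dbl', 'float64', False)  -> non-nullable DOUBLE FloatingPointType
#   get_field('f_str', 'utf8')            -> StringType
# Any other numpy kind falls through to the TypeError above.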
def generate_primitive_case():
types = ['bool', 'int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64', 'binary', 'utf8']
fields = []
for type_ in types:
fields.append(get_field(type_ + "_nullable", type_, True))
fields.append(get_field(type_ + "_nonnullable", type_, False))
schema = JSONSchema(fields)
batch_sizes = [7, 10]
batches = []
for size in batch_sizes:
columns = []
for field in fields:
col = field.generate_column(size)
columns.append(col)
batches.append(JSONRecordBatch(size, columns))
return JSONFile(schema, batches)
def generate_nested_case():
fields = [
ListType('list_nullable', get_field('item', 'int32')),
StructType('struct_nullable', [get_field('f1', 'int32'),
get_field('f2', 'utf8')]),
# TODO(wesm): this causes segfault
# ListType('list_nonnullable', get_field('item', 'int32'), False),
]
schema = JSONSchema(fields)
batch_sizes = [7, 10]
batches = []
for size in batch_sizes:
columns = []
for field in fields:
col = field.generate_column(size)
columns.append(col)
batches.append(JSONRecordBatch(size, columns))
return JSONFile(schema, batches)
def get_generated_json_files():
temp_dir = tempfile.mkdtemp()
def _temp_path():
return
file_objs = []
K = 10
for i in range(K):
file_objs.append(generate_primitive_case())
file_objs.append(generate_nested_case())
generated_paths = []
for file_obj in file_objs:
out_path = os.path.join(temp_dir, guid() + '.json')
file_obj.write(out_path)
generated_paths.append(out_path)
return generated_paths
# ----------------------------------------------------------------------
# Testing harness
class IntegrationRunner(object):
def __init__(self, json_files, testers, debug=False):
self.json_files = json_files
self.testers = testers
self.temp_dir = tempfile.mkdtemp()
self.debug = debug
def run(self):
for producer, consumer in itertools.product(self.testers,
self.testers):
if producer is consumer:
continue
print('-- {0} producing, {1} consuming'.format(producer.name,
consumer.name))
for json_path in self.json_files:
print('Testing file {0}'.format(json_path))
# Make the random access file
print('-- Creating binary inputs')
producer_file_path = os.path.join(self.temp_dir, guid())
producer.json_to_file(json_path, producer_file_path)
# Validate the file
print('-- Validating file')
consumer.validate(json_path, producer_file_path)
print('-- Validating stream')
producer_stream_path = os.path.join(self.temp_dir, guid())
consumer_file_path = os.path.join(self.temp_dir, guid())
producer.file_to_stream(producer_file_path,
producer_stream_path)
consumer.stream_to_file(producer_stream_path,
consumer_file_path)
consumer.validate(json_path, consumer_file_path)
class Tester(object):
def __init__(self, debug=False):
self.debug = debug
def json_to_file(self, json_path, arrow_path):
raise NotImplementedError
def stream_to_file(self, stream_path, file_path):
raise NotImplementedError
def file_to_stream(self, file_path, stream_path):
raise NotImplementedError
def validate(self, json_path, arrow_path):
raise NotImplementedError
class JavaTester(Tester):
_arrow_version = load_version_from_pom()
ARROW_TOOLS_JAR = os.environ.get(
'ARROW_JAVA_INTEGRATION_JAR',
os.path.join(ARROW_HOME,
'java/tools/target/arrow-tools-{}-'
'jar-with-dependencies.jar'.format(_arrow_version)))
name = 'Java'
def _run(self, arrow_path=None, json_path=None, command='VALIDATE'):
cmd = ['java', '-cp', self.ARROW_TOOLS_JAR,
'org.apache.arrow.tools.Integration']
if arrow_path is not None:
cmd.extend(['-a', arrow_path])
if json_path is not None:
cmd.extend(['-j', json_path])
cmd.extend(['-c', command])
if self.debug:
print(' '.join(cmd))
run_cmd(cmd)
def validate(self, json_path, arrow_path):
return self._run(arrow_path, json_path, 'VALIDATE')
def json_to_file(self, json_path, arrow_path):
return self._run(arrow_path, json_path, 'JSON_TO_ARROW')
def stream_to_file(self, stream_path, file_path):
cmd = ['java', '-cp', self.ARROW_TOOLS_JAR,
'org.apache.arrow.tools.StreamToFile',
stream_path, file_path]
run_cmd(cmd)
def file_to_stream(self, file_path, stream_path):
cmd = ['java', '-cp', self.ARROW_TOOLS_JAR,
'org.apache.arrow.tools.FileToStream',
file_path, stream_path]
run_cmd(cmd)
class CPPTester(Tester):
EXE_PATH = os.environ.get(
'ARROW_CPP_EXE_PATH',
os.path.join(ARROW_HOME, 'cpp/test-build/debug'))
CPP_INTEGRATION_EXE = os.path.join(EXE_PATH, 'json-integration-test')
STREAM_TO_FILE = os.path.join(EXE_PATH, 'stream-to-file')
FILE_TO_STREAM = os.path.join(EXE_PATH, 'file-to-stream')
name = 'C++'
def _run(self, arrow_path=None, json_path=None, command='VALIDATE'):
cmd = [self.CPP_INTEGRATION_EXE, '--integration']
if arrow_path is not None:
cmd.append('--arrow=' + arrow_path)
if json_path is not None:
cmd.append('--json=' + json_path)
cmd.append('--mode=' + command)
if self.debug:
print(' '.join(cmd))
run_cmd(cmd)
def validate(self, json_path, arrow_path):
return self._run(arrow_path, json_path, 'VALIDATE')
def json_to_file(self, json_path, arrow_path):
return self._run(arrow_path, json_path, 'JSON_TO_ARROW')
def stream_to_file(self, stream_path, file_path):
cmd = ['cat', stream_path, '|', self.STREAM_TO_FILE, '>', file_path]
cmd = ' '.join(cmd)
if self.debug:
print(cmd)
os.system(cmd)
def file_to_stream(self, file_path, stream_path):
cmd = [self.FILE_TO_STREAM, file_path, '>', stream_path]
cmd = ' '.join(cmd)
if self.debug:
print(cmd)
os.system(cmd)
def get_static_json_files():
glob_pattern = os.path.join(ARROW_HOME, 'integration', 'data', '*.json')
return glob.glob(glob_pattern)
def run_all_tests(debug=False):
testers = [CPPTester(debug=debug), JavaTester(debug=debug)]
static_json_files = get_static_json_files()
generated_json_files = get_generated_json_files()
json_files = static_json_files + generated_json_files
runner = IntegrationRunner(json_files, testers, debug=debug)
runner.run()
print('-- All tests passed!')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Arrow integration test CLI')
parser.add_argument('--debug', dest='debug', action='store_true',
default=False,
help='Run executables in debug mode as relevant')
args = parser.parse_args()
run_all_tests(debug=args.debug)
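# Usage sketch: with the Java and C++ integration tools built (paths can be
# overridden via ARROW_JAVA_INTEGRATION_JAR and ARROW_CPP_EXE_PATH), run
#   python integration_test.py --debug
# to print each producer/consumer command as it executes.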
| TheNeuralBit/arrow | integration/integration_test.py | Python | apache-2.0 | 21,106 |
###############################################################################
# Name: ed_mdlg.py #
# Purpose: Commonly used message dialogs #
# Author: Cody Precord <[email protected]> #
# Copyright: (c) 2008 Cody Precord <[email protected]> #
# License: wxWindows License #
###############################################################################
"""
This module provides a number of message dialogs that are commonly used
throughout Editra. Its purpose is to promote reuse of the common dialogs for
consistancy and reduction in redundant code.
@summary: Common dialogs and related convenience functions
"""
__author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: ed_mdlg.py 66817 2011-01-29 21:32:20Z CJP $"
__revision__ = "$Revision: 66817 $"
#--------------------------------------------------------------------------#
# Imports
import wx
import wx.stc
from extern.embeddedimage import PyEmbeddedImage
# Editra Library
import ed_glob
import util
import eclib
#--------------------------------------------------------------------------#
# Globals
_ = wx.GetTranslation
FileIcon = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABHNCSVQICAgIfAhkiAAABthJ"
"REFUWIW9l21sVFkZgJ/7Pd8zLdOWQloiIA2WbiqBLWhhdw3IAhuyRqMxq64uv1B/GMJqYvzK"
"qqkmfqx/3UiC7q4QN5o1UnbRPyVLgdYtCAjdju2Uwhbabmd6O9PpdO7H8Ud7h2lnaLv+8E1u"
"zj3vnXPe57znfd9zRqJcIseOHXsxGo1W5fP5nOu6xQ+OIwAXT+e6LrZtUygUdE3T3O7u7pP9"
"/f03K8y5egkGgy2maTpiBXEcR1iWJXK5nBgbGxPJZFJ0d3eL3bt3H/4w9qSlinA43NbT03O5"
"oaGBdDoNgBAC13WLreM4xb4QAtu2SaVSzMzMEI/HOX78+KGLFy+eWw2AXEYkSWKhLdWhKAqy"
"LCPLMoqiFPuSJKFpGrFYjImJCUzTpLOzs7O1tfXg/wTgGaz0eIZVVS2+eyC6rhOLxRgaGiIS"
"idDV1dXZ0tKyIkQZgGVZopJxb/XeqksBvCcQCCCEIJPJEIlEuHLlSueOHTuWhajogVJjnkG3"
"JA48AE3Tit7wtsHv99Pf38/g4CCWZXH27NnOtra2R0KoSxXV1dWKruvFrXDd+bRzhUC47iJv"
"2LZDKGigqgqTqSy1tbVks1nOnDnDyMgIwWCQXbt20dHR0XnixImn+/r63l5qrywLDh48uP/0"
"6dPnFUXBNE0cy0Lz+9Grq3EdB5jPfyc7Q33cRyo1zcR4hnhjPXmhUJjLk0gkGLpxA3t2ltm5"
"ORobGzEMI3306NHHUqnUvWU9sHXr1ng4HMY0TRRFwRcIYOdymAMDyJqGNZmm1nCItGzm0nWT"
"F37Yx8c32Jz8js34/TkwdOK2Q9W2baiBAIV8nkwmQ01NTVVTU9OnL126dHJZgLVr18a91DIM"
"A7/fz8TwMOaXn6chGmBNewsDH32Cb/xxlOvvJWleU8vVbD2dL/+Zw9fOM2FaCOHi/OznRJub"
"sSYmiMViyLJMLpfzrxgDmqb5AAzDQFEUAHyFAi2BAsqhw5xSW3n5wizbxACnmsdpbdV4IxNk"
"w2QM4wOTUP8gbjhM1tBxFgqVYRgEAgE0TVtqrhzAsqwcgKIoxYj3r1vLXz73I875d3H15k1+"
"teMuTwUNHiR0JmerOLAlTu+4Rr69HXfGxhEOuqZh6Dr5hSzy+/0YhlEWc2UAyWTyfXhYjKYn"
"U3z/lb9zJRVAQqLev4XaDQ5EFLJOlM0HdnI7rfLcrx/Q9ewetoyNku4fJuTzEfL7wedDCIGq"
"qchyedaXabq7uycymUyxPxeuYn+Dj4vSGxwI/pO3bmn8picMbU1sfuEQd2b8dLzyHx70K7yU"
"qIP9e1nf+jFq6msxAJ/Ph67rqIpK6cn6SIBkMlnI5/MAFCyLGl2ifUcz6X/0ccT3Lvvb5kik"
"6/nbhTR/Opei7bnXyZq3ee17Phx5kluBOq637OHUhQQaYPh8xYIFiBW3AJA8V3kb5kQi3Pv8"
"19i+r4Uv3XufjrONvPhbhTX2X3n1x4+z75Nb4NYgz1h3MXqv8qrSzC97E3zxQDPBUDXZhQJW"
"Sco8oKqqJMnzP/ZAFKDRdWBgki80zrK+apzEgxDPf7aVffubYFzCHpki2NWLoZnkwptI3A0x"
"en9s0TyVYqDMA7ZtC89RHrWwHXJ3htHyc4RrdL7ZrnAnHeP1y2v5RPRdmqU8qgY8+yl+/2+D"
"H/TYfGWPReO6mkXzrMoDpeIFjSRc3A8mcadSzF4e4EhdhiNtGW6PxXjtXzroM1ybinKgt56X"
"+mf5ae0Ffnd8O1owTi6XWxagUgwgxOJYEbYNd+8iWRZzcwX87wi++pEC4ztruJbaxTPnrzI2"
"PcxeaZQ3Iwl8l3sxx48SqlvsyVUBWJZVBChts/k8SiaDpRuEJoM0PxnDvHqf0fvDtFfd5CfG"
"NVpHhsjcGGFQ1YjrKhEe1hOgWFlX9IAnkiThAqFNm1j/1jkkSSJSFeK9xCjf+sXbhKI+/vDt"
"x2nZ+BnE0JOkbBc34KdOUQisW4dtO4sAVuWBpeLaNqphEN24sagbJc2e9ga++/XDoEQQgPtY"
"I1EPHLALBWyrgFR+4q8M4BF7rXcT9t73bt/EUzu3AGDbNm5Jnns3ZSHmxwtAkh4d66sCmL+O"
"C2D+WlawCsj24vshzOe5Bzs/VEIIgbxQV7xFfGiA+VYsTCYX/x94xh+CLh7vSaUCVPz2yC9L"
"JvBWWwq5VCfLi2/SlWCWSpkHVFWVFg6ORYMrXSaWg60kmqatfB+wbduZmpoiHA4zPT1d1Jf+"
"PxBCIFyBK9zyolXS9941TSMUClEoFMrO40r+qQ6FQk/Islznuq5NyREaCARkwzBk27ZFPp93"
"LcsqO14fIaokSblMJvMOkFzlmP+P/BeZah5l10evBAAAAABJRU5ErkJggg==")
#--------------------------------------------------------------------------#
def OpenErrorDlg(parent, fname, err):
"""Show a file open error dialog
@param parent: parent window
@param fname: file that failed to open
@param err: error message
"""
argmap = dict(filename=fname, errormsg=err)
dlg = wx.MessageDialog(parent,
_("Editra could not open %(filename)s\n\n"
"Error:\n%(errormsg)s") % \
argmap, _("Error Opening File"),
style=wx.OK|wx.CENTER|wx.ICON_ERROR)
dlg.CenterOnParent()
result = dlg.ShowModal()
dlg.Destroy()
return result
def SaveErrorDlg(parent, fname, err):
"""Show a file save error modal dialog
@param parent: window that the dialog is the child of
    @param fname: name of the file the error occurred in
@param err: the err message/description
@return: wxID_OK if dialog was shown and dismissed properly
"""
argmap = dict(filename=fname, errormsg=err)
dlg = wx.MessageDialog(parent,
_("Failed to save file: %(filename)s\n\n"
"Error:\n%(errormsg)s") % argmap,
_("Save Error"), wx.OK|wx.ICON_ERROR)
dlg.CenterOnParent()
result = dlg.ShowModal()
dlg.Destroy()
return result
#--------------------------------------------------------------------------#
class EdFileInfoDlg(eclib.FileInfoDlg):
"""File information dialog"""
def __init__(self, parent, fname):
"""General file information dialog
@param parent: parent window
@param fname: file path
"""
super(EdFileInfoDlg, self).__init__(parent, fname=fname, ftype=None,
bmp=FileIcon.GetBitmap())
# Setup
self.SetFileTypeLabel(util.GetFileType(fname))
#--------------------------------------------------------------------------#
class EdFormatEOLDlg(eclib.ChoiceDialog):
"""Dialog for selecting EOL format"""
def __init__(self, parent, msg=u'', title=u'', selection=0):
"""Create the dialog
@keyword selection: default selection (wx.stc.STC_EOL_*)
"""
        choices = [_("Old Macintosh (\\r)"), _("Unix (\\n)"),
_("Windows (\\r\\n)")]
self._eol = [wx.stc.STC_EOL_CR, wx.stc.STC_EOL_LF, wx.stc.STC_EOL_CRLF]
idx = self._eol.index(selection)
super(EdFormatEOLDlg, self).__init__(parent, msg=msg, title=title,
choices=choices,
style=wx.YES_NO|wx.YES_DEFAULT)
self.SetSelection(idx)
# Setup
bmp = wx.ArtProvider.GetBitmap(str(ed_glob.ID_DOCPROP), wx.ART_OTHER)
if bmp.IsOk():
self.SetBitmap(bmp)
self.CenterOnParent()
def GetSelection(self):
"""Get the selected eol mode
@return: wx.stc.STC_EOL_*
"""
sel = super(EdFormatEOLDlg, self).GetSelection()
return self._eol[sel]
| ktan2020/legacy-automation | win/Lib/site-packages/wx-3.0-msw/wx/tools/Editra/src/ed_mdlg.py | Python | mit | 7,259 |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incur an unnecessary performance hit.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = []
attrName = ''
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
def __init__(self, _tagLst, _attrName, _attrVal, _data):
self.tagList = _tagLst
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
def JudgeBaseline(self, context):
# No step should not crash
self.__assistant.CheckCrashes(context)
# Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"])
if (self.__assistant.GetResults() == False):
self.status_baseline = False
return False
# Compare the rendered images between import and export
# Then compare images against reference test to check for non-equivalence
self.__assistant.CompareRenderedImages(context)
self.status_baseline = self.__assistant.DeferJudgement(context)
return self.status_baseline
# To pass intermediate you need to pass basic, this object could also include additional
# tests that were specific to the intermediate badge.
def JudgeSuperior(self, context):
self.status_superior = self.status_baseline
return self.status_superior
# To pass advanced you need to pass intermediate, this object could also include additional
# tests that were specific to the advanced badge
def JudgeExemplary(self, context):
self.status_exemplary = self.status_superior
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
| KhronosGroup/COLLADA-CTS | StandardDataSets/collada/library_visual_scenes/visual_scene/node/_reference/_reference_node_translate_xyz_cube/_reference_node_translate_xyz_cube.py | Python | mit | 3,826 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-19 07:40
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('practiceapp', '0009_auto_20160819_1038'),
]
operations = [
migrations.AlterModelOptions(
name='mysiteprofile',
options={'verbose_name': 'Профиль', 'verbose_name_plural': 'Профили'},
),
migrations.RemoveField(
model_name='mysiteprofile',
name='group',
),
migrations.RemoveField(
model_name='mysiteprofile',
name='lucky_number',
),
migrations.RemoveField(
model_name='mysiteprofile',
name='status',
),
migrations.AlterField(
model_name='mysiteprofile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| MrHarigo/StudyConfirmationProject | StudyConfirmationProject/practiceapp/migrations/0010_auto_20160819_1040.py | Python | apache-2.0 | 1,103 |
#MenuTitle: Enable alignment for selected glyphs
# -*- coding: utf-8 -*-
__doc__="""
Enables automatic alignment for all components in all selected glyphs. (The opposite of mekkablue's Disable alignment for selected glyphs script.)
"""
import GlyphsApp
Font = Glyphs.font
selectedLayers = Font.selectedLayers
def process( thisLayer ):
for thisComp in thisLayer.components:
thisComp.setDisableAlignment_( False )
for thisLayer in selectedLayers:
thisGlyph = thisLayer.parent
print "Enabling automatic alignment in", thisGlyph.name
thisGlyph.beginUndo()
process( thisLayer )
thisGlyph.endUndo()
| juandelperal/glyphsScripts | Utils/Enable alignment.py | Python | mit | 596 |
"""Test queues inspection SB APIs."""
from __future__ import print_function
import unittest2
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestQueues(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipUnlessDarwin
@add_test_categories(['pyapi'])
def test_with_python_api_queues(self):
"""Test queues inspection SB APIs."""
self.build()
self.queues()
@skipUnlessDarwin
@add_test_categories(['pyapi'])
def test_with_python_api_queues_with_backtrace(self):
"""Test queues inspection SB APIs."""
self.build()
self.queues_with_libBacktraceRecording()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers that we will step to in main:
self.main_source = "main.c"
def check_queue_for_valid_queue_id(self, queue):
self.assertTrue(
queue.GetQueueID() != 0, "Check queue %s for valid QueueID (got 0x%x)" %
(queue.GetName(), queue.GetQueueID()))
def check_running_and_pending_items_on_queue(
self, queue, expected_running, expected_pending):
self.assertTrue(
queue.GetNumPendingItems() == expected_pending,
"queue %s should have %d pending items, instead has %d pending items" %
(queue.GetName(),
expected_pending,
(queue.GetNumPendingItems())))
self.assertTrue(
queue.GetNumRunningItems() == expected_running,
"queue %s should have %d running items, instead has %d running items" %
(queue.GetName(),
expected_running,
(queue.GetNumRunningItems())))
def describe_threads(self):
desc = []
for x in self.inferior_process:
id = x.GetIndexID()
reason_str = lldbutil.stop_reason_to_str(x.GetStopReason())
location = "\t".join([lldbutil.get_description(
x.GetFrameAtIndex(i)) for i in range(x.GetNumFrames())])
desc.append(
"thread %d: %s (queue id: %s) at\n\t%s" %
(id, reason_str, x.GetQueueID(), location))
print('\n'.join(desc))
def check_number_of_threads_owned_by_queue(self, queue, number_threads):
if (queue.GetNumThreads() != number_threads):
self.describe_threads()
self.assertTrue(
queue.GetNumThreads() == number_threads,
"queue %s should have %d thread executing, but has %d" %
(queue.GetName(),
number_threads,
queue.GetNumThreads()))
def check_queue_kind(self, queue, kind):
expected_kind_string = "Unknown"
if kind == lldb.eQueueKindSerial:
expected_kind_string = "Serial queue"
if kind == lldb.eQueueKindConcurrent:
expected_kind_string = "Concurrent queue"
actual_kind_string = "Unknown"
if queue.GetKind() == lldb.eQueueKindSerial:
actual_kind_string = "Serial queue"
if queue.GetKind() == lldb.eQueueKindConcurrent:
actual_kind_string = "Concurrent queue"
self.assertTrue(
queue.GetKind() == kind,
"queue %s is expected to be a %s but it is actually a %s" %
(queue.GetName(),
expected_kind_string,
actual_kind_string))
def check_queues_threads_match_queue(self, queue):
for idx in range(0, queue.GetNumThreads()):
t = queue.GetThreadAtIndex(idx)
self.assertTrue(
t.IsValid(), "Queue %s's thread #%d must be valid" %
(queue.GetName(), idx))
self.assertTrue(
t.GetQueueID() == queue.GetQueueID(),
"Queue %s has a QueueID of %d but its thread #%d has a QueueID of %d" %
(queue.GetName(),
queue.GetQueueID(),
idx,
t.GetQueueID()))
self.assertTrue(
t.GetQueueName() == queue.GetName(),
"Queue %s has a QueueName of %s but its thread #%d has a QueueName of %s" %
(queue.GetName(),
queue.GetName(),
idx,
t.GetQueueName()))
self.assertTrue(
t.GetQueue().GetQueueID() == queue.GetQueueID(),
"Thread #%d's Queue's QueueID of %d is not the same as the QueueID of its owning queue %d" %
(idx,
t.GetQueue().GetQueueID(),
queue.GetQueueID()))
def queues(self):
"""Test queues inspection SB APIs without libBacktraceRecording."""
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
self.main_source_spec = lldb.SBFileSpec(self.main_source)
break1 = target.BreakpointCreateByName("stopper", 'a.out')
self.assertTrue(break1, VALID_BREAKPOINT)
process = target.LaunchSimple(
[], None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
threads = lldbutil.get_threads_stopped_at_breakpoint(process, break1)
if len(threads) != 1:
self.fail("Failed to stop at breakpoint 1.")
self.inferior_process = process
queue_submittor_1 = lldb.SBQueue()
queue_performer_1 = lldb.SBQueue()
queue_performer_2 = lldb.SBQueue()
queue_performer_3 = lldb.SBQueue()
for idx in range(0, process.GetNumQueues()):
q = process.GetQueueAtIndex(idx)
if q.GetName() == "com.apple.work_submittor_1":
queue_submittor_1 = q
if q.GetName() == "com.apple.work_performer_1":
queue_performer_1 = q
if q.GetName() == "com.apple.work_performer_2":
queue_performer_2 = q
if q.GetName() == "com.apple.work_performer_3":
queue_performer_3 = q
self.assertTrue(
queue_submittor_1.IsValid() and queue_performer_1.IsValid() and queue_performer_2.IsValid() and queue_performer_3.IsValid(),
"Got all four expected queues: %s %s %s %s" %
(queue_submittor_1.IsValid(),
queue_performer_1.IsValid(),
queue_performer_2.IsValid(),
queue_performer_3.IsValid()))
self.check_queue_for_valid_queue_id(queue_submittor_1)
self.check_queue_for_valid_queue_id(queue_performer_1)
self.check_queue_for_valid_queue_id(queue_performer_2)
self.check_queue_for_valid_queue_id(queue_performer_3)
self.check_number_of_threads_owned_by_queue(queue_submittor_1, 1)
self.check_number_of_threads_owned_by_queue(queue_performer_1, 1)
self.check_number_of_threads_owned_by_queue(queue_performer_2, 1)
self.check_number_of_threads_owned_by_queue(queue_performer_3, 4)
self.check_queue_kind(queue_submittor_1, lldb.eQueueKindSerial)
self.check_queue_kind(queue_performer_1, lldb.eQueueKindSerial)
self.check_queue_kind(queue_performer_2, lldb.eQueueKindSerial)
self.check_queue_kind(queue_performer_3, lldb.eQueueKindConcurrent)
self.check_queues_threads_match_queue(queue_submittor_1)
self.check_queues_threads_match_queue(queue_performer_1)
self.check_queues_threads_match_queue(queue_performer_2)
self.check_queues_threads_match_queue(queue_performer_3)
# We have threads running with all the different dispatch QoS service
# levels - find those threads and check that we can get the correct
# QoS name for each of them.
user_initiated_thread = lldb.SBThread()
user_interactive_thread = lldb.SBThread()
utility_thread = lldb.SBThread()
unspecified_thread = lldb.SBThread()
background_thread = lldb.SBThread()
for th in process.threads:
if th.GetName() == "user initiated QoS":
user_initiated_thread = th
if th.GetName() == "user interactive QoS":
user_interactive_thread = th
if th.GetName() == "utility QoS":
utility_thread = th
if th.GetName() == "unspecified QoS":
unspecified_thread = th
if th.GetName() == "background QoS":
background_thread = th
self.assertTrue(
user_initiated_thread.IsValid(),
"Found user initiated QoS thread")
self.assertTrue(
user_interactive_thread.IsValid(),
"Found user interactive QoS thread")
self.assertTrue(utility_thread.IsValid(), "Found utility QoS thread")
self.assertTrue(
unspecified_thread.IsValid(),
"Found unspecified QoS thread")
self.assertTrue(
background_thread.IsValid(),
"Found background QoS thread")
stream = lldb.SBStream()
self.assertTrue(
user_initiated_thread.GetInfoItemByPathAsString(
"requested_qos.printable_name",
stream),
"Get QoS printable string for user initiated QoS thread")
self.assertTrue(
stream.GetData() == "User Initiated",
"user initiated QoS thread name is valid")
stream.Clear()
self.assertTrue(
user_interactive_thread.GetInfoItemByPathAsString(
"requested_qos.printable_name",
stream),
"Get QoS printable string for user interactive QoS thread")
self.assertTrue(
stream.GetData() == "User Interactive",
"user interactive QoS thread name is valid")
stream.Clear()
self.assertTrue(
utility_thread.GetInfoItemByPathAsString(
"requested_qos.printable_name",
stream),
"Get QoS printable string for utility QoS thread")
self.assertTrue(
stream.GetData() == "Utility",
"utility QoS thread name is valid")
stream.Clear()
self.assertTrue(
unspecified_thread.GetInfoItemByPathAsString(
"requested_qos.printable_name",
stream),
"Get QoS printable string for unspecified QoS thread")
qosName = stream.GetData()
self.assertTrue(
qosName == "User Initiated" or qosName == "Default",
"unspecified QoS thread name is valid")
stream.Clear()
self.assertTrue(
background_thread.GetInfoItemByPathAsString(
"requested_qos.printable_name",
stream),
"Get QoS printable string for background QoS thread")
self.assertTrue(
stream.GetData() == "Background",
"background QoS thread name is valid")
@skipIfDarwin # rdar://50379398
def queues_with_libBacktraceRecording(self):
"""Test queues inspection SB APIs with libBacktraceRecording present."""
exe = self.getBuildArtifact("a.out")
if not os.path.isfile(
'/Applications/Xcode.app/Contents/Developer/usr/lib/libBacktraceRecording.dylib'):
            self.skipTest(
                "Skipped because libBacktraceRecording.dylib was not present on the system.")
if not os.path.isfile(
'/usr/lib/system/introspection/libdispatch.dylib'):
self.skipTest(
"Skipped because introspection libdispatch dylib is not present.")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
self.main_source_spec = lldb.SBFileSpec(self.main_source)
break1 = target.BreakpointCreateByName("stopper", 'a.out')
self.assertTrue(break1, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
libbtr_path = "/Applications/Xcode.app/Contents/Developer/usr/lib/libBacktraceRecording.dylib"
if self.getArchitecture() in ['arm', 'arm64', 'arm64e', 'arm64_32', 'armv7', 'armv7k']:
libbtr_path = "/Developer/usr/lib/libBacktraceRecording.dylib"
process = target.LaunchSimple(
[],
[
'DYLD_INSERT_LIBRARIES=%s' % (libbtr_path),
'DYLD_LIBRARY_PATH=/usr/lib/system/introspection'],
self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
threads = lldbutil.get_threads_stopped_at_breakpoint(process, break1)
if len(threads) != 1:
self.fail("Failed to stop at breakpoint 1.")
self.inferior_process = process
libbtr_module_filespec = lldb.SBFileSpec("libBacktraceRecording.dylib")
libbtr_module = target.FindModule(libbtr_module_filespec)
if not libbtr_module.IsValid():
self.skipTest(
"Skipped because libBacktraceRecording.dylib was not loaded into the process.")
self.assertTrue(
process.GetNumQueues() >= 4,
"Found the correct number of queues.")
queue_submittor_1 = lldb.SBQueue()
queue_performer_1 = lldb.SBQueue()
queue_performer_2 = lldb.SBQueue()
queue_performer_3 = lldb.SBQueue()
for idx in range(0, process.GetNumQueues()):
q = process.GetQueueAtIndex(idx)
if "LLDB_COMMAND_TRACE" in os.environ:
print("Queue with id %s has name %s" % (q.GetQueueID(), q.GetName()))
if q.GetName() == "com.apple.work_submittor_1":
queue_submittor_1 = q
if q.GetName() == "com.apple.work_performer_1":
queue_performer_1 = q
if q.GetName() == "com.apple.work_performer_2":
queue_performer_2 = q
if q.GetName() == "com.apple.work_performer_3":
queue_performer_3 = q
if q.GetName() == "com.apple.main-thread":
if q.GetNumThreads() == 0:
print("Cannot get thread <=> queue associations")
return
self.assertTrue(
queue_submittor_1.IsValid() and queue_performer_1.IsValid() and queue_performer_2.IsValid() and queue_performer_3.IsValid(),
"Got all four expected queues: %s %s %s %s" %
(queue_submittor_1.IsValid(),
queue_performer_1.IsValid(),
queue_performer_2.IsValid(),
queue_performer_3.IsValid()))
self.check_queue_for_valid_queue_id(queue_submittor_1)
self.check_queue_for_valid_queue_id(queue_performer_1)
self.check_queue_for_valid_queue_id(queue_performer_2)
self.check_queue_for_valid_queue_id(queue_performer_3)
self.check_running_and_pending_items_on_queue(queue_submittor_1, 1, 0)
self.check_running_and_pending_items_on_queue(queue_performer_1, 1, 3)
self.check_running_and_pending_items_on_queue(
queue_performer_2, 1, 9999)
self.check_running_and_pending_items_on_queue(queue_performer_3, 4, 0)
self.check_number_of_threads_owned_by_queue(queue_submittor_1, 1)
self.check_number_of_threads_owned_by_queue(queue_performer_1, 1)
self.check_number_of_threads_owned_by_queue(queue_performer_2, 1)
self.check_number_of_threads_owned_by_queue(queue_performer_3, 4)
self.check_queue_kind(queue_submittor_1, lldb.eQueueKindSerial)
self.check_queue_kind(queue_performer_1, lldb.eQueueKindSerial)
self.check_queue_kind(queue_performer_2, lldb.eQueueKindSerial)
self.check_queue_kind(queue_performer_3, lldb.eQueueKindConcurrent)
self.check_queues_threads_match_queue(queue_submittor_1)
self.check_queues_threads_match_queue(queue_performer_1)
self.check_queues_threads_match_queue(queue_performer_2)
self.check_queues_threads_match_queue(queue_performer_3)
self.assertTrue(queue_performer_2.GetPendingItemAtIndex(
0).IsValid(), "queue 2's pending item #0 is valid")
self.assertTrue(queue_performer_2.GetPendingItemAtIndex(0).GetAddress().GetSymbol(
).GetName() == "doing_the_work_2", "queue 2's pending item #0 should be doing_the_work_2")
self.assertTrue(
queue_performer_2.GetNumPendingItems() == 9999,
"verify that queue 2 still has 9999 pending items")
self.assertTrue(queue_performer_2.GetPendingItemAtIndex(
9998).IsValid(), "queue 2's pending item #9998 is valid")
self.assertTrue(queue_performer_2.GetPendingItemAtIndex(9998).GetAddress().GetSymbol(
        ).GetName() == "doing_the_work_2", "queue 2's pending item #9998 should be doing_the_work_2")
self.assertTrue(queue_performer_2.GetPendingItemAtIndex(
9999).IsValid() == False, "queue 2's pending item #9999 is invalid")
| endlessm/chromium-browser | third_party/llvm/lldb/test/API/macosx/queues/TestQueues.py | Python | bsd-3-clause | 17,019 |
import json
import requests
import base64
from . import check_input_attribute, checking_error, standard_error_message
from pymisp import MISPEvent, MISPObject
from urllib.parse import quote
moduleinfo = {'version': '1.0',
'author': 'Ben Verschaeren',
'description': 'SOPHOSLabs Intelix Integration',
'module-type': ['expansion']}
moduleconfig = ['client_id', 'client_secret']
misperrors = {'error': 'Error'}
misp_types_in = ['sha256', 'ip', 'ip-src', 'ip-dst', 'uri', 'url', 'domain', 'hostname']
mispattributes = {'input': misp_types_in,
'format': 'misp_standard'}
class SophosLabsApi():
def __init__(self, client_id, client_secret):
self.misp_event = MISPEvent()
self.client_id = client_id
self.client_secret = client_secret
self.authToken = f"{self.client_id}:{self.client_secret}"
self.baseurl = 'de.api.labs.sophos.com'
d = {'grant_type': 'client_credentials'}
h = {'Authorization': f"Basic {base64.b64encode(self.authToken.encode('UTF-8')).decode('ascii')}",
'Content-Type': 'application/x-www-form-urlencoded'}
r = requests.post('https://api.labs.sophos.com/oauth2/token', headers=h, data=d)
if r.status_code == 200:
j = json.loads(r.text)
self.accessToken = j['access_token']
def get_result(self):
event = json.loads(self.misp_event.to_json())
results = {key: event[key] for key in ('Attribute', 'Object') if (key in event and event[key])}
return {'results': results}
def hash_lookup(self, filehash):
sophos_object = MISPObject('SOPHOSLabs Intelix SHA256 Report')
h = {"Authorization": f"{self.accessToken}"}
r = requests.get(f"https://{self.baseurl}/lookup/files/v1/{filehash}", headers=h)
if r.status_code == 200:
j = json.loads(r.text)
if 'reputationScore' in j:
sophos_object.add_attribute('Reputation Score', type='text', value=j['reputationScore'])
if 0 <= j['reputationScore'] <= 19:
sophos_object.add_attribute('Decision', type='text', value='This file is malicious')
if 20 <= j['reputationScore'] <= 29:
sophos_object.add_attribute('Decision', type='text', value='This file is potentially unwanted')
if 30 <= j['reputationScore'] <= 69:
sophos_object.add_attribute('Decision', type='text', value='This file is unknown and suspicious')
if 70 <= j['reputationScore'] <= 100:
sophos_object.add_attribute('Decision', type='text', value='This file is known good')
if 'detectionName' in j:
sophos_object.add_attribute('Detection Name', type='text', value=j['detectionName'])
else:
sophos_object.add_attribute('Detection Name', type='text', value='No name associated with this IoC')
self.misp_event.add_object(**sophos_object)
def ip_lookup(self, ip):
sophos_object = MISPObject('SOPHOSLabs Intelix IP Category Lookup')
h = {"Authorization": f"{self.accessToken}"}
r = requests.get(f"https://{self.baseurl}/lookup/ips/v1/{ip}", headers=h)
if r.status_code == 200:
j = json.loads(r.text)
if 'category' in j:
for c in j['category']:
sophos_object.add_attribute('IP Address Categorisation', type='text', value=c)
else:
                sophos_object.add_attribute('IP Address Categorisation', type='text', value='No category associated with IoC')
self.misp_event.add_object(**sophos_object)
def url_lookup(self, url):
sophos_object = MISPObject('SOPHOSLabs Intelix URL Lookup')
h = {"Authorization": f"{self.accessToken}"}
r = requests.get(f"https://{self.baseurl}/lookup/urls/v1/{quote(url, safe='')}", headers=h)
if r.status_code == 200:
j = json.loads(r.text)
if 'productivityCategory' in j:
sophos_object.add_attribute('URL Categorisation', type='text', value=j['productivityCategory'])
else:
            sophos_object.add_attribute('URL Categorisation', type='text', value='No category associated with IoC')
if 'riskLevel' in j:
sophos_object.add_attribute('URL Risk Level', type='text', value=j['riskLevel'])
else:
sophos_object.add_attribute('URL Risk Level', type='text', value='No risk level associated with IoC')
if 'securityCategory' in j:
sophos_object.add_attribute('URL Security Category', type='text', value=j['securityCategory'])
else:
sophos_object.add_attribute('URL Security Category', type='text', value='No Security Category associated with IoC')
self.misp_event.add_object(**sophos_object)
def handler(q=False):
if q is False:
return False
j = json.loads(q)
if not j.get('config') or not j['config'].get('client_id') or not j['config'].get('client_secret'):
misperrors['error'] = "Missing client_id or client_secret value for SOPHOSLabs Intelix. \
It's free to sign up here https://aws.amazon.com/marketplace/pp/B07SLZPMCS."
return misperrors
to_check = (('type', 'value'), ('type', 'value1'))
if not j.get('attribute') or not any(check_input_attribute(j['attribute'], requirements=check) for check in to_check):
return {'error': f'{standard_error_message}, {checking_error}.'}
attribute = j['attribute']
if attribute['type'] not in misp_types_in:
return {'error': 'Unsupported attribute type.'}
client = SophosLabsApi(j['config']['client_id'], j['config']['client_secret'])
mapping = {
'sha256': 'hash_lookup',
'ip-dst': 'ip_lookup',
'ip-src': 'ip_lookup',
'ip': 'ip_lookup',
'uri': 'url_lookup',
'url': 'url_lookup',
'domain': 'url_lookup',
'hostname': 'url_lookup'
}
attribute_value = attribute['value'] if 'value' in attribute else attribute['value1']
getattr(client, mapping[attribute['type']])(attribute_value)
return client.get_result()
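# A rough sketch of the payload handler() expects, reconstructed from the checks
# above (the values below are placeholders, not real credentials):
#
#     handler(json.dumps({
#         "config": {"client_id": "<id>", "client_secret": "<secret>"},
#         "attribute": {"type": "url", "value": "https://example.com/"}
#     }))
#
# The return value is a MISP-standard dict containing the objects added to the
# temporary event.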
def introspection():
return mispattributes
def version():
moduleinfo['config'] = moduleconfig
return moduleinfo
| VirusTotal/misp-modules | misp_modules/modules/expansion/sophoslabs_intelix.py | Python | agpl-3.0 | 6,396 |
import os
import random
import unittest
import requests
import json
import datetime
from app.config import basedir
from app import create_app, db
from app import models
from app import config
class TestCase(unittest.TestCase):
def setUp(self):
self.app = create_app(config.TestingConfig)
self.ctx = self.app.app_context()
self.ctx.push()
db.drop_all()
db.create_all()
self.client = self.app.test_client()
def tearDown(self):
db.session.remove()
db.drop_all()
self.ctx.pop()
def test_thermocouple_api(self):
temp_url = 'http://127.0.0.1:5000/temperature/'
req = self.client.get(temp_url, content_type='application/json')
json_resp = json.loads(req.data.decode('utf-8'))
self.assertEqual(json_resp, []) # Expect a blank array when querying an empty database
thermocouple_temp = 23 + 5*random.random()
cold_junction_temp = 12 + 3*random.random()
timestamp = datetime.datetime.now(datetime.timezone.utc).isoformat()
post_data = json.dumps({'cold_junction_temp': cold_junction_temp, 'thermocouple_temp': thermocouple_temp, 'timestamp': timestamp})
req = self.client.post(temp_url, data = post_data, content_type='application/json')
self.assertEqual(200, req.status_code)
req = self.client.get(temp_url, content_type='application/json')
json_resp = json.loads(req.data.decode('utf-8'))
self.assertEqual(json_resp[0]['id'], 1) # Expect first id to be 1
self.assertEqual(json_resp[0]['thermocouple_temp'], thermocouple_temp) # Expect thermocouple_temp to be the same value that was POST'd
self.assertEqual(json_resp[0]['cold_junction_temp'], cold_junction_temp) # Expect cold_junction_temp to be the same value that was POST'd
self.assertEqual(json_resp[0]['timestamp'], timestamp) # Expect timestamp to be the same value that was POST'd
if __name__ == '__main__':
unittest.main()
| johnrbnsn/Template_FlaskMarshmallowSQLAlchemy | api/tests.py | Python | mit | 2,021 |
import importlib
from importlib import _bootstrap
from .. import abc
from .. import util
from . import util as source_util
import imp
import marshal
import os
import py_compile
import shutil
import stat
import sys
import unittest
from test.support import make_legacy_pyc
class SimpleTest(unittest.TestCase):
"""Should have no issue importing a source module [basic]. And if there is
a syntax error, it should raise a SyntaxError [syntax error].
"""
# [basic]
def test_module(self):
with source_util.create_modules('_temp') as mapping:
loader = _bootstrap._SourceFileLoader('_temp', mapping['_temp'])
module = loader.load_module('_temp')
self.assertTrue('_temp' in sys.modules)
check = {'__name__': '_temp', '__file__': mapping['_temp'],
'__package__': ''}
for attr, value in check.items():
self.assertEqual(getattr(module, attr), value)
def test_package(self):
with source_util.create_modules('_pkg.__init__') as mapping:
loader = _bootstrap._SourceFileLoader('_pkg',
mapping['_pkg.__init__'])
module = loader.load_module('_pkg')
self.assertTrue('_pkg' in sys.modules)
check = {'__name__': '_pkg', '__file__': mapping['_pkg.__init__'],
'__path__': [os.path.dirname(mapping['_pkg.__init__'])],
'__package__': '_pkg'}
for attr, value in check.items():
self.assertEqual(getattr(module, attr), value)
def test_lacking_parent(self):
with source_util.create_modules('_pkg.__init__', '_pkg.mod')as mapping:
loader = _bootstrap._SourceFileLoader('_pkg.mod',
mapping['_pkg.mod'])
module = loader.load_module('_pkg.mod')
self.assertTrue('_pkg.mod' in sys.modules)
check = {'__name__': '_pkg.mod', '__file__': mapping['_pkg.mod'],
'__package__': '_pkg'}
for attr, value in check.items():
self.assertEqual(getattr(module, attr), value)
def fake_mtime(self, fxn):
"""Fake mtime to always be higher than expected."""
return lambda name: fxn(name) + 1
def test_module_reuse(self):
with source_util.create_modules('_temp') as mapping:
loader = _bootstrap._SourceFileLoader('_temp', mapping['_temp'])
module = loader.load_module('_temp')
module_id = id(module)
module_dict_id = id(module.__dict__)
with open(mapping['_temp'], 'w') as file:
file.write("testing_var = 42\n")
# For filesystems where the mtime is only to a second granularity,
# everything that has happened above can be too fast;
# force an mtime on the source that is guaranteed to be different
# than the original mtime.
loader.path_mtime = self.fake_mtime(loader.path_mtime)
module = loader.load_module('_temp')
self.assertTrue('testing_var' in module.__dict__,
"'testing_var' not in "
"{0}".format(list(module.__dict__.keys())))
self.assertEqual(module, sys.modules['_temp'])
self.assertEqual(id(module), module_id)
self.assertEqual(id(module.__dict__), module_dict_id)
def test_state_after_failure(self):
# A failed reload should leave the original module intact.
attributes = ('__file__', '__path__', '__package__')
value = '<test>'
name = '_temp'
with source_util.create_modules(name) as mapping:
orig_module = imp.new_module(name)
for attr in attributes:
setattr(orig_module, attr, value)
with open(mapping[name], 'w') as file:
file.write('+++ bad syntax +++')
loader = _bootstrap._SourceFileLoader('_temp', mapping['_temp'])
with self.assertRaises(SyntaxError):
loader.load_module(name)
for attr in attributes:
self.assertEqual(getattr(orig_module, attr), value)
# [syntax error]
def test_bad_syntax(self):
with source_util.create_modules('_temp') as mapping:
with open(mapping['_temp'], 'w') as file:
file.write('=')
loader = _bootstrap._SourceFileLoader('_temp', mapping['_temp'])
with self.assertRaises(SyntaxError):
loader.load_module('_temp')
self.assertTrue('_temp' not in sys.modules)
def test_file_from_empty_string_dir(self):
# Loading a module found from an empty string entry on sys.path should
# not only work, but keep all attributes relative.
file_path = '_temp.py'
with open(file_path, 'w') as file:
file.write("# test file for importlib")
try:
with util.uncache('_temp'):
loader = _bootstrap._SourceFileLoader('_temp', file_path)
mod = loader.load_module('_temp')
self.assertEqual(file_path, mod.__file__)
self.assertEqual(imp.cache_from_source(file_path),
mod.__cached__)
finally:
os.unlink(file_path)
pycache = os.path.dirname(imp.cache_from_source(file_path))
shutil.rmtree(pycache)
class BadBytecodeTest(unittest.TestCase):
def import_(self, file, module_name):
loader = self.loader(module_name, file)
module = loader.load_module(module_name)
self.assertTrue(module_name in sys.modules)
def manipulate_bytecode(self, name, mapping, manipulator, *,
del_source=False):
"""Manipulate the bytecode of a module by passing it into a callable
that returns what to use as the new bytecode."""
try:
del sys.modules['_temp']
except KeyError:
pass
py_compile.compile(mapping[name])
if not del_source:
bytecode_path = imp.cache_from_source(mapping[name])
else:
os.unlink(mapping[name])
bytecode_path = make_legacy_pyc(mapping[name])
if manipulator:
with open(bytecode_path, 'rb') as file:
bc = file.read()
new_bc = manipulator(bc)
with open(bytecode_path, 'wb') as file:
if new_bc is not None:
file.write(new_bc)
return bytecode_path
def _test_empty_file(self, test, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: b'',
del_source=del_source)
test('_temp', mapping, bc_path)
@source_util.writes_bytecode_files
def _test_partial_magic(self, test, *, del_source=False):
        # When there are fewer than 4 bytes to a .pyc, regenerate it if
# possible, else raise ImportError.
with source_util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:3],
del_source=del_source)
test('_temp', mapping, bc_path)
def _test_magic_only(self, test, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:4],
del_source=del_source)
test('_temp', mapping, bc_path)
def _test_partial_timestamp(self, test, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:7],
del_source=del_source)
test('_temp', mapping, bc_path)
def _test_no_marshal(self, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:8],
del_source=del_source)
file_path = mapping['_temp'] if not del_source else bc_path
with self.assertRaises(EOFError):
self.import_(file_path, '_temp')
def _test_non_code_marshal(self, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bytecode_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:8] + marshal.dumps(b'abcd'),
del_source=del_source)
file_path = mapping['_temp'] if not del_source else bytecode_path
with self.assertRaises(ImportError):
self.import_(file_path, '_temp')
def _test_bad_marshal(self, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bytecode_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:8] + b'<test>',
del_source=del_source)
file_path = mapping['_temp'] if not del_source else bytecode_path
with self.assertRaises(EOFError):
self.import_(file_path, '_temp')
def _test_bad_magic(self, test, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: b'\x00\x00\x00\x00' + bc[4:])
test('_temp', mapping, bc_path)
class SourceLoaderBadBytecodeTest(BadBytecodeTest):
loader = _bootstrap._SourceFileLoader
@source_util.writes_bytecode_files
def test_empty_file(self):
# When a .pyc is empty, regenerate it if possible, else raise
# ImportError.
def test(name, mapping, bytecode_path):
self.import_(mapping[name], name)
with open(bytecode_path, 'rb') as file:
self.assertGreater(len(file.read()), 8)
self._test_empty_file(test)
def test_partial_magic(self):
def test(name, mapping, bytecode_path):
self.import_(mapping[name], name)
with open(bytecode_path, 'rb') as file:
self.assertGreater(len(file.read()), 8)
self._test_partial_magic(test)
@source_util.writes_bytecode_files
def test_magic_only(self):
# When there is only the magic number, regenerate the .pyc if possible,
# else raise EOFError.
def test(name, mapping, bytecode_path):
self.import_(mapping[name], name)
with open(bytecode_path, 'rb') as file:
                self.assertGreater(len(file.read()), 8)
        self._test_magic_only(test)
@source_util.writes_bytecode_files
def test_bad_magic(self):
# When the magic number is different, the bytecode should be
# regenerated.
def test(name, mapping, bytecode_path):
self.import_(mapping[name], name)
with open(bytecode_path, 'rb') as bytecode_file:
self.assertEqual(bytecode_file.read(4), imp.get_magic())
self._test_bad_magic(test)
@source_util.writes_bytecode_files
def test_partial_timestamp(self):
# When the timestamp is partial, regenerate the .pyc, else
# raise EOFError.
def test(name, mapping, bc_path):
self.import_(mapping[name], name)
with open(bc_path, 'rb') as file:
                self.assertGreater(len(file.read()), 8)
        self._test_partial_timestamp(test)
@source_util.writes_bytecode_files
def test_no_marshal(self):
# When there is only the magic number and timestamp, raise EOFError.
self._test_no_marshal()
@source_util.writes_bytecode_files
def test_non_code_marshal(self):
self._test_non_code_marshal()
# XXX ImportError when sourceless
# [bad marshal]
@source_util.writes_bytecode_files
def test_bad_marshal(self):
        # Bad marshal data should raise an EOFError (see _test_bad_marshal above).
self._test_bad_marshal()
# [bad timestamp]
@source_util.writes_bytecode_files
def test_old_timestamp(self):
# When the timestamp is older than the source, bytecode should be
# regenerated.
zeros = b'\x00\x00\x00\x00'
with source_util.create_modules('_temp') as mapping:
py_compile.compile(mapping['_temp'])
bytecode_path = imp.cache_from_source(mapping['_temp'])
with open(bytecode_path, 'r+b') as bytecode_file:
bytecode_file.seek(4)
bytecode_file.write(zeros)
self.import_(mapping['_temp'], '_temp')
source_mtime = os.path.getmtime(mapping['_temp'])
source_timestamp = importlib._w_long(source_mtime)
with open(bytecode_path, 'rb') as bytecode_file:
bytecode_file.seek(4)
self.assertEqual(bytecode_file.read(4), source_timestamp)
# [bytecode read-only]
@source_util.writes_bytecode_files
def test_read_only_bytecode(self):
# When bytecode is read-only but should be rewritten, fail silently.
with source_util.create_modules('_temp') as mapping:
# Create bytecode that will need to be re-created.
py_compile.compile(mapping['_temp'])
bytecode_path = imp.cache_from_source(mapping['_temp'])
with open(bytecode_path, 'r+b') as bytecode_file:
bytecode_file.seek(0)
bytecode_file.write(b'\x00\x00\x00\x00')
# Make the bytecode read-only.
os.chmod(bytecode_path,
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
try:
# Should not raise IOError!
self.import_(mapping['_temp'], '_temp')
finally:
# Make writable for eventual clean-up.
os.chmod(bytecode_path, stat.S_IWUSR)
class SourcelessLoaderBadBytecodeTest(BadBytecodeTest):
loader = _bootstrap._SourcelessFileLoader
def test_empty_file(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(ImportError):
self.import_(bytecode_path, name)
self._test_empty_file(test, del_source=True)
def test_partial_magic(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(ImportError):
self.import_(bytecode_path, name)
self._test_partial_magic(test, del_source=True)
def test_magic_only(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(EOFError):
self.import_(bytecode_path, name)
self._test_magic_only(test, del_source=True)
def test_bad_magic(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(ImportError):
self.import_(bytecode_path, name)
self._test_bad_magic(test, del_source=True)
def test_partial_timestamp(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(EOFError):
self.import_(bytecode_path, name)
self._test_partial_timestamp(test, del_source=True)
def test_no_marshal(self):
self._test_no_marshal(del_source=True)
def test_non_code_marshal(self):
self._test_non_code_marshal(del_source=True)
def test_main():
from test.support import run_unittest
run_unittest(SimpleTest,
SourceLoaderBadBytecodeTest,
SourcelessLoaderBadBytecodeTest
)
if __name__ == '__main__':
test_main()
| invisiblek/python-for-android | python3-alpha/python3-src/Lib/importlib/test/source/test_file_loader.py | Python | apache-2.0 | 16,111 |
import urllib.request
import glob
def download_from_url(url, block_size=8192, location=""):
    # NOTE: `location` is currently unused; downloads are always saved under files/.
    file_name = "files/" + url.split('/')[-1]
u = urllib.request.urlopen(url)
file_stream = open(file_name, 'wb')
print(u.info()._headers)
while True:
buffer = u.read(block_size)
if not buffer:
break
file_stream.write(buffer)
file_stream.close()
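# Minimal usage sketch for the helper above (the URL is only a placeholder):
#
#     download_from_url("https://example.com/files/report.pdf")
#
# fetches the file in 8 KiB blocks and writes it to files/report.pdf.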
def is_image(file):
return has_extension(file, ('jpg', 'jpeg', 'png', 'gif'))
def is_doc(file):
return has_extension(file, ('txt', 'doc', 'docx', 'rtf', 'md'))
def is_archive(file):
extensions = ('zip', 'rar', 'iso', 'jar', 'tar',
'gz', '7z', 'bz2', 'wim', 'xz')
return has_extension(file, extensions)
def is_pdf(file):
    # ('pdf',) needs the trailing comma: ('pdf') is just a string, and
    # "x in 'pdf'" would match single letters like 'p' or 'd' instead of the
    # full extension.
    return has_extension(file, ('pdf',))
def get_thumbnail(file):
if is_doc(file):
return '/images/default_doc.png'
if is_pdf(file):
return '/images/pdf.png'
if is_archive(file):
return '/images/zipped.png'
return '/images/default.png'
def has_extension(file, extensions):
return file.split('.')[-1] in extensions
def get_files():
all_files = glob.glob("files/*.*")
all_files = [txt.replace("\\", "/") for txt in all_files]
all_files = [txt.split('/')[-1] for txt in all_files]
result = []
for file in all_files:
if is_image(file):
result.append({
'url': '/download/' + file,
'is_image': "true"
})
else:
result.append({
'url': '/download/' + file,
'thumbnail_url': get_thumbnail(file),
'is_image': "false", 'name': file
})
return(result)
| gharizanov92/Organizer | utils/io.py | Python | mit | 1,720 |
"""
This is the Advanced Generic Widgets package (AGW). It provides many
custom-drawn wxPython controls: some of them can be used as a replacement
of the platform native controls, others are simply an addition to the
already rich wxPython widgets set.
Description:
AGW contains many different modules, listed below. Items labelled with
an asterisk were already present in `wx.lib` before:
- AdvancedSplash: reproduces the behaviour of `wx.SplashScreen`, with more
advanced features like custom shapes and text animations;
- AquaButton: this is another custom-drawn button class which
  *approximately* mimics the behaviour of Aqua buttons on the Mac;
- AUI: a pure-Python implementation of `wx.aui`, with many bug fixes and
new features like HUD docking and L{AuiNotebook} tab arts;
- BalloonTip: allows you to display tooltips in a balloon style window
(actually a frame), similarly to the Windows XP balloon help;
- ButtonPanel (*): a panel with gradient background shading with the
possibility to add buttons and controls still respecting the gradient
background;
- CubeColourDialog: an alternative implementation of `wx.ColourDialog`, it
offers different functionalities like colour wheel and RGB cube;
- CustomTreeCtrl (*): mimics the behaviour of `wx.TreeCtrl`, with almost the
same base functionalities plus a bunch of enhancements and goodies;
- FlatMenu: as the name implies, it is a generic menu implementation,
offering the same `wx.MenuBar`/`wx.Menu`/`wx.ToolBar` capabilities and much more;
- FlatNotebook (*): a full implementation of the `wx.Notebook`, and designed
to be a drop-in replacement for `wx.Notebook` with enhanced capabilities;
- FloatSpin: this class implements a floating point spinctrl, capable (in
theory) of handling infinite-precision floating point numbers;
- FoldPanelBar (*): a control that contains multiple panels that can be
expanded or collapsed a la Windows Explorer/Outlook command bars;
- FourWaySplitter: this is a layout manager which manages four children like
four panes in a window, similar to many CAD software interfaces;
- GenericMessageDialog: it is a possible replacement for the standard
`wx.MessageDialog`, with a fancier look and extended functionalities;
- GradientButton: another custom-drawn button class which mimics Windows CE
mobile gradient buttons, using a tri-vertex blended gradient background;
- HyperLinkCtrl (*): this widget acts like a hyperlink in a typical browser;
- HyperTreeList: a class that mimics the behaviour of `wx.gizmos.TreeListCtrl`,
with almost the same base functionalities plus some more enhancements;
- KnobCtrl: a widget which lets the user select a numerical value by
rotating it, like a slider with a wheel shape;
- LabelBook and FlatImageBook: these are a quasi-full implementations of
`wx.ListBook`, with additional features;
- MultiDirDialog: it represents a possible replacement for `wx.DirDialog`,
with the additional ability of selecting multiple folders at once and a
fancier look;
- PeakMeter: this widget mimics the behaviour of LED equalizers that are
usually found in stereos and MP3 players;
- PersistentControls: widgets which automatically save their state
when they are destroyed and restore it when they are recreated, even during
another program invocation;
- PieCtrl and ProgressPie: these are simple classes that reproduce the
behavior of a pie chart, in a static or progress-gauge-like way;
- PyBusyInfo: constructs a busy info window and displays a message in it:
it is similar to `wx.BusyInfo`;
- PyCollapsiblePane: a pure Python implementation of the original wxWidgets
C++ code of `wx.CollapsiblePane`, with customizable buttons;
- PyGauge: a generic `wx.Gauge` implementation, it supports the same determinate
  mode functions as `wx.Gauge`;
- PyProgress: it is similar to `wx.ProgressDialog` in indeterminate mode, but
  with a different gauge appearance and a different spinning behavior;
- RibbonBar: the RibbonBar library is a set of classes for writing a ribbon
user interface, similar to the user interface present in recent versions
of Microsoft Office;
- RulerCtrl: it implements a ruler window that can be placed on top, bottom,
left or right to any wxPython widget. It is somewhat similar to the rulers
you can find in text editors software;
- ShapedButton: this class tries to fill the lack of "custom shaped" controls
in wxPython. It can be used to build round buttons or elliptic buttons;
- SpeedMeter: this widget tries to reproduce the behavior of some car
controls (but not only), by creating an "angular" control;
- SuperToolTip: a class that mimics the behaviour of `wx.TipWindow` and
generic tooltips, with many features and highly customizable;
- ThumbnailCtrl: a widget that can be used to display a series of images
in a "thumbnail" format; it mimics, for example, the Windows Explorer
behavior when you select the "view thumbnails" option;
- ToasterBox: a cross-platform widget to make the creation of MSN-style
"toaster" popups easier;
- UltimateListCtrl: mimics the behaviour of `wx.ListCtrl`, with almost the same
base functionalities plus some more enhancements;
- ZoomBar: a class that *approximately* mimics the behaviour of the Mac Dock,
inside a `wx.Panel`.
Bugs and Limitations: many, patches and fixes welcome :-D
See the demos for an example of what AGW can do, and on how to use it.
Copyright: Andrea Gavana
License: Same as the version of wxPython you are using it with.
SVN for latest code:
http://svn.wxwidgets.org/viewvc/wx/wxPython/3rdParty/AGW/
Mailing List:
[email protected]
My personal web page:
http://xoomer.alice.it/infinity77
Please let me know if you are using AGW!
You can contact me at:
[email protected]
[email protected]
AGW version: 0.9.1
Last updated: 10 Mar 2011, 15.00 GMT
"""
__version__ = "0.9.1"
__author__ = "Andrea Gavana <[email protected]>"
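# A minimal usage sketch, kept as a comment so that importing this package stays
# side-effect free. Module and class names follow the list above; the exact
# constructor arguments are taken from the wx 2.8-era AGW demos and may differ
# in other releases:
#
#     import wx
#     import wx.lib.agw.gradientbutton as GB
#
#     app = wx.App(False)
#     frame = wx.Frame(None, title="AGW example")
#     button = GB.GradientButton(frame, label="A gradient button")
#     frame.Show()
#     app.MainLoop()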
| beiko-lab/gengis | bin/Lib/site-packages/wx-2.8-msw-unicode/wx/lib/agw/__init__.py | Python | gpl-3.0 | 5,945 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# saltytools.py
#
# Copyright 2015 gkmcd <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#####################
# Imports - Python
#####################
import json
import re
import unicodedata
import urllib
#####################
# Imports - 3rd Party
#####################
from bs4 import BeautifulSoup
import dateutil
import fuzzywuzzy.process
import requests
#####################
# Imports - Project
#####################
from saltyerrors import UnknownNameError
#####################
# Functions
#####################
# http://stackoverflow.com/a/29247821/4539462
def normalise_caseless(text):
return unicodedata.normalize("NFKD", text.casefold())
def caseless_equal(left, right):
return normalise_caseless(left) == normalise_caseless(right)
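# A quick illustration of the two helpers above (standard casefold() behaviour,
# shown as comments so importing this module has no side effects):
#
#     caseless_equal("Sydney", "SYDNEY")   # True
#     caseless_equal("Straße", "STRASSE")  # True: casefold() maps ß to ss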
def process_team(raw_team_name, json_file='teams.json'):
"""Takes a raw name (venue or team) and returns a normalised version of
that name to be used throughout SaltyStats.
"""
return _process_name(raw_team_name, json_file)
def process_venue(raw_venue_name, json_file='venues.json'):
"""
"""
return _process_name(raw_venue_name, json_file)
def _process_name(raw_name, json_file):
'''Takes a raw name (venue or team) and returns a normalised version of
that name to be used throughout SaltyStats.
Called by process_venue & process_team. Can be called directly on any
correctly structured json file.
Args:
raw_name: The name (venue or team), as extracted from the BeautifulSoup
HTML match data.
Raises
        UnknownNameError: unknown name (or variation thereof).
'''
# open JSON data file
with open(json_file) as json_file:
# parse JSON
json_data = json.load(json_file)
# prep temp datastore
results = {}
# venues as set out in JSON datafile
for name in json_data:
# create list of names for this venue including the cannonical name
all_names = name['variations']
all_names.append(name['cannonical'])
# find best match of raw venue name against this venues names
result = fuzzywuzzy.process.extractOne(raw_name, all_names)
# add best match to list of possible matches
# cannonical name comes along as dictionary value, to be returned
results[result[0]] = name['cannonical']
# from the best matches for each venue, choose the best overall
best_result = fuzzywuzzy.process.extractOne(raw_name, results.keys())
# if the best match is not strong, bail out
if best_result[1] < 75:
raise UnknownNameError(
'Unknown name or variation thereof: {}'.format(raw_name))
# and we're done
return results[best_result[0]]
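# For reference, _process_name() expects each JSON entry to look roughly like
# the sketch below (keys taken from the lookups above; the names themselves are
# made up):
#
#     [
#         {
#             "cannonical": "Example City Firsts",
#             "variations": ["Example City", "The Firsts"]
#         }
#     ]
#
# Note that the data files spell the key "cannonical" (sic), so the JSON must
# use that spelling.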
def datasource_kw(urlobj):
'''
Takes a datasource URL and returns the keyword corrosponding to that
datasource (if supported).
- www.afltables.com: 'afltables'
- www.rugbyleagueproject.org: 'rlproject'
- matchcentre.nrl.com: 'nrlstats'
Args:
urlobj: a 6-tuple URL as generated by urllib.parse()
Raises:
UnknownDatasourceError
'''
if 'afltables' in urlobj.netloc:
datasource = 'afltables'
elif 'rugbyleagueproject' in urlobj.netloc:
datasource = 'rlproject'
elif 'matchcentre' in urlobj.netloc:
datasource = 'nrl'
else:
datasource = None
return datasource
def make_soup(urlobj, user_agent):
'''
Returns a BeautifulSoup object from a given URL.
Args:
urlobj: a 6-tuple URL as generated by urllib.parse()
'''
# FIXME: add defaults for user-agent and proxies
headers = {'User-Agent': user_agent}
proxies = {"http": "http://127.0.0.1:8123"}
response = requests.get(urlobj.geturl(),
headers=headers,
proxies=proxies,
timeout=60)
response.raise_for_status()
htmldata = response.text
soup = BeautifulSoup(htmldata, 'lxml')
return soup
def strip_non_numeric(s):
'''
Takes a string and removes all non-numeric characters
(ie: letters, spaces & punctuation). Returns a string.
Args:
s: string to have non-numeric characters removed.
'''
result = re.sub('[^0-9]', '', s)
return result
def zero_bs4(tag):
"""Internal utility function. Handles situations where we would prefer
BeautifulSoup4's get_text() method would, in the case of an empty tag,
return 0 instead of '' or '\xa0'.
Useful for handling scores.
"""
text = tag.get_text(strip=True)
if text:
return text
else:
return 0
def player_json_data(player_json_file):
"""Load and return player data from local json datafile
Args:
player_json_file: path to json data file
"""
with open(player_json_file) as data_file:
player_data = json.load(data_file)
return player_data
def find_player_uuid(name, team, year, player_json_file):
"""Searches the player json data to find the UUID of the player matching
supplied name, team & year.
Args:
name: player name to match
team: team of which player was a member in year
year: year in which player was in team
player_json_file: path to json data file
"""
uuid = None
player_data = player_json_data(player_json_file)
    for player in player_data:
        if caseless_equal(player['name'], name):
            # Entries are plain dicts, so attribute access (player.career) would
            # raise AttributeError. The 'uuid' key is an assumption based on this
            # function's purpose; team/year are not yet used to disambiguate.
            uuid = player.get('uuid')
return uuid
def lookup_dob(player, team, date, datasource_kw, user_agent):
"""Finds a players DOB. This is probably going to be expensive in terms
of requests.
Args
player: string, player's name to lookup
team: string, name of team player players for at the time of
[date]
date: date object, date of match when player played for
[team]
datasource_kw: string, specify which datasource to lookup players DOB.
currently ignored, uses RLProject only
"""
# make a urllib url object for make_soup() based on the supplied date year
summary_url = urllib.parse.urlparse(
'http://www.rugbyleagueproject.org/seasons/nrl-{}/players.html'.format(
date.year))
summary_soup = make_soup(summary_url, user_agent)
# find all links to individual player detail pages
player_links = summary_soup.find_all(href=re.compile('/players/'))
# create list of all player names
choices = [name.get_text() for name in player_links]
# get our best 3 matches - if we have more than 3 guys with the same name,
# I give up ;)
results = fuzzywuzzy.process.extract(player, choices, limit=3)
print('fuzzy results: ', results)
# process matching names
    # default DOB (also covers the case where there are no fuzzy matches at all)
    dob = None
    for candidate in results:
        # split tuple for readability
        potential_name = candidate[0]
        ratio = candidate[1]
print('looping on {} w/ratio {}'.format(potential_name, ratio))
# bail out before making any more requests if it's not a good match
# if this is the first iteration, DOB will remain as None
if ratio < 85:
print('breaking on {} w/ratio {}'.format(potential_name, ratio))
break
# find the link for the this matched name
details_link = summary_soup.find('a', string=potential_name)
# load link & parse
player_url = urllib.parse.urlparse('http://www.rugbyleagueproject.org'
+ details_link['href'])
detail_soup = make_soup(player_url, user_agent)
# find table row for [year]
year_tr = detail_soup.find('td', string='NRL ' + str(date.year)).parent
# check this player played for [team] in [year]
print('checking {} and {}'.format(team, year_tr.a.get_text()))
if process_team(team) == process_team(year_tr.a.get_text()):
# we have our guy; grab DOB, parse to date and break
dob = dateutil.parser.parse(detail_soup.dd.get_text()).date()
break
return dob
def main(args):
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
| gkmcd/salty-stats | saltytools.py | Python | gpl-2.0 | 9,133 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('calendars', '0020_auto_20151104_1112'),
]
operations = [
migrations.RemoveField(
model_name='event',
name='category',
),
migrations.AddField(
model_name='event',
name='category',
field=models.ManyToManyField(null=True, to='calendars.Category', blank=True, verbose_name='Kategoria'),
),
]
| Fisiu/calendar-oswiecim | webapp/calendars/migrations/0021_auto_20151121_1625.py | Python | agpl-3.0 | 572 |
# -*- encoding: utf8 -*-
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2017 Marek Marczykowski-Górecki
# <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
import argparse
import json
import os
import sys
import qubespolicy
parser = argparse.ArgumentParser(description='Graph qrexec policy')
parser.add_argument('--include-ask', action='store_true',
help='Include `ask` action in graph')
parser.add_argument('--source', action='store', nargs='+',
help='Limit graph to calls from *source*')
parser.add_argument('--target', action='store', nargs='+',
help='Limit graph to calls to *target*')
parser.add_argument('--service', action='store', nargs='+',
help='Limit graph to *service*')
parser.add_argument('--output', action='store',
help='Write to *output* instead of stdout')
parser.add_argument('--policy-dir', action='store',
default=qubespolicy.POLICY_DIR,
help='Look for policy in *policy-dir*')
parser.add_argument('--system-info', action='store',
help='Load system information from file instead of querying qubesd')
parser.add_argument('--skip-labels', action='store_true',
help='Do not include service names on the graph, also deduplicate '
'connections.')
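# Example invocation (assuming the qubespolicy package is importable; the VM and
# service names below are placeholders):
#
#     python3 -m qubespolicy.graph --source work --service qubes.FileCopy \
#         --output policy.dot
#
# The output is a Graphviz digraph, renderable with e.g.
# `dot -Tpng policy.dot -o policy.png`.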
def handle_single_action(args, action):
'''Get single policy action and output (or not) a line to add'''
if args.skip_labels:
service = ''
else:
service = action.service
target = action.target or action.original_target
# handle forced target=
if action.rule.override_target:
target = action.rule.override_target
if args.target and target not in args.target:
return ''
if action.action == qubespolicy.Action.ask:
if args.include_ask:
return ' "{}" -> "{}" [label="{}" color=orange];\n'.format(
action.source, target, service)
elif action.action == qubespolicy.Action.allow:
return ' "{}" -> "{}" [label="{}" color=red];\n'.format(
action.source, target, service)
return ''
def main(args=None):
args = parser.parse_args(args)
output = sys.stdout
if args.output:
output = open(args.output, 'w')
if args.system_info:
with open(args.system_info) as f_system_info:
system_info = json.load(f_system_info)
else:
system_info = qubespolicy.get_system_info()
sources = list(system_info['domains'].keys())
if args.source:
sources = args.source
targets = list(system_info['domains'].keys())
targets.append('$dispvm')
targets.extend('$dispvm:' + dom for dom in system_info['domains']
if system_info['domains'][dom]['template_for_dispvms'])
connections = set()
output.write('digraph g {\n')
for service in os.listdir(args.policy_dir):
if os.path.isdir(os.path.join(args.policy_dir, service)):
continue
if args.service and service not in args.service and \
not any(service.startswith(srv + '+') for srv in args.service):
continue
policy = qubespolicy.Policy(service, args.policy_dir)
for source in sources:
for target in targets:
try:
action = policy.evaluate(system_info, source, target)
line = handle_single_action(args, action)
if line in connections:
continue
if line:
output.write(line)
connections.add(line)
except qubespolicy.AccessDenied:
continue
output.write('}\n')
if args.output:
output.close()
if __name__ == '__main__':
sys.exit(main())
| woju/qubes-core-admin | qubespolicy/graph.py | Python | lgpl-2.1 | 4,395 |
import sys
import random
def main():
rows = int(sys.argv[1]) if len(sys.argv) > 1 else 10
cols = int(sys.argv[2]) if len(sys.argv) > 2 else 10
queries = int(sys.argv[3]) if len(sys.argv) > 3 else 0
    mode = int(sys.argv[4]) if len(sys.argv) > 4 else 0 # how strongly to favour repeating the previous cell value
print(rows, cols)
map = []
prev = random.randint(0,1)
for i in range(rows):
map.append([])
for j in range(cols):
next = random.randint(0,1)
for k in range(mode):
if next != prev:
next = random.randint(0,1)
else:
break
map[i].append(next)
print(map[i][j], end="")
prev = next
print()
print(queries)
for i in range(queries):
r1 = random.randint(0, rows - 1)
c1 = random.randint(0, cols - 1)
        while True:
r2 = random.randint(0,rows - 1)
c2 = random.randint(0,cols - 1)
if map[r1][c1] == map[r2][c2]:
break
print(r1+1,c1+1,r2+1,c2+1)
if __name__ == '__main__':
main()
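# Example invocation (arguments are rows, cols, queries and mode, all optional):
#
#     python make_test.py 20 30 5 2
#
# prints "20 30", a 20x30 grid of 0s and 1s, then "5" and five queries whose two
# endpoints are guaranteed to have the same cell value.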
| rvrheenen/OpenKattis | Java/10kindsofpeople/make_test.py | Python | mit | 1,131 |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Action to send a welcome e-mail."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from loaner.web_app.backend.actions import base_action
from loaner.web_app.backend.lib import send_email
class SendWelcome(base_action.BaseAction):
"""Action class to send a welcome e-mail to a device assignee."""
ACTION_NAME = 'send_welcome'
FRIENDLY_NAME = 'Send welcome'
ACTION_TYPE = base_action.ActionType.ASYNC
def run(self, device=None):
"""Sends an e-mail to a new device assignee."""
if not device:
raise base_action.MissingDeviceError(
'Cannot send mail. Task did not receive a device.')
send_email.send_user_email(device, 'reminder_welcome')
| google/loaner | loaner/web_app/backend/actions/send_welcome.py | Python | apache-2.0 | 1,342 |
# Authors: Jonas Thiem
import re
from gi.repository import Gtk, GObject, Pango
from pychess.System import uistuff
from pychess.System.glock import glock_connect
from pychess.System.Log import log
from pychess.System.prefix import addDataPrefix
from pychess.Utils.const import ARTIFICIAL
__title__ = _("Engines")
__icon__ = addDataPrefix("glade/panel_engineoutput.svg")
white = addDataPrefix("glade/panel_engineoutput.svg")
__desc__ = _("The engine output panel shows the thinking output of chess engines (computer players) during a game")
class Sidepanel:
def load (self, gmwidg):
# Specify whether the panel should have a horizontal layout:
horizontal = True
if horizontal:
self.box = Gtk.HBox()
else:
self.box = Gtk.VBox()
__widget__ = self.box
# Use two engine output widgets for each player color:
self.output_white = EngineOutput(True)
self.output_black = EngineOutput(False)
if horizontal:
self.output_separator = Gtk.VSeparator()
else:
self.output_separator = Gtk.HSeparator()
self.output_noengines = Gtk.TextView()
self.output_noengines.get_buffer().set_text(
_("No chess engines (computer players) are participating in this game."))
self.output_noengines.set_editable(False)
self.output_noengines.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)
__widget__.pack_start(self.output_noengines, True, True, 0)
__widget__.show_all()
self.boardview = gmwidg.board.view
glock_connect(self.boardview.model, "game_changed", self.game_changed)
glock_connect(self.boardview.model, "players_changed", self.players_changed)
glock_connect(self.boardview.model, "game_started", self.game_changed)
return __widget__
def updateVisibleOutputs (self, model):
# Check which players participate and update which views are visible
gotplayers = False
gotEnginePlayers = False
gotWhiteEngine = False
gotBlackEngine = False
if len(model.players) > 0:
if model.players[0].__type__ == ARTIFICIAL:
gotWhiteEngine = True
self.output_white.attachEngine(model.players[0].engine)
if model.players[1].__type__ == ARTIFICIAL:
gotBlackEngine = True
self.output_black.attachEngine(model.players[1].engine)
# First, detach from old engines:
if not gotBlackEngine:
self.output_black.detachEngine()
if not gotWhiteEngine:
self.output_white.detachEngine()
if gotBlackEngine or gotWhiteEngine:
# Remove "no engines" label:
if self.output_noengines in self.box.get_children():
self.box.remove(self.output_noengines)
# Add white engine info if white engine is participating:
if gotWhiteEngine:
if not self.output_white in self.box.get_children():
# Remove black output and separator first
# to ensure proper ordering:
if self.output_black in self.box.get_children():
self.box.remove(self.output_black)
self.box.remove(self.output_separator)
self.box.pack_start(self.output_white, True, True, 0)
self.output_white.clear()
self.output_white.show_all()
self.output_white.setTitle(model.players[0].name)
else:
if self.output_white in self.box.get_children():
self.box.remove(self.output_white)
self.box.remove(self.output_separator)
            # Add black engine info if black engine is participating:
if gotBlackEngine:
if not self.output_black in self.box.get_children():
if gotWhiteEngine:
self.box.pack_start(self.output_separator, False, True, 0)
self.output_separator.show()
self.box.pack_start(self.output_black, True, True, 0)
self.output_black.clear()
self.output_black.show_all()
self.output_black.setTitle(model.players[1].name)
else:
if self.output_black in self.box.get_children():
self.box.remove(self.output_black)
self.box.remove(self.output_separator)
else:
# Show "no engines" label
if self.output_white in self.box.get_children():
self.box.remove(self.output_white)
if self.output_black in self.box.get_children():
self.box.remove(self.output_black)
if not self.output_noengines in self.box.get_children():
self.box.pack_start(self.output_noengines, True, True, 0)
return
def players_changed (self, model):
log.debug("engineOutputPanel.players_changed: starting")
self.updateVisibleOutputs(model)
log.debug("engineOutputPanel.players_changed: returning")
return
def game_started (self, model):
self.updateVisibleOutputs(model)
return
def game_changed (self, model):
self.updateVisibleOutputs(model)
return
class EngineOutput (Gtk.VBox):
def __init__(self, white=True):
GObject.GObject.__init__(self)
self.attached_engine = None # engine attached to which we listen
self.white = white
self.clear_on_output = False # next thinking line belongs to new move
# Title bar:
self.title_label = Gtk.Label()
self.title_color = Gtk.Image()
self.title_hbox = Gtk.HBox()
#self.title_hbox.pack_start(self.title_color, False)
self.title_hbox.pack_start(self.title_color, False, False, 0)
#self.title_hbox.pack_start(self.title_label, True, True)
self.title_hbox.pack_start(self.title_label, True, True, 0)
# Set black or white player icon in front:
if white == True:
self.title_color.set_from_file(addDataPrefix("glade/white.png"))
else:
self.title_color.set_from_file(addDataPrefix("glade/black.png"))
# output scrolled window container:
self.output_container = Gtk.ScrolledWindow()
self.output_container.set_policy(Gtk.PolicyType.AUTOMATIC,
Gtk.PolicyType.AUTOMATIC)
# Allow the user to make the output pretty tiny vertically
# (to save space, only the last output line is really important)
self.output_container.set_size_request(-1, 40)
# scroll down on new output: -- not reliable with multilines added
#uistuff.keepDown(self.output_container)
# scroll down on new output: -- brute force variant
def changed (vadjust):
vadjust.set_value(vadjust.get_upper()-vadjust.get_page_size())
self.output_container.get_vadjustment().connect("changed", changed)
# Text field for output:
self.output = Gtk.TextView()
self.output_container.add(self.output)
self.output.set_editable(False)
self.output.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)
self.tag_bold = self.output.get_buffer().create_tag("bold", weight=Pango.Weight.BOLD)
self.tag_color = self.output.get_buffer().create_tag("color", foreground="#0033ff")
# Add all sub widgets to ourselves:
#self.pack_start(self.title_hbox, False)
#self.pack_start(self.output_container, True)
self.pack_start(self.title_hbox, False, False, 0)
self.pack_start(self.output_container, True, False, 0)
# Precompile regexes we want to use:
self.re_thinking_line_cecp = re.compile( r'^[0-9]+\.? +\-?[0-9]+ +' )
self.re_thinking_line_uci = re.compile( r'^info (.*) pv [a-hA-H][0-9][a-hA-H][0-9](.*)$' )
self.re_move_line_cecp_alg = re.compile( r'^(move +)?[a-hA-H][0-9][a-hA-H][0-9]$' )
self.re_move_line_cecp_san = re.compile( r'^(move +)?([QKNB]?[a-hA-H]?[xX@]?[a-hA-H][0-9]\+?#?|[oO]-[oO]-[oO]|[oO]-[oO])$' )
self.re_move_line_uci = re.compile( r'^bestmove +[a-hA-H][0-9][a-hA-H][0-9]( .*)?$' )
self.re_extract_cecp_all = re.compile( r'^([0-9]+)\.? +(\-?[0-9]+) +[0-9]+.?[0-9]* ([^ ].*)$' )
self.re_extract_uci_depth = re.compile( r'depth +([0-9]+) +' )
self.re_extract_uci_score = re.compile( r'score cp +(-?[0-9]+) +' )
self.re_extract_uci_score_mate_other = re.compile( r'score +mate +([0-9]+) +' )
self.re_extract_uci_score_mate_us = re.compile( r'score +mate +\-([0-9]+) +' )
self.re_extract_uci_score_lowerbound = re.compile( r'score +lowerbound +' )
self.re_extract_uci_score_upperbound = re.compile( r'score +upperbound +' )
self.re_extract_uci_pv = re.compile( r'pv +([a-hA-HoO].*[^ ]) *$' )
def _del (self):
self.detachEngine()
def appendNewline (self):
# Start a new line if text output isn't empty:
if self.output.get_buffer().get_char_count() > 0:
# We have old content, append newline
self.output.get_buffer().insert(self.output.get_buffer().
get_end_iter(), "\n")
def append (self, line, tag=None):
# Append a specific string with the given formatting:
oldenditer = self.output.get_buffer().get_end_iter()
self.output.get_buffer().insert(self.output.get_buffer().
get_end_iter(), line)
if not tag is None:
enditer = self.output.get_buffer().get_end_iter()
startiter = enditer.copy()
startiter.backward_chars(len(line))
self.output.get_buffer().apply_tag(tag, startiter,
enditer)
def appendThinking (self, depth, score, pv):
# Append a formatted thinking line:
self.appendNewline();
self.append(depth.__str__() + ". ", self.tag_color)
self.append("Score: ", self.tag_bold)
self.append(score.__str__() + " ")
self.append("PV: ", self.tag_bold)
self.append(pv.__str__())
def parseInfoLine (self, line):
# Parse an identified info line and add it to our output:
if self.clear_on_output == True:
self.clear_on_output = False
self.clear()
# Clean up line first:
while line.find(" ") != -1:
line = line.replace(" ", " ")
depth = "?"
score = "?"
pv = "?"
infoFound = False
# do more sophisticated parsing here:
if line.startswith("info "):
# UCI info line
            # always end with a space to facilitate searching:
line = line + " "
# parse depth:
result = self.re_extract_uci_depth.search(line)
if result:
depth = result.group(1)
# parse score:
result = self.re_extract_uci_score.search(line)
if result:
score = result.group(1)
else:
result = self.re_extract_uci_score_mate_other.search(line)
if result:
score = "winning in " + result.group(1) + " moves"
else:
result = self.re_extract_uci_score_mate_us.search(line)
if result:
score = "losing in " + result.group(1) + " moves"
else:
if self.re_extract_uci_score_lowerbound.search(line):
score = "lowerbound"
elif self.re_extract_uci_score_upperbound.search(line):
score = "upperbound"
# parse pv:
result = self.re_extract_uci_pv.search(line)
if result:
infoFound = True
pv = result.group(1)
else:
# CECP/Winboard/GNUChess info line
# parse all information in one go:
result = self.re_extract_cecp_all.match(line)
if not result:
return
infoFound = True
depth = result.group(1)
score = result.group(2)
pv = result.group(3)
# Clean pv of unwanted chars:
pv = re.sub( '[^a-z^A-Z^0-9^ ^x^@^?]', '', pv )
# If we found useful information, show it:
if infoFound:
self.appendThinking(depth, score, pv)
def parseLines (self, engine, lines):
for line in lines:
# Clean up the line a bit:
line = line.strip(" \r\t\n")
line = line.replace("\t", " ")
# PARSING THINKING OUTPUT (roughly, simply identifies the lines):
# GNU Chess/CECP/Winboard engine thinking output lines:
if self.re_thinking_line_cecp.match(line):
self.parseInfoLine(line)
# UCI engine thinking output lines:
if self.re_thinking_line_uci.match(line):
if line.find("depth") != -1 and line.find("score") != -1:
self.parseInfoLine(line)
# PARSE MOVE LINES (roughly, we merely identify them):
# We want to clear on the next output info line
# when a move arrived, so that for every move
# we freshly fill our thinking output:
            # CECP/Winboard move line, long algebraic notation:
if self.re_move_line_cecp_alg.match(line):
self.clear_on_output = True
# CECP/Winboard move line, SAN notation:
if self.re_move_line_cecp_san.match(line):
self.clear_on_output = True
# UCI move line:
if self.re_move_line_uci.match(line):
self.clear_on_output = True
return
def clear (self):
self.output.get_buffer().set_text("")
return
def setTitle (self, title):
self.title_label.set_text(title)
return
def attachEngine (self, engine):
# Attach an engine for line listening
if not self.attached_engine is None:
if self.attached_engine == engine:
# We are already attached to this engine
return
# Detach from previous engine
self.attached_engine.disconnect(self.attached_handler_id)
# Attach to new engine:
log.debug("Attaching " + self.__str__() + " to engine " + engine.__str__(), extra={"task":engine.defname})
self.attached_engine = engine
self.attached_handler_id = engine.connect("line", self.parseLines)
return
def detachEngine (self):
# Detach from attached engine
if not self.attached_engine is None:
log.debug("Detaching " + self.__str__() + " from engine " + self.attached_engine.__str__(), extra={"task":self.attached_engine.defname})
self.attached_engine.disconnect(self.attached_handler_id)
self.attached_engine = None
def __repr__(self):
color = "black"
if self.white:
color = "white"
return "Engine Output " + color + " #" + id(self).__str__()
# def __str__(self):
# return repr(self) + " (engine: " + self.attached_engine.__str__() + ")"
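# A minimal illustration of what the UCI regexes above pull out of a typical
# "info" line; the sample string is an assumption, not captured engine output.
def _example_uci_info_parse():
    sample = "info depth 12 score cp 34 nodes 4242 pv e2e4 e7e5 g1f3 "
    depth = re.search(r'depth +([0-9]+) +', sample).group(1)        # "12"
    score = re.search(r'score cp +(-?[0-9]+) +', sample).group(1)   # "34"
    pv = re.search(r'pv +([a-hA-HoO].*[^ ]) *$', sample).group(1)   # "e2e4 e7e5 g1f3"
    return depth, score, pv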
| importsfromgooglecode/pychess | sidepanel/engineOutputPanel.py | Python | gpl-3.0 | 15,439 |
from codecs import open
import os
from setuptools import setup, find_packages
ROOT = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(ROOT, 'VERSION')) as f:
VERSION = f.read().strip()
setup(
name='supervisor-confator',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
license='MIT',
version = VERSION,
description = 'Python interface to generate supervisor configuration files.',
author = 'Alex Hayes',
author_email = '[email protected]',
url = 'https://github.com/alexhayes/supervisor-confator',
download_url = 'https://github.com/alexhayes/supervisor-confator/tarball/%s' % VERSION,
keywords = ['supervisor', 'config', 'generator', 'server management'],
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: System Administrators',
'Topic :: System :: Installation/Setup',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
],
include_package_data=True,
) | alexhayes/supervisor-confator | setup.py | Python | mit | 1,557 |
#!/usr/bin/env python3
# Copyright (c) 2008-9 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 2 of the License, or
# version 3 of the License, or (at your option) any later version. It is
# provided for educational purposes and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
from PyQt4.QtCore import (QDate, QString, Qt, SIGNAL, pyqtSignature)
from PyQt4.QtGui import (QApplication, QDialog, QDialogButtonBox)
import moviedata_ans as moviedata
import ui_addeditmoviedlg_ans as ui_addeditmoviedlg
class AddEditMovieDlg(QDialog,
ui_addeditmoviedlg.Ui_AddEditMovieDlg):
def __init__(self, movies, movie=None, parent=None):
super(AddEditMovieDlg, self).__init__(parent)
self.setupUi(self)
self.movies = movies
self.movie = movie
self.acquiredDateEdit.setDisplayFormat(moviedata.DATEFORMAT)
if movie is not None:
self.titleLineEdit.setText(movie.title)
self.yearSpinBox.setValue(movie.year)
self.minutesSpinBox.setValue(movie.minutes)
self.acquiredDateEdit.setDate(movie.acquired)
self.acquiredDateEdit.setEnabled(False)
self.locationLineEdit.setText(movie.location)
self.notesTextEdit.setPlainText(movie.notes)
self.notesTextEdit.setFocus()
self.buttonBox.button(QDialogButtonBox.Ok).setText(
"&Accept")
self.setWindowTitle("My Movies - Edit Movie")
else:
today = QDate.currentDate()
self.acquiredDateEdit.setDateRange(today.addDays(-5),
today)
self.acquiredDateEdit.setDate(today)
self.titleLineEdit.setFocus()
self.on_titleLineEdit_textEdited(QString())
@pyqtSignature("QString")
def on_titleLineEdit_textEdited(self, text):
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(
not self.titleLineEdit.text().isEmpty())
def accept(self):
title = self.titleLineEdit.text()
year = self.yearSpinBox.value()
minutes = self.minutesSpinBox.value()
location = self.locationLineEdit.text()
notes = self.notesTextEdit.toPlainText()
if self.movie is None:
acquired = self.acquiredDateEdit.date()
self.movie = moviedata.Movie(title, year, minutes,
acquired, location, notes)
self.movies.add(self.movie)
else:
self.movies.updateMovie(self.movie, title, year,
minutes, location, notes)
QDialog.accept(self)
if __name__ == "__main__":
import sys
app = QApplication(sys.argv)
form = AddEditMovieDlg(0)
form.show()
app.exec_()
| paradiseOffice/Bash_and_Cplus-plus | CPP/full_examples/pyqt/chap08/addeditmoviedlg_ans.py | Python | gpl-2.0 | 3,155 |
"""
mbed SDK
Copyright (c) 2011-2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import tempfile
from types import ListType
from shutil import rmtree
from os.path import join, exists, dirname, basename, abspath, normpath, splitext
from os import linesep, remove, makedirs
from time import time
from intelhex import IntelHex
from tools.utils import mkdir, run_cmd, run_cmd_ext, NotSupportedException,\
ToolException, InvalidReleaseTargetException, intelhex_offset
from tools.paths import MBED_CMSIS_PATH, MBED_TARGETS_PATH, MBED_LIBRARIES,\
MBED_HEADER, MBED_DRIVERS, MBED_PLATFORM, MBED_HAL, MBED_CONFIG_FILE,\
MBED_LIBRARIES_DRIVERS, MBED_LIBRARIES_PLATFORM, MBED_LIBRARIES_HAL,\
BUILD_DIR
from tools.targets import TARGET_NAMES, TARGET_MAP
from tools.libraries import Library
from tools.toolchains import TOOLCHAIN_CLASSES
from jinja2 import FileSystemLoader
from jinja2.environment import Environment
from tools.config import Config
RELEASE_VERSIONS = ['2', '5']
def prep_report(report, target_name, toolchain_name, id_name):
"""Setup report keys
Positional arguments:
report - the report to fill
target_name - the target being used
toolchain_name - the toolchain being used
id_name - the name of the executable or library being built
"""
if not target_name in report:
report[target_name] = {}
if not toolchain_name in report[target_name]:
report[target_name][toolchain_name] = {}
if not id_name in report[target_name][toolchain_name]:
report[target_name][toolchain_name][id_name] = []
def prep_properties(properties, target_name, toolchain_name, vendor_label):
"""Setup test properties
Positional arguments:
properties - the dict to fill
target_name - the target the test is targeting
toolchain_name - the toolchain that will compile the test
vendor_label - the vendor
"""
if not target_name in properties:
properties[target_name] = {}
if not toolchain_name in properties[target_name]:
properties[target_name][toolchain_name] = {}
properties[target_name][toolchain_name]["target"] = target_name
properties[target_name][toolchain_name]["vendor"] = vendor_label
properties[target_name][toolchain_name]["toolchain"] = toolchain_name
def create_result(target_name, toolchain_name, id_name, description):
"""Create a result dictionary
Positional arguments:
target_name - the target being built for
toolchain_name - the toolchain doing the building
id_name - the name of the executable or library being built
description - a human readable description of what's going on
"""
cur_result = {}
cur_result["target_name"] = target_name
cur_result["toolchain_name"] = toolchain_name
cur_result["id"] = id_name
cur_result["description"] = description
cur_result["elapsed_time"] = 0
cur_result["output"] = ""
return cur_result
def add_result_to_report(report, result):
"""Add a single result to a report dictionary
Positional arguments:
report - the report to append to
result - the result to append
"""
target = result["target_name"]
toolchain = result["toolchain_name"]
id_name = result['id']
result_wrap = {0: result}
report[target][toolchain][id_name].append(result_wrap)
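# A minimal sketch of how the reporting helpers above fit together; the
# "K64F"/"GCC_ARM"/"EXAMPLE" names are placeholders rather than requirements.
def _example_report_usage():
    report = {}
    prep_report(report, "K64F", "GCC_ARM", "EXAMPLE")
    result = create_result("K64F", "GCC_ARM", "EXAMPLE", "example build")
    result["result"] = "OK"
    add_result_to_report(report, result)
    # report is now {"K64F": {"GCC_ARM": {"EXAMPLE": [{0: result}]}}}
    return report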
def get_config(src_paths, target, toolchain_name):
"""Get the configuration object for a target-toolchain combination
Positional arguments:
src_paths - paths to scan for the configuration files
target - the device we are building for
toolchain_name - the string that identifies the build tools
"""
# Convert src_paths to a list if needed
if type(src_paths) != ListType:
src_paths = [src_paths]
# Pass all params to the unified prepare_resources()
toolchain = prepare_toolchain(src_paths, target, toolchain_name)
# Scan src_path for config files
resources = toolchain.scan_resources(src_paths[0])
for path in src_paths[1:]:
resources.add(toolchain.scan_resources(path))
# Update configuration files until added features creates no changes
prev_features = set()
while True:
# Update the configuration with any .json files found while scanning
toolchain.config.add_config_files(resources.json_files)
# Add features while we find new ones
features = set(toolchain.config.get_features())
if features == prev_features:
break
for feature in features:
if feature in resources.features:
resources += resources.features[feature]
prev_features = features
toolchain.config.validate_config()
cfg, macros = toolchain.config.get_config_data()
features = toolchain.config.get_features()
return cfg, macros, features
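# A minimal usage sketch for get_config(), assuming "." as the application
# source root and placeholder target/toolchain names.
def _example_get_config():
    cfg, macros, features = get_config(["."], "K64F", "GCC_ARM")
    return cfg, macros, features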
def is_official_target(target_name, version):
""" Returns True, None if a target is part of the official release for the
given version. Return False, 'reason' if a target is not part of the
official release for the given version.
Positional arguments:
target_name - Name if the target (ex. 'K64F')
version - The release version string. Should be a string contained within
RELEASE_VERSIONS
"""
result = True
reason = None
target = TARGET_MAP[target_name]
if hasattr(target, 'release_versions') \
and version in target.release_versions:
if version == '2':
# For version 2, either ARM or uARM toolchain support is required
required_toolchains = set(['ARM', 'uARM'])
if not len(required_toolchains.intersection(
set(target.supported_toolchains))) > 0:
result = False
reason = ("Target '%s' must support " % target.name) + \
("one of the folowing toolchains to be included in the") + \
((" mbed 2.0 official release: %s" + linesep) %
", ".join(required_toolchains)) + \
("Currently it is only configured to support the ") + \
("following toolchains: %s" %
", ".join(target.supported_toolchains))
elif version == '5':
# For version 5, ARM, GCC_ARM, and IAR toolchain support is required
required_toolchains = set(['ARM', 'GCC_ARM', 'IAR'])
required_toolchains_sorted = list(required_toolchains)
required_toolchains_sorted.sort()
supported_toolchains = set(target.supported_toolchains)
supported_toolchains_sorted = list(supported_toolchains)
supported_toolchains_sorted.sort()
if not required_toolchains.issubset(supported_toolchains):
result = False
reason = ("Target '%s' must support " % target.name) + \
("ALL of the folowing toolchains to be included in the") + \
((" mbed OS 5.0 official release: %s" + linesep) %
", ".join(required_toolchains_sorted)) + \
("Currently it is only configured to support the ") + \
("following toolchains: %s" %
", ".join(supported_toolchains_sorted))
elif not target.default_lib == 'std':
result = False
reason = ("Target '%s' must set the " % target.name) + \
("'default_lib' to 'std' to be included in the ") + \
("mbed OS 5.0 official release." + linesep) + \
("Currently it is set to '%s'" % target.default_lib)
else:
result = False
reason = ("Target '%s' has set an invalid release version of '%s'" %
version) + \
("Please choose from the following release versions: %s" %
', '.join(RELEASE_VERSIONS))
else:
result = False
if not hasattr(target, 'release_versions'):
reason = "Target '%s' " % target.name
reason += "does not have the 'release_versions' key set"
elif not version in target.release_versions:
reason = "Target '%s' does not contain the version '%s' " % \
(target.name, version)
reason += "in its 'release_versions' key"
return result, reason
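# A small sketch of the official-release check above; "K64F" is only an
# illustrative target name and '5' is one of RELEASE_VERSIONS.
def _example_official_check(target_name="K64F"):
    result, reason = is_official_target(target_name, '5')
    return "official" if result else reason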
def transform_release_toolchains(toolchains, version):
""" Given a list of toolchains and a release version, return a list of
only the supported toolchains for that release
Positional arguments:
toolchains - The list of toolchains
version - The release version string. Should be a string contained within
RELEASE_VERSIONS
"""
if version == '5':
return ['ARM', 'GCC_ARM', 'IAR']
else:
return toolchains
def get_mbed_official_release(version):
""" Given a release version string, return a tuple that contains a target
and the supported toolchains for that release.
Ex. Given '2', return (('LPC1768', ('ARM', 'GCC_ARM')),
('K64F', ('ARM', 'GCC_ARM')), ...)
Positional arguments:
version - The version string. Should be a string contained within
RELEASE_VERSIONS
"""
mbed_official_release = (
tuple(
tuple(
[
TARGET_MAP[target].name,
tuple(transform_release_toolchains(
TARGET_MAP[target].supported_toolchains, version))
]
) for target in TARGET_NAMES \
if (hasattr(TARGET_MAP[target], 'release_versions')
and version in TARGET_MAP[target].release_versions)
)
)
for target in mbed_official_release:
is_official, reason = is_official_target(target[0], version)
if not is_official:
raise InvalidReleaseTargetException(reason)
return mbed_official_release
def add_regions_to_profile(profile, config, toolchain_class):
"""Add regions to the build profile, if there are any.
Positional Arguments:
profile - the profile to update
config - the configuration object that owns the region
toolchain_class - the class of the toolchain being used
"""
regions = list(config.regions)
for region in regions:
for define in [(region.name.upper() + "_ADDR", region.start),
(region.name.upper() + "_SIZE", region.size)]:
profile["common"].append("-D%s=0x%x" % define)
active_region = [r for r in regions if r.active][0]
for define in [("MBED_APP_START", active_region.start),
("MBED_APP_SIZE", active_region.size)]:
profile["ld"].append(toolchain_class.make_ld_define(*define))
print("Using regions in this build:")
for region in regions:
print(" Region %s size 0x%x, offset 0x%x"
% (region.name, region.size, region.start))
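# A short worked illustration of the define strings produced by the loop
# above, for a hypothetical region named "application" at 0x8000 with size
# 0x78000 (the values are assumptions, not real board parameters).
def _example_region_defines():
    name, start, size = "application", 0x8000, 0x78000
    # Produces ['-DAPPLICATION_ADDR=0x8000', '-DAPPLICATION_SIZE=0x78000']
    return ["-D%s=0x%x" % (name.upper() + "_ADDR", start),
            "-D%s=0x%x" % (name.upper() + "_SIZE", size)]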
def prepare_toolchain(src_paths, target, toolchain_name,
macros=None, clean=False, jobs=1,
notify=None, silent=False, verbose=False,
extra_verbose=False, config=None,
app_config=None, build_profile=None):
""" Prepares resource related objects - toolchain, target, config
Positional arguments:
src_paths - the paths to source directories
target - ['LPC1768', 'LPC11U24', 'LPC2368', etc.]
toolchain_name - ['ARM', 'uARM', 'GCC_ARM', 'GCC_CR']
Keyword arguments:
macros - additional macros
clean - Rebuild everything if True
jobs - how many compilers we can run at once
notify - Notify function for logs
silent - suppress printing of progress indicators
verbose - Write the actual tools command lines used if True
extra_verbose - even more output!
config - a Config object to use instead of creating one
app_config - location of a chosen mbed_app.json file
build_profile - a dict of flags that will be passed to the compiler
"""
# We need to remove all paths which are repeated to avoid
# multiple compilations and linking with the same objects
src_paths = [src_paths[0]] + list(set(src_paths[1:]))
# If the configuration object was not yet created, create it now
config = config or Config(target, src_paths, app_config=app_config)
target = config.target
try:
cur_tc = TOOLCHAIN_CLASSES[toolchain_name]
except KeyError:
raise KeyError("Toolchain %s not supported" % toolchain_name)
if config.has_regions:
add_regions_to_profile(build_profile, config, cur_tc)
# Toolchain instance
toolchain = cur_tc(target, notify, macros, silent,
extra_verbose=extra_verbose, build_profile=build_profile)
toolchain.config = config
toolchain.jobs = jobs
toolchain.build_all = clean
toolchain.VERBOSE = verbose
return toolchain
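# A minimal sketch of calling prepare_toolchain() directly; the target and
# toolchain names are placeholders, and the two-key build profile is only an
# assumption about the minimum dict shape used here (real profiles carry more
# flag categories).
def _example_prepare_toolchain():
    return prepare_toolchain(["source"], "K64F", "GCC_ARM",
                             build_profile={"common": [], "ld": []})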
def merge_region_list(region_list, destination, padding=b'\xFF'):
"""Merege the region_list into a single image
Positional Arguments:
region_list - list of regions, which should contain filenames
destination - file name to write all regions to
    padding - bytes to fill gaps with
"""
merged = IntelHex()
print("Merging Regions:")
for region in region_list:
if region.active and not region.filename:
raise ToolException("Active region has no contents: No file found.")
if region.filename:
print(" Filling region %s with %s" % (region.name, region.filename))
part = intelhex_offset(region.filename, offset=region.start)
part_size = (part.maxaddr() - part.minaddr()) + 1
if part_size > region.size:
raise ToolException("Contents of region %s does not fit"
% region.name)
merged.merge(part)
pad_size = region.size - part_size
if pad_size > 0 and region != region_list[-1]:
print(" Padding region %s with 0x%x bytes" % (region.name, pad_size))
merged.puts(merged.maxaddr() + 1, padding * pad_size)
if not exists(dirname(destination)):
makedirs(dirname(destination))
print("Space used after regions merged: 0x%x" %
(merged.maxaddr() - merged.minaddr() + 1))
with open(destination, "wb+") as output:
merged.tofile(output, format='bin')
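# A minimal sketch of re-merging regions into one image; regions normally
# come from the config system (see build_project below), and the destination
# path here is a placeholder.
def _example_merge_regions(config, destination="BUILD/combined.bin"):
    merge_region_list(list(config.regions), destination, padding=b'\x00')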
def scan_resources(src_paths, toolchain, dependencies_paths=None,
inc_dirs=None, base_path=None):
""" Scan resources using initialized toolcain
Positional arguments
src_paths - the paths to source directories
toolchain - valid toolchain object
dependencies_paths - dependency paths that we should scan for include dirs
inc_dirs - additional include directories which should be added to
the scanner resources
"""
# Scan src_path
resources = toolchain.scan_resources(src_paths[0], base_path=base_path)
for path in src_paths[1:]:
resources.add(toolchain.scan_resources(path, base_path=base_path))
# Scan dependency paths for include dirs
if dependencies_paths is not None:
for path in dependencies_paths:
lib_resources = toolchain.scan_resources(path)
resources.inc_dirs.extend(lib_resources.inc_dirs)
# Add additional include directories if passed
if inc_dirs:
if type(inc_dirs) == ListType:
resources.inc_dirs.extend(inc_dirs)
else:
resources.inc_dirs.append(inc_dirs)
# Load resources into the config system which might expand/modify resources
# based on config data
resources = toolchain.config.load_resources(resources)
# Set the toolchain's configuration data
toolchain.set_config_data(toolchain.config.get_config_data())
return resources
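# A minimal usage sketch, assuming an already prepared toolchain object and a
# placeholder extra include directory.
def _example_scan_resources(toolchain):
    return scan_resources(["source"], toolchain, inc_dirs=["extra/include"])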
def build_project(src_paths, build_path, target, toolchain_name,
libraries_paths=None, linker_script=None,
clean=False, notify=None, verbose=False, name=None,
macros=None, inc_dirs=None, jobs=1, silent=False,
report=None, properties=None, project_id=None,
project_description=None, extra_verbose=False, config=None,
app_config=None, build_profile=None):
""" Build a project. A project may be a test or a user program.
Positional arguments:
src_paths - a path or list of paths that contain all files needed to build
the project
build_path - the directory where all of the object files will be placed
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
libraries_paths - The location of libraries to include when linking
    linker_script - the file that drives the linker to do its job
clean - Rebuild everything if True
notify - Notify function for logs
verbose - Write the actual tools command lines used if True
name - the name of the project
macros - additional macros
inc_dirs - additional directories where include files may be found
jobs - how many compilers we can run at once
silent - suppress printing of progress indicators
report - a dict where a result may be appended
    properties - a dict to fill with build properties (see prep_properties)
project_id - the name put in the report
project_description - the human-readable version of what this thing does
extra_verbose - even more output!
config - a Config object to use instead of creating one
app_config - location of a chosen mbed_app.json file
build_profile - a dict of flags that will be passed to the compiler
"""
# Convert src_path to a list if needed
if type(src_paths) != ListType:
src_paths = [src_paths]
    # Extend src_paths with libraries_paths
if libraries_paths is not None:
src_paths.extend(libraries_paths)
inc_dirs.extend(map(dirname, libraries_paths))
# Build Directory
if clean and exists(build_path):
rmtree(build_path)
mkdir(build_path)
# Pass all params to the unified prepare_toolchain()
toolchain = prepare_toolchain(
src_paths, target, toolchain_name, macros=macros, clean=clean,
jobs=jobs, notify=notify, silent=silent, verbose=verbose,
extra_verbose=extra_verbose, config=config, app_config=app_config,
build_profile=build_profile)
# The first path will give the name to the library
if name is None:
name = basename(normpath(abspath(src_paths[0])))
toolchain.info("Building project %s (%s, %s)" %
(name, toolchain.target.name, toolchain_name))
# Initialize reporting
if report != None:
start = time()
# If project_id is specified, use that over the default name
id_name = project_id.upper() if project_id else name.upper()
description = project_description if project_description else name
vendor_label = toolchain.target.extra_labels[0]
prep_report(report, toolchain.target.name, toolchain_name, id_name)
cur_result = create_result(toolchain.target.name, toolchain_name,
id_name, description)
if properties != None:
prep_properties(properties, toolchain.target.name, toolchain_name,
vendor_label)
try:
# Call unified scan_resources
resources = scan_resources(src_paths, toolchain, inc_dirs=inc_dirs)
# Change linker script if specified
if linker_script is not None:
resources.linker_script = linker_script
# Compile Sources
objects = toolchain.compile_sources(resources, build_path,
resources.inc_dirs)
resources.objects.extend(objects)
# Link Program
if toolchain.config.has_regions:
res, _ = toolchain.link_program(resources, build_path, name + "_application")
region_list = list(toolchain.config.regions)
region_list = [r._replace(filename=res) if r.active else r
for r in region_list]
res = join(build_path, name) + ".bin"
merge_region_list(region_list, res)
else:
res, _ = toolchain.link_program(resources, build_path, name)
memap_instance = getattr(toolchain, 'memap_instance', None)
memap_table = ''
if memap_instance:
# Write output to stdout in text (pretty table) format
memap_table = memap_instance.generate_output('table')
if not silent:
print memap_table
# Write output to file in JSON format
map_out = join(build_path, name + "_map.json")
memap_instance.generate_output('json', map_out)
# Write output to file in CSV format for the CI
map_csv = join(build_path, name + "_map.csv")
memap_instance.generate_output('csv-ci', map_csv)
resources.detect_duplicates(toolchain)
if report != None:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output() + memap_table
cur_result["result"] = "OK"
cur_result["memory_usage"] = toolchain.map_outputs
add_result_to_report(report, cur_result)
return res
except Exception as exc:
if report != None:
end = time()
if isinstance(exc, NotSupportedException):
cur_result["result"] = "NOT_SUPPORTED"
else:
cur_result["result"] = "FAIL"
cur_result["elapsed_time"] = end - start
toolchain_output = toolchain.get_output()
if toolchain_output:
cur_result["output"] += toolchain_output
add_result_to_report(report, cur_result)
# Let Exception propagate
raise
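# A minimal sketch of a user-program build; every path and name below is a
# placeholder, and a real invocation would usually also pass a build profile
# and notification hooks.
def _example_build_project():
    return build_project(["source"], "BUILD/K64F/GCC_ARM", "K64F", "GCC_ARM",
                         name="example_app", jobs=4)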
def build_library(src_paths, build_path, target, toolchain_name,
dependencies_paths=None, name=None, clean=False,
archive=True, notify=None, verbose=False, macros=None,
inc_dirs=None, jobs=1, silent=False, report=None,
properties=None, extra_verbose=False, project_id=None,
remove_config_header_file=False, app_config=None,
build_profile=None):
""" Build a library
Positional arguments:
src_paths - a path or list of paths that contain all files needed to build
the library
build_path - the directory where all of the object files will be placed
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
dependencies_paths - The location of libraries to include when linking
name - the name of the library
clean - Rebuild everything if True
archive - whether the library will create an archive file
notify - Notify function for logs
verbose - Write the actual tools command lines used if True
macros - additional macros
inc_dirs - additional directories where include files may be found
jobs - how many compilers we can run at once
silent - suppress printing of progress indicators
report - a dict where a result may be appended
    properties - a dict to fill with build properties (see prep_properties)
extra_verbose - even more output!
project_id - the name that goes in the report
remove_config_header_file - delete config header file when done building
app_config - location of a chosen mbed_app.json file
build_profile - a dict of flags that will be passed to the compiler
"""
# Convert src_path to a list if needed
if type(src_paths) != ListType:
src_paths = [src_paths]
# Build path
if archive:
# Use temp path when building archive
tmp_path = join(build_path, '.temp')
mkdir(tmp_path)
else:
tmp_path = build_path
# Clean the build directory
if clean and exists(tmp_path):
rmtree(tmp_path)
mkdir(tmp_path)
# Pass all params to the unified prepare_toolchain()
toolchain = prepare_toolchain(
src_paths, target, toolchain_name, macros=macros, clean=clean,
jobs=jobs, notify=notify, silent=silent, verbose=verbose,
extra_verbose=extra_verbose, app_config=app_config,
build_profile=build_profile)
# The first path will give the name to the library
if name is None:
name = basename(normpath(abspath(src_paths[0])))
toolchain.info("Building library %s (%s, %s)" %
(name, toolchain.target.name, toolchain_name))
# Initialize reporting
if report != None:
start = time()
# If project_id is specified, use that over the default name
id_name = project_id.upper() if project_id else name.upper()
description = name
vendor_label = toolchain.target.extra_labels[0]
prep_report(report, toolchain.target.name, toolchain_name, id_name)
cur_result = create_result(toolchain.target.name, toolchain_name,
id_name, description)
if properties != None:
prep_properties(properties, toolchain.target.name, toolchain_name,
vendor_label)
for src_path in src_paths:
if not exists(src_path):
error_msg = "The library source folder does not exist: %s", src_path
if report != None:
cur_result["output"] = error_msg
cur_result["result"] = "FAIL"
add_result_to_report(report, cur_result)
raise Exception(error_msg)
try:
# Call unified scan_resources
resources = scan_resources(src_paths, toolchain,
dependencies_paths=dependencies_paths,
inc_dirs=inc_dirs)
# Copy headers, objects and static libraries - all files needed for
# static lib
toolchain.copy_files(resources.headers, build_path, resources=resources)
toolchain.copy_files(resources.objects, build_path, resources=resources)
toolchain.copy_files(resources.libraries, build_path,
resources=resources)
toolchain.copy_files(resources.json_files, build_path,
resources=resources)
if resources.linker_script:
toolchain.copy_files(resources.linker_script, build_path,
resources=resources)
if resources.hex_files:
toolchain.copy_files(resources.hex_files, build_path,
resources=resources)
# Compile Sources
objects = toolchain.compile_sources(resources, abspath(tmp_path),
resources.inc_dirs)
resources.objects.extend(objects)
if archive:
toolchain.build_library(objects, build_path, name)
if remove_config_header_file:
config_header_path = toolchain.get_config_header()
if config_header_path:
remove(config_header_path)
if report != None:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output()
cur_result["result"] = "OK"
add_result_to_report(report, cur_result)
return True
except Exception as exc:
if report != None:
end = time()
if isinstance(exc, ToolException):
cur_result["result"] = "FAIL"
elif isinstance(exc, NotSupportedException):
cur_result["result"] = "NOT_SUPPORTED"
cur_result["elapsed_time"] = end - start
toolchain_output = toolchain.get_output()
if toolchain_output:
cur_result["output"] += toolchain_output
add_result_to_report(report, cur_result)
# Let Exception propagate
raise
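# A minimal sketch of an archived library build with placeholder paths and
# names; dependencies_paths would list any libraries this one includes from.
def _example_build_library():
    return build_library(["mylib"], "BUILD/libs/K64F/GCC_ARM", "K64F",
                         "GCC_ARM", name="mylib", archive=True)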
######################
### Legacy methods ###
######################
def build_lib(lib_id, target, toolchain_name, verbose=False,
clean=False, macros=None, notify=None, jobs=1, silent=False,
report=None, properties=None, extra_verbose=False,
build_profile=None):
""" Legacy method for building mbed libraries
Positional arguments:
lib_id - the library's unique identifier
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
clean - Rebuild everything if True
verbose - Write the actual tools command lines used if True
macros - additional macros
notify - Notify function for logs
jobs - how many compilers we can run at once
silent - suppress printing of progress indicators
report - a dict where a result may be appended
    properties - a dict to fill with build properties (see prep_properties)
extra_verbose - even more output!
build_profile - a dict of flags that will be passed to the compiler
"""
lib = Library(lib_id)
if not lib.is_supported(target, toolchain_name):
print('Library "%s" is not yet supported on target %s with toolchain %s'
% (lib_id, target.name, toolchain_name))
return False
# We need to combine macros from parameter list with macros from library
# definition
lib_macros = lib.macros if lib.macros else []
if macros:
macros.extend(lib_macros)
else:
macros = lib_macros
src_paths = lib.source_dir
build_path = lib.build_dir
dependencies_paths = lib.dependencies
inc_dirs = lib.inc_dirs
inc_dirs_ext = lib.inc_dirs_ext
if type(src_paths) != ListType:
src_paths = [src_paths]
# The first path will give the name to the library
name = basename(src_paths[0])
if report != None:
start = time()
id_name = name.upper()
description = name
vendor_label = target.extra_labels[0]
cur_result = None
prep_report(report, target.name, toolchain_name, id_name)
cur_result = create_result(target.name, toolchain_name, id_name,
description)
if properties != None:
prep_properties(properties, target.name, toolchain_name,
vendor_label)
for src_path in src_paths:
if not exists(src_path):
error_msg = "The library source folder does not exist: %s", src_path
if report != None:
cur_result["output"] = error_msg
cur_result["result"] = "FAIL"
add_result_to_report(report, cur_result)
raise Exception(error_msg)
try:
# Toolchain instance
toolchain = TOOLCHAIN_CLASSES[toolchain_name](
target, macros=macros, notify=notify, silent=silent,
extra_verbose=extra_verbose, build_profile=build_profile)
toolchain.VERBOSE = verbose
toolchain.jobs = jobs
toolchain.build_all = clean
toolchain.info("Building library %s (%s, %s)" %
(name.upper(), target.name, toolchain_name))
# Take into account the library configuration (MBED_CONFIG_FILE)
config = Config(target)
toolchain.config = config
config.add_config_files([MBED_CONFIG_FILE])
# Scan Resources
resources = []
for src_path in src_paths:
resources.append(toolchain.scan_resources(src_path))
# Add extra include directories / files which are required by library
# This files usually are not in the same directory as source files so
# previous scan will not include them
if inc_dirs_ext is not None:
for inc_ext in inc_dirs_ext:
resources.append(toolchain.scan_resources(inc_ext))
# Dependencies Include Paths
dependencies_include_dir = []
if dependencies_paths is not None:
for path in dependencies_paths:
lib_resources = toolchain.scan_resources(path)
dependencies_include_dir.extend(lib_resources.inc_dirs)
dependencies_include_dir.extend(map(dirname, lib_resources.inc_dirs))
if inc_dirs:
dependencies_include_dir.extend(inc_dirs)
# Add other discovered configuration data to the configuration object
for res in resources:
config.load_resources(res)
toolchain.set_config_data(toolchain.config.get_config_data())
# Create the desired build directory structure
bin_path = join(build_path, toolchain.obj_path)
mkdir(bin_path)
tmp_path = join(build_path, '.temp', toolchain.obj_path)
mkdir(tmp_path)
# Copy Headers
for resource in resources:
toolchain.copy_files(resource.headers, build_path,
resources=resource)
dependencies_include_dir.extend(
toolchain.scan_resources(build_path).inc_dirs)
# Compile Sources
objects = []
for resource in resources:
objects.extend(toolchain.compile_sources(resource, tmp_path,
dependencies_include_dir))
needed_update = toolchain.build_library(objects, bin_path, name)
if report != None and needed_update:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output()
cur_result["result"] = "OK"
add_result_to_report(report, cur_result)
return True
except Exception:
if report != None:
end = time()
cur_result["result"] = "FAIL"
cur_result["elapsed_time"] = end - start
toolchain_output = toolchain.get_output()
if toolchain_output:
cur_result["output"] += toolchain_output
add_result_to_report(report, cur_result)
# Let Exception propagate
raise
# We do have unique legacy conventions about how we build and package the mbed
# library
def build_mbed_libs(target, toolchain_name, verbose=False,
clean=False, macros=None, notify=None, jobs=1, silent=False,
report=None, properties=None, extra_verbose=False,
build_profile=None):
""" Function returns True is library was built and false if building was
skipped
Positional arguments:
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
verbose - Write the actual tools command lines used if True
clean - Rebuild everything if True
macros - additional macros
notify - Notify function for logs
jobs - how many compilers we can run at once
silent - suppress printing of progress indicators
report - a dict where a result may be appended
    properties - a dict to fill with build properties (see prep_properties)
extra_verbose - even more output!
build_profile - a dict of flags that will be passed to the compiler
"""
if report != None:
start = time()
id_name = "MBED"
description = "mbed SDK"
vendor_label = target.extra_labels[0]
cur_result = None
prep_report(report, target.name, toolchain_name, id_name)
cur_result = create_result(target.name, toolchain_name, id_name,
description)
if properties != None:
prep_properties(properties, target.name, toolchain_name,
vendor_label)
# Check toolchain support
if toolchain_name not in target.supported_toolchains:
supported_toolchains_text = ", ".join(target.supported_toolchains)
print('%s target is not yet supported by toolchain %s' %
(target.name, toolchain_name))
print('%s target supports %s toolchain%s' %
(target.name, supported_toolchains_text, 's'
if len(target.supported_toolchains) > 1 else ''))
if report != None:
cur_result["result"] = "SKIP"
add_result_to_report(report, cur_result)
return False
try:
# Toolchain
toolchain = TOOLCHAIN_CLASSES[toolchain_name](
target, macros=macros, notify=notify, silent=silent,
extra_verbose=extra_verbose, build_profile=build_profile)
toolchain.VERBOSE = verbose
toolchain.jobs = jobs
toolchain.build_all = clean
# Take into account the library configuration (MBED_CONFIG_FILE)
config = Config(target)
toolchain.config = config
config.add_config_files([MBED_CONFIG_FILE])
toolchain.set_config_data(toolchain.config.get_config_data())
# Source and Build Paths
build_target = join(MBED_LIBRARIES, "TARGET_" + target.name)
build_toolchain = join(build_target, "TOOLCHAIN_" + toolchain.name)
mkdir(build_toolchain)
tmp_path = join(MBED_LIBRARIES, '.temp', toolchain.obj_path)
mkdir(tmp_path)
# CMSIS
toolchain.info("Building library %s (%s, %s)" %
('CMSIS', target.name, toolchain_name))
cmsis_src = MBED_CMSIS_PATH
resources = toolchain.scan_resources(cmsis_src)
toolchain.copy_files(resources.headers, build_target)
toolchain.copy_files(resources.linker_script, build_toolchain)
toolchain.copy_files(resources.bin_files, build_toolchain)
objects = toolchain.compile_sources(resources, tmp_path)
toolchain.copy_files(objects, build_toolchain)
# mbed
toolchain.info("Building library %s (%s, %s)" %
('MBED', target.name, toolchain_name))
# Common Headers
toolchain.copy_files([MBED_HEADER], MBED_LIBRARIES)
library_incdirs = [dirname(MBED_LIBRARIES), MBED_LIBRARIES]
for dir, dest in [(MBED_DRIVERS, MBED_LIBRARIES_DRIVERS),
(MBED_PLATFORM, MBED_LIBRARIES_PLATFORM),
(MBED_HAL, MBED_LIBRARIES_HAL)]:
resources = toolchain.scan_resources(dir)
toolchain.copy_files(resources.headers, dest)
library_incdirs.append(dest)
# Target specific sources
hal_src = MBED_TARGETS_PATH
hal_implementation = toolchain.scan_resources(hal_src)
toolchain.copy_files(hal_implementation.headers +
hal_implementation.hex_files +
hal_implementation.libraries +
[MBED_CONFIG_FILE],
build_target, resources=hal_implementation)
toolchain.copy_files(hal_implementation.linker_script, build_toolchain)
toolchain.copy_files(hal_implementation.bin_files, build_toolchain)
incdirs = toolchain.scan_resources(build_target).inc_dirs
objects = toolchain.compile_sources(hal_implementation, tmp_path,
library_incdirs + incdirs)
toolchain.copy_files(objects, build_toolchain)
# Common Sources
mbed_resources = None
for dir in [MBED_DRIVERS, MBED_PLATFORM, MBED_HAL]:
mbed_resources += toolchain.scan_resources(dir)
objects = toolchain.compile_sources(mbed_resources, tmp_path,
library_incdirs + incdirs)
        # A number of compiled files need to be copied as objects, as opposed
        # to the way the linker searches for symbols in archives. These are:
# - retarget.o: to make sure that the C standard lib symbols get
# overridden
# - board.o: mbed_die is weak
# - mbed_overrides.o: this contains platform overrides of various
# weak SDK functions
separate_names, separate_objects = ['retarget.o', 'board.o',
'mbed_overrides.o'], []
for obj in objects:
for name in separate_names:
if obj.endswith(name):
separate_objects.append(obj)
for obj in separate_objects:
objects.remove(obj)
toolchain.build_library(objects, build_toolchain, "mbed")
for obj in separate_objects:
toolchain.copy_files(obj, build_toolchain)
if report != None:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output()
cur_result["result"] = "OK"
add_result_to_report(report, cur_result)
return True
except Exception as exc:
if report != None:
end = time()
cur_result["result"] = "FAIL"
cur_result["elapsed_time"] = end - start
toolchain_output = toolchain.get_output()
if toolchain_output:
cur_result["output"] += toolchain_output
cur_result["output"] += str(exc)
add_result_to_report(report, cur_result)
# Let Exception propagate
raise
def get_unique_supported_toolchains(release_targets=None):
""" Get list of all unique toolchains supported by targets
Keyword arguments:
release_targets - tuple structure returned from get_mbed_official_release().
If release_targets is not specified, then it queries all
known targets
"""
unique_supported_toolchains = []
if not release_targets:
for target in TARGET_NAMES:
for toolchain in TARGET_MAP[target].supported_toolchains:
if toolchain not in unique_supported_toolchains:
unique_supported_toolchains.append(toolchain)
else:
for target in release_targets:
for toolchain in target[1]:
if toolchain not in unique_supported_toolchains:
unique_supported_toolchains.append(toolchain)
return unique_supported_toolchains
def mcu_toolchain_matrix(verbose_html=False, platform_filter=None,
release_version='5'):
""" Shows target map using prettytable
Keyword arguments:
verbose_html - emit html instead of a simple table
platform_filter - remove results that match the string
release_version - get the matrix for this major version number
"""
# Only use it in this function so building works without extra modules
from prettytable import PrettyTable
if isinstance(release_version, basestring):
# Force release_version to lowercase if it is a string
release_version = release_version.lower()
else:
# Otherwise default to printing all known targets and toolchains
release_version = 'all'
version_release_targets = {}
version_release_target_names = {}
for version in RELEASE_VERSIONS:
version_release_targets[version] = get_mbed_official_release(version)
version_release_target_names[version] = [x[0] for x in
version_release_targets[
version]]
if release_version in RELEASE_VERSIONS:
release_targets = version_release_targets[release_version]
else:
release_targets = None
unique_supported_toolchains = get_unique_supported_toolchains(
release_targets)
prepend_columns = ["Target"] + ["mbed OS %s" % x for x in RELEASE_VERSIONS]
# All tests status table print
columns = prepend_columns + unique_supported_toolchains
table_printer = PrettyTable(columns)
# Align table
for col in columns:
table_printer.align[col] = "c"
table_printer.align["Target"] = "l"
perm_counter = 0
target_counter = 0
target_names = []
if release_targets:
target_names = [x[0] for x in release_targets]
else:
target_names = TARGET_NAMES
for target in sorted(target_names):
if platform_filter is not None:
            # Filter out platforms using regex
if re.search(platform_filter, target) is None:
continue
target_counter += 1
row = [target] # First column is platform name
for version in RELEASE_VERSIONS:
if target in version_release_target_names[version]:
text = "Supported"
else:
text = "-"
row.append(text)
for unique_toolchain in unique_supported_toolchains:
if unique_toolchain in TARGET_MAP[target].supported_toolchains:
text = "Supported"
perm_counter += 1
else:
text = "-"
row.append(text)
table_printer.add_row(row)
result = table_printer.get_html_string() if verbose_html \
else table_printer.get_string()
result += "\n"
result += "Supported targets: %d\n"% (target_counter)
if target_counter == 1:
result += "Supported toolchains: %d"% (perm_counter)
return result
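# A small sketch of printing the support matrix; the filter string is a
# regular expression and "K64" is just an illustrative value.
def _example_print_matrix(platform_filter="K64"):
    print(mcu_toolchain_matrix(platform_filter=platform_filter,
                               release_version='5'))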
def get_target_supported_toolchains(target):
""" Returns target supported toolchains list
Positional arguments:
target - the target to get the supported toolchains of
"""
return TARGET_MAP[target].supported_toolchains if target in TARGET_MAP \
else None
def static_analysis_scan(target, toolchain_name, cppcheck_cmd,
cppcheck_msg_format, verbose=False,
clean=False, macros=None, notify=None, jobs=1,
extra_verbose=False, build_profile=None):
"""Perform static analysis on a target and toolchain combination
Positional arguments:
target - the target to fake the build for
toolchain_name - pretend you would compile with this toolchain
cppcheck_cmd - the command used to do static analysis
cppcheck_msg_format - the format of the check messages
Keyword arguments:
verbose - more printing!
clean - start from a clean slate
macros - extra macros to compile with
notify - the notification event handling function
jobs - number of commands to run at once
extra_verbose - even moar printing
build_profile - a dict of flags that will be passed to the compiler
"""
# Toolchain
toolchain = TOOLCHAIN_CLASSES[toolchain_name](target, macros=macros,
notify=notify,
extra_verbose=extra_verbose,
build_profile=build_profile)
toolchain.VERBOSE = verbose
toolchain.jobs = jobs
toolchain.build_all = clean
# Source and Build Paths
build_target = join(MBED_LIBRARIES, "TARGET_" + target.name)
build_toolchain = join(build_target, "TOOLCHAIN_" + toolchain.name)
mkdir(build_toolchain)
tmp_path = join(MBED_LIBRARIES, '.temp', toolchain.obj_path)
mkdir(tmp_path)
# CMSIS
toolchain.info("Static analysis for %s (%s, %s)" %
('CMSIS', target.name, toolchain_name))
cmsis_src = MBED_CMSIS_PATH
resources = toolchain.scan_resources(cmsis_src)
# Copy files before analysis
toolchain.copy_files(resources.headers, build_target)
toolchain.copy_files(resources.linker_script, build_toolchain)
# Gather include paths, c, cpp sources and macros to transfer to cppcheck
# command line
includes = ["-I%s"% i for i in resources.inc_dirs]
includes.append("-I%s"% str(build_target))
c_sources = " ".join(resources.c_sources)
cpp_sources = " ".join(resources.cpp_sources)
macros = ["-D%s"% s for s in toolchain.get_symbols() + toolchain.macros]
includes = [inc.strip() for inc in includes]
macros = [mac.strip() for mac in macros]
check_cmd = cppcheck_cmd
check_cmd += cppcheck_msg_format
check_cmd += includes
check_cmd += macros
# We need to pass some params via file to avoid "command line too long in
# some OSs"
tmp_file = tempfile.NamedTemporaryFile(delete=False)
tmp_file.writelines(line + '\n' for line in c_sources.split())
tmp_file.writelines(line + '\n' for line in cpp_sources.split())
tmp_file.close()
check_cmd += ["--file-list=%s"% tmp_file.name]
_stdout, _stderr, _ = run_cmd(check_cmd)
if verbose:
print _stdout
print _stderr
# =========================================================================
# MBED
toolchain.info("Static analysis for %s (%s, %s)" %
('MBED', target.name, toolchain_name))
# Common Headers
toolchain.copy_files([MBED_HEADER], MBED_LIBRARIES)
toolchain.copy_files(toolchain.scan_resources(MBED_DRIVERS).headers,
MBED_LIBRARIES)
toolchain.copy_files(toolchain.scan_resources(MBED_PLATFORM).headers,
MBED_LIBRARIES)
toolchain.copy_files(toolchain.scan_resources(MBED_HAL).headers,
MBED_LIBRARIES)
# Target specific sources
hal_src = join(MBED_TARGETS_PATH, "hal")
hal_implementation = toolchain.scan_resources(hal_src)
# Copy files before analysis
toolchain.copy_files(hal_implementation.headers +
hal_implementation.hex_files, build_target,
resources=hal_implementation)
incdirs = toolchain.scan_resources(build_target)
target_includes = ["-I%s" % i for i in incdirs.inc_dirs]
target_includes.append("-I%s"% str(build_target))
target_includes.append("-I%s"% str(hal_src))
target_c_sources = " ".join(incdirs.c_sources)
target_cpp_sources = " ".join(incdirs.cpp_sources)
target_macros = ["-D%s"% s for s in
toolchain.get_symbols() + toolchain.macros]
# Common Sources
mbed_resources = toolchain.scan_resources(MBED_COMMON)
# Gather include paths, c, cpp sources and macros to transfer to cppcheck
# command line
mbed_includes = ["-I%s" % i for i in mbed_resources.inc_dirs]
mbed_includes.append("-I%s"% str(build_target))
mbed_includes.append("-I%s"% str(MBED_DRIVERS))
mbed_includes.append("-I%s"% str(MBED_PLATFORM))
mbed_includes.append("-I%s"% str(MBED_HAL))
mbed_c_sources = " ".join(mbed_resources.c_sources)
mbed_cpp_sources = " ".join(mbed_resources.cpp_sources)
target_includes = [inc.strip() for inc in target_includes]
mbed_includes = [inc.strip() for inc in mbed_includes]
target_macros = [mac.strip() for mac in target_macros]
check_cmd = cppcheck_cmd
check_cmd += cppcheck_msg_format
check_cmd += target_includes
check_cmd += mbed_includes
check_cmd += target_macros
    # We need to pass some params via file to avoid "command line too long in
# some OSs"
tmp_file = tempfile.NamedTemporaryFile(delete=False)
tmp_file.writelines(line + '\n' for line in target_c_sources.split())
tmp_file.writelines(line + '\n' for line in target_cpp_sources.split())
tmp_file.writelines(line + '\n' for line in mbed_c_sources.split())
tmp_file.writelines(line + '\n' for line in mbed_cpp_sources.split())
tmp_file.close()
check_cmd += ["--file-list=%s"% tmp_file.name]
_stdout, _stderr, _ = run_cmd_ext(check_cmd)
if verbose:
print _stdout
print _stderr
def static_analysis_scan_lib(lib_id, target, toolchain, cppcheck_cmd,
cppcheck_msg_format, verbose=False,
clean=False, macros=None, notify=None, jobs=1,
extra_verbose=False, build_profile=None):
"""Perform static analysis on a library as if it were to be compiled for a
particular target and toolchain combination
"""
lib = Library(lib_id)
if lib.is_supported(target, toolchain):
static_analysis_scan_library(
lib.source_dir, lib.build_dir, target, toolchain, cppcheck_cmd,
cppcheck_msg_format, lib.dependencies, verbose=verbose,
clean=clean, macros=macros, notify=notify, jobs=jobs,
extra_verbose=extra_verbose, build_profile=build_profile)
else:
print('Library "%s" is not yet supported on target %s with toolchain %s'
% (lib_id, target.name, toolchain))
def static_analysis_scan_library(src_paths, build_path, target, toolchain_name,
cppcheck_cmd, cppcheck_msg_format,
dependencies_paths=None,
name=None, clean=False, notify=None,
verbose=False, macros=None, jobs=1,
extra_verbose=False, build_profile=None):
""" Function scans library for statically detectable defects
Positional arguments:
src_paths - the list of library paths to scan
build_path - the location directory of result files
target - the target to fake the build for
toolchain_name - pretend you would compile with this toolchain
cppcheck_cmd - the command used to do static analysis
cppcheck_msg_format - the format of the check messages
Keyword arguments:
dependencies_paths - the paths to sources that this library depends on
name - the name of this library
clean - start from a clean slate
notify - the notification event handling function
verbose - more printing!
macros - extra macros to compile with
jobs - number of commands to run at once
extra_verbose - even moar printing
build_profile - a dict of flags that will be passed to the compiler
"""
if type(src_paths) != ListType:
src_paths = [src_paths]
for src_path in src_paths:
if not exists(src_path):
raise Exception("The library source folder does not exist: %s",
src_path)
# Toolchain instance
toolchain = TOOLCHAIN_CLASSES[toolchain_name](target, macros=macros,
notify=notify,
extra_verbose=extra_verbose,
build_profile=build_profile)
toolchain.VERBOSE = verbose
toolchain.jobs = jobs
# The first path will give the name to the library
name = basename(src_paths[0])
toolchain.info("Static analysis for library %s (%s, %s)" %
(name.upper(), target.name, toolchain_name))
# Scan Resources
resources = []
for src_path in src_paths:
resources.append(toolchain.scan_resources(src_path))
# Dependencies Include Paths
dependencies_include_dir = []
if dependencies_paths is not None:
for path in dependencies_paths:
lib_resources = toolchain.scan_resources(path)
dependencies_include_dir.extend(lib_resources.inc_dirs)
# Create the desired build directory structure
bin_path = join(build_path, toolchain.obj_path)
mkdir(bin_path)
tmp_path = join(build_path, '.temp', toolchain.obj_path)
mkdir(tmp_path)
# Gather include paths, c, cpp sources and macros to transfer to cppcheck
# command line
includes = ["-I%s" % i for i in dependencies_include_dir + src_paths]
c_sources = " "
cpp_sources = " "
macros = ['-D%s' % s for s in toolchain.get_symbols() + toolchain.macros]
# Copy Headers
for resource in resources:
toolchain.copy_files(resource.headers, build_path, resources=resource)
includes += ["-I%s" % i for i in resource.inc_dirs]
c_sources += " ".join(resource.c_sources) + " "
cpp_sources += " ".join(resource.cpp_sources) + " "
dependencies_include_dir.extend(
toolchain.scan_resources(build_path).inc_dirs)
includes = [inc.strip() for inc in includes]
macros = [mac.strip() for mac in macros]
check_cmd = cppcheck_cmd
check_cmd += cppcheck_msg_format
check_cmd += includes
check_cmd += macros
# We need to pass some parameters via file to avoid "command line too long
# in some OSs". A temporary file is created to store e.g. cppcheck list of
# files for command line
tmp_file = tempfile.NamedTemporaryFile(delete=False)
tmp_file.writelines(line + '\n' for line in c_sources.split())
tmp_file.writelines(line + '\n' for line in cpp_sources.split())
tmp_file.close()
check_cmd += ["--file-list=%s"% tmp_file.name]
    # This will allow us to grab results from both stdout and stderr (so we
    # can show them). We assume the static code analysis tool reports its
    # defects on stderr.
_stdout, _stderr, _ = run_cmd_ext(check_cmd)
if verbose:
print _stdout
print _stderr
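# A minimal usage sketch for static_analysis_scan_library(), assuming cppcheck
# is on PATH; the library path, target name ("K64F"), toolchain ("GCC_ARM"),
# the TARGET_MAP import and the message template below are illustrative
# assumptions, not fixed values:
#
#   from tools.targets import TARGET_MAP
#   static_analysis_scan_library(
#       ["libs/my_lib"], "build/scan", TARGET_MAP["K64F"], "GCC_ARM",
#       ["cppcheck", "--enable=all"],
#       ["--template=[{severity}] {file}@{line}: {id}:{message}"],
#       verbose=True)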
def print_build_results(result_list, build_name):
""" Generate result string for build results
Positional arguments:
result_list - the list of results to print
build_name - the name of the build we are printing result for
"""
result = ""
if len(result_list) > 0:
result += build_name + "\n"
result += "\n".join([" * %s" % f for f in result_list])
result += "\n"
return result
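# Example of the string print_build_results() builds (list contents are made
# up for illustration):
#
#   print_build_results(["K64F::GCC_ARM", "LPC1768::ARM"], "Failed builds:")
#   # returns:
#   # Failed builds:
#   #   * K64F::GCC_ARM
#   #   * LPC1768::ARM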
def print_build_memory_usage(report):
""" Generate result table with memory usage values for build results
Aggregates (puts together) reports obtained from self.get_memory_summary()
Positional arguments:
report - Report generated during build procedure.
"""
from prettytable import PrettyTable
columns_text = ['name', 'target', 'toolchain']
columns_int = ['static_ram', 'stack', 'heap', 'total_ram', 'total_flash']
table = PrettyTable(columns_text + columns_int)
for col in columns_text:
table.align[col] = 'l'
for col in columns_int:
table.align[col] = 'r'
for target in report:
for toolchain in report[target]:
for name in report[target][toolchain]:
for dlist in report[target][toolchain][name]:
for dlistelem in dlist:
# Get 'memory_usage' record and build table with
# statistics
record = dlist[dlistelem]
if 'memory_usage' in record and record['memory_usage']:
# Note that summary should be in the last record of
# 'memory_usage' section. This is why we are
# grabbing last "[-1]" record.
row = [
record['description'],
record['target_name'],
record['toolchain_name'],
record['memory_usage'][-1]['summary'][
'static_ram'],
record['memory_usage'][-1]['summary']['stack'],
record['memory_usage'][-1]['summary']['heap'],
record['memory_usage'][-1]['summary'][
'total_ram'],
record['memory_usage'][-1]['summary'][
'total_flash'],
]
table.add_row(row)
result = "Memory map breakdown for built projects (values in Bytes):\n"
result += table.get_string(sortby='name')
return result
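# The `report` argument is the nested dict built by the reporting code:
# report[target][toolchain][name] is a list of dicts whose records may carry a
# 'memory_usage' list. A hand-made sketch (all values are hypothetical):
#
#   report = {"K64F": {"GCC_ARM": {"blinky": [{
#       "0": {"description": "blinky", "target_name": "K64F",
#             "toolchain_name": "GCC_ARM",
#             "memory_usage": [{"summary": {
#                 "static_ram": 1024, "stack": 512, "heap": 2048,
#                 "total_ram": 3584, "total_flash": 16384}}]}}]}}}
#   print print_build_memory_usage(report)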
def write_build_report(build_report, template_filename, filename):
"""Write a build report to disk using a template file
Positional arguments:
build_report - a report generated by the build system
template_filename - a file that contains the template for the style of build
report
filename - the location on disk to write the file to
"""
build_report_failing = []
build_report_passing = []
for report in build_report:
if len(report["failing"]) > 0:
build_report_failing.append(report)
else:
build_report_passing.append(report)
env = Environment(extensions=['jinja2.ext.with_'])
env.loader = FileSystemLoader('ci_templates')
template = env.get_template(template_filename)
with open(filename, 'w+') as placeholder:
placeholder.write(template.render(
failing_builds=build_report_failing,
passing_builds=build_report_passing))
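# A minimal sketch of calling write_build_report(); it assumes a local
# './ci_templates' directory (hard-coded above) holding the named jinja2
# template, and that each report entry carries at least a "failing" list
# (any other keys are template-specific):
#
#   reports = [{"name": "K64F::GCC_ARM", "failing": [], "passing": []}]
#   write_build_report(reports, "build_report.html", "out/build_report.html")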
| adustm/mbed | tools/build_api.py | Python | apache-2.0 | 61089 |
#**********************************************************************************************#
#********* Return the top most group name of an object ****************************************#
#********* by Djordje Spasic ******************************************************************#
#********* [email protected] 6-May-2014 **************************************************#
"""
This small function replicates the "ObjectTopGroup" RhinoScript function, which has not yet been implemented
in PythonScript (rhinoscriptsyntax).
Returns the topmost group name that an object is assigned to. This function primarily applies to objects that
are members of nested groups.
"""
import rhinoscriptsyntax as rs
import scriptcontext as sc
def objectTopGroup(_id):
groupNames = sc.doc.Groups.GroupNames(False)
groupName = False
for i in range(rs.GroupCount()):
groupRO = sc.doc.Groups.GroupMembers(i)
for ele in groupRO:
if ele.Id == _id:
groupName = groupNames[i]
if groupName:
print groupName
else:
print "The element you chose does not belong to any group"
id = rs.GetObject()
objectTopGroup(id)
| stgeorges/pythonscripts | objectTopGroup.py | Python | unlicense | 1167 |
# Copyright 2005 Duke University
# Copyright (C) 2012-2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Supplies the Base class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from dnf.comps import CompsQuery
from dnf.i18n import _, P_, ucd
from dnf.util import first
from dnf.yum import history
from dnf.yum import misc
from dnf.yum import rpmsack
from functools import reduce
import collections
import dnf.callback
import dnf.comps
import dnf.conf
import dnf.conf.read
import dnf.crypto
import dnf.drpm
import dnf.exceptions
import dnf.goal
import dnf.history
import dnf.lock
import dnf.logging
import dnf.persistor
import dnf.plugin
import dnf.query
import dnf.repo
import dnf.repodict
import dnf.rpm.connection
import dnf.rpm.miscutils
import dnf.rpm.transaction
import dnf.sack
import dnf.selector
import dnf.subject
import dnf.transaction
import dnf.util
import dnf.yum.config
import dnf.yum.rpmtrans
import functools
import hawkey
import itertools
import logging
import os
import operator
import re
import rpm
import time
logger = logging.getLogger("dnf")
class Base(object):
def __init__(self, conf=None):
# :api
self._closed = False
self._conf = conf or self._setup_default_conf()
self._goal = None
self.repo_persistor = None
self._sack = None
self._transaction = None
self._ts = None
self._comps = None
self._history = None
self._tempfiles = set()
self.ds_callback = dnf.callback.Depsolve()
self.group_persistor = None
self.logging = dnf.logging.Logging()
self._repos = dnf.repodict.RepoDict()
self.rpm_probfilter = set([rpm.RPMPROB_FILTER_OLDPACKAGE])
self.plugins = dnf.plugin.Plugins()
self.clean_tempfiles = False
self._tempfile_persistor = None
def __enter__(self):
return self
def __exit__(self, *exc_args):
self.close()
def _add_repo_to_sack(self, name):
repo = self.repos[name]
try:
repo.load()
except dnf.exceptions.RepoError as e:
if repo.skip_if_unavailable is False:
raise
logger.warning(_("%s, disabling."), e)
repo.disable()
return
hrepo = repo.hawkey_repo
hrepo.repomd_fn = repo.repomd_fn
hrepo.primary_fn = repo.primary_fn
hrepo.filelists_fn = repo.filelists_fn
hrepo.cost = repo.cost
if repo.presto_fn:
hrepo.presto_fn = repo.presto_fn
else:
logger.debug("not found deltainfo for: %s" % repo.name)
if repo.updateinfo_fn:
hrepo.updateinfo_fn = repo.updateinfo_fn
else:
logger.debug("not found updateinfo for: %s" % repo.name)
self._sack.load_repo(hrepo, build_cache=True, load_filelists=True,
load_presto=repo.deltarpm,
load_updateinfo=True)
@staticmethod
def _setup_default_conf():
conf = dnf.conf.Conf()
subst = conf.substitutions
if 'releasever' not in subst:
subst['releasever'] = \
dnf.rpm.detect_releasever(conf.installroot)
cache_dirs = dnf.conf.CliCache(conf.cachedir)
conf.cachedir = cache_dirs.cachedir
return conf
def _setup_excludes_includes(self):
disabled = set(self.conf.disable_excludes)
if 'all' in disabled:
return
if 'main' not in disabled:
for excl in self.conf.exclude:
subj = dnf.subject.Subject(excl)
pkgs = subj.get_best_query(self.sack)
self.sack.add_excludes(pkgs)
for incl in self.conf.include:
subj = dnf.subject.Subject(incl)
pkgs = subj.get_best_query(self.sack)
self.sack.add_includes(pkgs)
for r in self.repos.iter_enabled():
if r.id in disabled:
continue
for excl in r.exclude:
pkgs = self.sack.query().filter(reponame=r.id).\
filter_autoglob(name=excl)
self.sack.add_excludes(pkgs)
for incl in r.include:
pkgs = self.sack.query().filter(reponame=r.id).\
filter_autoglob(name=incl)
self.sack.add_includes(pkgs)
def _store_persistent_data(self):
def check_expired(repo):
try:
exp_remaining = repo.metadata_expire_in()[1]
return False if exp_remaining is None else exp_remaining <= 0
except dnf.exceptions.MetadataError:
return False
if self.repo_persistor:
expired = [r.id for r in self.repos.iter_enabled()
if check_expired(r)]
self.repo_persistor.set_expired_repos(expired)
if self.group_persistor:
self.group_persistor.save()
if self._tempfile_persistor:
self._tempfile_persistor.save()
@property
def comps(self):
# :api
return self._comps
@property
def conf(self):
# :api
return self._conf
@property
def goal(self):
return self._goal
@property
def repos(self):
return self._repos
@repos.deleter
def repos(self):
self._repos = None
@property
@dnf.util.lazyattr("_rpmconn")
def rpmconn(self):
return dnf.rpm.connection.RpmConnection(self.conf.installroot)
@property
def sack(self):
# :api
return self._sack
@property
def transaction(self):
# :api
return self._transaction
@transaction.setter
def transaction(self, value):
if self._transaction:
raise ValueError('transaction already set')
self._transaction = value
def activate_persistor(self):
self.repo_persistor = dnf.persistor.RepoPersistor(self.conf.cachedir)
def fill_sack(self, load_system_repo=True, load_available_repos=True):
"""Prepare the Sack and the Goal objects. :api."""
timer = dnf.logging.Timer('sack setup')
self._sack = dnf.sack.build_sack(self)
lock = dnf.lock.build_metadata_lock(self.conf.cachedir)
with lock:
if load_system_repo is not False:
try:
self._sack.load_system_repo(build_cache=True)
except IOError:
if load_system_repo != 'auto':
raise
if load_available_repos:
for r in self.repos.iter_enabled():
self._add_repo_to_sack(r.id)
conf = self.conf
self._sack.configure(conf.installonlypkgs, conf.installonly_limit)
self._setup_excludes_includes()
timer()
self._goal = dnf.goal.Goal(self._sack)
return self._sack
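    # A minimal sketch of the usual setup sequence for this class (error
    # handling omitted; requires a readable system repo / writable cachedir):
    #
    #   with dnf.Base() as base:
    #       base.read_all_repos()
    #       base.fill_sack()
    #       installed = base.sack.query().installed().run()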
@property
@dnf.util.lazyattr("_yumdb")
def yumdb(self):
db_path = os.path.normpath(self.conf.persistdir + '/yumdb')
return rpmsack.AdditionalPkgDB(db_path)
def close(self):
"""Close all potential handles and clean cache. :api
Typically the handles are to data sources and sinks.
"""
if self._closed:
return
logger.log(dnf.logging.DDEBUG, 'Cleaning up.')
self._closed = True
self._tempfile_persistor = dnf.persistor.TempfilePersistor(
self.conf.cachedir)
if self.clean_tempfiles:
# delete all packages from last unsuccessful transactions
self._tempfiles.update(
self._tempfile_persistor.get_saved_tempfiles())
self._tempfile_persistor.empty()
if not self.conf.keepcache:
if self.clean_tempfiles:
self.clean_used_packages()
else:
self._tempfile_persistor.tempfiles_to_add.update(
self._tempfiles)
if self._tempfile_persistor.tempfiles_to_add:
logger.info(_("The downloaded packages were saved in cache "
"till the next successful transaction."))
logger.info(_("You can remove cached packages by executing "
"'%s'"), "dnf clean packages")
# Do not trigger the lazy creation:
if self._history is not None:
self.history.close()
self._store_persistent_data()
self.closeRpmDB()
def read_all_repos(self, repo_setopts=None):
"""Read repositories from the main conf file and from .repo files."""
# :api
reader = dnf.conf.read.RepoReader(self.conf, repo_setopts or {})
for repo in reader:
try:
self.repos.add(repo)
except dnf.exceptions.ConfigError as e:
logger.warning(e)
def reset(self, sack=False, repos=False, goal=False):
"""Make the Base object forget about various things. :api"""
if sack:
self._sack = None
if repos:
self._repos = dnf.repodict.RepoDict()
if goal:
self._goal = None
if self._sack is not None:
self._goal = dnf.goal.Goal(self._sack)
if self.group_persistor is not None:
self.group_persistor = self._activate_group_persistor()
def closeRpmDB(self):
"""Closes down the instances of rpmdb that could be open."""
del self.ts
self._comps = None
_TS_FLAGS_TO_RPM = {'noscripts': rpm.RPMTRANS_FLAG_NOSCRIPTS,
'notriggers': rpm.RPMTRANS_FLAG_NOTRIGGERS,
'nodocs': rpm.RPMTRANS_FLAG_NODOCS,
'test': rpm.RPMTRANS_FLAG_TEST,
'justdb': rpm.RPMTRANS_FLAG_JUSTDB,
'nocontexts': rpm.RPMTRANS_FLAG_NOCONTEXTS,
'nocrypto': rpm.RPMTRANS_FLAG_NOFILEDIGEST}
_TS_VSFLAGS_TO_RPM = {'nocrypto': rpm._RPMVSF_NOSIGNATURES |
rpm._RPMVSF_NODIGESTS}
@property
def ts(self):
"""Set up the RPM transaction set that will be used
for all the work."""
if self._ts is not None:
return self._ts
self._ts = dnf.rpm.transaction.TransactionWrapper(
self.conf.installroot)
self._ts.setFlags(0) # reset everything.
for flag in self.conf.tsflags:
rpm_flag = self._TS_FLAGS_TO_RPM.get(flag)
if rpm_flag is None:
logger.critical(_('Invalid tsflag in config file: %s'), flag)
continue
self._ts.addTsFlag(rpm_flag)
vs_flag = self._TS_VSFLAGS_TO_RPM.get(flag)
if vs_flag is not None:
self._ts.pushVSFlags(vs_flag)
probfilter = reduce(operator.or_, self.rpm_probfilter, 0)
self._ts.setProbFilter(probfilter)
return self._ts
@ts.deleter
def ts(self):
"""Releases the RPM transaction set. """
if self._ts is None:
return
self._ts.close()
del self._ts
self._ts = None
def _activate_group_persistor(self):
return dnf.persistor.GroupPersistor(self.conf.persistdir)
def read_comps(self):
"""Create the groups object to access the comps metadata. :api"""
timer = dnf.logging.Timer('loading comps')
self.group_persistor = self._activate_group_persistor()
self._comps = dnf.comps.Comps()
logger.log(dnf.logging.DDEBUG, 'Getting group metadata')
for repo in self.repos.iter_enabled():
if not repo.enablegroups:
continue
if not repo.metadata:
continue
comps_fn = repo.metadata.comps_fn
if comps_fn is None:
continue
logger.log(dnf.logging.DDEBUG,
'Adding group file from repository: %s', repo.id)
if repo.md_only_cached:
decompressed = misc.calculate_repo_gen_dest(comps_fn,
'groups.xml')
if not os.path.exists(decompressed):
# root privileges are needed for comps decompression
continue
else:
decompressed = misc.repo_gen_decompress(comps_fn, 'groups.xml')
try:
self._comps.add_from_xml_filename(decompressed)
except dnf.exceptions.CompsError as e:
msg = _('Failed to add groups file for repository: %s - %s')
logger.critical(msg % (repo.id, e))
timer()
return self._comps
def _getHistory(self):
"""auto create the history object that to access/append the transaction
history information. """
if self._history is None:
db_path = self.conf.persistdir + "/history"
releasever = self.conf.releasever
self._history = history.YumHistory(db_path, self.yumdb,
root=self.conf.installroot,
releasever=releasever)
return self._history
history = property(fget=lambda self: self._getHistory(),
fset=lambda self, value: setattr(
self, "_history", value),
fdel=lambda self: setattr(self, "_history", None),
doc="Yum History Object")
def _goal2transaction(self, goal):
ts = dnf.transaction.Transaction()
all_obsoleted = set(goal.list_obsoleted())
for pkg in goal.list_downgrades():
obs = goal.obsoleted_by_package(pkg)
downgraded = obs[0]
self.ds_callback.pkg_added(downgraded, 'dd')
self.ds_callback.pkg_added(pkg, 'd')
ts.add_downgrade(pkg, downgraded, obs[1:])
for pkg in goal.list_reinstalls():
self.ds_callback.pkg_added(pkg, 'r')
obs = goal.obsoleted_by_package(pkg)
reinstalled = obs[0]
ts.add_reinstall(pkg, reinstalled, obs[1:])
for pkg in goal.list_installs():
self.ds_callback.pkg_added(pkg, 'i')
obs = goal.obsoleted_by_package(pkg)
ts.add_install(pkg, obs, goal.get_reason(pkg))
cb = lambda pkg: self.ds_callback.pkg_added(pkg, 'od')
dnf.util.mapall(cb, obs)
for pkg in goal.list_upgrades():
group_fn = functools.partial(operator.contains, all_obsoleted)
obs, upgraded = dnf.util.group_by_filter(
group_fn, goal.obsoleted_by_package(pkg))
cb = lambda pkg: self.ds_callback.pkg_added(pkg, 'od')
dnf.util.mapall(cb, obs)
if pkg.name in self.conf.installonlypkgs:
ts.add_install(pkg, obs)
else:
ts.add_upgrade(pkg, upgraded[0], obs)
cb = lambda pkg: self.ds_callback.pkg_added(pkg, 'ud')
dnf.util.mapall(cb, upgraded)
self.ds_callback.pkg_added(pkg, 'u')
for pkg in goal.list_erasures():
self.ds_callback.pkg_added(pkg, 'e')
ts.add_erase(pkg)
return ts
def _query_matches_installed(self, q):
""" See what packages in the query match packages (also in older
versions, but always same architecture) that are already installed.
Unlike in case of _sltr_matches_installed(), it is practical here
to know even the packages in the original query that can still be
installed.
"""
inst = q.installed()
inst_per_arch = inst.na_dict()
avail_per_arch = q.latest().available().na_dict()
avail_l = []
inst_l = []
for na in avail_per_arch:
if na in inst_per_arch:
inst_l.append(inst_per_arch[na][0])
else:
avail_l.extend(avail_per_arch[na])
return inst_l, avail_l
def _sltr_matches_installed(self, sltr):
""" See if sltr matches a patches that is (in older version or different
architecture perhaps) already installed.
"""
inst = self.sack.query().installed()
inst = inst.filter(pkg=sltr.matches())
return list(inst)
def iter_userinstalled(self):
"""Get iterator over the packages installed by the user."""
return (pkg for pkg in self.sack.query().installed()
if self.yumdb.get_package(pkg).get('reason') == 'user' and
self.yumdb.get_package(pkg).get('from_repo') != 'anakonda')
def run_hawkey_goal(self, goal, allow_erasing):
ret = goal.run(
allow_uninstall=allow_erasing, force_best=self.conf.best,
ignore_weak_deps=(not self.conf.install_weak_deps))
if self.conf.debug_solver:
goal.write_debugdata('./debugdata')
return ret
def resolve(self, allow_erasing=False):
"""Build the transaction set. :api"""
exc = None
timer = dnf.logging.Timer('depsolve')
self.ds_callback.start()
goal = self._goal
if goal.req_has_erase():
goal.push_userinstalled(self.sack.query().installed(), self.yumdb)
if not self.run_hawkey_goal(goal, allow_erasing):
if self.conf.debuglevel >= 6:
goal.log_decisions()
exc = dnf.exceptions.DepsolveError('.\n'.join(goal.problems))
else:
self._transaction = self._goal2transaction(goal)
self.ds_callback.end()
timer()
got_transaction = self._transaction is not None and \
len(self._transaction) > 0
if got_transaction:
msg = self._transaction.rpm_limitations()
if msg:
exc = dnf.exceptions.Error(msg)
if exc is not None:
raise exc
return got_transaction
def do_transaction(self, display=()):
# :api
if not isinstance(display, collections.Sequence):
display = [display]
display = \
[dnf.yum.rpmtrans.LoggingTransactionDisplay()] + list(display)
persistor = self.group_persistor
if persistor:
persistor.commit()
if not self.transaction:
return
# save our ds_callback out
dscb = self.ds_callback
self.ds_callback = None
self.transaction.populate_rpm_ts(self.ts)
logger.info(_('Running transaction check'))
msgs = self._run_rpm_check()
if msgs:
msg = _('Error: transaction check vs depsolve:')
logger.error(msg)
for msg in msgs:
logger.error(msg)
raise dnf.exceptions.TransactionCheckError(msg)
logger.info(_('Transaction check succeeded.'))
timer = dnf.logging.Timer('transaction test')
logger.info(_('Running transaction test'))
if not self.conf.diskspacecheck:
self.rpm_probfilter.add(rpm.RPMPROB_FILTER_DISKSPACE)
self.ts.order() # order the transaction
self.ts.clean() # release memory not needed beyond this point
testcb = dnf.yum.rpmtrans.RPMTransaction(self, test=True)
tserrors = self.ts.test(testcb)
del testcb
if len(tserrors) > 0:
errstring = _('Transaction check error:\n')
for descr in tserrors:
errstring += ' %s\n' % ucd(descr)
raise dnf.exceptions.Error(errstring + '\n' +
self._trans_error_summary(errstring))
logger.info(_('Transaction test succeeded.'))
timer()
# unset the sigquit handler
timer = dnf.logging.Timer('transaction')
# put back our depcheck callback
self.ds_callback = dscb
# setup our rpm ts callback
cb = dnf.yum.rpmtrans.RPMTransaction(self, displays=display)
if self.conf.debuglevel < 2:
for display_ in cb.displays:
display_.output = False
logger.info(_('Running transaction'))
lock = dnf.lock.build_rpmdb_lock(self.conf.persistdir)
with lock:
self._run_transaction(cb=cb)
timer()
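    # A minimal end-to-end sketch combining the :api calls of this class
    # (the package spec "nano" is only an example):
    #
    #   base.install("nano")
    #   if base.resolve(allow_erasing=False):
    #       base.download_packages(list(base.transaction.install_set))
    #       base.do_transaction()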
def _trans_error_summary(self, errstring):
"""Parse the error string for 'interesting' errors which can
be grouped, such as disk space issues.
:param errstring: the error string
:return: a string containing a summary of the errors
"""
summary = ''
# do disk space report first
p = re.compile('needs (\d+)MB on the (\S+) filesystem')
disk = {}
for m in p.finditer(errstring):
if m.group(2) not in disk:
disk[m.group(2)] = int(m.group(1))
if disk[m.group(2)] < int(m.group(1)):
disk[m.group(2)] = int(m.group(1))
if disk:
summary += _('Disk Requirements:\n')
for k in disk:
summary += P_(
' At least %dMB more space needed on the %s filesystem.\n',
' At least %dMB more space needed on the %s filesystem.\n',
disk[k]) % (disk[k], k)
summary = _('Error Summary\n-------------\n') + summary
return summary
def _record_history(self):
return self.conf.history_record and \
not self.ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST)
def _run_transaction(self, cb):
"""Perform the RPM transaction."""
if self._record_history():
using_pkgs_pats = list(self.conf.history_record_packages)
installed_query = self.sack.query().installed()
using_pkgs = installed_query.filter(name=using_pkgs_pats).run()
rpmdbv = self.sack.rpmdb_version(self.yumdb)
lastdbv = self.history.last()
if lastdbv is not None:
lastdbv = lastdbv.end_rpmdbversion
if lastdbv is None or rpmdbv != lastdbv:
logger.debug("RPMDB altered outside of DNF.")
cmdline = None
if hasattr(self, 'args') and self.args:
cmdline = ' '.join(self.args)
elif hasattr(self, 'cmds') and self.cmds:
cmdline = ' '.join(self.cmds)
self.history.beg(rpmdbv, using_pkgs, list(self.transaction),
[], [], cmdline)
# write out our config and repo data to additional history info
self._store_config_in_history()
if self.conf.reset_nice:
onice = os.nice(0)
if onice:
try:
os.nice(-onice)
except:
onice = 0
logger.log(dnf.logging.DDEBUG, 'RPM transaction start.')
errors = self.ts.run(cb.callback, '')
logger.log(dnf.logging.DDEBUG, 'RPM transaction over.')
# ts.run() exit codes are, hmm, "creative": None means all ok, empty
# list means some errors happened in the transaction and non-empty
# list that there were errors preventing the ts from starting...
if self.conf.reset_nice:
try:
os.nice(onice)
except:
pass
if errors is None:
pass
elif len(errors) == 0:
# this is a particularly tricky case happening also when rpm failed
# to obtain the transaction lock. We can only try to see if a
# particular element failed and if not, decide that is the
# case.
if len([el for el in self.ts if el.Failed()]) > 0:
errstring = _('Warning: scriptlet or other non-fatal errors '
'occurred during transaction.')
logger.debug(errstring)
else:
login = dnf.util.get_effective_login()
msg = _("Failed to obtain the transaction lock "
"(logged in as: %s).")
logger.critical(msg % login)
msg = _('Could not run transaction.')
raise dnf.exceptions.Error(msg)
else:
if self._record_history():
herrors = [ucd(x) for x in errors]
self.history.end(rpmdbv, 2, errors=herrors)
logger.critical(_("Transaction couldn't start:"))
for e in errors:
logger.critical(e[0]) # should this be 'to_unicoded'?
msg = _("Could not run transaction.")
raise dnf.exceptions.Error(msg)
for i in ('ts_all_fn', 'ts_done_fn'):
if hasattr(cb, i):
fn = getattr(cb, i)
try:
misc.unlink_f(fn)
except (IOError, OSError):
msg = _('Failed to remove transaction file %s')
logger.critical(msg, fn)
# sync up what just happened versus what is in the rpmdb
if not self.ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST):
self.verify_transaction(cb.verify_tsi_package)
def verify_transaction(self, verify_pkg_cb=None):
"""Check that the transaction did what was expected, and
propagate external yumdb information. Output error messages
if the transaction did not do what was expected.
:param txmbr_cb: the callback for the rpm transaction members
"""
# check to see that the rpmdb and the transaction roughly matches
# push package object metadata outside of rpmdb into yumdb
# delete old yumdb metadata entries
# for each pkg in the transaction
# if it is an install - see that the pkg is installed
# if it is a remove - see that the pkg is no longer installed, provided
# that there is not also an install of this pkg in the transaction
# (reinstall)
# for any kind of install add from_repo to the yumdb, and the cmdline
# and the install reason
total = self.transaction.total_package_count()
def display_banner(pkg, count):
count += 1
if verify_pkg_cb is not None:
verify_pkg_cb(pkg, count, total)
return count
timer = dnf.logging.Timer('verify transaction')
count = 0
# the rpmdb has changed by now. hawkey doesn't support dropping a repo
# yet we have to check what packages are in now: build a transient sack
# with only rpmdb in it. In the future when RPM Python bindings can
# tell us if a particular transaction element failed or not we can skip
# this completely.
rpmdb_sack = dnf.sack.rpmdb_sack(self)
for tsi in self._transaction:
rpo = tsi.installed
if rpo is None:
continue
installed = rpmdb_sack.query().installed().nevra(
rpo.name, rpo.evr, rpo.arch)
if len(installed) < 1:
tsi.op_type = dnf.transaction.FAIL
logger.critical(_('%s was supposed to be installed'
' but is not!' % rpo))
count = display_banner(rpo, count)
continue
po = installed[0]
count = display_banner(rpo, count)
yumdb_info = self.yumdb.get_package(po)
yumdb_info.from_repo = rpo.repoid
yumdb_info.reason = tsi.propagated_reason(self.yumdb,
self.conf.installonlypkgs)
yumdb_info.releasever = self.conf.releasever
if hasattr(self, 'args') and self.args:
yumdb_info.command_line = ' '.join(self.args)
elif hasattr(self, 'cmds') and self.cmds:
yumdb_info.command_line = ' '.join(self.cmds)
csum = rpo.returnIdSum()
if csum is not None:
yumdb_info.checksum_type = str(csum[0])
yumdb_info.checksum_data = csum[1]
if rpo.from_cmdline:
try:
st = os.stat(rpo.localPkg())
lp_ctime = str(int(st.st_ctime))
lp_mtime = str(int(st.st_mtime))
yumdb_info.from_repo_revision = lp_ctime
yumdb_info.from_repo_timestamp = lp_mtime
except Exception:
pass
elif hasattr(rpo.repo, 'repoXML'):
md = rpo.repo.repoXML
if md and md.revision is not None:
yumdb_info.from_repo_revision = str(md.revision)
if md:
yumdb_info.from_repo_timestamp = str(md.timestamp)
loginuid = misc.getloginuid()
if tsi.op_type in (dnf.transaction.DOWNGRADE,
dnf.transaction.REINSTALL,
dnf.transaction.UPGRADE):
opo = tsi.erased
opo_yumdb_info = self.yumdb.get_package(opo)
if 'installed_by' in opo_yumdb_info:
yumdb_info.installed_by = opo_yumdb_info.installed_by
if loginuid is not None:
yumdb_info.changed_by = str(loginuid)
elif loginuid is not None:
yumdb_info.installed_by = str(loginuid)
if self.conf.history_record:
self.history.sync_alldb(po)
just_installed = self.sack.query().\
filter(pkg=self.transaction.install_set)
for rpo in self.transaction.remove_set:
installed = rpmdb_sack.query().installed().nevra(
rpo.name, rpo.evr, rpo.arch)
if len(installed) > 0:
if not len(just_installed.filter(arch=rpo.arch, name=rpo.name,
evr=rpo.evr)):
msg = _('%s was supposed to be removed but is not!')
logger.critical(msg % rpo)
count = display_banner(rpo, count)
continue
count = display_banner(rpo, count)
yumdb_item = self.yumdb.get_package(rpo)
yumdb_item.clean()
if self._record_history():
rpmdbv = rpmdb_sack.rpmdb_version(self.yumdb)
self.history.end(rpmdbv, 0)
timer()
self.clean_tempfiles = True
def download_packages(self, pkglist, progress=None, callback_total=None):
"""Download the packages specified by the given list of packages. :api
`pkglist` is a list of packages to download, `progress` is an optional
DownloadProgress instance, `callback_total` an optional callback to
output messages about the download operation.
"""
# select and sort packages to download
if progress is None:
progress = dnf.callback.NullDownloadProgress()
lock = dnf.lock.build_download_lock(self.conf.cachedir)
with lock:
drpm = dnf.drpm.DeltaInfo(self.sack.query().installed(), progress)
remote_pkgs = [po for po in pkglist
if not (po.from_cmdline or po.repo.local)]
for pkg in remote_pkgs:
self._tempfiles.add(pkg.localPkg())
payloads = [dnf.repo.pkg2payload(pkg, progress, drpm.delta_factory,
dnf.repo.RPMPayload)
for pkg in remote_pkgs]
beg_download = time.time()
est_remote_size = sum(pload.download_size for pload in payloads)
progress.start(len(payloads), est_remote_size)
errors = dnf.repo.download_payloads(payloads, drpm)
if errors.irrecoverable:
raise dnf.exceptions.DownloadError(errors.irrecoverable)
remote_size = sum(errors.bandwidth_used(pload)
for pload in payloads)
saving = dnf.repo.update_saving((0, 0), payloads,
errors.recoverable)
if errors.recoverable:
msg = dnf.exceptions.DownloadError.errmap2str(
errors.recoverable)
logger.info(msg)
remaining_pkgs = [pkg for pkg in errors.recoverable]
payloads = \
[dnf.repo.pkg2payload(pkg, progress, dnf.repo.RPMPayload)
for pkg in remaining_pkgs]
est_remote_size = sum(pload.download_size
for pload in payloads)
progress.start(len(payloads), est_remote_size)
errors = dnf.repo.download_payloads(payloads, drpm)
assert not errors.recoverable
if errors.irrecoverable:
raise dnf.exceptions.DownloadError(errors.irrecoverable)
remote_size += \
sum(errors.bandwidth_used(pload) for pload in payloads)
saving = dnf.repo.update_saving(saving, payloads, {})
if callback_total is not None:
callback_total(remote_size, beg_download)
(real, full) = saving
if real != full:
msg = _("Delta RPMs reduced %.1f MB of updates to %.1f MB "
"(%d.1%% saved)")
percent = 100 - real / full * 100
logger.info(msg, full / 1024 ** 2, real / 1024 ** 2, percent)
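    # Progress reporting is optional; a sketch with the default null progress
    # and a simple callback_total (called with the remote size in bytes and
    # the download start time):
    #
    #   def on_total(remote_size, download_start):
    #       print('downloaded %d bytes' % remote_size)
    #   base.download_packages(list(base.transaction.install_set),
    #                          progress=dnf.callback.NullDownloadProgress(),
    #                          callback_total=on_total)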
def add_remote_rpm(self, path):
# :api
if not os.path.exists(path) and '://' in path:
# download remote rpm to a tempfile
path = dnf.util.urlopen(path, suffix='.rpm', delete=False).name
self._tempfiles.add(path)
return self.sack.add_cmdline_package(path)
def sigCheckPkg(self, po):
"""Verify the GPG signature of the given package object.
:param po: the package object to verify the signature of
:return: (result, error_string)
where result is::
0 = GPG signature verifies ok or verification is not required.
1 = GPG verification failed but installation of the right GPG key
might help.
2 = Fatal GPG verification error, give up.
"""
if po.from_cmdline:
check = self.conf.localpkg_gpgcheck
hasgpgkey = 0
else:
repo = self.repos[po.repoid]
check = repo.gpgcheck
hasgpgkey = not not repo.gpgkey
if check:
ts = self.rpmconn.readonly_ts
sigresult = dnf.rpm.miscutils.checkSig(ts, po.localPkg())
localfn = os.path.basename(po.localPkg())
if sigresult == 0:
result = 0
msg = ''
elif sigresult == 1:
if hasgpgkey:
result = 1
else:
result = 2
msg = _('Public key for %s is not installed') % localfn
elif sigresult == 2:
result = 2
msg = _('Problem opening package %s') % localfn
elif sigresult == 3:
if hasgpgkey:
result = 1
else:
result = 2
result = 1
msg = _('Public key for %s is not trusted') % localfn
elif sigresult == 4:
result = 2
msg = _('Package %s is not signed') % localfn
else:
result = 0
msg = ''
return result, msg
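    # Interpreting the (result, msg) pair documented above -- a sketch:
    #
    #   result, msg = base.sigCheckPkg(pkg)
    #   if result == 1:
    #       base.getKeyForPackage(pkg)  # importing the repo GPG key may help
    #   elif result == 2:
    #       raise dnf.exceptions.Error(msg)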
def clean_used_packages(self):
"""Delete the header and package files used in the
transaction from the yum cache.
"""
for fn in self._tempfiles:
if not os.path.exists(fn):
continue
try:
misc.unlink_f(fn)
except OSError:
logger.warning(_('Cannot remove %s'), fn)
continue
else:
logger.log(dnf.logging.DDEBUG,
_('%s removed'), fn)
def doPackageLists(self, pkgnarrow='all', patterns=None, showdups=None,
ignore_case=False, reponame=None):
"""Return a :class:`misc.GenericHolder` containing
lists of package objects. The contents of the lists are
specified in various ways by the arguments.
:param pkgnarrow: a string specifying which types of packages
lists to produces, such as updates, installed, available,
etc.
:param patterns: a list of names or wildcards specifying
packages to list
:param showdups: whether to include duplicate packages in the
lists
:param ignore_case: whether to ignore case when searching by
package names
:param reponame: limit packages list to the given repository
:return: a :class:`misc.GenericHolder` instance with the
following lists defined::
available = list of packageObjects
installed = list of packageObjects
upgrades = tuples of packageObjects (updating, installed)
extras = list of packageObjects
obsoletes = tuples of packageObjects (obsoleting, installed)
recent = list of packageObjects
"""
if showdups is None:
showdups = self.conf.showdupesfromrepos
if patterns is None:
return self._list_pattern(
pkgnarrow, patterns, showdups, ignore_case, reponame)
assert not dnf.util.is_string_type(patterns)
list_fn = functools.partial(
self._list_pattern, pkgnarrow, showdups=showdups,
ignore_case=ignore_case, reponame=reponame)
if patterns is None or len(patterns) == 0:
return list_fn(None)
yghs = map(list_fn, patterns)
return reduce(lambda a, b: a.merge_lists(b), yghs)
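    # The returned GenericHolder exposes the lists assembled at the end of
    # _list_pattern() below; a sketch that lists available upgrades:
    #
    #   holder = base.doPackageLists(pkgnarrow='upgrades')
    #   for pkg in holder.updates:
    #       print(pkg)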
def _list_pattern(self, pkgnarrow, pattern, showdups, ignore_case,
reponame=None):
def is_from_repo(package):
"""Test whether given package originates from the repository."""
if reponame is None:
return True
return self.yumdb.get_package(package).get('from_repo') == reponame
def pkgs_from_repo(packages):
"""Filter out the packages which do not originate from the repo."""
return (package for package in packages if is_from_repo(package))
def query_for_repo(query):
"""Filter out the packages which do not originate from the repo."""
if reponame is None:
return query
return query.filter(reponame=reponame)
ygh = misc.GenericHolder(iter=pkgnarrow)
installed = []
available = []
reinstall_available = []
old_available = []
updates = []
obsoletes = []
obsoletesTuples = []
recent = []
extras = []
autoremove = []
# do the initial pre-selection
ic = ignore_case
q = self.sack.query()
if pattern is not None:
subj = dnf.subject.Subject(pattern, ignore_case=ic)
q = subj.get_best_query(self.sack, with_provides=False)
# list all packages - those installed and available:
if pkgnarrow == 'all':
dinst = {}
ndinst = {} # Newest versions by name.arch
for po in q.installed():
dinst[po.pkgtup] = po
if showdups:
continue
key = (po.name, po.arch)
if key not in ndinst or po > ndinst[key]:
ndinst[key] = po
installed = list(pkgs_from_repo(dinst.values()))
avail = query_for_repo(q)
if not showdups:
avail = avail.latest()
for pkg in avail:
if showdups:
if pkg.pkgtup in dinst:
reinstall_available.append(pkg)
else:
available.append(pkg)
else:
key = (pkg.name, pkg.arch)
if pkg.pkgtup in dinst:
reinstall_available.append(pkg)
elif key not in ndinst or pkg.evr_gt(ndinst[key]):
available.append(pkg)
else:
old_available.append(pkg)
# produce the updates list of tuples
elif pkgnarrow == 'upgrades':
updates = query_for_repo(q).upgrades().run()
# installed only
elif pkgnarrow == 'installed':
installed = list(pkgs_from_repo(q.installed()))
# available in a repository
elif pkgnarrow == 'available':
if showdups:
avail = query_for_repo(q).available()
installed_dict = q.installed().na_dict()
for avail_pkg in avail:
key = (avail_pkg.name, avail_pkg.arch)
installed_pkgs = installed_dict.get(key, [])
same_ver = [pkg for pkg in installed_pkgs
if pkg.evr == avail_pkg.evr]
if len(same_ver) > 0:
reinstall_available.append(avail_pkg)
else:
available.append(avail_pkg)
else:
# we will only look at the latest versions of packages:
available_dict = query_for_repo(
q).available().latest().na_dict()
installed_dict = q.installed().latest().na_dict()
for (name, arch) in available_dict:
avail_pkg = available_dict[(name, arch)][0]
inst_pkg = installed_dict.get((name, arch), [None])[0]
if not inst_pkg or avail_pkg.evr_gt(inst_pkg):
available.append(avail_pkg)
elif avail_pkg.evr_eq(inst_pkg):
reinstall_available.append(avail_pkg)
else:
old_available.append(avail_pkg)
# packages to be removed by autoremove
elif pkgnarrow == 'autoremove':
autoremove = dnf.query.autoremove_pkgs(query_for_repo(q),
self.sack, self.yumdb)
# not in a repo but installed
elif pkgnarrow == 'extras':
extras = [pkg for pkg in dnf.query.extras_pkgs(q) if is_from_repo(pkg)]
# obsoleting packages (and what they obsolete)
elif pkgnarrow == 'obsoletes':
inst = q.installed()
obsoletes = query_for_repo(
self.sack.query()).filter(obsoletes=inst)
obsoletesTuples = []
for new in obsoletes:
obsoleted_reldeps = new.obsoletes
obsoletesTuples.extend(
[(new, old) for old in
inst.filter(provides=obsoleted_reldeps)])
# packages recently added to the repositories
elif pkgnarrow == 'recent':
avail = q.available()
if not showdups:
avail = avail.latest()
recent = dnf.query.recent_pkgs(query_for_repo(avail), self.conf.recent)
ygh.installed = installed
ygh.available = available
ygh.reinstall_available = reinstall_available
ygh.old_available = old_available
ygh.updates = updates
ygh.obsoletes = obsoletes
ygh.obsoletesTuples = obsoletesTuples
ygh.recent = recent
ygh.extras = extras
ygh.autoremove = autoremove
return ygh
def _add_comps_trans(self, trans):
cnt = 0
clean_deps = self.conf.clean_requirements_on_remove
attr_fn = ((trans.install, self._goal.install),
(trans.install_opt,
functools.partial(self._goal.install, optional=True)),
(trans.upgrade, self._goal.upgrade),
(trans.remove,
functools.partial(self._goal.erase, clean_deps=clean_deps)))
for (attr, fn) in attr_fn:
for it in attr:
if not self.sack.query().filter(name=it):
# a comps item that doesn't refer to anything real
continue
sltr = dnf.selector.Selector(self.sack)
sltr.set(name=it)
fn(select=sltr)
cnt += 1
self._goal.group_members.update(trans.install)
self._goal.group_members.update(trans.install_opt)
return cnt
def build_comps_solver(self):
def reason_fn(pkgname):
q = self.sack.query().installed().filter(name=pkgname)
if not q:
return None
try:
return self.yumdb.get_package(q[0]).reason
except AttributeError:
return 'unknown'
return dnf.comps.Solver(self.group_persistor, reason_fn)
def environment_install(self, env, types, exclude=None, strict=True):
solver = self.build_comps_solver()
types = self._translate_comps_pkg_types(types)
trans = dnf.comps.install_or_skip(solver.environment_install,
env, types, exclude or set(),
strict)
if not trans:
return 0
return self._add_comps_trans(trans)
def environment_remove(self, env):
solver = self.build_comps_solver()
trans = solver.environment_remove(env)
return self._add_comps_trans(trans)
_COMPS_TRANSLATION = {
'default': dnf.comps.DEFAULT,
'mandatory': dnf.comps.MANDATORY,
'optional': dnf.comps.OPTIONAL
}
@staticmethod
def _translate_comps_pkg_types(pkg_types):
ret = 0
for (name, enum) in Base._COMPS_TRANSLATION.items():
if name in pkg_types:
ret |= enum
return ret
def group_install(self, grp, pkg_types, exclude=None, strict=True):
"""Installs packages of selected group
:param exclude: list of package name glob patterns
that will be excluded from install set
"""
# :api
def _pattern_to_pkgname(pattern):
if dnf.util.is_glob_pattern(pattern):
q = self.sack.query().filter(name__glob=pattern)
return map(lambda p: p.name, q)
else:
return (pattern,)
exclude_pkgnames = None
if exclude:
nested_excludes = [_pattern_to_pkgname(p) for p in exclude]
exclude_pkgnames = itertools.chain.from_iterable(nested_excludes)
solver = self.build_comps_solver()
pkg_types = self._translate_comps_pkg_types(pkg_types)
trans = dnf.comps.install_or_skip(solver.group_install,
grp, pkg_types, exclude_pkgnames,
strict)
if not trans:
return 0
logger.debug("Adding packages from group '%s': %s",
grp.id, trans.install)
return self._add_comps_trans(trans)
def env_group_install(self, patterns, types, strict=True):
q = CompsQuery(self.comps, self.group_persistor,
CompsQuery.ENVIRONMENTS | CompsQuery.GROUPS,
CompsQuery.AVAILABLE | CompsQuery.INSTALLED)
cnt = 0
done = True
for pattern in patterns:
try:
res = q.get(pattern)
except dnf.exceptions.CompsError as err:
logger.error("Warning: %s", ucd(err))
done = False
continue
for group in res.groups:
cnt += self.group_install(group, types, strict=strict)
for env in res.environments:
cnt += self.environment_install(env, types, strict=strict)
if not done and strict:
raise dnf.exceptions.Error(_('Nothing to do.'))
return cnt
def group_remove(self, grp):
# :api
solver = self.build_comps_solver()
trans = solver.group_remove(grp)
return self._add_comps_trans(trans)
def env_group_remove(self, patterns):
q = CompsQuery(self.comps, self.group_persistor,
CompsQuery.ENVIRONMENTS | CompsQuery.GROUPS,
CompsQuery.INSTALLED)
try:
res = q.get(*patterns)
except dnf.exceptions.CompsError as err:
logger.error("Warning: %s", ucd(err))
raise dnf.exceptions.Error(_('No groups marked for removal.'))
cnt = 0
for env in res.environments:
cnt += self.environment_remove(env)
for grp in res.groups:
cnt += self.group_remove(grp)
return cnt
def group_upgrade(self, grp):
# :api
solver = self.build_comps_solver()
trans = solver.group_upgrade(grp)
return self._add_comps_trans(trans)
def gpgKeyCheck(self):
"""Checks for the presence of GPG keys in the rpmdb.
:return: 0 if there are no GPG keys in the rpmdb, and 1 if
there are keys
"""
gpgkeyschecked = self.conf.cachedir + '/.gpgkeyschecked.yum'
if os.path.exists(gpgkeyschecked):
return 1
installroot = self.conf.installroot
myts = dnf.rpm.transaction.initReadOnlyTransaction(root=installroot)
myts.pushVSFlags(~(rpm._RPMVSF_NOSIGNATURES | rpm._RPMVSF_NODIGESTS))
idx = myts.dbMatch('name', 'gpg-pubkey')
keys = len(idx)
del idx
del myts
if keys == 0:
return 0
else:
mydir = os.path.dirname(gpgkeyschecked)
if not os.path.exists(mydir):
os.makedirs(mydir)
fo = open(gpgkeyschecked, 'w')
fo.close()
del fo
return 1
def install(self, pkg_spec, reponame=None, strict=True):
"""Mark package(s) given by pkg_spec and reponame for installation.:api
"""
subj = dnf.subject.Subject(pkg_spec)
if self.conf.multilib_policy == "all" or \
subj.is_arch_specified(self.sack):
q = subj.get_best_query(self.sack)
if reponame is not None:
q = q.filter(reponame=reponame)
if not q:
raise dnf.exceptions.PackageNotFoundError(
_('no package matched'), pkg_spec)
already_inst, available = self._query_matches_installed(q)
for i in already_inst:
_msg_installed(i)
for a in available:
self._goal.install(a, optional=(not strict))
return len(available)
elif self.conf.multilib_policy == "best":
sltrs = subj.get_best_selectors(self.sack)
match = reduce(lambda x, y: y.matches() or x, sltrs, [])
if not match:
raise dnf.exceptions.MarkingError(
_('no package matched'), pkg_spec)
for sltr in sltrs:
if not sltr.matches():
continue
if reponame is not None:
sltr = sltr.set(reponame=reponame)
already_inst = self._sltr_matches_installed(sltr)
if already_inst:
for package in already_inst:
_msg_installed(package)
self._goal.install(select=sltr, optional=(not strict))
return 1
return 0
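    # install() accepts a plain name, a glob or a NEVRA-style spec; a sketch
    # that restricts matches to a single repository (the repo id "updates" is
    # only an example):
    #
    #   base.install("kernel-tools", reponame="updates")
    #   base.resolve()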
def install_groupie(self, pkg_name, inst_set):
"""Installs a group member package by name. """
forms = [hawkey.FORM_NAME]
subj = dnf.subject.Subject(pkg_name)
if self.conf.multilib_policy == "all":
q = subj.get_best_query(
self.sack, with_provides=False, forms=forms)
for pkg in q:
self._goal.install(pkg)
return len(q)
elif self.conf.multilib_policy == "best":
sltr = subj.get_best_selector(self.sack, forms=forms)
if sltr.matches():
self._goal.install(select=sltr)
return 1
return 0
def package_downgrade(self, pkg):
# :api
if pkg.from_system:
msg = 'downgrade_package() for an installed package.'
raise NotImplementedError(msg)
q = self.sack.query().installed().filter(name=pkg.name, arch=pkg.arch)
if not q:
msg = _("Package %s not installed, cannot downgrade it.")
logger.warning(msg, pkg.name)
return 0
elif sorted(q)[0] > pkg:
self._goal.downgrade_to(pkg)
return 1
else:
msg = _("Package %s of lower version already installed, "
"cannot downgrade it.") % pkg.name
logger.warning(msg)
return 0
def package_install(self, pkg, strict=True):
# :api
q = self.sack.query().nevra(pkg.name, pkg.evr, pkg.arch)
already_inst, _ = self._query_matches_installed(q)
if pkg in already_inst:
_msg_installed(pkg)
else:
self._goal.install(pkg, optional=(not strict))
return 1
def package_reinstall(self, pkg):
if self.sack.query().installed().filter(nevra=str(pkg)):
self._goal.install(pkg)
return 1
msg = _("Package %s not installed, cannot reinstall it.") % str(pkg)
logger.warning(msg)
return 0
def package_remove(self, pkg):
self._goal.erase(pkg)
return 1
def package_upgrade(self, pkg):
# :api
if pkg.from_system:
msg = 'upgrade_package() for an installed package.'
raise NotImplementedError(msg)
q = self.sack.query().installed().filter(name=pkg.name, arch=pkg.arch)
if not q:
msg = _("Package %s not installed, cannot update it.") % pkg.name
logger.warning(msg)
return 0
elif sorted(q)[-1] < pkg:
self._goal.upgrade_to(pkg)
return 1
else:
msg = _("Package %s of higher version already installed, "
"cannot update it.") % pkg.name
logger.warning(msg)
return 0
def upgrade(self, pkg_spec, reponame=None):
# :api
def is_installed_by_name(pkg_name):
return first(self.sack.query().installed().filter(name=pkg_name))
wildcard = True if dnf.util.is_glob_pattern(pkg_spec) else False
sltrs = dnf.subject.Subject(pkg_spec).get_best_selectors(self.sack)
match = reduce(lambda x, y: y.matches() or x, sltrs, [])
if match:
prev_count = self._goal.req_length()
for sltr in sltrs:
if not sltr.matches():
continue
pkg_name = sltr.matches()[0].name
if not is_installed_by_name(pkg_name):
                    if not wildcard:  # wildcard specs: do not warn about not-installed packages
msg = _("Package %s not installed, cannot update it.")
logger.warning(msg, pkg_name)
continue
if reponame is not None:
sltr = sltr.set(reponame=reponame)
self._goal.upgrade(select=sltr)
if self._goal.req_length() - prev_count:
return 1
raise dnf.exceptions.MarkingError('no package matched', pkg_spec)
def upgrade_all(self, reponame=None):
# :api
if reponame is None:
self._goal.upgrade_all()
else:
try:
self.upgrade('*', reponame)
except dnf.exceptions.MarkingError:
pass
return 1
def upgrade_to(self, pkg_spec, reponame=None):
forms = [hawkey.FORM_NEVRA, hawkey.FORM_NEVR]
sltr = dnf.subject.Subject(pkg_spec).get_best_selector(self.sack,
forms=forms)
if sltr.matches():
if reponame is not None:
sltr = sltr.set(reponame=reponame)
prev_count = self._goal.req_length()
self._goal.upgrade_to(select=sltr)
if self._goal.req_length() - prev_count:
return 1
return 0
def distro_sync(self, pkg_spec=None):
if pkg_spec is None:
self._goal.distupgrade_all()
else:
sltrs = dnf.subject.Subject(pkg_spec).get_best_selectors(self.sack)
match = reduce(lambda x, y: y.matches() or x, sltrs, [])
if not match:
logger.info(_('No package %s installed.'), pkg_spec)
return 0
for sltr in sltrs:
if not sltr.matches():
continue
self._goal.distupgrade(select=sltr)
return 1
def remove(self, pkg_spec, reponame=None):
"""Mark the specified package for removal. #:api """
matches = dnf.subject.Subject(pkg_spec).get_best_query(self.sack)
installed = [
pkg for pkg in matches.installed()
if reponame is None or
self.yumdb.get_package(pkg).get('from_repo') == reponame]
if not installed:
raise dnf.exceptions.PackagesNotInstalledError(
'no package matched', pkg_spec)
clean_deps = self.conf.clean_requirements_on_remove
for pkg in installed:
self._goal.erase(pkg, clean_deps=clean_deps)
return len(installed)
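    # remove() only matches installed packages and raises
    # PackagesNotInstalledError otherwise; a sketch:
    #
    #   try:
    #       base.remove("nano")
    #   except dnf.exceptions.PackagesNotInstalledError:
    #       pass  # nothing matched, nothing to do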
def reinstall(self, pkg_spec, old_reponame=None, new_reponame=None,
new_reponame_neq=None, remove_na=False):
subj = dnf.subject.Subject(pkg_spec)
q = subj.get_best_query(self.sack)
installed_pkgs = [
pkg for pkg in q.installed()
if old_reponame is None or
self.yumdb.get_package(pkg).get('from_repo') == old_reponame]
available_q = q.available()
if new_reponame is not None:
available_q = available_q.filter(reponame=new_reponame)
if new_reponame_neq is not None:
available_q = available_q.filter(reponame__neq=new_reponame_neq)
available_nevra2pkg = dnf.query.per_nevra_dict(available_q)
if not installed_pkgs:
raise dnf.exceptions.PackagesNotInstalledError(
'no package matched', pkg_spec, available_nevra2pkg.values())
cnt = 0
clean_deps = self.conf.clean_requirements_on_remove
for installed_pkg in installed_pkgs:
try:
available_pkg = available_nevra2pkg[ucd(installed_pkg)]
except KeyError:
if not remove_na:
continue
self._goal.erase(installed_pkg, clean_deps=clean_deps)
else:
self._goal.install(available_pkg)
cnt += 1
if cnt == 0:
raise dnf.exceptions.PackagesNotAvailableError(
'no package matched', pkg_spec, installed_pkgs)
return cnt
def downgrade(self, pkg_spec):
"""Mark a package to be downgraded. :api
This is equivalent to first removing the currently installed package,
and then installing an older version.
"""
subj = dnf.subject.Subject(pkg_spec)
q = subj.get_best_query(self.sack)
installed = sorted(q.installed())
installed_pkg = first(installed)
if installed_pkg is None:
available_pkgs = q.available()
if available_pkgs:
raise dnf.exceptions.PackagesNotInstalledError(
'no package matched', pkg_spec, available_pkgs)
raise dnf.exceptions.PackageNotFoundError('no package matched',
pkg_spec)
arch = installed_pkg.arch
q = self.sack.query().filter(name=installed_pkg.name, arch=arch)
avail = [pkg for pkg in q.downgrades() if pkg < installed_pkg]
avail_pkg = first(sorted(avail, reverse=True))
if avail_pkg is None:
return 0
self._goal.install(avail_pkg)
return 1
def downgrade_to(self, pkg_spec):
"""Downgrade to specific version if specified otherwise downgrades
to one version lower than the package installed.
"""
subj = dnf.subject.Subject(pkg_spec)
poss = subj.subj.nevra_possibilities_real(self.sack, allow_globs=True)
nevra = dnf.util.first(poss)
if not nevra:
raise dnf.exceptions.PackageNotFoundError('no package matched',
pkg_spec)
q = subj._nevra_to_filters(self.sack.query(), nevra)
available_pkgs = q.available()
if not self.sack.query().filter(name=nevra.name).installed():
raise dnf.exceptions.PackagesNotInstalledError(
'no package matched', pkg_spec, available_pkgs)
downgrade_pkgs = available_pkgs.downgrades().latest()
if not downgrade_pkgs:
msg = _("Package %s of lowest version already installed, "
"cannot downgrade it.") % nevra.name
logger.warning(msg)
return 0
dnf.util.mapall(self._goal.downgrade_to, downgrade_pkgs)
return 1
def provides(self, provides_spec):
providers = dnf.query.by_provides(self.sack, provides_spec)
if providers:
return providers
if any(map(dnf.util.is_glob_pattern, provides_spec)):
return self.sack.query().filter(file__glob=provides_spec)
return self.sack.query().filter(file=provides_spec)
def history_undo_operations(self, operations):
"""Undo the operations on packages by their NEVRAs.
:param operations: a NEVRAOperations to be undone
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
def handle_downgrade(new_nevra, old_nevra, obsoleted_nevras):
"""Handle a downgraded package."""
news = self.sack.query().installed().nevra(new_nevra)
if not news:
raise dnf.exceptions.PackagesNotInstalledError(
'no package matched', new_nevra)
olds = self.sack.query().available().nevra(old_nevra)
if not olds:
raise dnf.exceptions.PackagesNotAvailableError(
'no package matched', old_nevra)
assert len(news) == 1
self._transaction.add_upgrade(first(olds), news[0], None)
for obsoleted_nevra in obsoleted_nevras:
handle_erase(obsoleted_nevra)
def handle_erase(old_nevra):
"""Handle an erased package."""
pkgs = self.sack.query().available().nevra(old_nevra)
if not pkgs:
raise dnf.exceptions.PackagesNotAvailableError(
'no package matched', old_nevra)
self._transaction.add_install(
first(pkgs), None, 'history')
def handle_install(new_nevra, obsoleted_nevras):
"""Handle an installed package."""
pkgs = self.sack.query().installed().nevra(new_nevra)
if not pkgs:
raise dnf.exceptions.PackagesNotInstalledError(
'no package matched', new_nevra)
assert len(pkgs) == 1
self._transaction.add_erase(pkgs[0])
for obsoleted_nevra in obsoleted_nevras:
handle_erase(obsoleted_nevra)
def handle_reinstall(new_nevra, old_nevra, obsoleted_nevras):
"""Handle a reinstalled package."""
news = self.sack.query().installed().nevra(new_nevra)
if not news:
raise dnf.exceptions.PackagesNotInstalledError(
'no package matched', new_nevra)
olds = self.sack.query().available().nevra(old_nevra)
if not olds:
raise dnf.exceptions.PackagesNotAvailableError(
'no package matched', old_nevra)
obsoleteds = []
for nevra in obsoleted_nevras:
obsoleteds_ = self.sack.query().installed().nevra(nevra)
if obsoleteds_:
assert len(obsoleteds_) == 1
obsoleteds.append(obsoleteds_[0])
assert len(news) == 1
self._transaction.add_reinstall(first(olds), news[0],
obsoleteds)
def handle_upgrade(new_nevra, old_nevra, obsoleted_nevras):
"""Handle an upgraded package."""
news = self.sack.query().installed().nevra(new_nevra)
if not news:
raise dnf.exceptions.PackagesNotInstalledError(
'no package matched', new_nevra)
olds = self.sack.query().available().nevra(old_nevra)
if not olds:
raise dnf.exceptions.PackagesNotAvailableError(
'no package matched', old_nevra)
assert len(news) == 1
self._transaction.add_downgrade(
first(olds), news[0], None)
for obsoleted_nevra in obsoleted_nevras:
handle_erase(obsoleted_nevra)
# Build the transaction directly, because the depsolve is not needed.
self._transaction = dnf.transaction.Transaction()
for state, nevra, replaced_nevra, obsoleted_nevras in operations:
if state == 'Install':
assert not replaced_nevra
handle_install(nevra, obsoleted_nevras)
elif state == 'Erase':
assert not replaced_nevra and not obsoleted_nevras
handle_erase(nevra)
elif state == 'Reinstall':
handle_reinstall(nevra, replaced_nevra, obsoleted_nevras)
elif state == 'Downgrade':
handle_downgrade(nevra, replaced_nevra, obsoleted_nevras)
elif state == 'Update':
handle_upgrade(nevra, replaced_nevra, obsoleted_nevras)
else:
assert False
def getKeyForPackage(self, po, askcb=None, fullaskcb=None):
"""Retrieve a key for a package. If needed, use the given
callback to prompt whether the key should be imported.
:param po: the package object to retrieve the key of
:param askcb: Callback function to use to ask permission to
           import a key. The arguments that *askcb* should take are the
package object, the userid of the key, and the keyid
:param fullaskcb: Callback function to use to ask permission to
import a key. This differs from *askcb* in that it gets
passed a dictionary so that we can expand the values passed.
:raises: :class:`dnf.exceptions.Error` if there are errors
retrieving the keys
"""
repo = self.repos[po.repoid]
keyurls = repo.gpgkey
key_installed = False
def _prov_key_data(msg):
msg += _('\n\n\n'
' Failing package is: %s\n'
' GPG Keys are configured as: %s\n'
) % (po, ", ".join(repo.gpgkey))
return msg
user_cb_fail = False
for keyurl in keyurls:
keys = dnf.crypto.retrieve(keyurl, repo)
for info in keys:
ts = self.rpmconn.readonly_ts
# Check if key is already installed
if misc.keyInstalled(ts, info.rpm_id, info.timestamp) >= 0:
msg = _('GPG key at %s (0x%s) is already installed')
logger.info(msg, keyurl, info.short_id)
continue
# Try installing/updating GPG key
info.url = keyurl
dnf.crypto.log_key_import(info)
rc = False
if self.conf.assumeno:
rc = False
elif self.conf.assumeyes:
rc = True
                # Grab the .sig/.asc for the keyurl, if it exists.  If it
                # does, check the signature on the key.  If the key is signed
                # by one of our ca-keys for this repo or the global one, then
                # rc = True; otherwise ask as normal.
elif fullaskcb:
rc = fullaskcb({"po": po, "userid": info.userid,
"hexkeyid": info.short_id,
"keyurl": keyurl,
"fingerprint": info.fingerprint,
"timestamp": info.timestamp})
elif askcb:
rc = askcb(po, info.userid, info.short_id)
if not rc:
user_cb_fail = True
continue
# Import the key
result = ts.pgpImportPubkey(misc.procgpgkey(info.raw_key))
if result != 0:
msg = _('Key import failed (code %d)') % result
raise dnf.exceptions.Error(_prov_key_data(msg))
logger.info(_('Key imported successfully'))
key_installed = True
if not key_installed and user_cb_fail:
raise dnf.exceptions.Error(_("Didn't install any keys"))
if not key_installed:
msg = _('The GPG keys listed for the "%s" repository are '
'already installed but they are not correct for this '
'package.\n'
'Check that the correct key URLs are configured for '
'this repository.') % repo.name
raise dnf.exceptions.Error(_prov_key_data(msg))
# Check if the newly installed keys helped
result, errmsg = self.sigCheckPkg(po)
if result != 0:
msg = _("Import of key(s) didn't help, wrong key(s)?")
logger.info(msg)
errmsg = ucd(errmsg)
raise dnf.exceptions.Error(_prov_key_data(errmsg))
def _run_rpm_check(self):
results = []
self.ts.check()
for prob in self.ts.problems():
# Newer rpm (4.8.0+) has problem objects, older have just strings.
# Should probably move to using the new objects, when we can. For
# now just be compatible.
results.append(ucd(prob))
return results
def _store_config_in_history(self):
self.history.write_addon_data('config-main', self.conf.dump())
myrepos = ''
for repo in self.repos.iter_enabled():
myrepos += repo.dump()
myrepos += '\n'
self.history.write_addon_data('config-repos', myrepos)
def _msg_installed(pkg):
name = ucd(pkg)
msg = _('Package %s is already installed, skipping.') % name
logger.warning(msg)
| shaded-enmity/dnf | dnf/base.py | Python | gpl-2.0 | 72,096 |
import asyncio
async def hello_world():
print("Hello World")
return 42
hello_world_coroutine = hello_world()
print(hello_world_coroutine)
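# Calling hello_world() does not execute its body; it only creates a coroutine
# object, which is what the print above shows.  The coroutine is actually run
# by run_until_complete() below, which returns its result (42).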
event_loop = asyncio.get_event_loop()
try:
print("entering event loop")
result = event_loop.run_until_complete(hello_world_coroutine)
print(result)
finally:
event_loop.close() | zzsza/TIL | python/asyncio-helloy.py | Python | mit | 343 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import numpy as np
def matrix_test():
a = np.matrix([[1,4,3],[6,3,8],[-1,5,3]])
print a
print a.shape
    print a.T   # transpose
    print a.A   # matrix as a 2-D ndarray
    print a.A1  # matrix as a flattened 1-D ndarray
    print np.linalg.det(a)   # determinant
    print np.linalg.det(a.T)  # determinant of the transpose (equal to det(a))
    print np.linalg.matrix_rank(a)  # rank
    print a.I   # inverse
matrix_test() | AgainstWind/python-demos | mathematics/linear_algebra_test.py | Python | apache-2.0 | 335 |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Two targets'
copyright = '1999, Author'
author = 'Author'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for rinohtype PDF output ----------------------------------------
rinoh_documents = [
dict(doc='manual', target='manual', title='Manual',
template='template.rtt'),
dict(doc='reference', target='reference', title='Reference',
template='template.rtt'),
]
| brechtm/rinohtype | tests_regression/sphinx/test-twotargets/conf.py | Python | agpl-3.0 | 2,445 |
import os
import sys
root_path = os.path.abspath("../../../")
if root_path not in sys.path:
sys.path.append(root_path)
import numpy as np
import tensorflow as tf
from _Dist.NeuralNetworks.Base import Generator4d
from _Dist.NeuralNetworks.h_RNN.RNN import Basic3d
from _Dist.NeuralNetworks.NNUtil import Activations
class Basic4d(Basic3d):
def _calculate(self, x, y=None, weights=None, tensor=None, n_elem=1e7, is_training=False):
return super(Basic4d, self)._calculate(x, y, weights, tensor, n_elem / 10, is_training)
class CNN(Basic4d):
def __init__(self, *args, **kwargs):
self.height, self.width = kwargs.pop("height", None), kwargs.pop("width", None)
super(CNN, self).__init__(*args, **kwargs)
self._name_appendix = "CNN"
self._generator_base = Generator4d
self.conv_activations = None
self.n_filters = self.filter_sizes = self.poolings = None
def init_model_param_settings(self):
super(CNN, self).init_model_param_settings()
self.conv_activations = self.model_param_settings.get("conv_activations", "relu")
def init_model_structure_settings(self):
super(CNN, self).init_model_structure_settings()
self.n_filters = self.model_structure_settings.get("n_filters", [32, 32])
self.filter_sizes = self.model_structure_settings.get("filter_sizes", [(3, 3), (3, 3)])
self.poolings = self.model_structure_settings.get("poolings", [None, "max_pool"])
if not len(self.filter_sizes) == len(self.poolings) == len(self.n_filters):
raise ValueError("Length of filter_sizes, n_filters & pooling should be the same")
if isinstance(self.conv_activations, str):
self.conv_activations = [self.conv_activations] * len(self.filter_sizes)
def init_from_data(self, x, y, x_test, y_test, sample_weights, names):
if self.height is None or self.width is None:
assert len(x.shape) == 4, "height and width are not provided, hence len(x.shape) should be 4"
self.height, self.width = x.shape[1:3]
if len(x.shape) == 2:
x = x.reshape(len(x), self.height, self.width, -1)
else:
assert self.height == x.shape[1], "height is set to be {}, but {} found".format(self.height, x.shape[1])
            assert self.width == x.shape[2], "width is set to be {}, but {} found".format(self.width, x.shape[2])
if x_test is not None and len(x_test.shape) == 2:
x_test = x_test.reshape(len(x_test), self.height, self.width, -1)
super(CNN, self).init_from_data(x, y, x_test, y_test, sample_weights, names)
def _define_input_and_placeholder(self):
self._is_training = tf.placeholder(tf.bool, name="is_training")
self._tfx = tf.placeholder(tf.float32, [None, self.height, self.width, self.n_dim], name="X")
self._tfy = tf.placeholder(tf.float32, [None, self.n_class], name="Y")
def _build_model(self, net=None):
self._model_built = True
if net is None:
net = self._tfx
for i, (filter_size, n_filter, pooling) in enumerate(zip(
self.filter_sizes, self.n_filters, self.poolings
)):
net = tf.layers.conv2d(net, n_filter, filter_size, padding="same")
net = tf.layers.batch_normalization(net, training=self._is_training)
activation = self.conv_activations[i]
if activation is not None:
net = getattr(Activations, activation)(net, activation)
net = tf.layers.dropout(net, training=self._is_training)
if pooling is not None:
net = tf.layers.max_pooling2d(net, 2, 2, name="pool")
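        # After the conv/pool stack, flatten the (N, H, W, C) output into
        # (N, H*W*C) so the fully-connected layers built by the parent class
        # can consume it.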
fc_shape = np.prod([net.shape[i].value for i in range(1, 4)])
net = tf.reshape(net, [-1, fc_shape])
super(CNN, self)._build_model(net)
| carefree0910/MachineLearning | _Dist/NeuralNetworks/i_CNN/CNN.py | Python | mit | 3,860 |
"""Site-specific content, templatetags and such.""" | JamesJGarner/cms | src/cms/project_template/project_name/apps/site/__init__.py | Python | bsd-3-clause | 51 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Youtubedlg module for managing the download process.
This module is responsible for managing the download process
and update the GUI interface.
Attributes:
MANAGER_PUB_TOPIC (string): wxPublisher subscription topic of the
DownloadManager thread.
WORKER_PUB_TOPIC (string): wxPublisher subscription topic of the
Worker thread.
Note:
    This module does not download the URLs itself;
    that is the job of the 'downloaders' module.
"""
from __future__ import unicode_literals
import time
import os.path
from threading import (
Thread,
RLock,
Lock
)
from wx import CallAfter
from wx.lib.pubsub import setuparg1
from wx.lib.pubsub import pub as Publisher
from .parsers import OptionsParser
from .updatemanager import UpdateThread
from .downloaders import YoutubeDLDownloader
from .utils import (
YOUTUBEDL_BIN,
os_path_exists,
format_bytes,
to_string,
to_bytes
)
MANAGER_PUB_TOPIC = 'dlmanager'
WORKER_PUB_TOPIC = 'dlworker'
_SYNC_LOCK = RLock()
# Decorator that adds thread synchronization to a function
def synchronized(lock):
def _decorator(func):
def _wrapper(*args, **kwargs):
lock.acquire()
ret_value = func(*args, **kwargs)
lock.release()
return ret_value
return _wrapper
return _decorator
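# Usage sketch (illustrative only; the names below are not part of this
# module):
#
#     _example_lock = Lock()
#
#     @synchronized(_example_lock)
#     def _append_item(shared_list, item):
#         shared_list.append(item)
#
# DownloadList below applies the same pattern with the module-level _SYNC_LOCK.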
class DownloadItem(object):
"""Object that represents a download.
Attributes:
STAGES (tuple): Main stages of the download item.
ACTIVE_STAGES (tuple): Sub stages of the 'Active' stage.
COMPLETED_STAGES (tuple): Sub stages of the 'Completed' stage.
ERROR_STAGES (tuple): Sub stages of the 'Error' stage.
Args:
url (string): URL that corresponds to the download item.
options (list): Options list to use during the download phase.
"""
STAGES = ("Queued", "Active", "Paused", "Completed", "Error")
ACTIVE_STAGES = ("Pre Processing", "Downloading", "Post Processing")
COMPLETED_STAGES = ("Finished", "Warning", "Already Downloaded")
ERROR_STAGES = ("Error", "Stopped", "Filesize Abort")
def __init__(self, url, options):
self.url = url
self.options = options
self.object_id = hash(url + to_string(options))
self.reset()
@property
def stage(self):
return self._stage
@stage.setter
def stage(self, value):
if value not in self.STAGES:
raise ValueError(value)
if value == "Queued":
self.progress_stats["status"] = value
if value == "Active":
self.progress_stats["status"] = self.ACTIVE_STAGES[0]
if value == "Completed":
self.progress_stats["status"] = self.COMPLETED_STAGES[0]
if value == "Paused":
self.progress_stats["status"] = value
if value == "Error":
self.progress_stats["status"] = self.ERROR_STAGES[0]
self._stage = value
def reset(self):
if hasattr(self, "_stage") and self._stage == self.STAGES[1]:
raise RuntimeError("Cannot reset an 'Active' item")
self._stage = self.STAGES[0]
self.path = ""
self.filenames = []
self.extensions = []
self.filesizes = []
self.default_values = {
"filename": self.url,
"extension": "-",
"filesize": "-",
"percent": "0%",
"speed": "-",
"eta": "-",
"status": self.stage,
"playlist_size": "",
"playlist_index": ""
}
self.progress_stats = dict(self.default_values)
def get_files(self):
"""Returns a list that contains all the system files bind to this object."""
files = []
for index, item in enumerate(self.filenames):
filename = item + self.extensions[index]
files.append(os.path.join(self.path, filename))
return files
def update_stats(self, stats_dict):
"""Updates the progress_stats dict from the given dictionary."""
assert isinstance(stats_dict, dict)
for key in stats_dict:
if key in self.progress_stats:
value = stats_dict[key]
if not isinstance(value, basestring) or not value:
self.progress_stats[key] = self.default_values[key]
else:
self.progress_stats[key] = value
# Extract extra stuff
if key == "filename":
self.filenames.append(stats_dict[key])
if key == "extension":
self.extensions.append(stats_dict[key])
if key == "path":
self.path = stats_dict[key]
if key == "status":
self._set_stage(stats_dict[key])
if "filesize" in stats_dict:
if stats_dict["percent"] == "100%" and len(self.filesizes) < len(self.filenames):
filesize = stats_dict["filesize"].lstrip("~") # HLS downloader etc
self.filesizes.append(to_bytes(filesize))
if "status" in stats_dict:
# If we are post processing try to calculate the size of
# the output file since youtube-dl does not
if stats_dict["status"] == self.ACTIVE_STAGES[2] and not len(self.filenames) % 3:
post_proc_filesize = self.filesizes[-2] + self.filesizes[-1]
self.filesizes.append(post_proc_filesize)
self.progress_stats["filesize"] = format_bytes(post_proc_filesize)
def _set_stage(self, status):
if status in self.ACTIVE_STAGES:
self._stage = self.STAGES[1]
if status in self.COMPLETED_STAGES:
self._stage = self.STAGES[3]
if status in self.ERROR_STAGES:
self._stage = self.STAGES[4]
def __eq__(self, other):
return self.object_id == other.object_id
class DownloadList(object):
"""List like data structure that contains DownloadItems.
Args:
items (list): List that contains DownloadItems.
"""
def __init__(self, items=None):
assert isinstance(items, list) or items is None
if items is None:
self._items_dict = {} # Speed up lookup
self._items_list = [] # Keep the sequence
else:
self._items_list = [item.object_id for item in items]
self._items_dict = {item.object_id: item for item in items}
@synchronized(_SYNC_LOCK)
def clear(self):
"""Removes all the items from the list even the 'Active' ones."""
self._items_list = []
self._items_dict = {}
@synchronized(_SYNC_LOCK)
def insert(self, item):
"""Inserts the given item to the list. Does not check for duplicates. """
self._items_list.append(item.object_id)
self._items_dict[item.object_id] = item
@synchronized(_SYNC_LOCK)
def remove(self, object_id):
"""Removes an item from the list.
Removes the item with the corresponding object_id from
the list if the item is not in 'Active' state.
Returns:
True on success else False.
"""
if self._items_dict[object_id].stage != "Active":
self._items_list.remove(object_id)
del self._items_dict[object_id]
return True
return False
@synchronized(_SYNC_LOCK)
def fetch_next(self):
"""Returns the next queued item on the list.
Returns:
Next queued item or None if no other item exist.
"""
for object_id in self._items_list:
cur_item = self._items_dict[object_id]
if cur_item.stage == "Queued":
return cur_item
return None
@synchronized(_SYNC_LOCK)
def move_up(self, object_id):
"""Moves the item with the corresponding object_id up to the list."""
index = self._items_list.index(object_id)
if index > 0:
self._swap(index, index - 1)
return True
return False
@synchronized(_SYNC_LOCK)
def move_down(self, object_id):
"""Moves the item with the corresponding object_id down to the list."""
index = self._items_list.index(object_id)
if index < (len(self._items_list) - 1):
self._swap(index, index + 1)
return True
return False
@synchronized(_SYNC_LOCK)
def get_item(self, object_id):
"""Returns the DownloadItem with the given object_id."""
return self._items_dict[object_id]
@synchronized(_SYNC_LOCK)
def has_item(self, object_id):
"""Returns True if the given object_id is in the list else False."""
return object_id in self._items_list
@synchronized(_SYNC_LOCK)
def get_items(self):
"""Returns a list with all the items."""
return [self._items_dict[object_id] for object_id in self._items_list]
@synchronized(_SYNC_LOCK)
def change_stage(self, object_id, new_stage):
"""Change the stage of the item with the given object_id."""
self._items_dict[object_id].stage = new_stage
@synchronized(_SYNC_LOCK)
def index(self, object_id):
"""Get the zero based index of the item with the given object_id."""
if object_id in self._items_list:
return self._items_list.index(object_id)
return -1
@synchronized(_SYNC_LOCK)
def __len__(self):
return len(self._items_list)
def _swap(self, index1, index2):
self._items_list[index1], self._items_list[index2] = self._items_list[index2], self._items_list[index1]
class DownloadManager(Thread):
"""Manages the download process.
Attributes:
WAIT_TIME (float): Time in seconds to sleep.
Args:
download_list (DownloadList): List that contains items to download.
opt_manager (optionsmanager.OptionsManager): Object responsible for
managing the youtubedlg options.
log_manager (logmanager.LogManager): Object responsible for writing
errors to the log.
"""
WAIT_TIME = 0.1
def __init__(self, parent, download_list, opt_manager, log_manager=None):
super(DownloadManager, self).__init__()
self.parent = parent
self.opt_manager = opt_manager
self.log_manager = log_manager
self.download_list = download_list
self._time_it_took = 0
self._successful = 0
self._running = True
# Init the custom workers thread pool
log_lock = None if log_manager is None else Lock()
wparams = (opt_manager, self._youtubedl_path(), log_manager, log_lock)
self._workers = [Worker(*wparams) for _ in xrange(opt_manager.options["workers_number"])]
self.start()
@property
def successful(self):
"""Returns number of successful downloads. """
return self._successful
@property
def time_it_took(self):
"""Returns time(seconds) it took for the download process
to complete. """
return self._time_it_took
def run(self):
self._check_youtubedl()
self._time_it_took = time.time()
while self._running:
item = self.download_list.fetch_next()
if item is not None:
worker = self._get_worker()
if worker is not None:
worker.download(item.url, item.options, item.object_id)
self.download_list.change_stage(item.object_id, "Active")
if item is None and self._jobs_done():
break
time.sleep(self.WAIT_TIME)
# Close all the workers
for worker in self._workers:
worker.close()
# Join and collect
for worker in self._workers:
worker.join()
self._successful += worker.successful
self._time_it_took = time.time() - self._time_it_took
if not self._running:
self._talk_to_gui('closed')
else:
self._talk_to_gui('finished')
def active(self):
"""Returns number of active items.
Note:
active_items = (workers that work) + (items waiting in the url_list).
"""
#counter = 0
#for worker in self._workers:
#if not worker.available():
#counter += 1
#counter += len(self.download_list)
return len(self.download_list)
def stop_downloads(self):
"""Stop the download process. Also send 'closing'
signal back to the GUI.
Note:
            It does NOT kill the workers; that is the job of the
            clean-up task in the run() method.
"""
self._talk_to_gui('closing')
self._running = False
def add_url(self, url):
"""Add given url to the download_list.
Args:
url (dict): Python dictionary that contains two keys.
The url and the index of the corresponding row in which
the worker should send back the information about the
download process.
"""
self.download_list.append(url)
def send_to_worker(self, data):
"""Send data to the Workers.
Args:
data (dict): Python dictionary that holds the 'index'
which is used to identify the Worker thread and the data which
can be any of the Worker's class valid data. For a list of valid
data keys see __init__() under the Worker class.
"""
if 'index' in data:
for worker in self._workers:
if worker.has_index(data['index']):
worker.update_data(data)
def _talk_to_gui(self, data):
"""Send data back to the GUI using wxCallAfter and wxPublisher.
Args:
data (string): Unique signal string that informs the GUI for the
download process.
Note:
DownloadManager supports 4 signals.
1) closing: The download process is closing.
2) closed: The download process has closed.
3) finished: The download process was completed normally.
4) report_active: Signal the gui to read the number of active
downloads using the active() method.
"""
CallAfter(Publisher.sendMessage, MANAGER_PUB_TOPIC, data)
def _check_youtubedl(self):
"""Check if youtube-dl binary exists. If not try to download it. """
if not os_path_exists(self._youtubedl_path()) and self.parent.update_thread is None:
self.parent.update_thread = UpdateThread(self.opt_manager.options['youtubedl_path'], True)
self.parent.update_thread.join()
self.parent.update_thread = None
def _get_worker(self):
for worker in self._workers:
if worker.available():
return worker
return None
def _jobs_done(self):
"""Returns True if the workers have finished their jobs else False. """
for worker in self._workers:
if not worker.available():
return False
return True
def _youtubedl_path(self):
"""Returns the path to youtube-dl binary. """
path = self.opt_manager.options['youtubedl_path']
path = os.path.join(path, YOUTUBEDL_BIN)
return path
class Worker(Thread):
"""Simple worker which downloads the given url using a downloader
from the downloaders.py module.
Attributes:
WAIT_TIME (float): Time in seconds to sleep.
Args:
opt_manager (optionsmanager.OptionsManager): Check DownloadManager
description.
youtubedl (string): Absolute path to youtube-dl binary.
log_manager (logmanager.LogManager): Check DownloadManager
description.
log_lock (threading.Lock): Synchronization lock for the log_manager.
If the log_manager is set (not None) then the caller has to make
sure that the log_lock is also set.
Note:
For available data keys see self._data under the __init__() method.
"""
WAIT_TIME = 0.1
def __init__(self, opt_manager, youtubedl, log_manager=None, log_lock=None):
super(Worker, self).__init__()
self.opt_manager = opt_manager
self.log_manager = log_manager
self.log_lock = log_lock
self._downloader = YoutubeDLDownloader(youtubedl, self._data_hook, self._log_data)
self._options_parser = OptionsParser()
self._successful = 0
self._running = True
self._options = None
self._wait_for_reply = False
self._data = {
'playlist_index': None,
'playlist_size': None,
'new_filename': None,
'extension': None,
'filesize': None,
'filename': None,
'percent': None,
'status': None,
'index': None,
'speed': None,
'path': None,
'eta': None,
'url': None
}
self.start()
def run(self):
while self._running:
if self._data['url'] is not None:
#options = self._options_parser.parse(self.opt_manager.options)
ret_code = self._downloader.download(self._data['url'], self._options)
if (ret_code == YoutubeDLDownloader.OK or
ret_code == YoutubeDLDownloader.ALREADY or
ret_code == YoutubeDLDownloader.WARNING):
self._successful += 1
# Ask GUI for name updates
#self._talk_to_gui('receive', {'source': 'filename', 'dest': 'new_filename'})
# Wait until you get a reply
#while self._wait_for_reply:
#time.sleep(self.WAIT_TIME)
self._reset()
time.sleep(self.WAIT_TIME)
# Call the destructor function of YoutubeDLDownloader object
self._downloader.close()
def download(self, url, options, object_id):
"""Download given item.
Args:
item (dict): Python dictionary that contains two keys.
The url and the index of the corresponding row in which
the worker should send back the information about the
download process.
"""
self._data['url'] = url
self._options = options
self._data['index'] = object_id
def stop_download(self):
"""Stop the download process of the worker. """
self._downloader.stop()
def close(self):
"""Kill the worker after stopping the download process. """
self._running = False
self._downloader.stop()
def available(self):
"""Return True if the worker has no job else False. """
return self._data['url'] is None
def has_index(self, index):
"""Return True if index is equal to self._data['index'] else False. """
return self._data['index'] == index
def update_data(self, data):
"""Update self._data from the given data. """
if self._wait_for_reply:
# Update data only if a receive request has been issued
for key in data:
self._data[key] = data[key]
self._wait_for_reply = False
@property
def successful(self):
"""Return the number of successful downloads for current worker. """
return self._successful
def _reset(self):
"""Reset self._data back to the original state. """
for key in self._data:
self._data[key] = None
def _log_data(self, data):
"""Callback method for self._downloader.
This method is used to write the given data in a synchronized way
to the log file using the self.log_manager and the self.log_lock.
Args:
data (string): String to write to the log file.
"""
if self.log_manager is not None:
self.log_lock.acquire()
self.log_manager.log(data)
self.log_lock.release()
def _data_hook(self, data):
"""Callback method for self._downloader.
This method updates self._data and sends the updates back to the
GUI using the self._talk_to_gui() method.
Args:
data (dict): Python dictionary which contains information
about the download process. For more info see the
extract_data() function under the downloaders.py module.
"""
## Temp dictionary which holds the updates
#temp_dict = {}
## Update each key
#for key in data:
#if self._data[key] != data[key]:
#self._data[key] = data[key]
#temp_dict[key] = data[key]
## Build the playlist status if there is an update
## REFACTOR re-implement this on DownloadItem or ListCtrl level?
##if self._data['playlist_index'] is not None:
##if 'status' in temp_dict or 'playlist_index' in temp_dict:
##temp_dict['status'] = '{status} {index}/{size}'.format(
##status=self._data['status'],
##index=self._data['playlist_index'],
##size=self._data['playlist_size']
##)
#if len(temp_dict):
#self._talk_to_gui('send', temp_dict)
self._talk_to_gui('send', data)
def _talk_to_gui(self, signal, data):
"""Communicate with the GUI using wxCallAfter and wxPublisher.
Send/Ask data to/from the GUI. Note that if the signal is 'receive'
then the Worker will wait until it receives a reply from the GUI.
Args:
signal (string): Unique string that informs the GUI about the
communication procedure.
data (dict): Python dictionary which holds the data to be sent
back to the GUI. If the signal is 'send' then the dictionary
contains the updates for the GUI (e.g. percentage, eta). If
the signal is 'receive' then the dictionary contains exactly
three keys. The 'index' (row) from which we want to retrieve
the data, the 'source' which identifies a column in the
wxListCtrl widget and the 'dest' which tells the wxListCtrl
under which key to store the retrieved data.
Note:
Worker class supports 2 signals.
1) send: The Worker sends data back to the GUI
(e.g. Send status updates).
2) receive: The Worker asks data from the GUI
(e.g. Receive the name of a file).
Structure:
('send', {'index': <item_row>, data_to_send*})
('receive', {'index': <item_row>, 'source': 'source_key', 'dest': 'destination_key'})
"""
data['index'] = self._data['index']
if signal == 'receive':
self._wait_for_reply = True
CallAfter(Publisher.sendMessage, WORKER_PUB_TOPIC, (signal, data))
| Sofronio/youtube-dl-gui | youtube_dl_gui/downloadmanager.py | Python | unlicense | 23,289 |
"""
Classes for representing hierarchical data
"""
import json
import logging
import sys
from neoteric.util.compat import NoneType
from neoteric.util.debug import get_caller
from neoteric.util.files import read_file, write_file
from neoteric.util.json_util import json_format
from neoteric.util.type import type_assert
__all__ = ['AbstractHData']
class AbstractHData:
""" Inherit this class and implement __init__(self)
"""
def __setattr__(self, key, value):
type_assert(key, str)
caller = get_caller()
if key.startswith('secret_'):
h = hash(value)
msg = "Setting HData {0} to {1} -- {2}".format(key, h, caller)
else:
msg = "Setting HData {0} to {1} -- {2}".format(key, value, caller)
logging.debug(msg)
self.__dict__[key] = value
def __getattr__(self, key, default=None):
type_assert(key, str)
if key in self.__dict__:
return self.__dict__[key]
elif key.startswith('__') and key.endswith('__'):
raise AttributeError # magic methods
else:
return default
if sys.version_info >= (3,):
def __bool__(self):
return bool(self.__dict__)
else:
def __nonzero__(self):
return bool(self.__dict__)
def get_value(self, key, default=None):
""" Use this to return a default value if @key is not present
@key: str, The class member to return
@default: mixed, The value to return if @key returns None
"""
type_assert(key, str)
return self.__getattr__(key=key, default=default)
def merge(self, other):
""" Merge another HData into this one.
"""
for k, v in other.__dict__.items():
setattr(self, k, v)
def to_file(self, path):
with open(path, 'w') as f:
f.write(str(self))
@staticmethod
def from_file(path):
type_assert(path, (str, NoneType))
result = AbstractHData()
if path is not None:
text = read_file(path)
data = json.loads(text)
for k, v in data.items():
result.__dict__[k] = v
return result
def __str__(self):
return json_format(self.__dict__)
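# Usage sketch (illustrative only; the subclass and field names below are
# hypothetical):
#
#     class HostHData(AbstractHData):
#         def __init__(self):
#             self.hostname = 'example'     # value is logged by __setattr__
#             self.secret_token = 'abc123'  # only its hash is logged
#
#     data = HostHData()
#     data.merge(AbstractHData.from_file('/tmp/extra.json'))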
| j3ffhubb/neoteric | neoteric/resource/hdata.py | Python | gpl-3.0 | 2,297 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
""" Utility functions for implementing Winograd convolutions
[*] Fast Algorithms for Convolutional Neural Networks
Andrew Lavin, Scott Gray
https://arxiv.org/abs/1509.09308
https://github.com/andravin/wincnn
"""
from operator import mul
from functools import reduce
import numpy as np
from tvm.contrib.pickle_memoize import memoize
from ..util import const_matrix
# pylint: disable=invalid-name
def _cook_toom_convolution(a, n, r):
"""Compute Cook-Toom convolution A,B,G matrices"""
def _F_m(a, n):
f = lambda j, i: reduce(mul, ((a[i] - a[k] if k != i else 1) for k in range(0, n - 1)), 1)
F = np.fromfunction(np.vectorize(f), (1, n - 1), dtype=int)
F = np.diagflat(F)
F = np.append(F, np.zeros((n - 1, 1), dtype=int), axis=1)
f = lambda i, j: (1 if j == (n - 1) else 0)
z = np.fromfunction(np.vectorize(f), (1, n), dtype=int)
return np.append(F, z, axis=0)
def _A_m(a, m, n):
f = lambda i, j: a[i] ** j
A = np.fromfunction(np.vectorize(f), (m - 1, n), dtype=int)
f = lambda i, j: (1 if j == (n - 1) else 0)
z = np.fromfunction(np.vectorize(f), (1, n), dtype=int)
return np.append(A, z, axis=0)
def _B_m(a, n):
f = lambda j, i: reduce(mul, ((a[i] - a[k] if k != i else 1) for k in range(0, n - 1)), 1)
Ff = np.fromfunction(np.vectorize(f), (1, n - 1), dtype=int)
f = (
lambda i, nth: (
reduce(mul, [(np.poly1d([1, -a[k]]) if k != i else 1) for k in range(0, n - 1)], 1)
).coef[n - 1 - nth - 1]
/ Ff[0, i]
)
F = np.fromfunction(np.vectorize(f), (n - 1, n - 1), dtype=int)
f = lambda i, j: -a[i] ** (n - 1)
t = np.fromfunction(np.vectorize(f), (n - 1, 1), dtype=int)
T = np.append(np.eye(n - 1), t, axis=1)
return np.append(F.T.dot(T), np.array([np.eye(n)[n - 1]]), axis=0)
alpha = n + r - 1
f = _F_m(a, alpha)
if f[0, 0] < 0:
f[0, :] *= -1
A = _A_m(a, alpha, n)
G = _A_m(a, alpha, r).T
G = G.dot(np.linalg.inv(f)).T
B = _B_m(a, alpha)
B = B.dot(f.T)
return (A, B, G)
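# For an F(m, r) Winograd kernel, alpha = m + r - 1 and the matrices returned
# above have shapes A: (alpha, m), B: (alpha, alpha) and G: (alpha, r).  A 1-D
# convolution is then evaluated as
#     y = A.T @ ((G @ g) * (B.T @ d))
# for a length-r filter g and a length-alpha input tile d (Lavin & Gray).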
def _interpolation_points(degree):
"""Propose filter points"""
assert 2 < degree < 18
# Default interpolation lookup table
#
# [1] Error Analysis and Improving the Accuracy of Winograd Convolution for Deep Neural Networks
# Barbara Barabasz, Andrew Anderson, Kirk M. Soodhalter, David Gregg
# https://arxiv.org/abs/1803.10986
#
# pylint: disable=bad-whitespace,line-too-long
in_pts = [
# {invalid}
[],
# 01 {E=4.63E-08 on conv2d [1]}
[],
# 02 {E=7.65E-08 on F( 2,3) [1]}
[0, -1, 1],
# 03 {E=2.35E-07 on F( 3,3) [1]}
[0, -1, 1, 1 / 2],
# 04 {E=3.29E-07 on F( 4,3) [1]}
[0, -1, 1, 1 / 2, -2],
# 05 {E=6.81E-07 on F( 5,3) [1]}
[0, -1, 1, 1 / 2, -2, -1 / 2],
# 06 {E=8.79E-07 on F( 6,3) [1]}
[0, -1, 1, 1 / 2, -1 / 2, 2, -2],
# 07 {E=3.71E-06 on F( 7,3) [1]}
[0, -1, 1, 1 / 2, -1 / 2, 2, -2, -1 / 4],
# 08 {E=7.35E-06 on F( 8,3) [1]}
[0, -1, 1, 1 / 2, -1 / 2, 2, -2, -1 / 4, 4],
# 09 {E=2.20E-05 on F( 9,3) [1]}
[0, -1, 1, 1 / 2, -1 / 2, 2, -2, -1 / 4, 3 / 4, -4 / 3],
# 10 {E=3.22E-05 on F(10,3) [1]}
[0, -1, 1, 1 / 2, -1 / 2, 2, -2, -1 / 4, 4, 3 / 4, -4 / 3],
# 11 {E=1.09E-04 on F(11,3) [1]}
[0, -1, 1, 1 / 2, -1 / 2, 2, -2, -1 / 4, 4, 3 / 4, -4 / 3, 1 / 4],
# 12 {E=1.99E-04 on F(12,3) [1]}
[0, -1, 1, 1 / 2, -1 / 2, 2, -2, -1 / 4, 4, 1 / 4, -3 / 4, 4 / 3, -4],
# 13 {E=5.54E-04 on F(13,3) [1]}
[0, -1, 1, 1 / 2, -1 / 2, 2, -2, -1 / 4, 4, 1 / 4, -3 / 4, 4 / 3, 3 / 4, -4 / 3],
# 14 {E=8.80E-04 on F(14,3) [1]}
[0, -1, 1, 1 / 2, -1 / 2, 2, -2, -1 / 4, 4, 1 / 4, -3 / 4, 4 / 3, -4, 3 / 4, -4 / 3],
# 15 {E=1.07E-02 on F(15,3) [1]}
[0, -1, 1, 1 / 2, -1 / 2, 2, -2, -1 / 4, 4, 1 / 4, -3 / 4, 4 / 3, -4, 2 / 3, -3 / 2, 3 / 2],
# 16 {E=1.93E-02 on F(16,3) [1]}
[
0,
-1,
1,
1 / 2,
-1 / 2,
2,
-2,
-1 / 4,
4,
1 / 4,
-3 / 4,
4 / 3,
-4,
2 / 3,
-3 / 2,
-2 / 3,
3 / 2,
],
] # pylint: enable=bad-whitespace,line-too-long
return np.array(in_pts[degree - 1], dtype=np.float64)
@memoize("topi.nn.winograd_matrices", save_at_exit=False)
def winograd_transform_matrices(tile_size, kernel_size, out_dtype):
"""Compute the A, B, and G transform matrices for `tile_size` as a `tvm.Expr`."""
if not 1 < tile_size < 9:
raise ValueError("Unsupported tile size for Winograd: {}".format(tile_size))
if not 2 < kernel_size < 8:
raise ValueError("Unsupported kernel size for Winograd: {}".format(kernel_size))
degree = tile_size + kernel_size - 2
intp_pts = _interpolation_points(degree)
A_data, B_data, G_data = _cook_toom_convolution(intp_pts, tile_size, kernel_size)
return (
const_matrix(A_data.astype(out_dtype), "A"),
const_matrix(B_data.astype(out_dtype), "B"),
const_matrix(G_data.astype(out_dtype), "G"),
)
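# Usage sketch (illustrative): a typical call site asks for the F(4, 3)
# transforms, i.e. 4x4 output tiles with 3x3 kernels:
#
#     A, B, G = winograd_transform_matrices(4, 3, "float32")
#
# Results are memoized by the decorator above, so repeated calls with the same
# arguments reuse the cached matrices.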
| tqchen/tvm | python/tvm/topi/nn/winograd_util.py | Python | apache-2.0 | 6,191 |
from django.db import models
from django.contrib.auth.models import User
from django.template import loader, Context
# Categories can form a hierarchy of arbitrary depth
class Category(models.Model):
""" A Category for Skills
"""
name = models.CharField(max_length=50)
description = models.CharField(max_length=255, blank=True)
parent_category = models.ForeignKey("self", null=True, blank=True, default=None)
is_root = models.BooleanField(default=False)
def getSkills(self):
return Skill.objects.filter(category__id=self.id).order_by('name')
def getSubcategories(self):
return Category.objects.filter(parent_category__id=self.id).order_by('name')
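    # parent_category is a self-referential foreign key, so categories form a
    # tree; getSkills() and getSubcategories() are the hooks that
    # getCategoryHTML() below (and, presumably, the category.html template)
    # use to render that tree recursively.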
def getCategoryHTML(self, level=1, traverse=True, hide_top_level=False, test=False):
template = loader.get_template('siteapp/category.html')
context = Context(
{
'name': self.name,
'skills': self.getSkills(),
'subcategories': self.getSubcategories(),
'level': level,
'next_level': level + 1,
'traverse': traverse,
'hide_top_level': hide_top_level, # FIXME: Implement logic
'test': test,
}
)
return template.render(context)
def __unicode__(self):
return self.name
# Used to map certain type of skill levels to skills
# E.g. "Numeric", "String", "Grade", ...
class SkillLevelType(models.Model):
""" Type of Skill Levels.
"""
name = models.CharField(max_length=20)
def __unicode__(self):
return self.name
# Levels per level type
class SkillLevel(models.Model):
""" Level of the skill
"""
level = models.CharField(max_length=50)
skill_level_type = models.ForeignKey(SkillLevelType)
def __unicode__(self):
return self.level
# Skills. Can be under any category,
# Each skill should determine which type of level can be defined for it
class Skill(models.Model):
""" Skill. Belongs to a category. Specifies type of skill levels. May have a description.
"""
name = models.CharField(max_length=50)
description = models.CharField(max_length=255, blank=True)
skill_level_type = models.ForeignKey(SkillLevelType)
category = models.ForeignKey(Category)
def getValidLevels(self):
return SkillLevel.objects.filter(skill_level_type__id = self.skill_level_type.id)
def getLevelsHTMLDropDownList(self):
template = loader.get_template('siteapp/levels.html')
context = Context(
{
'levels_type': self.skill_level_type.name, # For html class
'levels': self.getValidLevels()
})
return template.render(context)
def getSkillHTML(self, select_function_name='function_name'):
template = loader.get_template('siteapp/skill.html')
context = Context(
{
'skill': self,
'select_function_name': select_function_name,
'level_dropdownbox_html': self.getLevelsHTMLDropDownList()
}
)
return template.render(context)
def __unicode__(self):
return self.name
# Map skills and their levels for user
class UserSkillLevel(models.Model):
user = models.ForeignKey(User)
skill = models.ForeignKey(Skill)
level = models.ForeignKey(SkillLevel)
def __unicode__(self):
return "Skill level for user '%s'" % str(self.user.username)
| CSXM/laborbook | siteapp/models.py | Python | gpl-2.0 | 3,513 |
"""Leecode 482. License Key Formatting
Easy
URL: https://leetcode.com/problems/license-key-formatting/
You are given a license key represented as a string S which consists only
alphanumeric character and dashes. The string is separated into N+1 groups
by N dashes.
Given a number K, we would want to reformat the strings such that each group
contains exactly K characters, except for the first group which could be
shorter than K, but still must contain at least one character. Furthermore,
there must be a dash inserted between two groups and all lowercase letters
should be converted to uppercase.
Given a non-empty string S and a number K, format the string according to the
rules described above.
Example 1:
Input: S = "5F3Z-2e-9-w", K = 4
Output: "5F3Z-2E9W"
Explanation: The string S has been split into two parts, each part has 4
characters.
Note that the two extra dashes are not needed and can be removed.
Example 2:
Input: S = "2-5g-3-J", K = 2
Output: "2-5G-3J"
Explanation: The string S has been split into three parts, each part has 2
characters except the first part as it could be shorter as mentioned above.
Note:
- The length of string S will not exceed 12,000, and K is a positive integer.
- String S consists only of alphanumerical characters (a-z and/or A-Z and/or 0-9)
and dashes(-).
- String S is non-empty.
"""
class SolutionReverseIter(object):
def licenseKeyFormatting(self, S, K):
"""
:type S: str
:type K: int
:rtype: str
Time complexity: O(n).
Space complexity: O(|S|/K).
"""
# Upper case and drop dash.
S_nodashes = S.upper().replace('-','')
len_nodashes = len(S_nodashes)
# Reversely iterate through no-dashed list, concat to string until K chars.
str_ls = [''] * (len_nodashes // K + (len_nodashes % K > 0))
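        # len // K + (len % K > 0) is integer ceil division, i.e. the number
        # of K-character groups (the first group may be shorter than K).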
cur_idx = len(str_ls) - 1
cur_counter = 0
for i in range(len_nodashes - 1, -1, -1):
if cur_counter < K:
# Still concat the current string.
str_ls[cur_idx] = S_nodashes[i] + str_ls[cur_idx]
cur_counter += 1
else:
# Start concating the next string.
cur_idx -= 1
str_ls[cur_idx] = S_nodashes[i] + str_ls[cur_idx]
cur_counter = 1
# Concat list's strings with -.
return '-'.join(str_ls)
class SolutionForwardIterK(object):
def licenseKeyFormatting(self, S, K):
"""
:type S: str
:type K: int
:rtype: str
Time complexity: O(n).
Space complexity: O(|S|/K).
"""
# Upper case and drop dash.
S_nodashes = S.upper().replace('-', '')
len_nodashes = len(S_nodashes)
# Get the end index for the 1st part.
remainder = len_nodashes % K
if remainder:
end_idx = remainder
else:
end_idx = K
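        # The first group holds len % K characters when the length is not a
        # multiple of K; otherwise it is a full K-character group.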
# Get the 1st part.
str_ls = [S_nodashes[:end_idx]]
# Iteratively append K chars at a time.
while end_idx < len_nodashes:
str_ls.append(S_nodashes[end_idx:end_idx+K])
end_idx += K
# Concat list's strings with -.
return '-'.join(str_ls)
def main():
# Output: "5F3Z-2E9W"
S = "5F3Z-2e-9-w"
K = 4
print SolutionReverseIter().licenseKeyFormatting(S, K)
print SolutionForwardIterK().licenseKeyFormatting(S, K)
# Output: "5F3Z-2E9W"
S = "2-5g-3-J"
K = 2
print SolutionReverseIter().licenseKeyFormatting(S, K)
print SolutionForwardIterK().licenseKeyFormatting(S, K)
if __name__ == '__main__':
main()
| bowen0701/algorithms_data_structures | lc0482_license_key_formatting.py | Python | bsd-2-clause | 3,662 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'URLChangeRecord.old_url'
db.alter_column('url_tracker_urlchangerecord', 'old_url', self.gf('django.db.models.fields.TextField')(unique=True))
# Changing field 'URLChangeRecord.new_url'
db.alter_column('url_tracker_urlchangerecord', 'new_url', self.gf('django.db.models.fields.TextField')(null=True))
def backwards(self, orm):
# Changing field 'URLChangeRecord.old_url'
db.alter_column('url_tracker_urlchangerecord', 'old_url', self.gf('django.db.models.fields.CharField')(max_length=200, unique=True))
# Changing field 'URLChangeRecord.new_url'
db.alter_column('url_tracker_urlchangerecord', 'new_url', self.gf('django.db.models.fields.CharField')(max_length=200, null=True))
models = {
'url_tracker.urlchangerecord': {
'Meta': {'object_name': 'URLChangeRecord'},
'date_changed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'old_url': ('django.db.models.fields.TextField', [], {'unique': 'True'})
}
}
complete_apps = ['url_tracker'] | elbaschid/django-url-tracker | url_tracker/migrations/0005_auto__chg_field_urlchangerecord_old_url__chg_field_urlchangerecord_new.py | Python | bsd-3-clause | 1,609 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import _
from odoo.http import route, request
from odoo.osv import expression
from odoo.addons.mass_mailing.controllers.main import MassMailController
class MassMailController(MassMailController):
@route('/website_mass_mailing/is_subscriber', type='json', website=True, auth="public")
def is_subscriber(self, list_id, **post):
email = None
if not request.env.user._is_public():
email = request.env.user.email
elif request.session.get('mass_mailing_email'):
email = request.session['mass_mailing_email']
is_subscriber = False
if email:
contacts_count = request.env['mailing.contact.subscription'].sudo().search_count([('list_id', 'in', [int(list_id)]), ('contact_id.email', '=', email), ('opt_out', '=', False)])
is_subscriber = contacts_count > 0
return {'is_subscriber': is_subscriber, 'email': email}
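    # The lookup above prefers the logged-in user's email and falls back to
    # the address that subscribe() stored in the session, so anonymous
    # visitors are recognized after subscribing once.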
@route('/website_mass_mailing/subscribe', type='json', website=True, auth="public")
def subscribe(self, list_id, email, **post):
# FIXME the 14.0 was released with this but without the google_recaptcha
# module being added as a dependency of the website_mass_mailing module.
# This is to be fixed in master of course but in stable, we'll have to
# use this workaround.
if hasattr(request.env['ir.http'], '_verify_request_recaptcha_token') \
and not request.env['ir.http']._verify_request_recaptcha_token('website_mass_mailing_subscribe'):
return {
'toast_type': 'danger',
'toast_content': _("Suspicious activity detected by Google reCaptcha."),
}
ContactSubscription = request.env['mailing.contact.subscription'].sudo()
Contacts = request.env['mailing.contact'].sudo()
name, email = Contacts.get_name_email(email)
subscription = ContactSubscription.search([('list_id', '=', int(list_id)), ('contact_id.email', '=', email)], limit=1)
if not subscription:
# inline add_to_list as we've already called half of it
contact_id = Contacts.search([('email', '=', email)], limit=1)
if not contact_id:
contact_id = Contacts.create({'name': name, 'email': email})
ContactSubscription.create({'contact_id': contact_id.id, 'list_id': int(list_id)})
elif subscription.opt_out:
subscription.opt_out = False
# add email to session
request.session['mass_mailing_email'] = email
mass_mailing_list = request.env['mailing.list'].sudo().browse(list_id)
return {
'toast_type': 'success',
'toast_content': mass_mailing_list.toast_content
}
@route(['/website_mass_mailing/get_content'], type='json', website=True, auth="public")
def get_mass_mailing_content(self, newsletter_id, **post):
PopupModel = request.env['website.mass_mailing.popup'].sudo()
data = self.is_subscriber(newsletter_id, **post)
domain = expression.AND([request.website.website_domain(), [('mailing_list_id', '=', newsletter_id)]])
mass_mailing_popup = PopupModel.search(domain, limit=1)
if mass_mailing_popup:
data['popup_content'] = mass_mailing_popup.popup_content
else:
data.update(PopupModel.default_get(['popup_content']))
return data
@route(['/website_mass_mailing/set_content'], type='json', website=True, auth="user")
def set_mass_mailing_content(self, newsletter_id, content, **post):
PopupModel = request.env['website.mass_mailing.popup']
domain = expression.AND([request.website.website_domain(), [('mailing_list_id', '=', newsletter_id)]])
mass_mailing_popup = PopupModel.search(domain, limit=1)
if mass_mailing_popup:
mass_mailing_popup.write({'popup_content': content})
else:
PopupModel.create({
'mailing_list_id': newsletter_id,
'popup_content': content,
'website_id': request.website.id,
})
return True
| rven/odoo | addons/website_mass_mailing/controllers/main.py | Python | agpl-3.0 | 4,228 |
#exercise 11 - asking questions
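# The trailing comma after each print keeps the cursor on the same line, so
# the raw_input() prompt appears right after the question (Python 2 behaviour).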
print "How old are you?",
age = raw_input()
print "How tall are you?",
height = raw_input()
print "How much do you weigh?",
weight = raw_input()
print "So, you're %r old, %r tall and %r heavy." % (age, height, weight)
print | UWPCE-PythonCert/IntroPython2016 | students/sheree/session_02/homework/LPTHW-EXC-11.py | Python | unlicense | 261 |
# -*- coding: utf-8 -*-
#
# sphinx-tests documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 10 13:50:13 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sphinx-tests'
copyright = u'2016, svx'
author = u'svx'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1'
# The full version, including alpha/beta/rc tags.
release = u'1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'sphinx-testsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sphinx-tests.tex', u'sphinx-tests Documentation',
u'svx', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sphinx-tests', u'sphinx-tests Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'sphinx-tests', u'sphinx-tests Documentation',
author, 'sphinx-tests', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| leftxs/sphinx-tests | docs/conf.py | Python | gpl-2.0 | 9,185 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
VMware vSphere driver. Uses pyvmomi - https://github.com/vmware/pyvmomi
Code inspired by https://github.com/vmware/pyvmomi-community-samples
Authors: Dimitris Moraitis, Alex Tsiliris, Markos Gogoulos
"""
import time
import logging
import json
import base64
import warnings
import asyncio
import ssl
import functools
import itertools
import hashlib
try:
from pyVim import connect
from pyVmomi import vim, vmodl, VmomiSupport
from pyVim.task import WaitForTask
pyvmomi = True
except ImportError:
pyvmomi = None
import atexit
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.compute.base import NodeDriver
from libcloud.compute.base import Node, NodeSize
from libcloud.compute.base import NodeImage, NodeLocation
from libcloud.compute.types import NodeState, Provider
from libcloud.utils.networking import is_public_subnet
from libcloud.utils.py3 import httplib
from libcloud.common.types import ProviderError
from libcloud.common.exceptions import BaseHTTPError
from libcloud.common.base import JsonResponse, ConnectionKey
logger = logging.getLogger('libcloud.compute.drivers.vsphere')
def recurse_snapshots(snapshot_list):
ret = []
for s in snapshot_list:
ret.append(s)
ret += recurse_snapshots(getattr(s, 'childSnapshotList', []))
return ret
def format_snapshots(snapshot_list):
ret = []
for s in snapshot_list:
ret.append({
'id': s.id,
'name': s.name,
'description': s.description,
'created': s.createTime.strftime('%Y-%m-%d %H:%M'),
'state': s.state})
return ret
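# Illustrative sketch (not part of the driver): how the two snapshot helpers
# above are typically combined. The `vm` object and its snapshot tree are
# hypothetical pyVmomi values, shown only to document the expected shape.
#
#     tree = vm.snapshot.rootSnapshotList          # pyVmomi snapshot tree
#     flat = recurse_snapshots(tree)               # depth-first flat list
#     format_snapshots(flat)
#     # -> [{'id': 1, 'name': 'before-upgrade', 'description': '',
#     #      'created': '2020-01-01 12:00', 'state': 'poweredOn'}, ...]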
# 6.5 and older, probably won't work on anything earlier than 4.x
class VSphereNodeDriver(NodeDriver):
name = 'VMware vSphere'
website = 'http://www.vmware.com/products/vsphere/'
type = Provider.VSPHERE
NODE_STATE_MAP = {
'poweredOn': NodeState.RUNNING,
'poweredOff': NodeState.STOPPED,
'suspended': NodeState.SUSPENDED,
}
def __init__(self, host, username, password, port=443, ca_cert=None):
"""Initialize a connection by providing a hostname,
username and password
"""
if pyvmomi is None:
raise ImportError('Missing "pyvmomi" dependency. '
'You can install it '
'using pip - pip install pyvmomi')
self.host = host
try:
if ca_cert is None:
self.connection = connect.SmartConnect(
host=host, port=port, user=username, pwd=password,
)
else:
context = ssl.create_default_context(cafile=ca_cert)
self.connection = connect.SmartConnect(
host=host, port=port, user=username, pwd=password,
sslContext=context
)
atexit.register(connect.Disconnect, self.connection)
except Exception as exc:
error_message = str(exc).lower()
if 'incorrect user name' in error_message:
raise InvalidCredsError('Check your username and '
'password are valid')
if 'connection refused' in error_message or 'is not a vim server' \
in error_message:
raise LibcloudError('Check that the host provided is a '
'vSphere installation',
driver=self)
if 'name or service not known' in error_message:
raise LibcloudError(
'Check that the vSphere host is accessible',
driver=self)
if 'certificate verify failed' in error_message:
# bypass self signed certificates
try:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_NONE
except ImportError:
raise ImportError('To use self signed certificates, '
'please upgrade to python 2.7.11 and '
'pyvmomi 6.0.0+')
self.connection = connect.SmartConnect(
host=host, port=port, user=username, pwd=password,
sslContext=context
)
atexit.register(connect.Disconnect, self.connection)
else:
raise LibcloudError('Cannot connect to vSphere',
driver=self)
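    # Usage sketch (assumed host and credentials, kept as a comment so that
    # importing this module has no side effects): connect to a vCenter with
    # a custom CA bundle and list the available locations.
    #
    #     driver = VSphereNodeDriver('vcenter.example.com', 'admin', 'secret',
    #                                ca_cert='/etc/ssl/vcenter.pem')
    #     for loc in driver.list_locations():
    #         print(loc.name, loc.extra.get('type'))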
def list_locations(self, ex_show_hosts_in_drs=True):
"""
Lists locations
"""
content = self.connection.RetrieveContent()
potential_locations = [dc for dc in
content.viewManager.CreateContainerView(
content.rootFolder, [
vim.ClusterComputeResource,
vim.HostSystem],
recursive=True).view]
# Add hosts and clusters with DRS enabled
locations = []
hosts_all = []
clusters = []
for location in potential_locations:
if isinstance(location, vim.HostSystem):
hosts_all.append(location)
elif isinstance(location, vim.ClusterComputeResource):
if location.configuration.drsConfig.enabled:
clusters.append(location)
if ex_show_hosts_in_drs:
hosts = hosts_all
else:
hosts_filter = [host for cluster in clusters
for host in cluster.host]
hosts = [host for host in hosts_all if host not in hosts_filter]
for cluster in clusters:
locations.append(self._to_location(cluster))
for host in hosts:
locations.append(self._to_location(host))
return locations
def _to_location(self, data):
try:
if isinstance(data, vim.HostSystem):
extra = {
"type": "host",
"state": data.runtime.connectionState,
"hypervisor": data.config.product.fullName,
"vendor": data.hardware.systemInfo.vendor,
"model": data.hardware.systemInfo.model,
"ram": data.hardware.memorySize,
"cpu": {
"packages": data.hardware.cpuInfo.numCpuPackages,
"cores": data.hardware.cpuInfo.numCpuCores,
"threads": data.hardware.cpuInfo.numCpuThreads,
},
"uptime": data.summary.quickStats.uptime,
"parent": str(data.parent)
}
elif isinstance(data, vim.ClusterComputeResource):
extra = {
"type": "cluster",
"overallStatus": data.overallStatus,
"drs": data.configuration.drsConfig.enabled,
'hosts': [host.name for host in data.host],
'parent': str(data.parent)
}
except AttributeError as exc:
logger.error('Cannot convert location %s: %r' % (data.name, exc))
extra = {}
return NodeLocation(id=data.name, name=data.name, country=None,
extra=extra, driver=self)
def ex_list_networks(self):
"""
List networks
"""
content = self.connection.RetrieveContent()
networks = content.viewManager.CreateContainerView(
content.rootFolder,
[vim.Network],
recursive=True
).view
return [self._to_network(network) for network in networks]
def _to_network(self, data):
summary = data.summary
extra = {
'hosts': [h.name for h in data.host],
'ip_pool_name': summary.ipPoolName,
'ip_pool_id': summary.ipPoolId,
'accessible': summary.accessible
}
return VSphereNetwork(id=data.name, name=data.name, extra=extra)
def list_sizes(self):
"""
Returns sizes
"""
return []
def list_images(self, location=None, folder_ids=None):
"""
Lists VM templates as images.
If folder is given then it will list images contained
in that folder only.
"""
images = []
if folder_ids:
vms = []
for folder_id in folder_ids:
folder_object = self._get_item_by_moid('Folder', folder_id)
vms.extend(folder_object.childEntity)
else:
content = self.connection.RetrieveContent()
vms = content.viewManager.CreateContainerView(
content.rootFolder,
[vim.VirtualMachine],
recursive=True
).view
for vm in vms:
if vm.config and vm.config.template:
images.append(self._to_image(vm))
return images
def _to_image(self, data):
summary = data.summary
name = summary.config.name
uuid = summary.config.instanceUuid
memory = summary.config.memorySizeMB
cpus = summary.config.numCpu
operating_system = summary.config.guestFullName
os_type = 'unix'
if 'Microsoft' in str(operating_system):
os_type = 'windows'
annotation = summary.config.annotation
extra = {
"path": summary.config.vmPathName,
"operating_system": operating_system,
"os_type": os_type,
"memory_MB": memory,
"cpus": cpus,
"overallStatus": str(summary.overallStatus),
"metadata": {},
"type": "template_6_5",
"disk_size": int(summary.storage.committed) // (1024**3),
'datastore': data.datastore[0].info.name
}
boot_time = summary.runtime.bootTime
if boot_time:
extra['boot_time'] = boot_time.isoformat()
if annotation:
extra['annotation'] = annotation
for custom_field in data.customValue:
key_id = custom_field.key
key = self.find_custom_field_key(key_id)
extra["metadata"][key] = custom_field.value
return NodeImage(id=uuid, name=name, driver=self,
extra=extra)
def _collect_properties(self, content, view_ref, obj_type, path_set=None,
include_mors=False):
"""
Collect properties for managed objects from a view ref
Check the vSphere API documentation for example on retrieving
object properties:
- http://goo.gl/erbFDz
Args:
content (ServiceInstance): ServiceInstance content
view_ref (pyVmomi.vim.view.*): Starting point of inventory
navigation
obj_type (pyVmomi.vim.*): Type of managed object
path_set (list): List of properties to retrieve
include_mors (bool): If True include the managed objects
refs in the result
Returns:
A list of properties for the managed objects
"""
collector = content.propertyCollector
# Create object specification to define the starting point of
# inventory navigation
obj_spec = vmodl.query.PropertyCollector.ObjectSpec()
obj_spec.obj = view_ref
obj_spec.skip = True
# Create a traversal specification to identify the path for collection
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec()
traversal_spec.name = 'traverseEntities'
traversal_spec.path = 'view'
traversal_spec.skip = False
traversal_spec.type = view_ref.__class__
obj_spec.selectSet = [traversal_spec]
# Identify the properties to the retrieved
property_spec = vmodl.query.PropertyCollector.PropertySpec()
property_spec.type = obj_type
if not path_set:
property_spec.all = True
property_spec.pathSet = path_set
# Add the object and property specification to the
# property filter specification
filter_spec = vmodl.query.PropertyCollector.FilterSpec()
filter_spec.objectSet = [obj_spec]
filter_spec.propSet = [property_spec]
# Retrieve properties
props = collector.RetrieveContents([filter_spec])
data = []
for obj in props:
properties = {}
for prop in obj.propSet:
properties[prop.name] = prop.val
if include_mors:
properties['obj'] = obj.obj
data.append(properties)
return data
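    # Example call (a hedged sketch; the container view and property paths
    # mirror the ones used by list_nodes() below):
    #
    #     content = self.connection.RetrieveContent()
    #     view = content.viewManager.CreateContainerView(
    #         content.rootFolder, [vim.VirtualMachine], True)
    #     props = self._collect_properties(
    #         content, view, vim.VirtualMachine,
    #         path_set=['summary.config.name', 'summary.runtime.powerState'],
    #         include_mors=True)
    #     # -> [{'summary.config.name': 'vm-01',
    #     #      'summary.runtime.powerState': 'poweredOn',
    #     #      'obj': <vim.VirtualMachine object>}, ...]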
def list_nodes(self, enhance=True, max_properties=20):
"""
List nodes, excluding templates
"""
vm_properties = [
'config.template',
'summary.config.name', 'summary.config.vmPathName',
'summary.config.memorySizeMB', 'summary.config.numCpu',
'summary.storage.committed', 'summary.config.guestFullName',
'summary.runtime.host', 'summary.config.instanceUuid',
'summary.config.annotation', 'summary.runtime.powerState',
'summary.runtime.bootTime', 'summary.guest.ipAddress',
'summary.overallStatus', 'customValue', 'snapshot'
]
content = self.connection.RetrieveContent()
view = content.viewManager.CreateContainerView(
content.rootFolder, [vim.VirtualMachine], True)
i = 0
vm_dict = {}
while i < len(vm_properties):
vm_list = self._collect_properties(content, view,
vim.VirtualMachine,
path_set=vm_properties[
i:i + max_properties],
include_mors=True)
i += max_properties
for vm in vm_list:
if not vm_dict.get(vm['obj']):
vm_dict[vm['obj']] = vm
else:
vm_dict[vm['obj']].update(vm)
vm_list = [vm_dict[k] for k in vm_dict]
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
nodes = loop.run_until_complete(self._to_nodes(vm_list))
if enhance:
nodes = self._enhance_metadata(nodes, content)
return nodes
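    # Note: properties are collected in chunks of `max_properties` per
    # PropertyCollector call and the partial dicts are merged per managed
    # object reference before being turned into nodes. A hypothetical check:
    #
    #     nodes = driver.list_nodes(enhance=False)   # skip event lookups
    #     print(len(nodes), nodes[0].extra.get('host'))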
def list_nodes_recursive(self, enhance=True):
"""
Lists nodes, excluding templates
"""
nodes = []
content = self.connection.RetrieveContent()
children = content.rootFolder.childEntity
# this will be needed for custom VM metadata
if content.customFieldsManager:
self.custom_fields = content.customFieldsManager.field
else:
self.custom_fields = []
for child in children:
if hasattr(child, 'vmFolder'):
datacenter = child
vm_folder = datacenter.vmFolder
vm_list = vm_folder.childEntity
nodes.extend(self._to_nodes_recursive(vm_list))
if enhance:
nodes = self._enhance_metadata(nodes, content)
return nodes
def _enhance_metadata(self, nodes, content):
nodemap = {}
for node in nodes:
node.extra['vSphere version'] = content.about.version
nodemap[node.id] = node
# Get VM deployment events to extract creation dates & images
filter_spec = vim.event.EventFilterSpec(
eventTypeId=['VmBeingDeployedEvent']
)
deploy_events = content.eventManager.QueryEvent(filter_spec)
for event in deploy_events:
try:
uuid = event.vm.vm.config.instanceUuid
except Exception:
continue
if uuid in nodemap:
node = nodemap[uuid]
try: # Get source template as image
source_template_vm = event.srcTemplate.vm
image_id = source_template_vm.config.instanceUuid
node.extra['image_id'] = image_id
except Exception:
logger.error('Cannot get instanceUuid '
'from source template')
try: # Get creation date
node.created_at = event.createdTime
except AttributeError:
logger.error('Cannot get creation date from VM '
'deploy event')
return nodes
async def _to_nodes(self, vm_list):
vms = []
for vm in vm_list:
if vm.get('config.template'):
continue # Do not include templates in node list
vms.append(vm)
loop = asyncio.get_event_loop()
vms = [
loop.run_in_executor(None, self._to_node, vms[i])
for i in range(len(vms))
]
return await asyncio.gather(*vms)
def _to_nodes_recursive(self, vm_list):
nodes = []
for virtual_machine in vm_list:
if hasattr(virtual_machine, 'childEntity'):
# If this is a group it will have children.
# If it does, recurse into them and then return
nodes.extend(self._to_nodes_recursive(
virtual_machine.childEntity))
elif isinstance(virtual_machine, vim.VirtualApp):
# If this is a vApp, it likely contains child VMs
# (vApps can nest vApps, but it is hardly
# a common usecase, so ignore that)
nodes.extend(self._to_nodes_recursive(virtual_machine.vm))
else:
if not hasattr(virtual_machine, 'config') or \
(virtual_machine.config and
virtual_machine.config.template):
continue # Do not include templates in node list
nodes.append(self._to_node_recursive(virtual_machine))
return nodes
def _to_node(self, vm):
name = vm.get('summary.config.name')
path = vm.get('summary.config.vmPathName')
memory = vm.get('summary.config.memorySizeMB')
cpus = vm.get('summary.config.numCpu')
disk = vm.get('summary.storage.committed', 0) // (1024 ** 3)
id_to_hash = str(memory) + str(cpus) + str(disk)
size_id = hashlib.md5(id_to_hash.encode("utf-8")).hexdigest()
size_name = name + "-size"
size_extra = {'cpus': cpus}
driver = self
size = NodeSize(id=size_id, name=size_name, ram=memory, disk=disk,
bandwidth=0, price=0, driver=driver, extra=size_extra)
operating_system = vm.get('summary.config.guestFullName')
host = vm.get('summary.runtime.host')
os_type = 'unix'
if 'Microsoft' in str(operating_system):
os_type = 'windows'
uuid = vm.get('summary.config.instanceUuid') or \
(vm.get('obj').config and vm.get('obj').config.instanceUuid)
if not uuid:
logger.error('No uuid for vm: {}'.format(vm))
annotation = vm.get('summary.config.annotation')
state = vm.get('summary.runtime.powerState')
status = self.NODE_STATE_MAP.get(state, NodeState.UNKNOWN)
boot_time = vm.get('summary.runtime.bootTime')
ip_addresses = []
if vm.get('summary.guest.ipAddress'):
ip_addresses.append(vm.get('summary.guest.ipAddress'))
overall_status = str(vm.get('summary.overallStatus'))
public_ips = []
private_ips = []
extra = {
'path': path,
'operating_system': operating_system,
'os_type': os_type,
'memory_MB': memory,
'cpus': cpus,
'overall_status': overall_status,
'metadata': {},
'snapshots': []
}
if disk:
extra['disk'] = disk
if host:
extra['host'] = host.name
parent = host.parent
while parent:
if isinstance(parent, vim.ClusterComputeResource):
extra['cluster'] = parent.name
break
parent = parent.parent
if boot_time:
extra['boot_time'] = boot_time.isoformat()
if annotation:
extra['annotation'] = annotation
for ip_address in ip_addresses:
try:
if is_public_subnet(ip_address):
public_ips.append(ip_address)
else:
private_ips.append(ip_address)
except Exception:
# IPV6 not supported
pass
if vm.get('snapshot'):
extra['snapshots'] = format_snapshots(
recurse_snapshots(vm.get('snapshot').rootSnapshotList))
for custom_field in vm.get('customValue', []):
key_id = custom_field.key
key = self.find_custom_field_key(key_id)
extra['metadata'][key] = custom_field.value
node = Node(id=uuid, name=name, state=status, size=size,
public_ips=public_ips, private_ips=private_ips,
driver=self, extra=extra)
node._uuid = uuid
return node
def _to_node_recursive(self, virtual_machine):
summary = virtual_machine.summary
name = summary.config.name
path = summary.config.vmPathName
memory = summary.config.memorySizeMB
cpus = summary.config.numCpu
disk = ''
if summary.storage.committed:
disk = summary.storage.committed / (1024 ** 3)
id_to_hash = str(memory) + str(cpus) + str(disk)
size_id = hashlib.md5(id_to_hash.encode("utf-8")).hexdigest()
size_name = name + "-size"
size_extra = {'cpus': cpus}
driver = self
size = NodeSize(id=size_id, name=size_name, ram=memory, disk=disk,
bandwidth=0, price=0, driver=driver, extra=size_extra)
operating_system = summary.config.guestFullName
host = summary.runtime.host
# mist.io needs this metadata
os_type = 'unix'
if 'Microsoft' in str(operating_system):
os_type = 'windows'
uuid = summary.config.instanceUuid
annotation = summary.config.annotation
state = summary.runtime.powerState
status = self.NODE_STATE_MAP.get(state, NodeState.UNKNOWN)
boot_time = summary.runtime.bootTime
ip_addresses = []
if summary.guest is not None:
ip_addresses.append(summary.guest.ipAddress)
overall_status = str(summary.overallStatus)
public_ips = []
private_ips = []
extra = {
"path": path,
"operating_system": operating_system,
"os_type": os_type,
"memory_MB": memory,
"cpus": cpus,
"overallStatus": overall_status,
"metadata": {},
"snapshots": []
}
if disk:
extra['disk'] = disk
if host:
extra['host'] = host.name
parent = host.parent
while parent:
if isinstance(parent, vim.ClusterComputeResource):
extra['cluster'] = parent.name
break
parent = parent.parent
if boot_time:
extra['boot_time'] = boot_time.isoformat()
if annotation:
extra['annotation'] = annotation
for ip_address in ip_addresses:
try:
if is_public_subnet(ip_address):
public_ips.append(ip_address)
else:
private_ips.append(ip_address)
except Exception:
# IPV6 not supported
pass
if virtual_machine.snapshot:
snapshots = [{
'id': s.id,
'name': s.name,
'description': s.description,
'created': s.createTime.strftime('%Y-%m-%d %H:%M'),
'state': s.state}
for s in virtual_machine.snapshot.rootSnapshotList]
extra['snapshots'] = snapshots
for custom_field in virtual_machine.customValue:
key_id = custom_field.key
key = self.find_custom_field_key(key_id)
extra["metadata"][key] = custom_field.value
node = Node(id=uuid, name=name, state=status, size=size,
public_ips=public_ips, private_ips=private_ips,
driver=self, extra=extra)
node._uuid = uuid
return node
def reboot_node(self, node):
"""
"""
vm = self.find_by_uuid(node.id)
return self.wait_for_task(vm.RebootGuest())
def destroy_node(self, node):
"""
"""
vm = self.find_by_uuid(node.id)
if node.state != NodeState.STOPPED:
self.stop_node(node)
return self.wait_for_task(vm.Destroy())
def stop_node(self, node):
"""
"""
vm = self.find_by_uuid(node.id)
return self.wait_for_task(vm.PowerOff())
def start_node(self, node):
"""
"""
vm = self.find_by_uuid(node.id)
return self.wait_for_task(vm.PowerOn())
def ex_list_snapshots(self, node):
"""
List node snapshots
"""
vm = self.find_by_uuid(node.id)
if not vm.snapshot:
return []
return format_snapshots(
recurse_snapshots(vm.snapshot.rootSnapshotList))
def ex_create_snapshot(self, node, snapshot_name, description='',
dump_memory=False, quiesce=False):
"""
Create node snapshot
"""
vm = self.find_by_uuid(node.id)
return WaitForTask(
vm.CreateSnapshot(snapshot_name, description, dump_memory, quiesce)
)
def ex_remove_snapshot(self, node, snapshot_name=None,
remove_children=True):
"""
Remove a snapshot from node.
If snapshot_name is not defined remove the last one.
"""
vm = self.find_by_uuid(node.id)
if not vm.snapshot:
raise LibcloudError(
"Remove snapshot failed. No snapshots for node %s" % node.name,
driver=self)
snapshots = recurse_snapshots(vm.snapshot.rootSnapshotList)
if not snapshot_name:
snapshot = snapshots[-1].snapshot
else:
            for s in snapshots:
                if snapshot_name == s.name:
                    snapshot = s.snapshot
                    break
            else:
                # for/else: raise only if no snapshot matched the given name
                raise LibcloudError("Snapshot `%s` not found" % snapshot_name,
                                    driver=self)
return self.wait_for_task(snapshot.RemoveSnapshot_Task(
removeChildren=remove_children))
def ex_revert_to_snapshot(self, node, snapshot_name=None):
"""
Revert node to a specific snapshot.
If snapshot_name is not defined revert to the last one.
"""
vm = self.find_by_uuid(node.id)
if not vm.snapshot:
raise LibcloudError("Revert failed. No snapshots "
"for node %s" % node.name,
driver=self)
snapshots = recurse_snapshots(vm.snapshot.rootSnapshotList)
if not snapshot_name:
snapshot = snapshots[-1].snapshot
else:
            for s in snapshots:
                if snapshot_name == s.name:
                    snapshot = s.snapshot
                    break
            else:
                # for/else: raise only if no snapshot matched the given name
                raise LibcloudError("Snapshot `%s` not found" % snapshot_name,
                                    driver=self)
return self.wait_for_task(snapshot.RevertToSnapshot_Task())
def _find_template_by_uuid(self, template_uuid):
# on version 5.5 and earlier search index won't return a VM
        template = None
        try:
template = self.find_by_uuid(template_uuid)
except LibcloudError:
content = self.connection.RetrieveContent()
vms = content.viewManager.CreateContainerView(
content.rootFolder,
[vim.VirtualMachine],
recursive=True
).view
for vm in vms:
if vm.config.instanceUuid == template_uuid:
template = vm
except Exception as exc:
raise LibcloudError("Error while searching for template: %s" % exc,
driver=self)
if not template:
raise LibcloudError("Unable to locate VirtualMachine.",
driver=self)
return template
def find_by_uuid(self, node_uuid):
"""Searches VMs for a given uuid
returns pyVmomi.VmomiSupport.vim.VirtualMachine
"""
vm = self.connection.content.searchIndex.FindByUuid(None, node_uuid,
True, True)
if not vm:
# perhaps it is a moid
vm = self._get_item_by_moid('VirtualMachine', node_uuid)
if not vm:
raise LibcloudError("Unable to locate VirtualMachine.",
driver=self)
return vm
def find_custom_field_key(self, key_id):
"""Return custom field key name, provided it's id
"""
if not hasattr(self, "custom_fields"):
content = self.connection.RetrieveContent()
if content.customFieldsManager:
self.custom_fields = content.customFieldsManager.field
else:
self.custom_fields = []
for k in self.custom_fields:
if k.key == key_id:
return k.name
return None
def get_obj(self, vimtype, name):
"""
Return an object by name, if name is None the
first found object is returned
"""
obj = None
content = self.connection.RetrieveContent()
container = content.viewManager.CreateContainerView(
content.rootFolder, vimtype, True)
for c in container.view:
if name:
if c.name == name:
obj = c
break
else:
obj = c
break
return obj
def wait_for_task(self, task, timeout=1800, interval=10):
""" wait for a vCenter task to finish """
start_time = time.time()
task_done = False
while not task_done:
if (time.time() - start_time >= timeout):
raise LibcloudError('Timeout while waiting '
'for import task Id %s'
% task.info.id,
driver=self)
if task.info.state == 'success':
if task.info.result and str(task.info.result) != 'success':
return task.info.result
return True
if task.info.state == 'error':
raise LibcloudError(task.info.error.msg, driver=self)
time.sleep(interval)
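    # Usage sketch (assumed task object): most driver operations funnel their
    # vCenter tasks through this helper, e.g.
    #
    #     task = vm.PowerOff()
    #     self.wait_for_task(task, timeout=600, interval=5)
    #     # returns True (or task.info.result) on success, raises
    #     # LibcloudError on task error or timeout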
def create_node(self, name, image, size, location=None, ex_cluster=None,
ex_network=None, ex_datacenter=None, ex_folder=None,
ex_resource_pool=None, ex_datastore_cluster=None,
ex_datastore=None):
"""
Creates and returns node.
        :keyword ex_network: Name of the "Network" to connect the VM to.
:type ex_network: ``str``
"""
template = self._find_template_by_uuid(image.id)
if ex_cluster:
cluster_name = ex_cluster
else:
cluster_name = location.name
cluster = self.get_obj([vim.ClusterComputeResource], cluster_name)
if not cluster: # It is a host go with it
cluster = self.get_obj([vim.HostSystem], cluster_name)
datacenter = None
if not ex_datacenter: # Get datacenter from cluster
parent = cluster.parent
while parent:
if isinstance(parent, vim.Datacenter):
datacenter = parent
break
parent = parent.parent
if ex_datacenter or datacenter is None:
datacenter = self.get_obj([vim.Datacenter],
ex_datacenter)
if ex_folder:
folder = self.get_obj([vim.Folder], ex_folder)
if folder is None:
folder = self._get_item_by_moid('Folder',
ex_folder)
else:
folder = datacenter.vmFolder
if ex_resource_pool:
resource_pool = self.get_obj([vim.ResourcePool],
ex_resource_pool)
else:
try:
resource_pool = cluster.resourcePool
except AttributeError:
resource_pool = cluster.parent.resourcePool
devices = []
vmconf = vim.vm.ConfigSpec(
numCPUs=int(size.extra.get('cpu', 1)),
memoryMB=int(size.ram),
deviceChange=devices
)
datastore = None
pod = None
podsel = vim.storageDrs.PodSelectionSpec()
if ex_datastore_cluster:
pod = self.get_obj([vim.StoragePod],
ex_datastore_cluster)
else:
content = self.connection.RetrieveContent()
pods = content.viewManager.CreateContainerView(
content.rootFolder, [vim.StoragePod], True).view
for pod in pods:
if cluster.name.lower() in pod.name:
break
podsel.storagePod = pod
storagespec = vim.storageDrs.StoragePlacementSpec()
storagespec.podSelectionSpec = podsel
storagespec.type = 'create'
storagespec.folder = folder
storagespec.resourcePool = resource_pool
storagespec.configSpec = vmconf
try:
content = self.connection.RetrieveContent()
rec = content.storageResourceManager.RecommendDatastores(
storageSpec=storagespec)
rec_action = rec.recommendations[0].action[0]
real_datastore_name = rec_action.destination.name
except Exception:
real_datastore_name = template.datastore[0].info.name
datastore = self.get_obj([vim.Datastore], real_datastore_name)
if ex_datastore:
datastore = self.get_obj([vim.Datastore],
ex_datastore)
if datastore is None:
datastore = self._get_item_by_moid('Datastore',
ex_datastore)
elif not datastore:
datastore = self.get_obj([vim.Datastore],
template.datastore[0].info.name)
add_network = True
if ex_network and len(template.network) > 0:
for nets in template.network:
if template in nets.vm:
add_network = False
if ex_network and add_network:
nicspec = vim.vm.device.VirtualDeviceSpec()
nicspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
nicspec.device = vim.vm.device.VirtualVmxnet3()
nicspec.device.wakeOnLanEnabled = True
nicspec.device.deviceInfo = vim.Description()
portgroup = self.get_obj([vim.dvs.DistributedVirtualPortgroup],
ex_network)
if portgroup:
dvs_port_connection = vim.dvs.PortConnection()
dvs_port_connection.portgroupKey = portgroup.key
dvs_port_connection.switchUuid = portgroup.config.\
distributedVirtualSwitch.uuid
nicspec.device.backing = vim.vm.device.VirtualEthernetCard.\
DistributedVirtualPortBackingInfo()
nicspec.device.backing.port = dvs_port_connection
else:
nicspec.device.backing = vim.vm.device.VirtualEthernetCard.\
NetworkBackingInfo()
nicspec.device.backing.network = self.get_obj([
vim.Network], ex_network)
nicspec.device.backing.deviceName = ex_network
nicspec.device.connectable = vim.vm.device.VirtualDevice.\
ConnectInfo()
nicspec.device.connectable.startConnected = True
nicspec.device.connectable.connected = True
nicspec.device.connectable.allowGuestControl = True
devices.append(nicspec)
# new_disk_kb = int(size.disk) * 1024 * 1024
# disk_spec = vim.vm.device.VirtualDeviceSpec()
# disk_spec.fileOperation = "create"
# disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
# disk_spec.device = vim.vm.device.VirtualDisk()
# disk_spec.device.backing = \
# vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
# if size.extra.get('disk_type') == 'thin':
# disk_spec.device.backing.thinProvisioned = True
# disk_spec.device.backing.diskMode = 'persistent'
# disk_spec.device.capacityInKB = new_disk_kb
# disk_spec.device.controllerKey = controller.key
# devices.append(disk_spec)
clonespec = vim.vm.CloneSpec(config=vmconf)
relospec = vim.vm.RelocateSpec()
relospec.datastore = datastore
relospec.pool = resource_pool
if location:
host = self.get_obj([vim.HostSystem], location.name)
if host:
relospec.host = host
clonespec.location = relospec
clonespec.powerOn = True
task = template.Clone(
folder=folder,
name=name,
spec=clonespec
)
return self._to_node_recursive(self.wait_for_task(task))
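    # Illustrative clone call (all names are hypothetical). `image` must be a
    # template from list_images() and `location` a host or DRS-enabled
    # cluster from list_locations(); since list_sizes() is empty for this
    # driver, the caller supplies its own NodeSize:
    #
    #     size = NodeSize(id='custom', name='2cpu-4gb', ram=4096, disk=20,
    #                     bandwidth=0, price=0, driver=driver,
    #                     extra={'cpu': 2})
    #     node = driver.create_node(name='web-01', image=image, size=size,
    #                               location=location,
    #                               ex_network='VM Network',
    #                               ex_datastore='datastore1')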
def ex_connect_network(self, vm, network_name):
spec = vim.vm.ConfigSpec()
# add Switch here
dev_changes = []
network_spec = vim.vm.device.VirtualDeviceSpec()
# network_spec.fileOperation = "create"
network_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
network_spec.device = vim.vm.device.VirtualVmxnet3()
network_spec.device.backing = vim.vm.device.VirtualEthernetCard.\
NetworkBackingInfo()
network_spec.device.backing.useAutoDetect = False
network_spec.device.backing.network = self.get_obj([
vim.Network], network_name)
network_spec.device.connectable = vim.vm.device.VirtualDevice.\
ConnectInfo()
network_spec.device.connectable.startConnected = True
network_spec.device.connectable.connected = True
network_spec.device.connectable.allowGuestControl = True
dev_changes.append(network_spec)
spec.deviceChange = dev_changes
output = vm.ReconfigVM_Task(spec=spec)
        logger.info(output.info)
def _get_item_by_moid(self, type_, moid):
vm_obj = VmomiSupport.templateOf(
type_)(moid, self.connection._stub)
return vm_obj
def ex_list_folders(self):
content = self.connection.RetrieveContent()
folders_raw = content.viewManager.CreateContainerView(
content.rootFolder, [vim.Folder], True).view
folders = []
for folder in folders_raw:
to_add = {'type': list(folder.childType)}
to_add['name'] = folder.name
to_add['id'] = folder._moId
folders.append(to_add)
return folders
def ex_list_datastores(self):
content = self.connection.RetrieveContent()
datastores_raw = content.viewManager.CreateContainerView(
content.rootFolder, [vim.Datastore], True).view
datastores = []
for dstore in datastores_raw:
to_add = {'type': dstore.summary.type}
to_add['name'] = dstore.name
to_add['id'] = dstore._moId
to_add['free_space'] = int(dstore.summary.freeSpace)
to_add['capacity'] = int(dstore.summary.capacity)
datastores.append(to_add)
return datastores
def ex_open_console(self, vm_uuid):
vm = self.find_by_uuid(vm_uuid)
ticket = vm.AcquireTicket(ticketType='webmks')
return 'wss://{}:{}/ticket/{}'.format(
ticket.host, ticket.port, ticket.ticket)
def _get_version(self):
content = self.connection.RetrieveContent()
        return content.about.version
class VSphereNetwork(object):
"""
Represents information about a VPC (Virtual Private Cloud) network
Note: This class is VSphere specific.
"""
def __init__(self, id, name, extra=None):
self.id = id
self.name = name
self.extra = extra or {}
def __repr__(self):
return (('<VSphereNetwork: id=%s, name=%s')
% (self.id, self.name))
# 6.7
class VSphereResponse(JsonResponse):
def parse_error(self):
if self.body:
message = self.body
message += "-- code: {}".format(self.status)
return message
return self.body
class VSphereConnection(ConnectionKey):
responseCls = VSphereResponse
session_token = None
def add_default_headers(self, headers):
"""
VSphere needs an initial connection to a specific API endpoint to
generate a session-token, which will be used for the purpose of
authenticating for the rest of the session.
"""
headers['Content-Type'] = 'application/json'
headers['Accept'] = 'application/json'
if self.session_token is None:
to_encode = '{}:{}'.format(self.key, self.secret)
b64_user_pass = base64.b64encode(to_encode.encode())
headers['Authorization'] = 'Basic {}'.format(
b64_user_pass.decode())
else:
headers['vmware-api-session-id'] = self.session_token
return headers
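    # Authentication flow sketch (values are illustrative): the first request
    # carries HTTP Basic credentials against the session endpoint; every
    # later request reuses the returned token.
    #
    #     1st request    -> Authorization: Basic YWRtaW46c2VjcmV0
    #     POST /rest/com/vmware/cis/session -> {"value": "<session-id>"}
    #     later requests -> vmware-api-session-id: <session-id>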
class VSphereException(Exception):
def __init__(self, code, message):
self.code = code
self.message = message
self.args = (code, message)
def __str__(self):
return "{} {}".format(self.code, self.message)
def __repr__(self):
return "VSphereException {} {}".format(self.code, self.message)
class VSphere_REST_NodeDriver(NodeDriver):
name = 'VMware vSphere'
website = 'http://www.vmware.com/products/vsphere/'
type = Provider.VSPHERE
connectionCls = VSphereConnection
session_token = None
NODE_STATE_MAP = {
'powered_on': NodeState.RUNNING,
'powered_off': NodeState.STOPPED,
'suspended': NodeState.SUSPENDED
}
VALID_RESPONSE_CODES = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
httplib.NO_CONTENT]
def __init__(self, key, secret=None, secure=True, host=None, port=443,
ca_cert=None):
if not key or not secret:
raise InvalidCredsError("Please provide both username "
"(key) and password (secret).")
super(VSphere_REST_NodeDriver, self).__init__(key=key,
secure=secure,
host=host,
port=port)
        prefixes = ['http://', 'https://']
        for prefix in prefixes:
            if host.startswith(prefix):
                # str.lstrip() strips a character set, not a prefix,
                # so slice the scheme off instead
                host = host[len(prefix):]
                break
if ca_cert:
self.connection.connection.ca_cert = ca_cert
else:
self.connection.connection.ca_cert = False
self.connection.secret = secret
self.host = host
self.username = key
# getting session token
self._get_session_token()
self.driver_soap = None
def _get_soap_driver(self):
if pyvmomi is None:
raise ImportError('Missing "pyvmomi" dependency. '
'You can install it '
'using pip - pip install pyvmomi')
self.driver_soap = VSphereNodeDriver(self.host, self.username,
self.connection.secret,
ca_cert=self.
connection.connection.ca_cert)
def _get_session_token(self):
uri = "/rest/com/vmware/cis/session"
try:
result = self.connection.request(uri, method="POST")
except Exception:
raise
self.session_token = result.object['value']
self.connection.session_token = self.session_token
def list_sizes(self):
return []
def list_nodes(self, ex_filter_power_states=None, ex_filter_folders=None,
ex_filter_names=None, ex_filter_hosts=None,
ex_filter_clusters=None, ex_filter_vms=None,
ex_filter_datacenters=None, ex_filter_resource_pools=None,
max_properties=20):
"""
        The ex parameters are search options and must be lists of strings.
        That includes ex_filter_power_states, which can hold at most two
        items, although it usually makes sense to pass only one
        ("POWERED_ON" or "POWERED_OFF").
        Keep in mind that this method returns up to 1000 nodes, so if your
        vCenter has more, use the provided filters and call it multiple
        times.
"""
req = "/rest/vcenter/vm"
kwargs = {'filter.power_states': ex_filter_power_states,
'filter.folders': ex_filter_folders,
'filter.names': ex_filter_names,
'filter.hosts': ex_filter_hosts,
'filter.clusters': ex_filter_clusters,
'filter.vms': ex_filter_vms,
'filter.datacenters': ex_filter_datacenters,
'filter.resource_pools': ex_filter_resource_pools}
params = {}
for param, value in kwargs.items():
if value:
params[param] = value
result = self._request(req, params=params).object['value']
vm_ids = [[item['vm']] for item in result]
vms = []
interfaces = self._list_interfaces()
for vm_id in vm_ids:
vms.append(self._to_node(vm_id, interfaces))
return vms
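    # Example invocation (hedged; filter values are hypothetical managed
    # object identifiers, as returned by ex_list_hosts / ex_list_folders):
    #
    #     nodes = driver.list_nodes(
    #         ex_filter_power_states=['POWERED_ON'],
    #         ex_filter_hosts=['host-42'])
    #     for node in nodes:
    #         print(node.name, node.state, node.private_ips)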
def async_list_nodes(self):
"""
In this case filtering is not possible.
Use this method when the cloud has
a lot of vms and you want to return them all.
"""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
result = loop.run_until_complete(self._get_all_vms())
vm_ids = [(item['vm'], item['host']) for item in result]
interfaces = self._list_interfaces()
return loop.run_until_complete(self._list_nodes_async(vm_ids,
interfaces))
async def _list_nodes_async(self, vm_ids, interfaces):
loop = asyncio.get_event_loop()
vms = [
loop.run_in_executor(None, self._to_node, vm_ids[i], interfaces)
for i in range(len(vm_ids))
]
return await asyncio.gather(*vms)
async def _get_all_vms(self):
"""
        vSphere 6.7 doesn't offer any pagination, so if we hit the 1000-VM
        limit we take this roundabout way: first get all the datacenters,
        for each datacenter get its hosts, and for each host the VMs it has.
This assumes that datacenters, hosts per datacenter and vms per
host don't exceed 1000.
"""
datacenters = self.ex_list_datacenters()
loop = asyncio.get_event_loop()
hosts_futures = [
loop.run_in_executor(None, functools.partial(
self.ex_list_hosts, ex_filter_datacenters=datacenter['id']))
for datacenter in datacenters
]
hosts = await asyncio.gather(*hosts_futures)
vm_resp_futures = [
loop.run_in_executor(None, functools.partial(
self._get_vms_with_host, host))
for host in itertools.chain(*hosts)
]
vm_resp = await asyncio.gather(*vm_resp_futures)
# return a flat list
return [item for vm_list in vm_resp for item in vm_list]
def _get_vms_with_host(self, host):
req = "/rest/vcenter/vm"
host_id = host['host']
response = self._request(req, params={'filter.hosts': host_id})
vms = response.object['value']
for vm in vms:
vm['host'] = host
return vms
def list_locations(self, ex_show_hosts_in_drs=True):
"""
        Location in the general sense means any resource that allows for node
        creation. In vSphere's case that usually is a host, but if a cluster
        has DRS enabled, the cluster itself can be assigned to create the VM,
        so clusters with DRS enabled are also added to the locations.
        :param ex_show_hosts_in_drs: A DRS cluster automatically schedules
                                     on which host a VM will be placed, so
                                     it may not be desirable to show the
                                     hosts that belong to a DRS enabled
                                     cluster. Set False to hide these hosts.
:type ex_show_hosts_in_drs: `boolean`
"""
clusters = self.ex_list_clusters()
hosts_all = self.ex_list_hosts()
hosts = []
if ex_show_hosts_in_drs:
hosts = hosts_all
else:
cluster_filter = [cluster['cluster'] for cluster in clusters]
filter_hosts = self.ex_list_hosts(
ex_filter_clusters=cluster_filter)
hosts = [host for host in hosts_all if host not in filter_hosts]
driver = self.connection.driver
locations = []
for cluster in clusters:
if cluster['drs_enabled']:
extra = {'type': 'cluster', 'drs': True,
'ha': cluster['ha_enabled']}
locations.append(NodeLocation(id=cluster['cluster'],
name=cluster['name'],
country='', driver=driver,
extra=extra))
for host in hosts:
extra = {'type': 'host', 'status': host['connection_state'],
'state': host['power_state']}
locations.append(NodeLocation(id=host['host'], name=host['name'],
country="", driver=driver,
extra=extra))
return locations
def stop_node(self, node):
if node.state == NodeState.STOPPED:
return True
method = 'POST'
req = "/rest/vcenter/vm/{}/power/stop".format(node.id)
result = self._request(req, method=method)
return result.status in self.VALID_RESPONSE_CODES
def start_node(self, node):
if isinstance(node, str):
node_id = node
else:
if node.state is NodeState.RUNNING:
return True
node_id = node.id
method = 'POST'
req = "/rest/vcenter/vm/{}/power/start".format(node_id)
result = self._request(req, method=method)
return result.status in self.VALID_RESPONSE_CODES
def reboot_node(self, node):
if node.state is not NodeState.RUNNING:
return False
method = 'POST'
req = "/rest/vcenter/vm/{}/power/reset".format(node.id)
result = self._request(req, method=method)
return result.status in self.VALID_RESPONSE_CODES
def destroy_node(self, node):
# make sure the machine is stopped
if node.state is not NodeState.STOPPED:
self.stop_node(node)
# wait to make sure it stopped
# in the future this can be made asynchronously
# for i in range(6):
# if node.state is NodeState.STOPPED:
# break
# time.sleep(10)
req = "/rest/vcenter/vm/{}".format(node.id)
resp = self._request(req, method="DELETE")
return resp.status in self.VALID_RESPONSE_CODES
def ex_suspend_node(self, node):
if node.state is not NodeState.RUNNING:
return False
method = 'POST'
req = "/rest/vcenter/vm/{}/power/suspend".format(node.id)
result = self._request(req, method=method)
return result.status in self.VALID_RESPONSE_CODES
def _list_interfaces(self):
request = "/rest/appliance/networking/interfaces"
response = self._request(request).object['value']
interfaces = [{'name': interface['name'],
'mac': interface['mac'],
'status': interface['status'],
'ip': interface['ipv4']['address']
} for interface in response]
return interfaces
def _to_node(self, vm_id_host, interfaces):
'''
        Build a Node(id, name, state, public_ips, private_ips, driver,
                     size=None, image=None, extra=None, created_at=None)
'''
vm_id = vm_id_host[0]
req = '/rest/vcenter/vm/' + vm_id
vm = self._request(req).object['value']
name = vm['name']
state = self.NODE_STATE_MAP[vm['power_state'].lower()]
# IP's
private_ips = []
nic_macs = set()
for nic in vm['nics']:
nic_macs.add(nic['value']['mac_address'])
for interface in interfaces:
if interface['mac'] in nic_macs:
private_ips.append(interface['ip'])
nic_macs.remove(interface['mac'])
if len(nic_macs) == 0:
break
        public_ips = []  # should the default gateway address be treated as public?
driver = self.connection.driver
# size
total_size = 0
gb_converter = 1024 ** 3
for disk in vm['disks']:
total_size += int(int(disk['value']['capacity'] / gb_converter))
ram = int(vm['memory']['size_MiB'])
cpus = int(vm['cpu']['count'])
id_to_hash = str(ram) + str(cpus) + str(total_size)
size_id = hashlib.md5(id_to_hash.encode("utf-8")).hexdigest()
size_name = name + "-size"
size_extra = {'cpus': cpus}
size = NodeSize(id=size_id, name=size_name, ram=ram, disk=total_size,
bandwidth=0, price=0, driver=driver, extra=size_extra)
# image
image_name = vm['guest_OS']
image_id = image_name + "_id"
image_extra = {"type": "guest_OS"}
image = NodeImage(id=image_id, name=image_name, driver=driver,
extra=image_extra)
extra = {'snapshots': []}
if len(vm_id_host) > 1:
extra['host'] = vm_id_host[1].get('name', '')
return Node(id=vm_id, name=name, state=state, public_ips=public_ips,
private_ips=private_ips, driver=driver,
size=size, image=image, extra=extra)
def ex_list_hosts(self, ex_filter_folders=None, ex_filter_standalone=None,
ex_filter_hosts=None, ex_filter_clusters=None,
ex_filter_names=None, ex_filter_datacenters=None,
ex_filter_connection_states=None):
kwargs = {'filter.folders': ex_filter_folders,
'filter.names': ex_filter_names,
'filter.hosts': ex_filter_hosts,
'filter.clusters': ex_filter_clusters,
'filter.standalone': ex_filter_standalone,
'filter.datacenters': ex_filter_datacenters,
'filter.connection_states': ex_filter_connection_states}
params = {}
for param, value in kwargs.items():
if value:
params[param] = value
req = "/rest/vcenter/host"
result = self._request(req, params=params).object['value']
return result
def ex_list_clusters(self, ex_filter_folders=None, ex_filter_names=None,
ex_filter_datacenters=None, ex_filter_clusters=None):
kwargs = {'filter.folders': ex_filter_folders,
'filter.names': ex_filter_names,
'filter.datacenters': ex_filter_datacenters,
'filter.clusters': ex_filter_clusters}
params = {}
for param, value in kwargs.items():
if value:
params[param] = value
req = "/rest/vcenter/cluster"
result = self._request(req, params=params).object['value']
return result
def ex_list_datacenters(self, ex_filter_folders=None, ex_filter_names=None,
ex_filter_datacenters=None):
req = "/rest/vcenter/datacenter"
kwargs = {'filter.folders': ex_filter_folders,
'filter.names': ex_filter_names,
'filter.datacenters': ex_filter_datacenters}
params = {}
for param, value in kwargs.items():
if value:
params[param] = value
result = self._request(req, params=params)
to_return = [{'name': item['name'],
'id': item['datacenter']} for item in
result.object['value']]
return to_return
def ex_list_content_libraries(self):
req = '/rest/com/vmware/content/library'
try:
result = self._request(req).object
return result['value']
except BaseHTTPError:
return []
def ex_list_content_library_items(self, library_id):
req = "/rest/com/vmware/content/library/item"
params = {'library_id': library_id}
try:
result = self._request(req, params=params).object
return result['value']
except BaseHTTPError:
            logger.error('Library cannot be accessed, '
                         'most probably the vCenter service '
                         'is stopped')
return []
def ex_list_folders(self):
req = "/rest/vcenter/folder"
response = self._request(req).object
folders = response['value']
for folder in folders:
folder['id'] = folder['folder']
return folders
def ex_list_datastores(self, ex_filter_folders=None, ex_filter_names=None,
ex_filter_datacenters=None, ex_filter_types=None,
ex_filter_datastores=None):
req = "/rest/vcenter/datastore"
kwargs = {'filter.folders': ex_filter_folders,
'filter.names': ex_filter_names,
'filter.datacenters': ex_filter_datacenters,
'filter.types': ex_filter_types,
'filter.datastores': ex_filter_datastores}
params = {}
for param, value in kwargs.items():
if value:
params[param] = value
result = self._request(req, params=params).object['value']
for datastore in result:
datastore['id'] = datastore['datastore']
return result
def ex_update_memory(self, node, ram):
"""
        :param ram: The amount of RAM in MiB.
:type ram: `str` or `int`
"""
if isinstance(node, str):
node_id = node
else:
node_id = node.id
request = "/rest/vcenter/vm/{}/hardware/memory".format(node_id)
ram = int(ram)
body = {'spec': {
"size_MiB": ram
}}
response = self._request(request, method="PATCH",
data=json.dumps(body))
return response.status in self.VALID_RESPONSE_CODES
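    # Example (assumed node object): resize a VM to 8 GiB of RAM. The REST
    # endpoint expects MiB, hence 8192 below.
    #
    #     driver.ex_update_memory(node, 8192)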
def ex_update_cpu(self, node, cores):
"""
Assuming 1 Core per socket
:param cores: Integer or string indicating number of cores
:type cores: `int` or `str`
"""
if isinstance(node, str):
node_id = node
else:
node_id = node.id
request = "/rest/vcenter/vm/{}/hardware/cpu".format(node_id)
cores = int(cores)
body = {"spec": {
"count": cores
}}
response = self._request(request, method="PATCH",
data=json.dumps(body))
return response.status in self.VALID_RESPONSE_CODES
def ex_update_capacity(self, node, capacity):
# Should be added when REST API supports it
pass
def ex_add_nic(self, node, network):
"""
Creates a network adapater that will connect to the specified network
for the given node. Returns a boolean indicating success or not.
"""
if isinstance(node, str):
node_id = node
else:
node_id = node.id
spec = {}
spec['mac_type'] = "GENERATED"
spec['backing'] = {}
spec['backing']['type'] = "STANDARD_PORTGROUP"
spec['backing']['network'] = network
spec['start_connected'] = True
data = json.dumps({'spec': spec})
req = "/rest/vcenter/vm/{}/hardware/ethernet".format(node_id)
method = "POST"
resp = self._request(req, method=method, data=data)
return resp.status
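    # Example payload sent by this method for a hypothetical standard
    # port group with identifier 'network-13':
    #
    #     {"spec": {"mac_type": "GENERATED",
    #               "backing": {"type": "STANDARD_PORTGROUP",
    #                           "network": "network-13"},
    #               "start_connected": true}}
    #
    #     driver.ex_add_nic(node, 'network-13')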
def _get_library_item(self, item_id):
req = "/rest/com/vmware/content/library/item/id:{}".format(item_id)
result = self._request(req).object
return result['value']
def _get_resource_pool(self, host_id=None, cluster_id=None, name=None):
        pms = {}
        if host_id:
pms = {"filter.hosts": host_id}
if cluster_id:
pms = {"filter.clusters": cluster_id}
if name:
pms = {"filter.names": name}
rp_request = "/rest/vcenter/resource-pool"
resource_pool = self._request(rp_request,
params=pms).object
return resource_pool['value'][0]['resource_pool']
def _request(self, req, method="GET", params=None, data=None):
try:
result = self.connection.request(req, method=method,
params=params, data=data)
except BaseHTTPError as exc:
if exc.code == 401:
self.connection.session_token = None
self._get_session_token()
result = self.connection.request(req, method=method,
params=params, data=data)
else:
raise
except Exception:
raise
return result
def list_images(self, **kwargs):
libraries = self.ex_list_content_libraries()
item_ids = []
if libraries:
for library in libraries:
item_ids.extend(self.ex_list_content_library_items(library))
items = []
if item_ids:
for item_id in item_ids:
items.append(self._get_library_item(item_id))
images = []
names = set()
if items:
driver = self.connection.driver
for item in items:
names.add(item['name'])
extra = {"type": item['type']}
if item['type'] == 'vm-template':
capacity = item['size'] // (1024**3)
extra['disk_size'] = capacity
images.append(NodeImage(id=item['id'],
name=item['name'],
driver=driver, extra=extra))
if self.driver_soap is None:
self._get_soap_driver()
templates_in_hosts = self.driver_soap.list_images()
for template in templates_in_hosts:
if template.name not in names:
images += [template]
return images
def ex_list_networks(self):
request = "/rest/vcenter/network"
response = self._request(request).object['value']
networks = []
for network in response:
networks.append(VSphereNetwork(id=network['network'],
name=network['name'],
extra={'type': network['type']}))
return networks
def create_node(self, name, image, size=None, location=None,
ex_datastore=None, ex_disks=None,
ex_folder=None, ex_network=None, ex_turned_on=True):
"""
        The image can be either a VM template, an OVF template or just
        the guest OS name.
        ex_folder is required when the image is a vm-template; if it is left
        as `None`, this method will place the VM in an arbitrary folder and
        issue a warning about it.
"""
# image is in the host then need the 6.5 driver
if image.extra['type'] == "template_6_5":
kwargs = {}
kwargs['name'] = name
kwargs['image'] = image
kwargs['size'] = size
kwargs['ex_network'] = ex_network
kwargs['location'] = location
for dstore in self.ex_list_datastores():
if dstore['id'] == ex_datastore:
kwargs['ex_datastore'] = dstore['name']
break
            kwargs['ex_folder'] = ex_folder
if self.driver_soap is None:
self._get_soap_driver()
result = self.driver_soap.create_node(**kwargs)
return result
# post creation checks
create_nic = False
update_memory = False
update_cpu = False
create_disk = False
update_capacity = False
if image.extra['type'] == "guest_OS":
spec = {}
spec['guest_OS'] = image.name
spec['name'] = name
spec['placement'] = {}
if ex_folder is None:
warn = ("The API(6.7) requires the folder to be given, I will"
" place it into a random folder, after creation you "
"might find it convenient to move it into a better "
"folder.")
warnings.warn(warn)
folders = self.ex_list_folders()
for folder in folders:
if folder['type'] == "VIRTUAL_MACHINE":
ex_folder = folder['folder']
if ex_folder is None:
msg = "No suitable folder vor VMs found, please create one"
raise ProviderError(msg, 404)
spec['placement']['folder'] = ex_folder
if location.extra['type'] == "host":
spec['placement']['host'] = location.id
elif location.extra['type'] == 'cluster':
spec['placement']['cluster'] = location.id
elif location.extra['type'] == 'resource_pool':
spec['placement']['resource_pool'] = location.id
spec['placement']['datastore'] = ex_datastore
cpu = size.extra.get('cpu', 1)
spec['cpu'] = {'count': cpu}
spec['memory'] = {'size_MiB': size.ram}
if size.disk:
disk = {}
disk['new_vmdk'] = {}
disk['new_vmdk']['capacity'] = size.disk * (1024 ** 3)
spec['disks'] = [disk]
if ex_network:
nic = {}
nic['mac_type'] = 'GENERATED'
nic['backing'] = {}
nic['backing']['type'] = "STANDARD_PORTGROUP"
nic['backing']['network'] = ex_network
nic['start_connected'] = True
spec['nics'] = [nic]
create_request = "/rest/vcenter/vm"
data = json.dumps({'spec': spec})
elif image.extra['type'] == 'ovf':
ovf_request = ('/rest/com/vmware/vcenter/ovf/library-item'
'/id:{}?~action=filter'.format(image.id))
spec = {}
spec['target'] = {}
if location.extra.get('type') == "resource-pool":
spec['target']['resource_pool_id'] = location.id
elif location.extra.get('type') == "host":
resource_pool = self._get_resource_pool(host_id=location.id)
if not resource_pool:
msg = ("Could not find resource-pool for given location "
"(host). Please make sure the location is valid.")
raise VSphereException(code="504", message=msg)
spec['target']['resource_pool_id'] = resource_pool
spec['target']['host_id'] = location.id
elif location.extra.get('type') == 'cluster':
resource_pool = self._get_resource_pool(cluster_id=location.id)
if not resource_pool:
msg = ("Could not find resource-pool for given location "
"(cluster). Please make sure the location "
"is valid.")
raise VSphereException(code="504", message=msg)
spec['target']['resource_pool_id'] = resource_pool
ovf = self._request(ovf_request, method="POST",
data=json.dumps(spec)).object['value']
spec['deployment_spec'] = {}
spec['deployment_spec']['name'] = name
            # assume EULA acceptance, since deployment was explicitly requested
spec['deployment_spec']['accept_all_EULA'] = True
# network
if ex_network and ovf['networks']:
spec['deployment_spec'][
'network_mappings'] = [{'key': ovf['networks'][0],
'value': ex_network}]
elif not ovf['networks'] and ex_network:
create_nic = True
# storage
if ex_datastore:
spec['deployment_spec']['storage_mappings'] = []
store_map = {"type": "DATASTORE", "datastore_id": ex_datastore}
spec['deployment_spec']['storage_mappings'].append(store_map)
if size and size.ram:
update_memory = True
if size and size.extra and size.extra.get('cpu'):
update_cpu = True
if size and size.disk:
# TODO Should update capacity but it is not possible with 6.7
pass
if ex_disks:
create_disk = True
create_request = ('/rest/com/vmware/vcenter/ovf/library-item'
'/id:{}?~action=deploy'.format(image.id))
data = json.dumps({"target": spec['target'],
'deployment_spec': spec['deployment_spec']})
elif image.extra['type'] == 'vm-template':
tp_request = "/rest/vcenter/vm-template/library-items/" + image.id
template = self._request(tp_request).object['value']
spec = {}
spec['name'] = name
# storage
if ex_datastore:
spec['disk_storage'] = {}
spec['disk_storage']['datastore'] = ex_datastore
# location :: folder,resource group, datacenter, host
spec['placement'] = {}
if not ex_folder:
warn = ("The API(6.7) requires the folder to be given, I will"
" place it into a random folder, after creation you "
"might find it convenient to move it into a better "
"folder.")
warnings.warn(warn)
folders = self.ex_list_folders()
for folder in folders:
if folder['type'] == "VIRTUAL_MACHINE":
ex_folder = folder['folder']
if ex_folder is None:
msg = "No suitable folder vor VMs found, please create one"
raise ProviderError(msg, 404)
spec['placement']['folder'] = ex_folder
if location.extra['type'] == 'host':
spec['placement']['host'] = location.id
elif location.extra['type'] == 'cluster':
spec['placement']['cluster'] = location.id
# network changes the network to existing nics if
# there are no adapters
# in the template then we will make on in the vm
# after the creation finishes
# only one network atm??
spec['hardware_customization'] = {}
if ex_network:
nics = template['nics']
if len(nics) > 0:
nic = nics[0]
spec['hardware_customization']['nics'] = [{
'key': nic['key'],
'value': {'network': ex_network}
}]
else:
create_nic = True
spec['powered_on'] = False
# hardware
if size:
if size.ram:
spec['hardware_customization']['memory_update'] = {
'memory': int(size.ram)
}
if size.extra.get('cpu'):
spec['hardware_customization']['cpu_update'] = {
'num_cpus': size.extra['cpu']
}
if size.disk:
if not len(template['disks']) > 0:
create_disk = True
else:
capacity = size.disk * 1024 * 1024 * 1024
dsk = template['disks'][0]['key']
if template['disks'][0][
'value']['capacity'] < capacity:
update = {'capacity': capacity}
spec['hardware_customization'][
'disks_to_update'] = [
{'key': dsk, 'value': update}]
create_request = ("/rest/vcenter/vm-template/library-items/"
"{}/?action=deploy".format(image.id))
data = json.dumps({'spec': spec})
# deploy the node
result = self._request(create_request,
method="POST", data=data)
# wait until the node is up and then add extra config
node_id = result.object['value']
if image.extra['type'] == "ovf":
node_id = node_id['resource_id']['id']
node = self.list_nodes(ex_filter_vms=node_id)[0]
if create_nic:
self.ex_add_nic(node, ex_network)
if update_memory:
self.ex_update_memory(node, size.ram)
if update_cpu:
self.ex_update_cpu(node, size.extra['cpu'])
if create_disk:
pass # until volumes are added
if update_capacity:
pass # until API method is added
if ex_turned_on:
self.start_node(node)
return node
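    # Illustrative deployment from a content-library OVF item (identifiers
    # are hypothetical; pick real ones from list_images(), list_locations(),
    # ex_list_datastores() and ex_list_networks()):
    #
    #     image = [img for img in driver.list_images()
    #              if img.extra['type'] == 'ovf'][0]
    #     node = driver.create_node(name='db-01', image=image, size=None,
    #                               location=driver.list_locations()[0],
    #                               ex_datastore='datastore-11',
    #                               ex_network='network-13',
    #                               ex_turned_on=True)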
# TODO As soon as snapshot support gets added to the REST api
# these methods should be rewritten with REST api calls
def ex_list_snapshots(self, node):
"""
List node snapshots
"""
if self.driver_soap is None:
self._get_soap_driver()
return self.driver_soap.ex_list_snapshots(node)
def ex_create_snapshot(self, node, snapshot_name, description='',
dump_memory=False, quiesce=False):
"""
Create node snapshot
"""
if self.driver_soap is None:
self._get_soap_driver()
return self.driver_soap.ex_create_snapshot(node, snapshot_name,
description=description,
dump_memory=dump_memory,
                                                   quiesce=quiesce)
def ex_remove_snapshot(self, node, snapshot_name=None,
remove_children=True):
"""
Remove a snapshot from node.
If snapshot_name is not defined remove the last one.
"""
if self.driver_soap is None:
self._get_soap_driver()
return self.driver_soap.ex_remove_snapshot(
node, snapshot_name=snapshot_name, remove_children=remove_children)
def ex_revert_to_snapshot(self, node, snapshot_name=None):
"""
Revert node to a specific snapshot.
If snapshot_name is not defined revert to the last one.
"""
if self.driver_soap is None:
self._get_soap_driver()
return self.driver_soap.ex_revert_to_snapshot(
node, snapshot_name=snapshot_name)
def ex_open_console(self, vm_id):
if self.driver_soap is None:
self._get_soap_driver()
return self.driver_soap.ex_open_console(vm_id)
| andrewsomething/libcloud | libcloud/compute/drivers/vsphere.py | Python | apache-2.0 | 78,343 |
from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('',
url('^server.html$', views.commonplace, {'repo': 'fireplace'},
name='commonplace.fireplace'),
url('^comm/thread/(?P<thread_id>\d+)$', views.commonplace, {'repo': 'commbadge'},
name='commonplace.commbadge.show_thread'),
url('^comm/.*$', views.commonplace, {'repo': 'commbadge'},
name='commonplace.commbadge'),
url('^curation/.*$', views.commonplace, {'repo': 'rocketfuel'},
name='commonplace.rocketfuel'),
url('^stats/.*$', views.commonplace, {'repo': 'marketplace-stats'},
name='commonplace.stats'),
)
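# Example (sketch, not part of this URLconf): assuming it is included at the
# site root, the named patterns above resolve with Django's reverse(), e.g.
#   reverse('commonplace.commbadge.show_thread', kwargs={'thread_id': 42})
#   -> '/comm/thread/42'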
| Joergen/zamboni | mkt/commonplace/urls.py | Python | bsd-3-clause | 655 |
# Copyright 2017 NOKIA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add nuage config and nuage_config_param
Revision ID: b05bd74f4cc7
Revises: bd8449bfe34d
Create Date: 2017-08-01 16:35:25.246027
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b05bd74f4cc7'
down_revision = 'bd8449bfe34d'
def upgrade():
op.create_table(
'nuage_config_parameter',
sa.Column('name', sa.String(255), primary_key=True)
)
op.create_table(
'nuage_config',
sa.Column('organization', sa.String(255), nullable=False),
sa.Column('username', sa.String(255), nullable=False),
sa.Column('config_parameter', sa.String(255), nullable=False),
sa.Column('config_value', sa.String(255), nullable=False),
sa.PrimaryKeyConstraint('organization', 'username',
'config_parameter'),
sa.ForeignKeyConstraint(['config_parameter'],
['nuage_config_parameter.name'],
name='fk_nuage_config_config_parameter',
ondelete='CASCADE')
)
nuage_config_param = sa.Table('nuage_config_parameter', sa.MetaData(),
sa.Column('name', sa.String(255),
primary_key=True)
)
op.bulk_insert(nuage_config_param,
[
{'name': 'auth_token'}
])
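# --- Illustrative only --------------------------------------------------------
# The original migration is upgrade-only (Neutron expand migrations are not
# downgraded in practice). If a symmetric teardown were ever wanted, it would
# drop the two tables in reverse dependency order, roughly:
def downgrade():
    op.drop_table('nuage_config')
    op.drop_table('nuage_config_parameter')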
| nuagenetworks/nuage-openstack-neutron | nuage_neutron/db/migration/alembic_migrations/versions/newton/expand/b05bd74f4cc7_add_nuage_config.py | Python | apache-2.0 | 2,072 |
import json
import uuid
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.core.cache import cache
from rest_framework.test import APITestCase
from rest_framework.exceptions import ValidationError
from model_mommy import mommy
from ..models import (
County,
Contact,
ContactType,
Constituency,
Ward,
UserContact
)
from ..serializers import (
ContactSerializer,
WardSerializer,
WardDetailSerializer,
CountySerializer,
CountyDetailSerializer,
ConstituencySerializer,
ConstituencyDetailSerializer,
UserContactSerializer
)
from ..views import APIRoot
def default(obj):
if isinstance(obj, uuid.UUID):
return str(obj)
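# json.dumps cannot serialize UUID instances on its own; passing this helper
# as default= lets the tests below round-trip DRF response payloads through
# JSON before comparing dicts, e.g. (illustrative):
#   json.dumps({'id': uuid.uuid4()}, default=default)  ->  '{"id": "..."}'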
class LoginMixin(object):
def setUp(self):
self.user = get_user_model().objects.create_superuser(
email='[email protected]',
first_name='Test',
username='test',
password='mtihani',
is_national=True
)
self.client.login(email='[email protected]', password='mtihani')
self.maxDiff = None
super(LoginMixin, self).setUp()
class TestViewCounties(LoginMixin, APITestCase):
def setUp(self):
super(TestViewCounties, self).setUp()
self.url = reverse('api:common:counties_list')
def test_post(self):
data = {
"name": "Kiambu",
"code": 22
}
response = self.client.post(self.url, data)
self.assertEquals(201, response.status_code)
def test_list_counties(self):
county = County.objects.create(
created_by=self.user, updated_by=self.user,
name='county 1', code='100')
county_2 = County.objects.create(
created_by=self.user, updated_by=self.user,
name='county 2', code='101')
url = reverse('api:common:counties_list')
response = self.client.get(url)
expected_data = {
"count": 2,
"next": None,
"previous": None,
"results": [
CountySerializer(county_2).data,
CountySerializer(county).data
]
}
self.assertEquals(
json.loads(json.dumps(expected_data, default=default)),
json.loads(json.dumps(response.data, default=default)))
self.assertEquals(200, response.status_code)
def test_retrieve_single_county(self):
county = County.objects.create(
created_by=self.user, updated_by=self.user,
name='county 1', code='100')
url = reverse('api:common:counties_list')
url += "{}/".format(county.id)
response = self.client.get(url)
expected_data = CountyDetailSerializer(county).data
self.assertEquals(
json.loads(json.dumps(expected_data, default=default)),
json.loads(json.dumps(response.data, default=default)))
self.assertEquals(200, response.status_code)
class TestViewConstituencies(LoginMixin, APITestCase):
def setUp(self):
super(TestViewConstituencies, self).setUp()
def test_list_constituencies(self):
county = County.objects.create(
created_by=self.user, updated_by=self.user,
name='county 1', code='100')
constituency = Constituency.objects.create(
created_by=self.user, updated_by=self.user, county=county,
name='constituency 1',
code='335')
constituency_2 = Constituency.objects.create(
created_by=self.user, updated_by=self.user, name='constituency 2',
code='337', county=county)
url = reverse('api:common:constituencies_list')
response = self.client.get(url)
expected_data = {
"count": 2,
"next": None,
"previous": None,
"results": [
ConstituencySerializer(constituency_2).data,
ConstituencySerializer(constituency).data
]
}
        # the serialized ordering is not deterministic, so json.loads the
        # dumped strings and compare the resulting dicts for equality
self.assertEquals(
json.loads(json.dumps(expected_data, default=default)),
json.loads(json.dumps(response.data, default=default)))
self.assertEquals(200, response.status_code)
self.assertEquals(2, response.data.get('count'))
    def test_retrieve_single_constituency(self):
county = County.objects.create(
created_by=self.user, updated_by=self.user,
name='county 1', code='100')
constituency = Constituency.objects.create(
created_by=self.user, updated_by=self.user, county=county,
name='constituency 1',
code='335')
url = reverse('api:common:constituencies_list')
url += "{}/".format(constituency.id)
response = self.client.get(url)
expected_data = ConstituencyDetailSerializer(constituency).data
self.assertEquals(
json.loads(json.dumps(expected_data, default=default)),
json.loads(json.dumps(response.data, default=default)))
self.assertEquals(200, response.status_code)
class TestViewWards(LoginMixin, APITestCase):
def setUp(self):
super(TestViewWards, self).setUp()
def test_list_wards(self):
county = mommy.make(County)
constituency = Constituency.objects.create(
created_by=self.user, updated_by=self.user,
name='county 1', code='100', county=county)
ward_1 = Ward.objects.create(
created_by=self.user, updated_by=self.user,
constituency=constituency, name='ward 1',
code='335')
ward_2 = Ward.objects.create(
created_by=self.user, updated_by=self.user, name='ward 2',
code='337', constituency=constituency)
url = reverse('api:common:wards_list')
response = self.client.get(url)
expected_data = {
"count": 2,
"next": None,
"previous": None,
"results": [
WardSerializer(ward_2).data,
WardSerializer(ward_1).data
]
}
self.assertEquals(
json.loads(json.dumps(expected_data, default=default)),
json.loads(json.dumps(response.data, default=default)))
self.assertEquals(200, response.status_code)
self.assertEquals(2, response.data.get('count'))
    def test_retrieve_single_ward(self):
county = County.objects.create(
created_by=self.user, updated_by=self.user,
name='county 1', code='100')
constituency = mommy.make(Constituency, county=county)
ward = Ward.objects.create(
created_by=self.user, updated_by=self.user,
constituency=constituency,
name='sub county',
code='335')
url = reverse('api:common:wards_list')
url += "{}/".format(ward.id)
response = self.client.get(url)
expected_data = WardDetailSerializer(ward).data
self.assertEquals(
json.loads(json.dumps(expected_data, default=default)),
json.loads(json.dumps(response.data, default=default)))
self.assertEquals(200, response.status_code)
class TestContactView(LoginMixin, APITestCase):
def setUp(self):
super(TestContactView, self).setUp()
self.url = reverse("api:common:contacts_list")
def test_get_contacts(self):
contact_type = mommy.make(ContactType, name="EMAIL")
contact_type_1 = mommy.make(ContactType, name="PHONE")
contact = mommy.make(
Contact,
contact='[email protected]', contact_type=contact_type)
contact_1 = mommy.make(
Contact,
contact='0784636499', contact_type=contact_type_1)
expected_data = {
"count": 2,
"next": None,
"previous": None,
"results": [
ContactSerializer(contact_1).data,
ContactSerializer(contact).data
]
}
response = self.client.get(self.url)
self.assertEquals(
json.loads(json.dumps(expected_data, default=default)),
json.loads(json.dumps(response.data, default=default)))
def test_post_created_by_not_supplied(self):
# Special case, to test AbstractFieldsMixin
contact_type = mommy.make(ContactType)
data = {
"contact": "072578980",
"contact_type": str(contact_type.id)
}
response = self.client.post(self.url, data)
self.assertEquals(201, response.status_code)
self.assertEquals(1, Contact.objects.count())
self.assertIn('id', json.dumps(response.data, default=default))
self.assertIn('contact', json.dumps(response.data, default=default))
self.assertIn(
'contact_type', json.dumps(response.data, default=default))
def test_post_created_by_supplied(self):
# Special case, to test AbstractFieldsMixin
contact_type = mommy.make(ContactType)
data = {
"contact": "072578980",
"contact_type": str(contact_type.id),
"created_by": str(self.user.id)
}
response = self.client.post(self.url, data)
self.assertEquals(201, response.status_code)
self.assertEquals(1, Contact.objects.count())
self.assertIn('id', json.dumps(response.data, default=default))
self.assertIn('contact', json.dumps(response.data, default=default))
self.assertIn(
'contact_type', json.dumps(response.data, default=default))
def test_retrieve_contact(self):
contact = mommy.make(Contact)
url = self.url + "{}/".format(contact.id)
response = self.client.get(url)
self.assertEquals(200, response.status_code)
def test_filtering(self):
pass
class TestContactTypeView(LoginMixin, APITestCase):
def setUp(self):
super(TestContactTypeView, self).setUp()
self.url = reverse("api:common:contact_types_list")
def test_post_contact_types(self):
data = {
"created": "2015-04-10T08:41:05.169411Z",
"updated": "2015-04-10T08:41:05.169411Z",
"name": "EMAIL",
"description": "This is an email contact typ"
}
response = self.client.post(self.url, data)
self.assertEquals(201, response.status_code)
self.assertIn("id", response.data)
self.assertIn("name", response.data)
self.assertIn("description", response.data)
    # exercise the fall-through branch of the default() helper
def test_default_method(self):
obj = uuid.uuid4()
result = default(obj)
self.assertIsInstance(result, str)
obj_2 = ""
result = default(obj_2)
self.assertIsNone(result)
class TestTownView(LoginMixin, APITestCase):
def setUp(self):
super(TestTownView, self).setUp()
self.url = reverse("api:common:towns_list")
def test_post_contact_types(self):
data = {
"name": "Kiamaiko Taon"
}
response = self.client.post(self.url, data)
self.assertEquals(201, response.status_code)
self.assertIn("id", response.data)
self.assertIn("name", response.data)
self.assertEqual("Kiamaiko Taon", response.data['name'])
class TestAPIRootView(LoginMixin, APITestCase):
def setUp(self):
self.url = reverse('api:root_listing')
cache.clear()
super(TestAPIRootView, self).setUp()
def test_api_root_exception_path(self):
with self.assertRaises(ValidationError) as c:
# Auth makes this test really "interesting"
# We have to monkey patch the view to trigger the error path
root_view = APIRoot()
class DummyRequest(object):
user = get_user_model()()
root_view.get(request=DummyRequest())
self.assertEqual(
c.exception.message, 'Could not create root / metadata view')
def test_api_and_metadata_root_view(self):
"""
So, you've landed here, presumably after an exasperating test failure
( probably cursing under your breath ).
There's a high chance that one of two things is wrong:
* you have a concrete model in an app that is in
`settings.LOCAL_APPS` that has no list & detail views and URLs OR
* you violated the URL naming conventions (for the `name` param )
What are these naming conventions, I hear you ask...
* detail views -> 'api:<app_name>:<applicable_model_verbose_name>'
* list views ->
'api:<app_name>:<applicable_model_verbose_name_plural>'
If Django gives your model a funny `verbose_name_plural` ( because
it ends with a 'y' or 's' and silly Django just appends an 's' ),
set a better `verbose_name_plural` in the model's `Meta`. Once in
a while, Django will also pick a `verbose_name` that is not to your
liking; you can override that too.
PS: Yes, this is a bitch. It is also a good discipline master.
And - it is stupid, only assembling metadata for CRUD views.
"""
# It is not the size of the dog in the fight that matters...
# This is one sensitive bitch of a test!
response = self.client.get(self.url)
self.assertEquals(200, response.status_code)
# Test that the root redirects here
redirect_response = self.client.get(
reverse('root_redirect'), follow=True)
self.assertEquals(200, redirect_response.status_code)
class TestUserContactView(LoginMixin, APITestCase):
def setUp(self):
super(TestUserContactView, self).setUp()
self.url = reverse("api:common:user_contacts_list")
def test_save(self):
user_contact_1 = mommy.make(UserContact)
user_contact_2 = mommy.make(UserContact)
response = self.client.get(self.url)
expected_data = {
"count": 2,
"next": None,
"previous": None,
"results": [
UserContactSerializer(user_contact_2).data,
UserContactSerializer(user_contact_1).data
]
}
self.assertEquals(200, response.status_code)
self.assertEquals(
json.loads(json.dumps(expected_data, default=default)),
json.loads(json.dumps(response.data, default=default)))
def test_retrieve_user_contact(self):
user_contact = mommy.make(UserContact)
url = self.url + "{}/".format(user_contact.id)
response = self.client.get(url)
self.assertEquals(200, response.status_code)
expected_data = UserContactSerializer(user_contact).data
self.assertEquals(
json.loads(json.dumps(expected_data, default=default)),
json.loads(json.dumps(response.data, default=default)))
class TestAuditableViewMixin(LoginMixin, APITestCase):
def setUp(self):
super(TestAuditableViewMixin, self).setUp()
def test_response_with_no_audit(self):
county = mommy.make(County)
url = reverse(
'api:common:county_detail', kwargs={'pk': county.pk})
# First, fetch with no audit
response = self.client.get(url)
self.assertEquals(200, response.status_code)
self.assertTrue(
"revisions" not in
json.loads(json.dumps(response.data, default=default))
)
def test_response_with_audit_single_change(self):
county_rev_1 = mommy.make(County)
url = reverse(
'api:common:county_detail',
kwargs={'pk': county_rev_1.pk}
) + '?include_audit=true'
# First, fetch with no audit
response = self.client.get(url)
self.assertEquals(200, response.status_code)
parsed_response = json.loads(
json.dumps(response.data, default=default))
self.assertTrue("revisions" in parsed_response)
self.assertEqual(
parsed_response["revisions"][0]["code"],
county_rev_1.code
)
self.assertEqual(
parsed_response["revisions"][0]["id"],
str(county_rev_1.id)
)
self.assertEqual(
parsed_response["revisions"][0]["name"],
county_rev_1.name
)
self.assertEqual(
parsed_response["revisions"][0]["active"],
county_rev_1.active
)
self.assertEqual(
parsed_response["revisions"][0]["deleted"],
county_rev_1.deleted
)
def test_response_with_audit_two_changes(self):
county_rev_1 = mommy.make(County)
url = reverse(
'api:common:county_detail',
kwargs={'pk': county_rev_1.pk}
) + '?include_audit=true'
county_rev_1.name = 'Kaunti Yangu'
county_rev_1.save()
response = self.client.get(url)
self.assertEquals(200, response.status_code)
parsed_response = json.loads(
json.dumps(response.data, default=default))
self.assertTrue("revisions" in parsed_response)
self.assertEqual(len(parsed_response["revisions"]), 2)
class TestDownloadView(LoginMixin, APITestCase):
def test_download_view_with_css(self):
url = reverse('api:common:download_pdf')
url = url + "?file_url={}&file_name={}&css={}".format(
'http://google.com', 'awesome_file', 'p,h1,h2,h3 {color: red}'
)
response = self.client.get(url)
self.assertEquals(200, response.status_code)
def test_download_view_without_css(self):
url = reverse('api:common:download_pdf')
url = url + "?file_url={}&file_name={}".format(
'http://google.com', 'awesome_file'
)
response = self.client.get(url)
self.assertEquals(200, response.status_code)
| urandu/mfl_api | common/tests/test_views.py | Python | mit | 18,078 |
from decouple import config
from unipath import Path
import dj_database_url
PROJECT_DIR = Path(__file__).parent
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_django',
'website.apps.core',
# 'website.apps.stravauth',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
)
ROOT_URLCONF = 'website.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [ PROJECT_DIR.child('templates') ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends', # <--
'social_django.context_processors.login_redirect', # <--
],
},
},
]
WSGI_APPLICATION = 'website.wsgi.application'
DATABASES = {
'default': dj_database_url.config(
default = config('DATABASE_URL')
)
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_ROOT = PROJECT_DIR.parent.parent.child('static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
PROJECT_DIR.child('static'),
)
MEDIA_ROOT = PROJECT_DIR.parent.parent.child('media')
MEDIA_URL = '/media/'
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = 'home'
AUTHENTICATION_BACKENDS = (
# 'social_core.backends.github.GithubOAuth2',
'social_core.backends.twitter.TwitterOAuth',
'social_core.backends.facebook.FacebookOAuth2',
'social_core.backends.strava.StravaOAuth',
'social_core.backends.moves.MovesOAuth2',
# 'website.apps.stravauth.backend.StravaV3Backend',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_FACEBOOK_KEY = config('SOCIAL_AUTH_FACEBOOK_KEY')
SOCIAL_AUTH_FACEBOOK_SECRET = config('SOCIAL_AUTH_FACEBOOK_SECRET')
SOCIAL_AUTH_STRAVA_KEY = config('CLIENT_ID')
SOCIAL_AUTH_STRAVA_SECRET = config('CLIENT_SECRET')
SOCIAL_AUTH_MOVES_KEY = config('MOVES_ID')
SOCIAL_AUTH_MOVES_SECRET = config('MOVES_SECRET')
SOCIAL_AUTH_MOVES_SCOPE = ['activity', 'location']
SOCIAL_AUTH_LOGIN_ERROR_URL = '/settings/'
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/settings/'
SOCIAL_AUTH_RAISE_EXCEPTIONS = False
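# Example .env consumed by python-decouple / dj_database_url above (all values
# are placeholders, not real credentials):
#   SECRET_KEY=change-me
#   DEBUG=True
#   DATABASE_URL=postgres://user:password@localhost:5432/website
#   SOCIAL_AUTH_FACEBOOK_KEY=...
#   SOCIAL_AUTH_FACEBOOK_SECRET=...
#   CLIENT_ID=...          # Strava
#   CLIENT_SECRET=...
#   MOVES_ID=...
#   MOVES_SECRET=...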
| Stegallo/django | website/settings.py | Python | mit | 3,178 |
#!/usr/bin/python
import sys
import os
username="#DEFINEME"
ff='/home//.todorepo/%s'
fdir='/home/' + username + '/.todorepo'
class TaskList(object):
def __init__(self):
self.tasks=list()
if not(os.path.isdir(fdir)):
os.mkdir(fdir)
def loadTasks(self):
if os.path.exists(ff%'cur.todo'):
curtod=open(ff%'cur.todo','r')
for line in curtod:
                if line and not(line.isspace()):
                    task = line.rstrip('\n').split(' ', maxsplit=1)
                    if len(task) > 1:
                        self.tasks.append(task[1])
curtod.close()
def addTask(self, task):
self.tasks.append(task)
def delTask(self, number):
        if 1 <= number <= len(self.tasks):
self.tasks.pop(number-1)
def writeTasks(self):
fle=open(ff%'cur.todo','w')
for itemInd in range(len(self.tasks)):
fle.write('%s. %s\n'%(itemInd+1,self.tasks[itemInd]))
fle.close()
def main(argv=None):
    if argv is None:
        argv = sys.argv[1:]
    tm = TaskList()
    tm.loadTasks()
    if not argv:
        print('usage: pytasks.py a <task text> | d <task number>')
        return 1
    if argv[0] == 'a':
        tm.addTask(' '.join(argv[1:]))
    elif argv[0] == 'd':
        tm.delTask(int(argv[1]))
    tm.writeTasks()
if __name__ == "__main__":
sys.exit(main())
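# Example CLI usage (sketch; assumes `username` above has been set):
#   pytasks.py a buy milk   -> appends "buy milk" to ~/.todorepo/cur.todo
#   pytasks.py d 1          -> deletes task number 1 and rewrites the file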
| fredsmithjr/de-scripts | pytasks.py | Python | gpl-2.0 | 1,160 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import hashlib
import binascii
import evernote.edam.userstore.constants as UserStoreConstants
from evernote.edam.notestore.ttypes import NoteFilter
import evernote.edam.type.ttypes as Types
from evernote.api.client import EvernoteClient
# Sandbox: https://sandbox.evernote.com/api/DeveloperToken.action
# Production: https://www.evernote.com/api/DeveloperToken.action
auth_token = "S=s1:U=8f65c:E=14fa7113473:C=1484f600570:P=1cd:A=en-devtoken:V=2:H=484744fd0ffa906797416ae02ce5cd9c"
client = EvernoteClient(token=auth_token, sandbox=True)
note_store = client.get_note_store()
# GET CONTENT
def get_tip_notes(start_date, end_date):
tip_notes = []
noteFilter = NoteFilter()
noteFilter.words = "tag:tip created:%s -created:%s" % (start_date, end_date) # notes with tag #tip created between 2012-01-01 and 2014-09-08 (flight dates)
note_list = note_store.findNotes(auth_token, noteFilter, 0, 10)
for note in note_list.notes:
guid = note.guid
title = note.title
url = "evernote:///view/8f65c/s1/%s/%s/" % (guid, guid)
tip_notes.append( '<div><en-todo/> %s (<a href="%s">view full note</a>)</div>' % (title, url) )
return tip_notes
tip_notes = get_tip_notes("20120101", "20140908")
for tip_note in tip_notes:
# note.content += tip_note
print tip_note
sys.exit()
# List all notebooks:
notebooks = note_store.listNotebooks()
print "Achei ", len(notebooks), " notebooks:"
for notebook in notebooks:
print " * ", notebook.name
# Create a new note:
print "\nCriando uma nova nota no notebook principal\n"
note = Types.Note()
note.title = "Evernote API Workshop @ Campus Party! Python!"
# Attach an image:
image = open('enlogo.png', 'rb').read()
md5 = hashlib.md5()
md5.update(image)
hash = md5.digest()
data = Types.Data()
data.size = len(image)
data.bodyHash = hash
data.body = image
resource = Types.Resource()
resource.mime = 'image/png'
resource.data = data
# Add the new Resource to this note's list of resources
note.resources = [resource]
# To display the image inside the note, we only need its MD5 hash
hash_hex = binascii.hexlify(hash)
# ENML = Evernote Markup Language. A subset of HTML with a few extra tags
note.content = '<?xml version="1.0" encoding="UTF-8"?>'
note.content += '<!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd">'
note.content += '<en-note>'
note.content += 'Esta eh uma nova nota, inserida direto no notebook principal :)<br/>'
note.content += 'Olha aqui o logo do Evernote:<br/>'
note.content += '<en-media type="image/png" hash="' + hash_hex + '"/>'
note.content += '</en-note>'
# Finally, send the note
created_note = note_store.createNote(note)
print "Nota criada com sucesso! O GUID dela eh: ", created_note.guid | Doingcast/bizmem | tip.py | Python | mit | 2,785 |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An OAuth2 client library.
This library provides a client implementation of the OAuth2 protocol (see
https://developers.google.com/storage/docs/authentication.html#oauth).
**** Experimental API ****
This module is experimental and is subject to modification or removal without
notice.
"""
# This implementation is a wrapper around the oauth2client implementation
# that implements caching of access tokens independent of refresh
# tokens (in the python API client oauth2client, there is a single class that
# encapsulates both refresh and access tokens).
from __future__ import absolute_import
import cgi
import datetime
import errno
from hashlib import sha1
import json
import logging
import os
import socket
import tempfile
import threading
import urllib
if os.environ.get('USER_AGENT'):
import boto
boto.UserAgent += os.environ.get('USER_AGENT')
from boto import config
import httplib2
from oauth2client import service_account
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import Credentials
from oauth2client.client import EXPIRY_FORMAT
from oauth2client.client import HAS_CRYPTO
from oauth2client.client import OAuth2Credentials
from retry_decorator.retry_decorator import retry as Retry
import socks
if HAS_CRYPTO:
from oauth2client.client import SignedJwtAssertionCredentials
LOG = logging.getLogger('oauth2_client')
# Lock used for checking/exchanging refresh token, so multithreaded
# operation doesn't attempt concurrent refreshes.
token_exchange_lock = threading.Lock()
DEFAULT_SCOPE = 'https://www.googleapis.com/auth/devstorage.full_control'
METADATA_SERVER = 'http://metadata.google.internal'
META_TOKEN_URI = (METADATA_SERVER + '/computeMetadata/v1/instance/'
'service-accounts/default/token')
META_HEADERS = {
'X-Google-Metadata-Request': 'True'
}
# Note: this is copied from gsutil's gslib.cred_types. It should be kept in
# sync. Also note that this library does not use HMAC, but it's preserved from
# gsutil's copy to maintain compatibility.
class CredTypes(object):
HMAC = "HMAC"
OAUTH2_SERVICE_ACCOUNT = "OAuth 2.0 Service Account"
OAUTH2_USER_ACCOUNT = "Oauth 2.0 User Account"
GCE = "GCE"
class Error(Exception):
"""Base exception for the OAuth2 module."""
pass
class AuthorizationCodeExchangeError(Error):
"""Error trying to exchange an authorization code into a refresh token."""
pass
class TokenCache(object):
"""Interface for OAuth2 token caches."""
def PutToken(self, key, value):
raise NotImplementedError
def GetToken(self, key):
raise NotImplementedError
class NoopTokenCache(TokenCache):
"""A stub implementation of TokenCache that does nothing."""
def PutToken(self, key, value):
pass
def GetToken(self, key):
return None
class InMemoryTokenCache(TokenCache):
"""An in-memory token cache.
The cache is implemented by a python dict, and inherits the thread-safety
properties of dict.
"""
def __init__(self):
super(InMemoryTokenCache, self).__init__()
self.cache = dict()
def PutToken(self, key, value):
LOG.debug('InMemoryTokenCache.PutToken: key=%s', key)
self.cache[key] = value
def GetToken(self, key):
value = self.cache.get(key, None)
LOG.debug('InMemoryTokenCache.GetToken: key=%s%s present',
key, ' not' if value is None else '')
return value
class FileSystemTokenCache(TokenCache):
"""An implementation of a token cache that persists tokens on disk.
Each token object in the cache is stored in serialized form in a separate
file. The cache file's name can be configured via a path pattern that is
parameterized by the key under which a value is cached and optionally the
  current process's uid as obtained by os.getuid().
Since file names are generally publicly visible in the system, it is important
that the cache key does not leak information about the token's value. If
client code computes cache keys from token values, a cryptographically strong
one-way function must be used.
"""
def __init__(self, path_pattern=None):
"""Creates a FileSystemTokenCache.
Args:
path_pattern: Optional string argument to specify the path pattern for
cache files. The argument should be a path with format placeholders
'%(key)s' and optionally '%(uid)s'. If the argument is omitted, the
default pattern
          <tmpdir>/oauth2_client-tokencache.%(uid)s.%(key)s
is used, where <tmpdir> is replaced with the system temp dir as
obtained from tempfile.gettempdir().
"""
super(FileSystemTokenCache, self).__init__()
self.path_pattern = path_pattern
if not path_pattern:
self.path_pattern = os.path.join(
tempfile.gettempdir(), 'oauth2_client-tokencache.%(uid)s.%(key)s')
def CacheFileName(self, key):
uid = '_'
try:
# os.getuid() doesn't seem to work in Windows
uid = str(os.getuid())
except:
pass
return self.path_pattern % {'key': key, 'uid': uid}
def PutToken(self, key, value):
"""Serializes the value to the key's filename.
    To ensure that written tokens aren't leaked to different users, we
a) unlink an existing cache file, if any (to ensure we don't fall victim
to symlink attacks and the like),
b) create a new file with O_CREAT | O_EXCL (to ensure nobody is trying to
race us)
If either of these steps fail, we simply give up (but log a warning). Not
caching access tokens is not catastrophic, and failure to create a file
can happen for either of the following reasons:
- someone is attacking us as above, in which case we want to default to
safe operation (not write the token);
- another legitimate process is racing us; in this case one of the two
will win and write the access token, which is fine;
- we don't have permission to remove the old file or write to the
specified directory, in which case we can't recover
Args:
key: the hash key to store.
value: the access_token value to serialize.
"""
cache_file = self.CacheFileName(key)
LOG.debug('FileSystemTokenCache.PutToken: key=%s, cache_file=%s',
key, cache_file)
try:
os.unlink(cache_file)
except:
# Ignore failure to unlink the file; if the file exists and can't be
# unlinked, the subsequent open with O_CREAT | O_EXCL will fail.
pass
flags = os.O_RDWR | os.O_CREAT | os.O_EXCL
# Accommodate Windows; stolen from python2.6/tempfile.py.
if hasattr(os, 'O_NOINHERIT'):
flags |= os.O_NOINHERIT
if hasattr(os, 'O_BINARY'):
flags |= os.O_BINARY
try:
fd = os.open(cache_file, flags, 0600)
except (OSError, IOError) as e:
LOG.warning('FileSystemTokenCache.PutToken: '
'Failed to create cache file %s: %s', cache_file, e)
return
f = os.fdopen(fd, 'w+b')
f.write(value.Serialize())
f.close()
def GetToken(self, key):
"""Returns a deserialized access token from the key's filename."""
value = None
cache_file = self.CacheFileName(key)
try:
f = open(cache_file)
value = AccessToken.UnSerialize(f.read())
f.close()
except (IOError, OSError) as e:
if e.errno != errno.ENOENT:
LOG.warning('FileSystemTokenCache.GetToken: '
'Failed to read cache file %s: %s', cache_file, e)
except Exception as e:
LOG.warning('FileSystemTokenCache.GetToken: '
'Failed to read cache file %s (possibly corrupted): %s',
cache_file, e)
LOG.debug('FileSystemTokenCache.GetToken: key=%s%s present (cache_file=%s)',
key, ' not' if value is None else '', cache_file)
return value
class OAuth2Client(object):
"""Common logic for OAuth2 clients."""
def __init__(self, cache_key_base, access_token_cache=None,
datetime_strategy=datetime.datetime, auth_uri=None,
token_uri=None, disable_ssl_certificate_validation=False,
proxy_host=None, proxy_port=None, proxy_user=None,
proxy_pass=None, ca_certs_file=None):
# datetime_strategy is used to invoke utcnow() on; it is injected into the
# constructor for unit testing purposes.
self.auth_uri = auth_uri
self.token_uri = token_uri
self.cache_key_base = cache_key_base
self.datetime_strategy = datetime_strategy
self.access_token_cache = access_token_cache or InMemoryTokenCache()
self.disable_ssl_certificate_validation = disable_ssl_certificate_validation
self.ca_certs_file = ca_certs_file
if proxy_host and proxy_port:
self._proxy_info = httplib2.ProxyInfo(socks.PROXY_TYPE_HTTP,
proxy_host,
proxy_port,
proxy_user=proxy_user,
proxy_pass=proxy_pass,
proxy_rdns=True)
else:
self._proxy_info = None
def CreateHttpRequest(self):
return httplib2.Http(
ca_certs=self.ca_certs_file,
disable_ssl_certificate_validation=(
self.disable_ssl_certificate_validation),
proxy_info=self._proxy_info)
def GetAccessToken(self):
"""Obtains an access token for this client.
This client's access token cache is first checked for an existing,
not-yet-expired access token. If none is found, the client obtains a fresh
access token from the OAuth2 provider's token endpoint.
Returns:
The cached or freshly obtained AccessToken.
Raises:
AccessTokenRefreshError if an error occurs.
"""
# Ensure only one thread at a time attempts to get (and possibly refresh)
# the access token. This doesn't prevent concurrent refresh attempts across
# multiple gsutil instances, but at least protects against multiple threads
# simultaneously attempting to refresh when gsutil -m is used.
token_exchange_lock.acquire()
try:
cache_key = self.CacheKey()
LOG.debug('GetAccessToken: checking cache for key %s', cache_key)
access_token = self.access_token_cache.GetToken(cache_key)
LOG.debug('GetAccessToken: token from cache: %s', access_token)
if access_token is None or access_token.ShouldRefresh():
LOG.debug('GetAccessToken: fetching fresh access token...')
access_token = self.FetchAccessToken()
LOG.debug('GetAccessToken: fresh access token: %s', access_token)
self.access_token_cache.PutToken(cache_key, access_token)
return access_token
finally:
token_exchange_lock.release()
def CacheKey(self):
"""Computes a cache key.
The cache key is computed as the SHA1 hash of the refresh token for user
accounts, or the hash of the gs_service_client_id for service accounts,
which satisfies the FileSystemTokenCache requirement that cache keys do not
leak information about token values.
Returns:
A hash key.
"""
h = sha1()
h.update(self.cache_key_base)
return h.hexdigest()
def GetAuthorizationHeader(self):
"""Gets the access token HTTP authorization header value.
Returns:
The value of an Authorization HTTP header that authenticates
requests with an OAuth2 access token.
"""
return 'Bearer %s' % self.GetAccessToken().token
class _BaseOAuth2ServiceAccountClient(OAuth2Client):
"""Base class for OAuth2ServiceAccountClients.
Args:
client_id: The OAuth2 client ID of this client.
access_token_cache: An optional instance of a TokenCache. If omitted or
None, an InMemoryTokenCache is used.
auth_uri: The URI for OAuth2 authorization.
token_uri: The URI used to refresh access tokens.
datetime_strategy: datetime module strategy to use.
disable_ssl_certificate_validation: True if certifications should not be
validated.
proxy_host: An optional string specifying the host name of an HTTP proxy
to be used.
proxy_port: An optional int specifying the port number of an HTTP proxy
to be used.
proxy_user: An optional string specifying the user name for interacting
with the HTTP proxy.
proxy_pass: An optional string specifying the password for interacting
with the HTTP proxy.
ca_certs_file: The cacerts.txt file to use.
"""
def __init__(self, client_id, access_token_cache=None, auth_uri=None,
token_uri=None, datetime_strategy=datetime.datetime,
disable_ssl_certificate_validation=False,
proxy_host=None, proxy_port=None, proxy_user=None,
proxy_pass=None, ca_certs_file=None):
super(_BaseOAuth2ServiceAccountClient, self).__init__(
cache_key_base=client_id, auth_uri=auth_uri, token_uri=token_uri,
access_token_cache=access_token_cache,
datetime_strategy=datetime_strategy,
disable_ssl_certificate_validation=disable_ssl_certificate_validation,
proxy_host=proxy_host, proxy_port=proxy_port, proxy_user=proxy_user,
proxy_pass=proxy_pass, ca_certs_file=ca_certs_file)
self._client_id = client_id
def FetchAccessToken(self):
credentials = self.GetCredentials()
http = self.CreateHttpRequest()
credentials.refresh(http)
return AccessToken(credentials.access_token, credentials.token_expiry,
datetime_strategy=self.datetime_strategy)
class OAuth2ServiceAccountClient(_BaseOAuth2ServiceAccountClient):
"""An OAuth2 service account client using .p12 or .pem keys."""
def __init__(self, client_id, private_key, password,
access_token_cache=None, auth_uri=None, token_uri=None,
datetime_strategy=datetime.datetime,
disable_ssl_certificate_validation=False,
proxy_host=None, proxy_port=None, proxy_user=None,
proxy_pass=None, ca_certs_file=None):
# Avoid long repeated kwargs list.
# pylint: disable=g-doc-args
"""Creates an OAuth2ServiceAccountClient.
Args:
client_id: The OAuth2 client ID of this client.
private_key: The private key associated with this service account.
password: The private key password used for the crypto signer.
Keyword arguments match the _BaseOAuth2ServiceAccountClient class.
"""
# pylint: enable=g-doc-args
super(OAuth2ServiceAccountClient, self).__init__(
client_id, auth_uri=auth_uri, token_uri=token_uri,
access_token_cache=access_token_cache,
datetime_strategy=datetime_strategy,
disable_ssl_certificate_validation=disable_ssl_certificate_validation,
proxy_host=proxy_host, proxy_port=proxy_port, proxy_user=proxy_user,
proxy_pass=proxy_pass, ca_certs_file=ca_certs_file)
self._private_key = private_key
self._password = password
def GetCredentials(self):
if HAS_CRYPTO:
return SignedJwtAssertionCredentials(
self._client_id, self._private_key, scope=DEFAULT_SCOPE,
private_key_password=self._password)
else:
raise MissingDependencyError(
'Service account authentication requires PyOpenSSL. Please install '
'this library and try again.')
# TODO: oauth2client should expose _ServiceAccountCredentials as it is the only
# way to properly set scopes. In the longer term this class should probably
# be refactored into oauth2client directly in a way that allows for setting of
# user agent and scopes. https://github.com/google/oauth2client/issues/164
# pylint: disable=protected-access
class ServiceAccountCredentials(service_account._ServiceAccountCredentials):
def to_json(self):
self.service_account_name = self._service_account_email
strip = (['_private_key'] +
Credentials.NON_SERIALIZED_MEMBERS)
return super(ServiceAccountCredentials, self)._to_json(strip)
@classmethod
def from_json(cls, s):
try:
data = json.loads(s)
retval = ServiceAccountCredentials(
service_account_id=data['_service_account_id'],
service_account_email=data['_service_account_email'],
private_key_id=data['_private_key_id'],
private_key_pkcs8_text=data['_private_key_pkcs8_text'],
scopes=[DEFAULT_SCOPE])
# TODO: Need to define user agent here,
# but it is not known until runtime.
retval.invalid = data['invalid']
retval.access_token = data['access_token']
if 'token_expiry' in data:
retval.token_expiry = datetime.datetime.strptime(
data['token_expiry'], EXPIRY_FORMAT)
return retval
except KeyError, e:
raise Exception('Your JSON credentials are invalid; '
'missing required entry %s.' % e[0])
# pylint: enable=protected-access
class OAuth2JsonServiceAccountClient(_BaseOAuth2ServiceAccountClient):
"""An OAuth2 service account client using .json keys."""
def __init__(self, client_id, service_account_email, private_key_id,
private_key_pkcs8_text, access_token_cache=None, auth_uri=None,
token_uri=None, datetime_strategy=datetime.datetime,
disable_ssl_certificate_validation=False,
proxy_host=None, proxy_port=None, proxy_user=None,
proxy_pass=None, ca_certs_file=None):
# Avoid long repeated kwargs list.
# pylint: disable=g-doc-args
"""Creates an OAuth2JsonServiceAccountClient.
Args:
client_id: The OAuth2 client ID of this client.
      service_account_email: The email associated with this service account.
private_key_id: The private key id associated with this service account.
private_key_pkcs8_text: The pkcs8 text containing the private key data.
Keyword arguments match the _BaseOAuth2ServiceAccountClient class.
"""
# pylint: enable=g-doc-args
super(OAuth2JsonServiceAccountClient, self).__init__(
client_id, auth_uri=auth_uri, token_uri=token_uri,
access_token_cache=access_token_cache,
datetime_strategy=datetime_strategy,
disable_ssl_certificate_validation=disable_ssl_certificate_validation,
proxy_host=proxy_host, proxy_port=proxy_port, proxy_user=proxy_user,
proxy_pass=proxy_pass, ca_certs_file=ca_certs_file)
self._service_account_email = service_account_email
self._private_key_id = private_key_id
self._private_key_pkcs8_text = private_key_pkcs8_text
def GetCredentials(self):
return ServiceAccountCredentials(
service_account_id=self._client_id,
service_account_email=self._service_account_email,
private_key_id=self._private_key_id,
private_key_pkcs8_text=self._private_key_pkcs8_text,
scopes=[DEFAULT_SCOPE])
# TODO: Need to plumb user agent through here.
class GsAccessTokenRefreshError(Exception):
"""Transient error when requesting access token."""
def __init__(self, e):
super(Exception, self).__init__(e)
class GsInvalidRefreshTokenError(Exception):
def __init__(self, e):
super(Exception, self).__init__(e)
class MissingDependencyError(Exception):
def __init__(self, e):
super(Exception, self).__init__(e)
class OAuth2UserAccountClient(OAuth2Client):
"""An OAuth2 client."""
def __init__(self, token_uri, client_id, client_secret, refresh_token,
auth_uri=None, access_token_cache=None,
datetime_strategy=datetime.datetime,
disable_ssl_certificate_validation=False,
proxy_host=None, proxy_port=None, proxy_user=None,
proxy_pass=None, ca_certs_file=None):
"""Creates an OAuth2UserAccountClient.
Args:
token_uri: The URI used to refresh access tokens.
client_id: The OAuth2 client ID of this client.
client_secret: The OAuth2 client secret of this client.
refresh_token: The token used to refresh the access token.
auth_uri: The URI for OAuth2 authorization.
access_token_cache: An optional instance of a TokenCache. If omitted or
None, an InMemoryTokenCache is used.
datetime_strategy: datetime module strategy to use.
disable_ssl_certificate_validation: True if certifications should not be
validated.
proxy_host: An optional string specifying the host name of an HTTP proxy
to be used.
proxy_port: An optional int specifying the port number of an HTTP proxy
to be used.
proxy_user: An optional string specifying the user name for interacting
with the HTTP proxy.
proxy_pass: An optional string specifying the password for interacting
with the HTTP proxy.
ca_certs_file: The cacerts.txt file to use.
"""
super(OAuth2UserAccountClient, self).__init__(
cache_key_base=refresh_token, auth_uri=auth_uri, token_uri=token_uri,
access_token_cache=access_token_cache,
datetime_strategy=datetime_strategy,
disable_ssl_certificate_validation=disable_ssl_certificate_validation,
proxy_host=proxy_host, proxy_port=proxy_port, proxy_user=proxy_user,
proxy_pass=proxy_pass, ca_certs_file=ca_certs_file)
self.token_uri = token_uri
self.client_id = client_id
self.client_secret = client_secret
self.refresh_token = refresh_token
def GetCredentials(self):
"""Fetches a credentials objects from the provider's token endpoint."""
access_token = self.GetAccessToken()
credentials = OAuth2Credentials(
access_token.token, self.client_id, self.client_secret,
self.refresh_token, access_token.expiry, self.token_uri, None)
return credentials
@Retry(GsAccessTokenRefreshError,
tries=config.get('OAuth2', 'oauth2_refresh_retries', 6),
timeout_secs=1)
def FetchAccessToken(self):
"""Fetches an access token from the provider's token endpoint.
Fetches an access token from this client's OAuth2 provider's token endpoint.
Returns:
The fetched AccessToken.
"""
try:
http = self.CreateHttpRequest()
credentials = OAuth2Credentials(None, self.client_id, self.client_secret,
self.refresh_token, None, self.token_uri, None)
credentials.refresh(http)
return AccessToken(credentials.access_token,
credentials.token_expiry, datetime_strategy=self.datetime_strategy)
except AccessTokenRefreshError, e:
if 'Invalid response 403' in e.message:
# This is the most we can do at the moment to accurately detect rate
# limiting errors since they come back as 403s with no further
# information.
raise GsAccessTokenRefreshError(e)
elif 'invalid_grant' in e.message:
LOG.info("""
Attempted to retrieve an access token from an invalid refresh token. Two common
cases in which you will see this error are:
1. Your refresh token was revoked.
2. Your refresh token was typed incorrectly.
""")
raise GsInvalidRefreshTokenError(e)
else:
raise
class OAuth2GCEClient(OAuth2Client):
"""OAuth2 client for GCE instance."""
def __init__(self):
super(OAuth2GCEClient, self).__init__(
cache_key_base='',
# Only InMemoryTokenCache can be used with empty cache_key_base.
access_token_cache=InMemoryTokenCache())
@Retry(GsAccessTokenRefreshError,
tries=6,
timeout_secs=1)
def FetchAccessToken(self):
response = None
try:
http = httplib2.Http()
response, content = http.request(META_TOKEN_URI, method='GET',
body=None, headers=META_HEADERS)
except Exception:
raise GsAccessTokenRefreshError()
if response.status == 200:
d = json.loads(content)
return AccessToken(
d['access_token'],
datetime.datetime.now() +
datetime.timedelta(seconds=d.get('expires_in', 0)),
datetime_strategy=self.datetime_strategy)
def _IsGCE():
try:
http = httplib2.Http()
response, _ = http.request(METADATA_SERVER)
return response.status == 200
except (httplib2.ServerNotFoundError, socket.error):
# We might see something like "No route to host" propagated as a socket
# error. We might also catch transient socket errors, but at that point
# we're going to fail anyway, just with a different error message. With
# this approach, we'll avoid having to enumerate all possible non-transient
# socket errors.
return False
except Exception, e:
LOG.warning("Failed to determine whether we're running on GCE, so we'll"
"assume that we aren't: %s", e)
return False
return False
def CreateOAuth2GCEClient():
return OAuth2GCEClient() if _IsGCE() else None
class AccessToken(object):
"""Encapsulates an OAuth2 access token."""
def __init__(self, token, expiry, datetime_strategy=datetime.datetime):
self.token = token
self.expiry = expiry
self.datetime_strategy = datetime_strategy
@staticmethod
def UnSerialize(query):
"""Creates an AccessToken object from its serialized form."""
def GetValue(d, key):
return (d.get(key, [None]))[0]
kv = cgi.parse_qs(query)
if not kv['token']:
return None
expiry = None
expiry_tuple = GetValue(kv, 'expiry')
if expiry_tuple:
try:
expiry = datetime.datetime(
*[int(n) for n in expiry_tuple.split(',')])
except:
return None
return AccessToken(GetValue(kv, 'token'), expiry)
def Serialize(self):
"""Serializes this object as URI-encoded key-value pairs."""
# There's got to be a better way to serialize a datetime. Unfortunately,
# there is no reliable way to convert into a unix epoch.
kv = {'token': self.token}
if self.expiry:
t = self.expiry
tupl = (t.year, t.month, t.day, t.hour, t.minute, t.second, t.microsecond)
kv['expiry'] = ','.join([str(i) for i in tupl])
return urllib.urlencode(kv)
def ShouldRefresh(self, time_delta=300):
"""Whether the access token needs to be refreshed.
Args:
time_delta: refresh access token when it expires within time_delta secs.
Returns:
True if the token is expired or about to expire, False if the
token should be expected to work. Note that the token may still
be rejected, e.g. if it has been revoked server-side.
"""
if self.expiry is None:
return False
return (self.datetime_strategy.utcnow()
+ datetime.timedelta(seconds=time_delta) > self.expiry)
def __eq__(self, other):
return self.token == other.token and self.expiry == other.expiry
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return 'AccessToken(token=%s, expiry=%sZ)' % (self.token, self.expiry)
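# --- Illustrative only (not part of the library) ------------------------------
# A small sketch of the access token round trip used by FileSystemTokenCache;
# the cache key, token value and expiry below are placeholders.
if __name__ == '__main__':
    cache = FileSystemTokenCache()
    key = sha1('example-refresh-token').hexdigest()
    token = AccessToken('ya29.placeholder-token',
                        datetime.datetime(2015, 1, 1, 12, 30, 0))
    cache.PutToken(key, token)
    assert cache.GetToken(key) == token
    print 'cached at %s' % cache.CacheFileName(key)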
| Chilledheart/chromium | tools/telemetry/third_party/gsutilz/third_party/gcs-oauth2-boto-plugin/gcs_oauth2_boto_plugin/oauth2_client.py | Python | bsd-3-clause | 27,433 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of kryptomime, a Python module for email kryptography.
# Copyright © 2013,2014 Thomas Tanner <[email protected]>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the included LICENSE file for details.
#______________________________________________________________________________
from __future__ import absolute_import
from __future__ import print_function
import setuptools, sys
from setuptools.command.test import test as TestCommand
__author__ = "Thomas Tanner"
__contact__ = '[email protected]'
__url__ = 'https://github.com/ttanner/kryptomime'
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = None
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
        # import here, because outside the eggs aren't loaded
import pytest, sys
args = [self.pytest_args] if self.pytest_args else []
errno = pytest.main(['--cov-config','.coveragerc','--cov','kryptomime']+args)
sys.exit(errno)
subproc = ["subprocess32"] if sys.version_info[0] == 2 else []
setuptools.setup(
name = "kryptomime",
description="Python support for E-Mail kryptography",
long_description=open('README.rst').read(),
license="GPLv3+",
version='0.5.0',
author=__author__,
author_email=__contact__,
maintainer=__author__,
maintainer_email=__contact__,
url=__url__,
package_dir={'kryptomime': 'kryptomime'},
packages=['kryptomime'],
package_data={'': ['README.rst', 'COPYING.txt', 'requirements.txt']},
tests_require=['pytest-cov','pytest-pythonpath'],
cmdclass = {'test': PyTest},
install_requires=['gnupg>=1.3.2','six>=1.8']+subproc,
extras_require={'docs': ["Sphinx>=1.1", "repoze.sphinx"]},
platforms="Linux, BSD, OSX, Windows",
download_url="https://github.com/ttanner/kryptomime/archive/master.zip",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Security :: Cryptography",
"Topic :: Utilities",]
)
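# Example (sketch): the PyTest command above forwards extra py.test arguments
# through the --pytest-args option, e.g.
#   python setup.py test --pytest-args="-x -k openpgp"
# run_tests() always prepends the coverage options from .coveragerc.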
| ttanner/kryptomime | setup.py | Python | lgpl-3.0 | 3,207 |
# Generated by Django 2.2.5 on 2019-09-14 07:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('groups', '0039_group_theme'),
]
operations = [
migrations.AlterField(
model_name='group',
name='theme',
field=models.TextField(choices=[('foodsaving', 'foodsaving'), ('bikekitchen', 'bikekitchen'), ('general', 'general')], default='foodsaving'),
),
]
| yunity/foodsaving-backend | karrot/groups/migrations/0040_auto_20190914_0724.py | Python | agpl-3.0 | 479 |
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.arrays import BooleanArray
from pandas.core.arrays.boolean import coerce_to_array
def test_boolean_array_constructor():
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
result = BooleanArray(values, mask)
expected = pd.array([True, False, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
with pytest.raises(TypeError, match="values should be boolean numpy array"):
BooleanArray(values.tolist(), mask)
with pytest.raises(TypeError, match="mask should be boolean numpy array"):
BooleanArray(values, mask.tolist())
with pytest.raises(TypeError, match="values should be boolean numpy array"):
BooleanArray(values.astype(int), mask)
with pytest.raises(TypeError, match="mask should be boolean numpy array"):
BooleanArray(values, None)
with pytest.raises(ValueError, match="values.shape must match mask.shape"):
BooleanArray(values.reshape(1, -1), mask)
with pytest.raises(ValueError, match="values.shape must match mask.shape"):
BooleanArray(values, mask.reshape(1, -1))
def test_boolean_array_constructor_copy():
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
result = BooleanArray(values, mask)
assert result._data is values
assert result._mask is mask
result = BooleanArray(values, mask, copy=True)
assert result._data is not values
assert result._mask is not mask
def test_to_boolean_array():
expected = BooleanArray(
np.array([True, False, True]), np.array([False, False, False])
)
result = pd.array([True, False, True], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = pd.array(np.array([True, False, True]), dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = pd.array(np.array([True, False, True], dtype=object), dtype="boolean")
tm.assert_extension_array_equal(result, expected)
# with missing values
expected = BooleanArray(
np.array([True, False, True]), np.array([False, False, True])
)
result = pd.array([True, False, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = pd.array(np.array([True, False, None], dtype=object), dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_to_boolean_array_all_none():
expected = BooleanArray(np.array([True, True, True]), np.array([True, True, True]))
result = pd.array([None, None, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = pd.array(np.array([None, None, None], dtype=object), dtype="boolean")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"a, b",
[
([True, False, None, np.nan, pd.NA], [True, False, None, None, None]),
([True, np.nan], [True, None]),
([True, pd.NA], [True, None]),
([np.nan, np.nan], [None, None]),
(np.array([np.nan, np.nan], dtype=float), [None, None]),
],
)
def test_to_boolean_array_missing_indicators(a, b):
result = pd.array(a, dtype="boolean")
expected = pd.array(b, dtype="boolean")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"values",
[
["foo", "bar"],
["1", "2"],
# "foo",
[1, 2],
[1.0, 2.0],
pd.date_range("20130101", periods=2),
np.array(["foo"]),
np.array([1, 2]),
np.array([1.0, 2.0]),
[np.nan, {"a": 1}],
],
)
def test_to_boolean_array_error(values):
# error in converting existing arrays to BooleanArray
msg = "Need to pass bool-like value"
with pytest.raises(TypeError, match=msg):
pd.array(values, dtype="boolean")
def test_to_boolean_array_from_integer_array():
result = pd.array(np.array([1, 0, 1, 0]), dtype="boolean")
expected = pd.array([True, False, True, False], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
# with missing values
result = pd.array(np.array([1, 0, 1, None]), dtype="boolean")
expected = pd.array([True, False, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_to_boolean_array_from_float_array():
result = pd.array(np.array([1.0, 0.0, 1.0, 0.0]), dtype="boolean")
expected = pd.array([True, False, True, False], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
# with missing values
result = pd.array(np.array([1.0, 0.0, 1.0, np.nan]), dtype="boolean")
expected = pd.array([True, False, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_to_boolean_array_integer_like():
# integers of 0's and 1's
result = pd.array([1, 0, 1, 0], dtype="boolean")
expected = pd.array([True, False, True, False], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
# with missing values
result = pd.array([1, 0, 1, None], dtype="boolean")
expected = pd.array([True, False, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_coerce_to_array():
# TODO this is currently not public API
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
result = BooleanArray(*coerce_to_array(values, mask=mask))
expected = BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
assert result._data is values
assert result._mask is mask
result = BooleanArray(*coerce_to_array(values, mask=mask, copy=True))
expected = BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
assert result._data is not values
assert result._mask is not mask
# mixed missing from values and mask
values = [True, False, None, False]
mask = np.array([False, False, False, True], dtype="bool")
result = BooleanArray(*coerce_to_array(values, mask=mask))
expected = BooleanArray(
np.array([True, False, True, True]), np.array([False, False, True, True])
)
tm.assert_extension_array_equal(result, expected)
result = BooleanArray(*coerce_to_array(np.array(values, dtype=object), mask=mask))
tm.assert_extension_array_equal(result, expected)
result = BooleanArray(*coerce_to_array(values, mask=mask.tolist()))
tm.assert_extension_array_equal(result, expected)
# raise errors for wrong dimension
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
with pytest.raises(ValueError, match="values.shape and mask.shape must match"):
coerce_to_array(values.reshape(1, -1))
with pytest.raises(ValueError, match="values.shape and mask.shape must match"):
coerce_to_array(values, mask=mask.reshape(1, -1))
def test_coerce_to_array_from_boolean_array():
# passing BooleanArray to coerce_to_array
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
arr = BooleanArray(values, mask)
result = BooleanArray(*coerce_to_array(arr))
tm.assert_extension_array_equal(result, arr)
# no copy
assert result._data is arr._data
assert result._mask is arr._mask
result = BooleanArray(*coerce_to_array(arr), copy=True)
tm.assert_extension_array_equal(result, arr)
assert result._data is not arr._data
assert result._mask is not arr._mask
with pytest.raises(ValueError, match="cannot pass mask for BooleanArray input"):
coerce_to_array(arr, mask=mask)
def test_coerce_to_numpy_array():
# with missing values -> object dtype
arr = pd.array([True, False, None], dtype="boolean")
result = np.array(arr)
expected = np.array([True, False, pd.NA], dtype="object")
tm.assert_numpy_array_equal(result, expected)
# also with no missing values -> object dtype
arr = pd.array([True, False, True], dtype="boolean")
result = np.array(arr)
expected = np.array([True, False, True], dtype="object")
tm.assert_numpy_array_equal(result, expected)
# force bool dtype
result = np.array(arr, dtype="bool")
expected = np.array([True, False, True], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
# with missing values will raise error
arr = pd.array([True, False, None], dtype="boolean")
msg = (
"cannot convert to 'bool'-dtype NumPy array with missing values. "
"Specify an appropriate 'na_value' for this dtype."
)
with pytest.raises(ValueError, match=msg):
np.array(arr, dtype="bool")
def test_to_boolean_array_from_strings():
result = BooleanArray._from_sequence_of_strings(
np.array(["True", "False", "1", "1.0", "0", "0.0", np.nan], dtype=object)
)
expected = BooleanArray(
np.array([True, False, True, True, False, False, False]),
np.array([False, False, False, False, False, False, True]),
)
tm.assert_extension_array_equal(result, expected)
def test_to_boolean_array_from_strings_invalid_string():
with pytest.raises(ValueError, match="cannot be cast"):
BooleanArray._from_sequence_of_strings(["donkey"])
@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
def test_to_numpy(box):
con = pd.Series if box else pd.array
# default (with or without missing values) -> object dtype
arr = con([True, False, True], dtype="boolean")
result = arr.to_numpy()
expected = np.array([True, False, True], dtype="object")
tm.assert_numpy_array_equal(result, expected)
arr = con([True, False, None], dtype="boolean")
result = arr.to_numpy()
expected = np.array([True, False, pd.NA], dtype="object")
tm.assert_numpy_array_equal(result, expected)
arr = con([True, False, None], dtype="boolean")
result = arr.to_numpy(dtype="str")
expected = np.array([True, False, pd.NA], dtype="<U5")
tm.assert_numpy_array_equal(result, expected)
# no missing values -> can convert to bool, otherwise raises
arr = con([True, False, True], dtype="boolean")
result = arr.to_numpy(dtype="bool")
expected = np.array([True, False, True], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
arr = con([True, False, None], dtype="boolean")
with pytest.raises(ValueError, match="cannot convert to 'bool'-dtype"):
result = arr.to_numpy(dtype="bool")
# specify dtype and na_value
arr = con([True, False, None], dtype="boolean")
result = arr.to_numpy(dtype=object, na_value=None)
expected = np.array([True, False, None], dtype="object")
tm.assert_numpy_array_equal(result, expected)
result = arr.to_numpy(dtype=bool, na_value=False)
expected = np.array([True, False, False], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
result = arr.to_numpy(dtype="int64", na_value=-99)
expected = np.array([1, 0, -99], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
result = arr.to_numpy(dtype="float64", na_value=np.nan)
expected = np.array([1, 0, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
# converting to int or float without specifying na_value raises
with pytest.raises(ValueError, match="cannot convert to 'int64'-dtype"):
arr.to_numpy(dtype="int64")
with pytest.raises(ValueError, match="cannot convert to 'float64'-dtype"):
arr.to_numpy(dtype="float64")
def test_to_numpy_copy():
# to_numpy can be zero-copy if no missing values
arr = pd.array([True, False, True], dtype="boolean")
result = arr.to_numpy(dtype=bool)
result[0] = False
tm.assert_extension_array_equal(
arr, pd.array([False, False, True], dtype="boolean")
)
arr = pd.array([True, False, True], dtype="boolean")
result = arr.to_numpy(dtype=bool, copy=True)
result[0] = False
tm.assert_extension_array_equal(arr, pd.array([True, False, True], dtype="boolean"))
# FIXME: don't leave commented out
# TODO when BooleanArray coerces to object dtype numpy array, need to do conversion
# manually in the indexing code
# def test_indexing_boolean_mask():
# arr = pd.array([1, 2, 3, 4], dtype="Int64")
# mask = pd.array([True, False, True, False], dtype="boolean")
# result = arr[mask]
# expected = pd.array([1, 3], dtype="Int64")
# tm.assert_extension_array_equal(result, expected)
# # missing values -> error
# mask = pd.array([True, False, True, None], dtype="boolean")
# with pytest.raises(IndexError):
# result = arr[mask]
| jorisvandenbossche/pandas | pandas/tests/arrays/boolean/test_construction.py | Python | bsd-3-clause | 12,897 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import copy
import multiprocessing
import os
import sys
from crossrunner.util import merge_dict
def domain_socket_path(port):
return '/tmp/ThriftTest.thrift.%d' % port
class TestProgram(object):
def __init__(self, kind, name, protocol, transport, socket, workdir, command, env=None,
extra_args=[], join_args=False, **kwargs):
self.kind = kind
self.name = name
self.protocol = protocol
self.transport = transport
self.socket = socket
self.workdir = workdir
self.command = None
self._base_command = self._fix_cmd_path(command)
if env:
self.env = copy.copy(os.environ)
self.env.update(env)
else:
self.env = os.environ
self._extra_args = extra_args
self._join_args = join_args
def _fix_cmd_path(self, cmd):
# if the arg is a file in the test's work directory, use its absolute path
def abs_if_exists(arg):
p = os.path.join(self.workdir, arg)
return p if os.path.exists(p) else arg
if cmd[0] == 'python':
cmd[0] = sys.executable
else:
cmd[0] = abs_if_exists(cmd[0])
return cmd
def _socket_arg(self, socket, port):
return {
'ip-ssl': '--ssl',
'domain': '--domain-socket=%s' % domain_socket_path(port),
}.get(socket, None)
def build_command(self, port):
cmd = copy.copy(self._base_command)
args = []
args.append('--protocol=' + self.protocol)
args.append('--transport=' + self.transport)
socket_arg = self._socket_arg(self.socket, port)
if socket_arg:
args.append(socket_arg)
args.append('--port=%d' % port)
if self._join_args:
cmd.append('%s' % " ".join(args))
else:
cmd.extend(args)
if self._extra_args:
cmd.extend(self._extra_args)
self.command = cmd
return self.command
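# Illustrative sketch (hypothetical values, not taken from any real config): for a
# program with protocol='binary', transport='buffered', socket='ip-ssl' and no
# extra args, build_command(9090) would yield something like
#   ['./TestServer', '--protocol=binary', '--transport=buffered', '--ssl', '--port=9090']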
class TestEntry(object):
def __init__(self, testdir, server, client, delay, timeout, **kwargs):
self.testdir = testdir
self._log = multiprocessing.get_logger()
self._config = kwargs
self.protocol = kwargs['protocol']
self.transport = kwargs['transport']
self.socket = kwargs['socket']
self.server = TestProgram('server', **self._fix_workdir(merge_dict(self._config, server)))
self.client = TestProgram('client', **self._fix_workdir(merge_dict(self._config, client)))
self.delay = delay
self.timeout = timeout
self._name = None
# results
self.success = None
self.as_expected = None
self.returncode = None
self.expired = False
def _fix_workdir(self, config):
key = 'workdir'
path = config.get(key, None)
if not path:
path = self.testdir
if os.path.isabs(path):
path = os.path.realpath(path)
else:
path = os.path.realpath(os.path.join(self.testdir, path))
config.update({key: path})
return config
@classmethod
def get_name(cls, server, client, proto, trans, sock, *args):
return '%s-%s_%s_%s-%s' % (server, client, proto, trans, sock)
@property
def name(self):
if not self._name:
self._name = self.get_name(
self.server.name, self.client.name, self.protocol, self.transport, self.socket)
return self._name
@property
def transport_name(self):
return '%s-%s' % (self.transport, self.socket)
def test_name(server, client, protocol, transport, socket, **kwargs):
return TestEntry.get_name(server['name'], client['name'], protocol, transport, socket)
| mway08/thrift | test/crossrunner/test.py | Python | apache-2.0 | 4,187 |
# -*- coding: utf8 -*-
"""
KARN voting app in Google App Engine
Copyright (C) 2014 Klubb Alfa Romeo Norge <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import cgi
import webapp2
import urllib
import jinja2
import os.path
import datetime
from google.appengine.ext import ndb
from google.appengine.api import users
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
VOTE_NAME = 'sedan_1978plus'
COOKIE_NAME = 'karn_vote_'+VOTE_NAME
def alt_key(id=VOTE_NAME):
return ndb.Key('Alternative', id)
class Alternative(ndb.Model):
""" Alternative entity """
id = ndb.StringProperty()
order = ndb.IntegerProperty()
title = ndb.StringProperty()
subtitle = ndb.StringProperty()
image = ndb.StringProperty()
description = ndb.StringProperty()
class Vote(ndb.Model):
""" Vote entity """
vote_id = ndb.StringProperty()
ip_address = ndb.StringProperty()
member_no = ndb.StringProperty()
email = ndb.StringProperty()
date = ndb.DateTimeProperty(auto_now_add=True)
class MainPage(webapp2.RequestHandler):
def get(self):
alt_query = Alternative.query(ancestor=alt_key()).order(Alternative.order)
alternatives = alt_query.fetch(10)
voted_cookie = self.request.cookies.get(COOKIE_NAME)
already_voted = False
if voted_cookie:
already_voted = True
template_data = {
'alternatives': alternatives,
'already_voted': already_voted,
}
template = JINJA_ENVIRONMENT.get_template('index.html')
self.response.write(template.render(template_data))
class VotePage(webapp2.RequestHandler):
def post(self):
vote = Vote(parent=alt_key())
vote.vote_id = self.request.get('vote-id')
vote.ip_address = self.request.remote_addr
vote.member_no = self.request.get('memberno')
vote.email = self.request.get('email')
if not self.sane_values(vote.vote_id, vote.member_no, vote.email):
self.redirect('/')
return
vote.put()
# Set a long-lived cookie so this browser cannot vote again.
much_much_later = datetime.datetime.now()+datetime.timedelta(days=365)
self.response.set_cookie(COOKIE_NAME, 'voted', expires=much_much_later)
self.redirect('/')
def sane_values(self, vote, member_no, email):
if not vote or len(vote) < 2:
return False
if not member_no or len(member_no) < 4:
return False
return True
class DataStoreInit(webapp2.RequestHandler):
def get(self):
# "Hmm" you might be thinking here :)
alt_query = Alternative.query(ancestor=alt_key()).order(Alternative.order)
alternatives = alt_query.fetch(10)
if len(alternatives) == 0:
self.put_alternative(1, 'alfetta', 'Alfa Romeo Alfetta', '1972-1987', u'<p><strong>Alfetta</strong> kom både som sedan og coupe - dette er sedanutgaven.</p><p>Sedanmodellen er designed av Centro Stile Alfa Romeo. Gearkassen og clutchen var plassert bak; dette er en såkalt <strong>transaksel</strong>.</p><p>Dette var en svært populær modell, ikke minst på grunn av lav vekt, gode kjøreegenskaper og en relativt kraftig motor etter datidens standard.</p><p>Det ble produsert over 400.000 Alfetta totalt.</p>')
self.put_alternative(2, 'giulietta', 'Alfa Romeo Giulietta', '1977-1985', u'<p><strong>Giulietta Nuovo</strong> som denne også kalles siden det er det andre bilen fra Alfa Romeo som heter Giulietta.</p><p>Dette var en videreutivkling av Alfetta med en mer tidsriktig styling. Den fantes kun i sedanutgave.</p><p>I likhet med Alfetta var dette en <strong>transaksel</strong> med gearkassen montert bak.</p><p>Den absolutt raskeste utgaven av denne ble laget av <strong>Autodelta</strong> og het <strong>Giulietta Turbodelta</strong> med 170hk under panseret.</p>')
self.put_alternative(3, 'alfa6', 'Alfa Romeo Alfa 6', '1979-1985', u'<p>Alfa 6 (eller <strong>Alfa Sei</strong> som den også kalles) var en virkelig direktørbil når den ble lansert i 1979.</p><p>I motsetning til andre samtidige Alfa så hadde den gearkassen og clutchen montert foran, antageligvis for å maksimere plassen i baksetet</p><p>Om man tittet under panseret kunne man se den første versjonen av den legendariske <strong>"busso"</strong> V6-motoren som ble brukt i forskjellige Alfa Romeo helt frem til 166 gikk ut av produksjon.</p>')
self.put_alternative(4, '90', 'Alfa Romeo 90', '1984-1987', u'<p>Alfa 90 var lansert som en mellomting mellom Alfa 6 og Giulietta. Den hadde mange morsomme detaljer, blant annet kom det ned en spoiler i fremkant når en kom over en viss fart og man fikk en stresskoffert som passet perfekt inn i dashboardet.</p><p>Om man kjøpte toppmodelen QO - Quadrifoglio Oro - så fikk man digitale instrumenter, elektriske vinduer, tripcomputer og sentrallås.</p><p>over 56 000 biler ble solgt i løpet av fire år.</p>')
self.put_alternative(5, '75', 'Alfa Romeo 75', '1985-1992', u'<p>Alfa 75 ble lansert som Giulietta Nuovo-erstatteren og fikk navnet for å feire at Alfa Romeo hadde 75-årsjubileum.</p><p>Utseendet er kileformet og svært karakteristisk - og kjøreegenskapene var i toppklasse; som på 90, Giulietta og Alfetta var dette en <strong>transaksel</strong> så vektfordelingen var det lite å utsette på.</p><p>Den hvasseste utgaven kom i 1987 og het <strong>Turbo Evoluzione</strong> og var en spesiell homogloberingsutgave. Effekten var oppgitt til å være 155hk.</p>')
self.put_alternative(6, '164', 'Alfa Romeo 164', '1987-1998', u'<p>164 er den siste bilen Alfa Romeo konstruerte på egen hånd før firmaet ble kjøpt av Fiat.</p><p>Den ble konstruert på <strong>type 4</strong>-plattformen som var et samarbeid med Lancia, Fiat og Saab.</p><p>164 var også den siste bilen som ble solgt i Nord-Amerika før Alfa Romeo trakk seg ut og det ruller fortsatt en og annen 164 på amerikanske highways.</p><p>De to toppmodellene QV og Q4 hadde en V6 med 232hk - den samme motoren som i Alfa 6.</p>')
self.put_alternative(7, '155', 'Alfa Romeo 155', '1992-1998', u'<p>155 var erstatteren til 75 og designet bærer en del likhetstrekk med 75 men det var en helt ny bil; nå med forhjulstrekk.</p><p>I 1995 ble det lansert en <strong>widebody</strong>-utgave av bilen med litt bredere sporvidde og bredere hjulbuer</p><p>Det ble også bygd en 155 Q4 TI for racing men den hadde ikke mye til felles med originalen; en V6 med 490hk på 11.900rpm (!) er i overkant mye for en gatebil men man kunne få kjøpt en Q4 Turbo med 190hk under panseret.</p>')
self.put_alternative(8, '156', 'Alfa Romeo 156', '1996-2007', u'<p>156 var - som navnet antyder - etterfølgeren til 155 og var en stor suksess. Over 680.000 ble produsert fra 1997 til 2005.</p><p>Motoralternativene var mange - fra 1.6 liter og fire sylindre til 3.2 V6 ("busso"-motoren fra Alfa 6) pluss diesler fra 1.9 til 2.4 liter.</p><p>I 2001 kom <strong>GTA</strong> som hadde fått navn etter 60-tallets GTA. Den hadde bla. en V6 på 250hk, stivere hjuloppheng, større bremser og skinninteriør.</p>')
self.put_alternative(9, '166', 'Alfa Romeo 166', '1996-2007', u'<p>Alfa 166 var erstatteren for 164 og siktet seg inn på direktørbil-segmentet. Den var fullpakket med utstyr og kom med motoralternativer fra 2 liter til 3.2 liter.</p><p>Utseendet var karakteristisk og det er lett å se en 166 i trafikken. I 2003 kom det en facelift med en annen grill og man kunne få en V6 på 3.2 liter.</p>')
self.put_alternative(10, '159', 'Alfa Romeo 159', '2004-2011', u'<p>159 har fått navn etter den legendariske racerbilen <strong>159 Alfetta</strong>. I løpet av produksjonsperioden gjennomgikk den en rekke forandringer men utseendet (designet av Guigiaro) har stort sett vært likt fra start til slutt.</p><p>Det var en hel rekke motoralternativer - fra 1.8 bensin til 3.2 V6.</p><p>Toppmodellen hadde en 3.2-liters V6 og Q4 med 260hk. Motoren var produsert av Holden i Australia men fikk en liten tune-up i Italia før den ble montert.</p>')
self.redirect('/')
def put_alternative(self, order, id, title, subtitle, description):
alt = Alternative(parent = alt_key())
alt.order = order
alt.id = id
alt.title = title
alt.subtitle = subtitle
alt.image = 'images/' + id + '.jpg'
alt.description = description
alt.put()
application = webapp2.WSGIApplication([
('/', MainPage),
('/vote', VotePage),
('/init', DataStoreInit)
], debug=True)
| KlubbAlfaRomeoNorge/voting | bestealfa.py | Python | gpl-2.0 | 9,434 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the util transforms."""
import unittest
from apache_beam import Create
from apache_beam.test_pipeline import TestPipeline
from apache_beam.transforms.util import assert_that, equal_to, is_empty
class UtilTest(unittest.TestCase):
def test_assert_that_passes(self):
with TestPipeline() as p:
assert_that(p | Create([1, 2, 3]), equal_to([1, 2, 3]))
def test_assert_that_fails(self):
with self.assertRaises(Exception):
with TestPipeline() as p:
assert_that(p | Create([1, 10, 100]), equal_to([1, 2, 3]))
def test_assert_that_fails_on_empty_input(self):
with self.assertRaises(Exception):
with TestPipeline() as p:
assert_that(p | Create([]), equal_to([1, 2, 3]))
def test_assert_that_fails_on_empty_expected(self):
with self.assertRaises(Exception):
with TestPipeline() as p:
assert_that(p | Create([1, 2, 3]), is_empty())
if __name__ == '__main__':
unittest.main()
| xsm110/Apache-Beam | sdks/python/apache_beam/transforms/util_test.py | Python | apache-2.0 | 1,749 |
# -*- coding: utf-8 -*-
# Copyright 2020 OpenSynergy Indonesia
# Copyright 2020 PT. Simetri Sinergi Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "New API for Timesheet Analytic Line Onchange",
"version": "8.0.1.1.0",
"website": "https://simetri-sinergi.id",
"author": "PT. Simetri Sinergi Indonesia, OpenSynergy Indonesia",
"license": "AGPL-3",
"installable": True,
"auto_install": True,
"depends": [
"hr_timesheet",
"account_analytic_line_onchange",
],
"data": [
"views/hr_analytic_timesheet_views.xml",
],
}
| open-synergy/opnsynid-hr | hr_timesheet_onchange/__openerp__.py | Python | agpl-3.0 | 617 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
"Provider",
"State",
"LibcloudLBError",
"LibcloudLBImmutableError",
"OLD_CONSTANT_TO_NEW_MAPPING"
]
from libcloud.common.types import LibcloudError
class LibcloudLBError(LibcloudError):
pass
class LibcloudLBImmutableError(LibcloudLBError):
pass
class Provider(object):
"""
Constants that identify each of the supported providers.
Non-Dummy drivers are sorted in alphabetical order. Please preserve this
ordering when adding new drivers.
:cvar ALIYUN_SLB: Aliyun SLB loadbalancer driver
"""
ALIYUN_SLB = 'aliyun_slb'
BRIGHTBOX = 'brightbox'
CLOUDSTACK = 'cloudstack'
DIMENSIONDATA = 'dimensiondata'
ELB = 'elb'
GCE = 'gce'
GOGRID = 'gogrid'
NINEFOLD = 'ninefold'
RACKSPACE = 'rackspace'
SOFTLAYER = 'softlayer'
# Deprecated
RACKSPACE_US = 'rackspace_us'
RACKSPACE_UK = 'rackspace_uk'
OLD_CONSTANT_TO_NEW_MAPPING = {
Provider.RACKSPACE_US: Provider.RACKSPACE,
Provider.RACKSPACE_UK: Provider.RACKSPACE,
}
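# Minimal usage sketch (illustrative only): callers can normalise a possibly
# deprecated constant before use, e.g.
#   provider = OLD_CONSTANT_TO_NEW_MAPPING.get(requested, requested)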
class State(object):
"""
Standard states for a loadbalancer
:cvar RUNNING: loadbalancer is running and ready to use
:cvar UNKNOWN: loadbalancer state is unknown
"""
RUNNING = 0
PENDING = 1
UNKNOWN = 2
ERROR = 3
DELETED = 4
class MemberCondition(object):
"""
Each member of a load balancer can have an associated condition
which determines its role within the load balancer.
"""
ENABLED = 0
DISABLED = 1
DRAINING = 2
| NexusIS/libcloud | libcloud/loadbalancer/types.py | Python | apache-2.0 | 2,298 |
#!/usr/bin/env python
# -*- mode: python -*-
#
# Copyright (C) 2008 Hartmut Goebel <[email protected]>
# Licence: GNU General Public License version 3 (GPL v3)
#
# This file is part of PyInstaller <http://www.pyinstaller.org>
#
# pyinstaller is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyinstaller is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
test for zipimport - minimalistic, just import pkg_resources
"""
import os
import sys
print __name__, 'is running'
print 'sys.path:', sys.path
print 'dir contents .exe:', os.listdir(os.path.dirname(sys.executable))
print '-----------'
print 'dir contents sys._MEIPASS:', os.listdir(sys._MEIPASS)
print '-----------'
print 'now importing pkg_resources'
import pkg_resources
print "dir(pkg_resources)", dir(pkg_resources)
| supercheetah/diceroller | pyinstaller/buildtests/import/test_onefile_zipimport.py | Python | artistic-2.0 | 1,296 |
from unittest import TestCase
from grail import step
from tests.utils import validate_method_output
import grail.settings as settings
import grail.state as state
@step
def simple_step():
pass
@step(pending=True)
def pending_step():
pass
@step(description='Some Description')
def step_with_description():
pass
@step
def step_with_params(some_string=None):
print some_string
@step(description='Some info \'{0}\' {kw_str}', format_description=True)
def step_with_format_params(some_string, kw_str):
print some_string
print kw_str
@step(step_group=True)
def step_group():
simple_step()
pending_step()
@step
def step_with_args(*args):
print args
@step
def step_with_kwargs(**kwargs):
print kwargs
class TestExport(TestCase):
def setUp(self):
state.reset()
settings.export_mode = True
def tearDown(self):
state.reset()
settings.export_mode = False
def test_simple_output(self):
validate_method_output(simple_step, 'simple step')
def test_pending_output(self):
validate_method_output(pending_step, 'pending step')
def test_description(self):
validate_method_output(step_with_description, 'Some Description')
def test_skip_none_params(self):
validate_method_output(step_with_params, 'step with params')
def test_print_args_params(self):
validate_method_output(step_with_params, 'step with params (42)', args=('42',))
def test_print_kwargs_params(self):
validate_method_output(step_with_params, 'step with params (some_string=42)', kwargs={'some_string': '42'})
def test_format_params(self):
validate_method_output(step_with_format_params, 'Some info \'None\' kw42',
args=(None,),
kwargs={'kw_str': 'kw42'})
def test_step_group(self):
validate_method_output(step_group, 'step group\n'
' simple step\n'
' pending step')
def test_step_with_args(self):
validate_method_output(step_with_args, 'step with args', args=(None, None))
def test_step_with_kwargs(self):
validate_method_output(step_with_kwargs, 'step with kwargs (a=b)', kwargs={'a': 'b'})
| wgnet/grail | tests/test_export.py | Python | apache-2.0 | 2,316 |
import threading
import time
import cv2
import config
# Rate at which the webcam will be polled for new images.
CAPTURE_HZ = 30.0
class OpenCVCapture(object):
def __init__(self, device_id=0):
"""Create an OpenCV capture object associated with the provided webcam
device ID.
"""
# Open the camera.
self._camera = cv2.VideoCapture(device_id)
if not self._camera.isOpened():
self._camera.open()
# Start a thread to continuously capture frames.
self._capture_frame = None
# Use a lock to prevent concurrent access to the camera.
self._capture_lock = threading.Lock()
self._capture_thread = threading.Thread(target=self._grab_frames)
self._capture_thread.daemon = True
self._capture_thread.start()
def _grab_frames(self):
while True:
retval, frame = self._camera.read()
with self._capture_lock:
self._capture_frame = None
if retval:
self._capture_frame = frame
time.sleep(1.0/CAPTURE_HZ)
def read(self):
"""Read a single frame from the camera and return the data as an OpenCV
image (which is a numpy array).
"""
frame = None
with self._capture_lock:
frame = self._capture_frame
# If there are problems, keep retrying until an image can be read.
while frame is None:  # 'is None' avoids NumPy's ambiguous elementwise '== None' comparison
time.sleep(0)
with self._capture_lock:
frame = self._capture_frame
# Save captured image for debugging.
cv2.imwrite(config.DEBUG_IMAGE, frame)
# Return the captured image data.
return frame
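# Minimal usage sketch (assumes a webcam at device 0 and that config.DEBUG_IMAGE
# points at a writable path); guarded so importing this module stays side-effect free:
if __name__ == '__main__':
    capture = OpenCVCapture(device_id=0)
    image = capture.read()  # blocks until the capture thread has grabbed a frame
    print('Captured frame with shape: {0}'.format(image.shape))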
| MayurRocks/haims-project | webcam.py | Python | gpl-3.0 | 1,456 |
# A very basic settings file that allows Sphinx to build
# the docs (this is because autodoc is used).
import os
import sys
sys.path.insert(0, os.getcwd())
sys.path.insert(0, os.path.join(os.getcwd(), os.pardir))
SITE_ID = 303
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {"default": {
"NAME": ":memory:",
"ENGINE": "django.db.backends.sqlite3",
"USER": '',
"PASSWORD": '',
"PORT": '',
}}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'scaffold',
)
SECRET_KEY = "NULL"
SCAFFOLD_EXTENDING_APP_NAME = "scaffold"
SCAFFOLD_EXTENDING_MODEL_PATH = "scaffold.models.BaseSection"
| mazelife/django-scaffold | docs/settings.py | Python | bsd-3-clause | 698 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from imagekit.imagecache import PessimisticImageCacheBackend, InvalidImageCacheBackendError
def generate(model, pk, attr):
try:
instance = model._default_manager.get(pk=pk)
except model.DoesNotExist:
pass # The model was deleted since the task was scheduled. NEVER MIND!
else:
field_file = getattr(instance, attr)
field_file.delete(save=False)
field_file.generate(save=True)
class CeleryImageCacheBackend(PessimisticImageCacheBackend):
"""
A pessimistic cache state backend that uses celery to generate its spec
images. Like PessimisticCacheStateBackend, this one checks to see if the
file exists on validation, so the storage is hit fairly frequently, but an
image is guaranteed to exist. However, while validation guarantees the
existence of *an* image, it does not necessarily guarantee that you will get
the correct image, as the spec may be pending regeneration. In other words,
while there are `generate` tasks in the queue, it is possible to get a
stale spec image. The tradeoff is that calling `invalidate()` won't block
to interact with file storage.
"""
def __init__(self):
try:
from celery.task import task
except ImportError:
raise InvalidImageCacheBackendError("Celery image cache backend requires the 'celery' library")
if not getattr(CeleryImageCacheBackend, '_task', None):
CeleryImageCacheBackend._task = task(generate)
def invalidate(self, file):
self._task.delay(file.instance.__class__, file.instance.pk, file.attname)
def clear(self, file):
file.delete(save=False)
| pcompassion/django-imagekit | imagekit/imagecache/celery.py | Python | bsd-3-clause | 1,730 |
from django.shortcuts import resolve_url as r
from django.test import TestCase
class HomeTest(TestCase):
def setUp(self):
self.response = self.client.get(r('core:home'))
def test_get(self):
self.assertEqual(200, self.response.status_code)
def test_template(self):
"""Must use core/index.html"""
self.assertTemplateUsed(self.response, 'core/index.html') | tyagow/FacebookBot | src/core/tests_core/test_view.py | Python | mit | 400 |
from novaclient import exceptions
from novaclient.v1_1 import security_group_rules
from novaclient.tests import utils
from novaclient.tests.v1_1 import fakes
cs = fakes.FakeClient()
class SecurityGroupRulesTest(utils.TestCase):
def test_delete_security_group_rule(self):
cs.security_group_rules.delete(1)
cs.assert_called('DELETE', '/os-security-group-rules/1')
def test_create_security_group_rule(self):
sg = cs.security_group_rules.create(1, "tcp", 1, 65535, "10.0.0.0/16")
body = {
"security_group_rule": {
"ip_protocol": "tcp",
"from_port": 1,
"to_port": 65535,
"cidr": "10.0.0.0/16",
"group_id": None,
"parent_group_id": 1,
}
}
cs.assert_called('POST', '/os-security-group-rules', body)
self.assertTrue(isinstance(sg, security_group_rules.SecurityGroupRule))
def test_create_security_group_group_rule(self):
sg = cs.security_group_rules.create(1, "tcp", 1, 65535, "10.0.0.0/16",
101)
body = {
"security_group_rule": {
"ip_protocol": "tcp",
"from_port": 1,
"to_port": 65535,
"cidr": "10.0.0.0/16",
"group_id": 101,
"parent_group_id": 1,
}
}
cs.assert_called('POST', '/os-security-group-rules', body)
self.assertTrue(isinstance(sg, security_group_rules.SecurityGroupRule))
def test_invalid_parameters_create(self):
self.assertRaises(exceptions.CommandError,
cs.security_group_rules.create,
1, "invalid_ip_protocol", 1, 65535, "10.0.0.0/16", 101)
self.assertRaises(exceptions.CommandError,
cs.security_group_rules.create,
1, "tcp", "invalid_from_port", 65535, "10.0.0.0/16", 101)
self.assertRaises(exceptions.CommandError,
cs.security_group_rules.create,
1, "tcp", 1, "invalid_to_port", "10.0.0.0/16", 101)
def test_security_group_rule_str(self):
sg = cs.security_group_rules.create(1, "tcp", 1, 65535, "10.0.0.0/16")
self.assertEquals('1', str(sg))
def test_security_group_rule_del(self):
sg = cs.security_group_rules.create(1, "tcp", 1, 65535, "10.0.0.0/16")
sg.delete()
cs.assert_called('DELETE', '/os-security-group-rules/1')
| tylertian/Openstack | openstack F/python-novaclient/novaclient/tests/v1_1/test_security_group_rules.py | Python | apache-2.0 | 2,485 |
#!/usr/bin/env python
import argparse, collections, math
import nltk.corpus, nltk.corpus.reader, nltk.data, nltk.tag, nltk.metrics
from nltk.corpus.util import LazyCorpusLoader
from nltk_trainer import load_corpus_reader, load_model, simplify_wsj_tag
from nltk_trainer.chunking import chunkers
from nltk_trainer.chunking.transforms import node_label
from nltk_trainer.tagging import taggers
########################################
## command options & argument parsing ##
########################################
parser = argparse.ArgumentParser(description='Analyze a part-of-speech tagged corpus',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('corpus',
help='''The name of a tagged corpus included with NLTK, such as treebank,
brown, cess_esp, floresta, or the root path to a corpus directory,
which can be either an absolute path or relative to an nltk_data directory.''')
parser.add_argument('--tagger', default=None,
help='''pickled tagger filename/path relative to an nltk_data directory
default is NLTK's default tagger''')
parser.add_argument('--chunker', default=nltk.chunk._MULTICLASS_NE_CHUNKER,
help='''pickled chunker filename/path relative to an nltk_data directory
default is NLTK's default multiclass chunker''')
parser.add_argument('--trace', default=1, type=int,
help='How much trace output you want, defaults to 1. 0 is no trace output.')
parser.add_argument('--score', action='store_true', default=False,
help='Evaluate chunk score of chunker using corpus.chunked_sents()')
corpus_group = parser.add_argument_group('Corpus Reader Options')
corpus_group.add_argument('--reader', default=None,
help='''Full module path to a corpus reader class, such as
nltk.corpus.reader.chunked.ChunkedCorpusReader''')
corpus_group.add_argument('--fileids', default=None,
help='Specify fileids to load from corpus')
corpus_group.add_argument('--fraction', default=1.0, type=float,
help='''The fraction of the corpus to use for testing coverage''')
if simplify_wsj_tag:
corpus_group.add_argument('--simplify_tags', action='store_true', default=False,
help='Use simplified tags')
args = parser.parse_args()
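# Example invocations (hypothetical corpus names and paths, for illustration only):
#   python analyze_chunker_coverage.py treebank_chunk --score --fraction 0.1
#   python analyze_chunker_coverage.py /path/to/corpus --reader nltk.corpus.reader.chunked.ChunkedCorpusReader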
###################
## corpus reader ##
###################
corpus = load_corpus_reader(args.corpus, reader=args.reader, fileids=args.fileids)
if args.score and not hasattr(corpus, 'chunked_sents'):
raise ValueError('%s does not support scoring' % args.corpus)
############
## tagger ##
############
if args.trace:
print('loading tagger %s' % args.tagger)
if not args.tagger:
tagger = nltk.tag._get_tagger()
elif args.tagger == 'pattern':
tagger = taggers.PatternTagger()
else:
tagger = load_model(args.tagger)
if args.trace:
print('loading chunker %s' % args.chunker)
if args.chunker == 'pattern':
chunker = chunkers.PatternChunker()
else:
chunker = load_model(args.chunker)
#######################
## coverage analysis ##
#######################
if args.score:
if args.trace:
print('evaluating chunker score\n')
chunked_sents = corpus.chunked_sents()
if args.fraction != 1.0:
cutoff = int(math.ceil(len(chunked_sents) * args.fraction))
chunked_sents = chunked_sents[:cutoff]
print(chunker.evaluate(chunked_sents))
print('\n')
if args.trace:
print('analyzing chunker coverage of %s with %s\n' % (args.corpus, chunker.__class__.__name__))
iobs_found = collections.defaultdict(int)
sents = corpus.sents()
if args.fraction != 1.0:
cutoff = int(math.ceil(len(sents) * args.fraction))
sents = sents[:cutoff]
for sent in sents:
tree = chunker.parse(tagger.tag(sent))
for child in tree.subtrees(lambda t: node_label(t) != 'S'):
iobs_found[node_label(child)] += 1
iobs = iobs_found.keys()
justify = max(7, *[len(iob) for iob in iobs])
print('IOB'.center(justify) + ' Found ')
print('='*justify + ' =========')
for iob in sorted(iobs):
print(' '.join([iob.ljust(justify), str(iobs_found[iob]).rjust(9)]))
print('='*justify + ' =========') | japerk/nltk-trainer | analyze_chunker_coverage.py | Python | apache-2.0 | 3,938 |
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Common options for all cylc commands."""
from contextlib import suppress
import logging
from optparse import OptionParser, OptionConflictError, Values, Option
import os
import re
from ansimarkup import parse as cparse
import sys
from typing import Any, Dict, Optional, List, Tuple
from cylc.flow import LOG, RSYNC_LOG
import cylc.flow.flags
from cylc.flow.loggingutil import (
CylcLogFormatter,
setup_segregated_log_streams,
)
def format_shell_examples(string):
"""Put comments in the terminal "diminished" colour."""
return cparse(
re.sub(
r'^(\s*(?:\$[^#]+)?)(#.*)$',
r'\1<dim>\2</dim>',
string,
flags=re.M
)
)
def verbosity_to_opts(verb: int) -> List[str]:
"""Convert Cylc verbosity to the CLI opts required to replicate it.
Examples:
>>> verbosity_to_opts(0)
[]
>>> verbosity_to_opts(-2)
['-q', '-q']
>>> verbosity_to_opts(2)
['-v', '-v']
"""
return [
'-q'
for _ in range(verb, 0)
] + [
'-v'
for _ in range(0, verb)
]
def verbosity_to_env(verb: int) -> Dict[str, str]:
"""Convert Cylc verbosity to the env vars required to replicate it.
Examples:
>>> verbosity_to_env(0)
{'CYLC_VERBOSE': 'false', 'CYLC_DEBUG': 'false'}
>>> verbosity_to_env(1)
{'CYLC_VERBOSE': 'true', 'CYLC_DEBUG': 'false'}
>>> verbosity_to_env(2)
{'CYLC_VERBOSE': 'true', 'CYLC_DEBUG': 'true'}
"""
return {
'CYLC_VERBOSE': str((verb > 0)).lower(),
'CYLC_DEBUG': str((verb > 1)).lower(),
}
def env_to_verbosity(env: dict) -> int:
"""Extract verbosity from environment variables.
Examples:
>>> env_to_verbosity({})
0
>>> env_to_verbosity({'CYLC_VERBOSE': 'true'})
1
>>> env_to_verbosity({'CYLC_DEBUG': 'true'})
2
>>> env_to_verbosity({'CYLC_DEBUG': 'TRUE'})
2
"""
return (
2 if env.get('CYLC_DEBUG', '').lower() == 'true'
else 1 if env.get('CYLC_VERBOSE', '').lower() == 'true'
else 0
)
class CylcOption(Option):
"""Optparse option which adds a decrement action."""
ACTIONS = Option.ACTIONS + ('decrement',)
STORE_ACTIONS = Option.STORE_ACTIONS + ('decrement',)
def take_action(self, action, dest, opt, value, values, parser):
if action == 'decrement':
setattr(values, dest, values.ensure_value(dest, 0) - 1)
else:
Option.take_action(self, action, dest, opt, value, values, parser)
class CylcOptionParser(OptionParser):
"""Common options for all cylc CLI commands."""
# Shared text for commands which can, & cannot, glob on cycle points:
MULTI_USAGE_TEMPLATE = """{0}
For example, to match:{1}"""
# Help text either including or excluding globbing on cycle points:
WITH_CYCLE_GLOBS = """
One or more TASK_GLOBs can be given to match task instances in the current task
pool, by task or family name pattern, cycle point pattern, and task state.
* [CYCLE-POINT-GLOB/]TASK-NAME-GLOB[:TASK-STATE]
* [CYCLE-POINT-GLOB/]FAMILY-NAME-GLOB[:TASK-STATE]
* TASK-NAME-GLOB[.CYCLE-POINT-GLOB][:TASK-STATE]
* FAMILY-NAME-GLOB[.CYCLE-POINT-GLOB][:TASK-STATE]"""
WITHOUT_CYCLE_GLOBS = """
TASK_GLOB matches task or family names at a given cycle point.
* CYCLE-POINT/TASK-NAME-GLOB
* CYCLE-POINT/FAMILY-NAME-GLOB
* TASK-NAME-GLOB.CYCLE-POINT
* FAMILY-NAME-GLOB.CYCLE-POINT"""
WITH_CYCLE_EXAMPLES = """
* all tasks in a cycle: '20200202T0000Z/*' or '*.20200202T0000Z'
* all tasks in the submitted status: ':submitted'
* running 'foo*' tasks in 0000Z cycles: 'foo*.*0000Z:running' or
'*0000Z/foo*:running'
* waiting tasks in 'BAR' family: '*/BAR:waiting' or 'BAR.*:waiting'
* submitted tasks in 'BAR' or 'BAZ' families: '*/BA[RZ]:submitted' or
'BA[RZ].*:submitted'"""
WITHOUT_CYCLE_EXAMPLES = """
* all tasks: '20200202T0000Z/*' or '*.20200202T0000Z'
* all tasks named model_N for some character N: '20200202T0000Z/model_?' or
'model_?.20200202T0000Z'
* all tasks in 'BAR' family: '20200202T0000Z/BAR' or 'BAR.20200202T0000Z'
* all tasks in 'BAR' or 'BAZ' families: '20200202T0000Z/BA[RZ]' or
'BA[RZ].20200202T0000Z'"""
MULTITASKCYCLE_USAGE = MULTI_USAGE_TEMPLATE.format(
WITH_CYCLE_GLOBS, WITH_CYCLE_EXAMPLES)
MULTITASK_USAGE = MULTI_USAGE_TEMPLATE.format(
WITHOUT_CYCLE_GLOBS, WITHOUT_CYCLE_EXAMPLES)
def __init__(
self,
usage: str,
argdoc: Optional[List[Tuple[str, str]]] = None,
comms: bool = False,
jset: bool = False,
multitask: bool = False,
multitask_nocycles: bool = False,
prep: bool = False,
auto_add: bool = True,
icp: bool = False,
color: bool = True,
segregated_log: bool = False
) -> None:
self.auto_add = auto_add
if argdoc is None:
if prep:
argdoc = [('WORKFLOW | PATH', 'Workflow ID or path')]
else:
argdoc = [('WORKFLOW', 'Workflow ID')]
if '--color=never' not in '='.join(sys.argv[2:]):
# Before option parsing, for `--help`, make comments grey in usage.
# (This catches both '--color=never' and '--color never'.)
usage = format_shell_examples(usage)
if multitask:
usage += self.MULTITASKCYCLE_USAGE
elif multitask_nocycles: # glob on task names but not cycle points
usage += self.MULTITASK_USAGE
args = ""
self.n_compulsory_args = 0
self.n_optional_args = 0
self.unlimited_args = False
self.comms = comms
self.jset = jset
self.prep = prep
self.icp = icp
self.color = color
# Whether to log messages that are below warning level to stdout
# instead of stderr:
self.segregated_log = segregated_log
maxlen = 0
for arg in argdoc:
if len(arg[0]) > maxlen:
maxlen = len(arg[0])
if argdoc:
usage += "\n\nArguments:"
for arg in argdoc:
if arg[0].startswith('['):
self.n_optional_args += 1
else:
self.n_compulsory_args += 1
if arg[0].endswith('...]'):
self.unlimited_args = True
args += arg[0] + " "
pad = (maxlen - len(arg[0])) * ' ' + ' '
usage += "\n " + arg[0] + pad + arg[1]
usage = usage.replace('ARGS', args)
OptionParser.__init__(self, usage, option_class=CylcOption)
def add_std_option(self, *args, **kwargs):
"""Add a standard option, ignoring override."""
with suppress(OptionConflictError):
self.add_option(*args, **kwargs)
def add_std_options(self):
"""Add standard options if they have not been overridden."""
self.add_std_option(
"-q", "--quiet",
help="Decrease verbosity.",
action='decrement',
dest='verbosity',
)
self.add_std_option(
"-v", "--verbose",
help="Increase verbosity.",
dest='verbosity',
action='count',
default=env_to_verbosity(os.environ)
)
self.add_std_option(
"--debug",
help="Equivalent to -v -v",
dest="verbosity",
action='store_const',
const=2
)
self.add_std_option(
"--no-timestamp",
help="Don't timestamp logged messages.",
action="store_false", dest="log_timestamp", default=True)
if self.color:
self.add_std_option(
'--color', '--colour', metavar='WHEN', action='store',
default='auto', choices=['never', 'auto', 'always'],
help=(
"When to use color/bold text in terminal output."
" Options are 'never', 'auto' and 'always'."
)
)
if self.comms:
self.add_std_option(
"--comms-timeout", metavar='SEC',
help=(
"Set a timeout for network connections "
"to the running workflow. The default is no timeout. "
"For task messaging connections see "
"site/user config file documentation."
),
action="store", default=None, dest="comms_timeout")
if self.jset:
self.add_std_option(
"-s", "--set", metavar="NAME=VALUE",
help=(
"Set the value of a Jinja2 template variable in the"
" workflow definition."
" Values should be valid Python literals so strings"
" must be quoted"
" e.g. 'STR=\"string\"', INT=43, BOOL=True."
" This option can be used multiple "
" times on the command line."
" NOTE: these settings persist across workflow restarts,"
" but can be set again on the \"cylc play\""
" command line if they need to be overridden."
),
action="append", default=[], dest="templatevars")
self.add_std_option(
"--set-file", metavar="FILE",
help=(
"Set the value of Jinja2 template variables in the "
"workflow definition from a file containing NAME=VALUE "
"pairs (one per line). "
"As with --set values should be valid Python literals "
"so strings must be quoted e.g. STR='string'. "
"NOTE: these settings persist across workflow restarts, "
"but can be set again on the \"cylc play\" "
"command line if they need to be overridden."
),
action="store", default=None, dest="templatevars_file")
if self.icp:
self.add_option(
"--initial-cycle-point", "--icp",
metavar="CYCLE_POINT",
help=(
"Set the initial cycle point. "
"Required if not defined in flow.cylc."
),
action="store",
dest="icp",
)
def add_cylc_rose_options(self) -> None:
"""Add extra options for cylc-rose plugin if it is installed."""
try:
__import__('cylc.rose')
except ImportError:
return
self.add_option(
"--opt-conf-key", "-O",
help=(
"Use optional Rose Config Setting "
"(If Cylc-Rose is installed)"
),
action="append",
default=[],
dest="opt_conf_keys"
)
self.add_option(
"--define", '-D',
help=(
"Each of these overrides the `[SECTION]KEY` setting in a "
"`rose-suite.conf` file. "
"Can be used to disable a setting using the syntax "
"`--define=[SECTION]!KEY` or even `--define=[!SECTION]`."
),
action="append",
default=[],
dest="defines"
)
self.add_option(
"--rose-template-variable", '-S',
help=(
"As `--define`, but with an implicit `[SECTION]` for "
"workflow variables."
),
action="append",
default=[],
dest="rose_template_vars"
)
def parse_args(self, api_args, remove_opts=None):
"""Parse options and arguments, overrides OptionParser.parse_args.
Args:
api_args (list):
Command line options if passed via Python as opposed to
sys.argv
remove_opts (list):
List of standard options to remove before parsing.
"""
if self.auto_add:
# Add common options after command-specific options.
self.add_std_options()
if remove_opts:
for opt in remove_opts:
with suppress(ValueError):
self.remove_option(opt)
(options, args) = OptionParser.parse_args(self, api_args)
if len(args) < self.n_compulsory_args:
self.error("Wrong number of arguments (too few)")
elif (
not self.unlimited_args
and len(args) > self.n_compulsory_args + self.n_optional_args
):
self.error("Wrong number of arguments (too many)")
if self.jset and options.templatevars_file:
options.templatevars_file = os.path.abspath(os.path.expanduser(
options.templatevars_file)
)
cylc.flow.flags.verbosity = options.verbosity
# Set up stream logging for CLI. Note:
# 1. On choosing STDERR: Log messages are diagnostics, so STDERR is the
# better choice for the logging stream. This allows us to use STDOUT
# for verbosity agnostic outputs.
# 2. Scheduler will remove this handler when it becomes a daemon.
if options.verbosity < 0:
LOG.setLevel(logging.WARNING)
elif options.verbosity > 0:
LOG.setLevel(logging.DEBUG)
else:
LOG.setLevel(logging.INFO)
RSYNC_LOG.setLevel(logging.INFO)
# Remove NullHandler before add the StreamHandler
for log in (LOG, RSYNC_LOG):
while log.handlers:
log.handlers[0].close()
log.removeHandler(log.handlers[0])
log_handler = logging.StreamHandler(sys.stderr)
log_handler.setFormatter(CylcLogFormatter(
timestamp=options.log_timestamp,
dev_info=bool(options.verbosity > 2)
))
LOG.addHandler(log_handler)
if self.segregated_log:
setup_segregated_log_streams(LOG, log_handler)
return (options, args)
class Options:
"""Wrapper to allow Python API access to optparse CLI functionality.
Example:
Create an optparse parser as normal:
>>> import optparse
>>> parser = optparse.OptionParser()
>>> _ = parser.add_option('-a', default=1)
>>> _ = parser.add_option('-b', default=2)
Create an Options object from the parser:
>>> PythonOptions = Options(parser, overrides={'c': 3})
"Parse" options via Python API:
>>> opts = PythonOptions(a=4)
Access options as normal:
>>> opts.a
4
>>> opts.b
2
>>> opts.c
3
Optparse allows you to create new options on the fly:
>>> opts.d = 5
>>> opts.d
5
But you can't create new options at initialisation, which gives us basic
input validation:
>>> opts(e=6)
Traceback (most recent call last):
TypeError: 'Values' object is not callable
You can reuse the object multiple times
>>> opts2 = PythonOptions(a=2)
>>> id(opts) == id(opts2)
False
"""
def __init__(
self, parser: OptionParser, overrides: Optional[Dict[str, Any]] = None
) -> None:
if overrides is None:
overrides = {}
self.defaults = {**parser.defaults, **overrides}
def __call__(self, **kwargs) -> Values:
opts = Values(self.defaults)
for key, value in kwargs.items():
if not hasattr(opts, key):
raise ValueError(key)
setattr(opts, key, value)
return opts
| hjoliver/cylc | cylc/flow/option_parsers.py | Python | gpl-3.0 | 16,598 |
#!/usr/bin/env python
# se_config.py - contains configuration information
#
# Copyright (c) 2016,2020 Casey Bartlett <[email protected]>
#
# See LICENSE for terms of usage, modification and redistribution.
from os.path import join
home = "/home/botuser"
install_directory = join(home,"knife_scraper")
| Uname-a/knife_scraper | se_config.py | Python | mit | 303 |
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package sparse_feature_hash
# Module caffe2.python.layers.sparse_feature_hash
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
IdList,
IdScoreList,
)
import numpy as np
class SparseFeatureHash(ModelLayer):
def __init__(self, model, input_record, seed=0, modulo=None,
use_hashing=True, name='sparse_feature_hash', **kwargs):
super(SparseFeatureHash, self).__init__(model, name, input_record, **kwargs)
self.seed = seed
self.use_hashing = use_hashing
if schema.equal_schemas(input_record, IdList):
self.modulo = modulo or self.extract_hash_size(input_record.items.metadata)
metadata = schema.Metadata(
categorical_limit=self.modulo,
feature_specs=input_record.items.metadata.feature_specs,
)
hashed_indices = schema.Scalar(
np.int64,
self.get_next_blob_reference("hashed_idx")
)
hashed_indices.set_metadata(metadata)
self.output_schema = schema.List(
values=hashed_indices,
lengths_blob=input_record.lengths,
)
elif schema.equal_schemas(input_record, IdScoreList):
self.modulo = modulo or self.extract_hash_size(input_record.keys.metadata)
metadata = schema.Metadata(
categorical_limit=self.modulo,
feature_specs=input_record.keys.metadata.feature_specs,
)
hashed_indices = schema.Scalar(
np.int64,
self.get_next_blob_reference("hashed_idx")
)
hashed_indices.set_metadata(metadata)
self.output_schema = schema.Map(
keys=hashed_indices,
values=input_record.values,
lengths_blob=input_record.lengths,
)
else:
assert False, "Input type must be one of (IdList, IdScoreList)"
assert self.modulo >= 1, 'Unexpected modulo: {}'.format(self.modulo)
def extract_hash_size(self, metadata):
if metadata.feature_specs and metadata.feature_specs.desired_hash_size:
return metadata.feature_specs.desired_hash_size
elif metadata.categorical_limit is not None:
return metadata.categorical_limit
else:
assert False, "desired_hash_size or categorical_limit must be set"
def add_ops(self, net):
if schema.equal_schemas(self.output_schema, IdList):
input_blob = self.input_record.items()
output_blob = self.output_schema.items()
elif schema.equal_schemas(self.output_schema, IdScoreList):
input_blob = self.input_record.keys()
output_blob = self.output_schema.keys()
else:
raise NotImplementedError()
if self.use_hashing:
net.IndexHash(
input_blob, output_blob, seed=self.seed, modulo=self.modulo
)
else:
net.Mod(
input_blob, output_blob, divisor=self.modulo
)
| davinwang/caffe2 | caffe2/python/layers/sparse_feature_hash.py | Python | apache-2.0 | 3,959 |
import traceback
class Watcher(object):
def __init__(self, obj=None, attr=None, log_file='log.txt', include=[], enabled=True):
"""
Debugger that watches for changes in object attributes
obj - object to be watched
attr - string, name of attribute
log_file - string, where to write output
include - list of strings, debug files only in these directories.
Set it to your project's path, otherwise tracing will take a long time
when large libraries are imported and used.
"""
self.log_file=log_file
with open(self.log_file, 'wb'): pass
self.prev_st = None
self.include = [incl.replace('\\','/') for incl in include]
if obj:
self.value = getattr(obj, attr)
self.obj = obj
self.attr = attr
self.enabled = enabled # Important, must be last line on __init__.
def __call__(self, *args, **kwargs):
kwargs['enabled'] = True
self.__init__(*args, **kwargs)
def check_condition(self):
print self.obj,self.attr
tmp = getattr(self.obj, self.attr)
result = tmp != self.value
self.value = tmp
return result
def trace_command(self, frame, event, arg):
if event!='line' or not self.enabled:
print "returning",event,self.enabled
return self.trace_command
if self.check_condition():
if self.prev_st:
with open(self.log_file, 'ab') as f:
print "Value of",self.obj,".",self.attr,"changed!"
print >>f, "Value of",self.obj,".",self.attr,"changed!"
print >>f,"###### Line:"
print >>f,''.join(self.prev_st)
if self.include:
fname = frame.f_code.co_filename.replace('\\','/')
to_include = False
for incl in self.include:
if fname.startswith(incl):
to_include = True
break
if not to_include:
return self.trace_command
self.prev_st = traceback.format_stack(frame)
return self.trace_command
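# Usage sketch (hypothetical target object, shown for illustration): after the
# module-level watcher below is installed with sys.settrace, point it at the
# attribute to monitor by calling it again, e.g.
#   watcher(some_obj, 'some_attr', include=['/path/to/project'])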
import sys
watcher = Watcher()
sys.settrace(watcher.trace_command) | pexnet/poxlibpacket | poxlibpacket/lib/packet/watcher.py | Python | apache-2.0 | 2,268 |
# Tools for working with modern DreamMaker icon files (PNGs + metadata)
import math
from PIL import Image
from PIL.PngImagePlugin import PngInfo
DEFAULT_SIZE = 32, 32
LOOP_UNLIMITED = 0
LOOP_ONCE = 1
NORTH = 1
SOUTH = 2
EAST = 4
WEST = 8
SOUTHEAST = SOUTH | EAST
SOUTHWEST = SOUTH | WEST
NORTHEAST = NORTH | EAST
NORTHWEST = NORTH | WEST
CARDINALS = [NORTH, SOUTH, EAST, WEST]
DIR_ORDER = [SOUTH, NORTH, EAST, WEST, SOUTHEAST, SOUTHWEST, NORTHEAST, NORTHWEST]
DIR_NAMES = {
'SOUTH': SOUTH,
'NORTH': NORTH,
'EAST': EAST,
'WEST': WEST,
'SOUTHEAST': SOUTHEAST,
'SOUTHWEST': SOUTHWEST,
'NORTHEAST': NORTHEAST,
'NORTHWEST': NORTHWEST,
**{str(x): x for x in DIR_ORDER},
**{x: x for x in DIR_ORDER},
'0': SOUTH,
None: SOUTH,
}
class Dmi:
version = "4.0"
def __init__(self, width, height):
self.width = width
self.height = height
self.states = []
@classmethod
def from_file(cls, fname):
image = Image.open(fname)
if image.mode != 'RGBA':
image = image.convert('RGBA')
# no metadata = regular image file
if 'Description' not in image.info:
dmi = Dmi(*image.size)
state = dmi.state("")
state.frame(image)
return dmi
# read metadata
metadata = image.info['Description']
line_iter = iter(metadata.splitlines())
assert next(line_iter) == "# BEGIN DMI"
assert next(line_iter) == f"version = {cls.version}"
dmi = Dmi(*DEFAULT_SIZE)
state = None
for line in line_iter:
if line == "# END DMI":
break
key, value = line.lstrip().split(" = ")
if key == 'width':
dmi.width = int(value)
elif key == 'height':
dmi.height = int(value)
elif key == 'state':
state = dmi.state(unescape(value))
elif key == 'dirs':
state.dirs = int(value)
elif key == 'frames':
state._nframes = int(value)
elif key == 'delay':
state.delays = [parse_num(x) for x in value.split(',')]
elif key == 'loop':
state.loop = int(value)
elif key == 'rewind':
state.rewind = parse_bool(value)
elif key == 'hotspot':
x, y, frm = [int(x) for x in value.split(',')]
state.hotspot(frm - 1, x, y)
elif key == 'movement':
state.movement = parse_bool(value)
else:
raise NotImplementedError(key)
# cut image into frames
width, height = image.size
gridwidth = width // dmi.width
i = 0
for state in dmi.states:
for frame in range(state._nframes):
for dir in range(state.dirs):
px = dmi.width * (i % gridwidth)
py = dmi.height * (i // gridwidth)
im = image.crop((px, py, px + dmi.width, py + dmi.height))
assert im.size == (dmi.width, dmi.height)
state.frames.append(im)
i += 1
state._nframes = None
return dmi
def state(self, *args, **kwargs):
s = State(self, *args, **kwargs)
self.states.append(s)
return s
@property
def default_state(self):
return self.states[0]
def get_state(self, name):
for state in self.states:
if state.name == name:
return state
raise KeyError(name)
def _assemble_comment(self):
comment = "# BEGIN DMI\n"
comment += f"version = {self.version}\n"
comment += f"\twidth = {self.width}\n"
comment += f"\theight = {self.height}\n"
for state in self.states:
comment += f"state = {escape(state.name)}\n"
comment += f"\tdirs = {state.dirs}\n"
comment += f"\tframes = {state.framecount}\n"
if state.framecount > 1 and len(state.delays): # any(x != 1 for x in state.delays):
comment += "\tdelay = " + ",".join(map(str, state.delays)) + "\n"
if state.loop != 0:
comment += f"\tloop = {state.loop}\n"
if state.rewind:
comment += "\trewind = 1\n"
if state.movement:
comment += "\tmovement = 1\n"
if state.hotspots and any(state.hotspots):
current = None
for i, value in enumerate(state.hotspots):
if value != current:
x, y = value
comment += f"\thotspot = {x},{y},{i + 1}\n"
current = value
comment += "# END DMI"
return comment
def to_file(self, filename, *, palette=False):
# assemble comment
comment = self._assemble_comment()
# assemble spritesheet
W, H = self.width, self.height
num_frames = sum(len(state.frames) for state in self.states)
sqrt = math.ceil(math.sqrt(num_frames))
output = Image.new('RGBA', (sqrt * W, math.ceil(num_frames / sqrt) * H))
i = 0
for state in self.states:
for frame in state.frames:
output.paste(frame, ((i % sqrt) * W, (i // sqrt) * H))
i += 1
# save
pnginfo = PngInfo()
pnginfo.add_text('Description', comment, zip=True)
if palette:
output = output.convert('P')
output.save(filename, 'png', optimize=True, pnginfo=pnginfo)
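# Hedged usage sketch (not part of the original module): the file names and the
# state name "hello" are placeholders, and the helper is never called. It shows
# the intended round trip of loading a .dmi spritesheet, inspecting one of its
# states, and writing the icon back out.
def _example_dmi_round_trip():
    dmi = Dmi.from_file("icon.dmi")
    state = dmi.get_state("hello")
    print(f"{state.framecount} frame(s), {state.dirs} dir(s)")
    dmi.to_file("icon_copy.dmi")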
class State:
def __init__(self, dmi, name, *, loop=LOOP_UNLIMITED, rewind=False, movement=False, dirs=1):
self.dmi = dmi
self.name = name
self.loop = loop
self.rewind = rewind
self.movement = movement
self.dirs = dirs
self._nframes = None # used during loading only
self.frames = []
self.delays = []
self.hotspots = None
@property
def framecount(self):
if self._nframes is not None:
return self._nframes
else:
return len(self.frames) // self.dirs
def frame(self, image, *, delay=1):
assert image.size == (self.dmi.width, self.dmi.height)
self.delays.append(delay)
self.frames.append(image)
def hotspot(self, first_frame, x, y):
if self.hotspots is None:
self.hotspots = [None] * self.framecount
for i in range(first_frame, self.framecount):
self.hotspots[i] = x, y
def _frame_index(self, frame=0, dir=None):
ofs = DIR_ORDER.index(DIR_NAMES[dir])
if ofs >= self.dirs:
ofs = 0
return frame * self.dirs + ofs
def get_frame(self, *args, **kwargs):
return self.frames[self._frame_index(*args, **kwargs)]
def escape(text):
text = text.replace('\\', '\\\\')
text = text.replace('"', '\\"')
return f'"{text}"'
def unescape(text, quote='"'):
if text == 'null':
return None
if not (text.startswith(quote) and text.endswith(quote)):
raise ValueError(text)
text = text[1:-1]
text = text.replace('\\"', '"')
text = text.replace('\\\\', '\\')
return text
def parse_num(value):
if '.' in value:
return float(value)
return int(value)
def parse_bool(value):
if value not in ('0', '1'):
raise ValueError(value)
return value == '1'
| Cruix/-tg-station | tools/dmi/__init__.py | Python | agpl-3.0 | 7,556 |
# Copyright 2016 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import socket
import threading
import paramiko
from touchdown.tests.fixtures.fixture import Fixture
class DummyServer(paramiko.ServerInterface):
def get_allowed_auths(self, username):
return "publickey,password"
def check_auth_password(self, username, password):
return paramiko.AUTH_SUCCESSFUL
def check_auth_publickey(self, username, key):
return paramiko.AUTH_SUCCESSFUL
def check_channel_request(self, kind, chanid):
return paramiko.OPEN_SUCCEEDED
def check_channel_exec_request(self, channel, command):
return True
def check_channel_shell_request(self, channel):
return True
def check_channel_pty_request(
self, channel, term, width, height, pixelwidth, pixelheight, modes
):
return True
class SshConnectionFixture(Fixture):
def __enter__(self):
self.listen_socket = socket.socket()
self.listen_socket.bind(("0.0.0.0", 0))
self.listen_socket.listen(1)
self.address, self.port = self.listen_socket.getsockname()
self.fixtures.push(lambda *exc_info: self.listen_socket.close())
self.event = threading.Event()
self.ssh_connection = self.workspace.add_ssh_connection(
name="test-ssh-connection", hostname=self.address, port=self.port
)
self.listen_thread = threading.Thread(target=self.server_thread)
self.listen_thread.daemon = True
self.listen_thread.start()
return self
def server_thread(self):
self.client_socket, addr = self.listen_socket.accept()
self.fixtures.push(lambda *exc_info: self.client_socket.close())
self.server_transport = paramiko.Transport(self.client_socket)
self.fixtures.push(lambda *exc_info: self.server_transport.close())
self.server_transport.add_server_key(
paramiko.RSAKey.from_private_key_file(
os.path.join(os.path.dirname(__file__), "..", "assets/id_rsa_test")
)
)
self.server = DummyServer()
self.server_transport.start_server(self.event, self.server)
| yaybu/touchdown | touchdown/tests/fixtures/ssh_connection.py | Python | apache-2.0 | 2,710 |
import sys
def count_length(input_file, output_file):
input_handle = open(input_file, 'rU')
output_handle = open(output_file, "w")
for line in input_handle:
line = line.strip().split()
line_type = line[0]
if line_type != 'S':
break
e_id = line[1]
e_seq = line[2]
output_handle.write(e_id+'\t'+str(len(e_seq))+'\n')
input_handle.close()
output_handle.close()
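# Hedged usage sketch (not part of the original script): the file names are
# placeholders. Given a GFA file whose leading lines look like
#     S   edge_1  ACGT
# the call would write one "edge_1<TAB>4"-style line per S-line to the output.
def _example_run():
    count_length('assembly_graph.gfa', 'edge_lengths.tsv')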
if __name__=="__main__":
count_length(sys.argv[1], sys.argv[2])
| snurk/meta-strains | scripts/scripts_for_desman/edges_lengths.py | Python | mit | 545 |
from shardgather.renderers import (
render_plain, render_table, render_csv)
collected = {
'live': [{'a': 1}],
}
def test_render_plain():
result = render_plain(collected)
expected = (
"Total: 1\n"
"----------------------------------------------------------------\n"
"{'live': [{'a': 1}]}"
)
assert expected == result
def test_render_table():
result = render_table(collected)
expected = (
'+---------+---+\n'
'| db_name | a |\n'
'+---------+---+\n'
'| live | 1 |\n'
'+---------+---+'
)
assert expected == result
def test_render_csv():
result = render_csv(collected)
expected = (
'db_name,a\r\n'
'live,1\r\n'
)
assert expected == result
| kevinjqiu/shardgather | tests/test_renderers.py | Python | bsd-3-clause | 781 |
"""Test coils."""
from mpf.platforms.interfaces.driver_platform_interface import PulseSettings, HoldSettings
from mpf.tests.MpfTestCase import MpfTestCase
from unittest.mock import MagicMock
class TestDeviceDriver(MpfTestCase):
def get_config_file(self):
return 'coils.yaml'
def get_machine_path(self):
return 'tests/machine_files/device/'
def get_platform(self):
return 'smart_virtual'
def testBasicFunctions(self):
# Make sure hardware devices have been configured for tests
self.assertIn('coil_01', self.machine.coils)
self.assertIn('coil_02', self.machine.coils)
# Setup platform function mock to test coil
self.machine.coils["coil_01"].hw_driver.disable = MagicMock()
self.machine.coils["coil_01"].hw_driver.enable = MagicMock()
self.machine.coils["coil_01"].hw_driver.pulse = MagicMock()
self.machine.coils["coil_01"].enable()
self.machine.coils["coil_01"].hw_driver.enable.assert_called_with(PulseSettings(power=1.0, duration=30),
HoldSettings(power=1.0, duration=None))
self.machine.coils["coil_01"].pulse(100)
self.machine.coils["coil_01"].hw_driver.pulse.assert_called_with(PulseSettings(power=1.0, duration=100))
self.machine.coils["coil_01"].disable()
self.machine.coils["coil_01"].hw_driver.disable.assert_called_with()
self.machine.coils["coil_03"].hw_driver.disable = MagicMock()
self.machine.coils["coil_03"].hw_driver.enable = MagicMock()
self.machine.coils["coil_03"].hw_driver.pulse = MagicMock()
# test power
self.machine.coils["coil_03"].pulse(pulse_power=1.0)
self.machine.coils["coil_03"].hw_driver.pulse.assert_called_with(PulseSettings(power=1.0, duration=10))
self.machine.coils["coil_03"].pulse(pulse_power=0.5)
self.machine.coils["coil_03"].hw_driver.pulse.assert_called_with(PulseSettings(power=0.5, duration=10))
self.machine.coils["coil_01"].enable(pulse_power=0.7, hold_power=0.3)
self.machine.coils["coil_01"].hw_driver.enable.assert_called_with(PulseSettings(power=0.7, duration=30),
HoldSettings(power=0.3, duration=None))
# test long pulse with delay
self.machine.coils["coil_03"].hw_driver.pulse = MagicMock()
self.machine.coils["coil_03"].hw_driver.enable = MagicMock()
self.machine.coils["coil_03"].hw_driver.disable = MagicMock()
self.machine.coils["coil_03"].pulse(pulse_ms=500)
self.machine.coils["coil_03"].hw_driver.enable.assert_called_with(PulseSettings(power=1.0, duration=0),
HoldSettings(power=1.0, duration=None))
self.machine.coils["coil_03"].hw_driver.pulse.assert_not_called()
self.advance_time_and_run(.5)
self.machine.coils["coil_03"].hw_driver.disable.assert_called_with()
def testMaxHoldDuration(self):
coil = self.machine.coils["coil_max_hold_duration"]
# check that coil disables after max_hold_duration (5s)
coil.enable()
self.advance_time_and_run(.5)
self.assertEqual("enabled", coil.hw_driver.state)
self.advance_time_and_run(4)
self.assertEqual("enabled", coil.hw_driver.state)
self.advance_time_and_run(1)
self.assertEqual("disabled", coil.hw_driver.state)
# make sure a disable resets the timer
coil.enable()
self.advance_time_and_run(3.0)
self.assertEqual("enabled", coil.hw_driver.state)
coil.disable()
self.advance_time_and_run(.5)
self.assertEqual("disabled", coil.hw_driver.state)
coil.enable()
self.advance_time_and_run(3.0)
self.assertEqual("enabled", coil.hw_driver.state)
def testPulseWithTimedEnable(self):
coil = self.machine.coils["coil_pulse_with_timed_enable"]
coil.hw_driver.timed_enable = MagicMock()
coil.pulse()
coil.hw_driver.timed_enable.assert_called_with(
PulseSettings(power=0.25, duration=60),
HoldSettings(power=0.5, duration=200))
| missionpinball/mpf | mpf/tests/test_DeviceDriver.py | Python | mit | 4,266 |
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
PYANNOTE_URI = 'uri'
PYANNOTE_MODALITY = 'modality'
PYANNOTE_SEGMENT = 'segment'
PYANNOTE_TRACK = 'track'
PYANNOTE_LABEL = 'label'
PYANNOTE_SCORE = 'score'
PYANNOTE_IDENTITY = 'identity'
from .time import T, TStart, TEnd
from .segment import Segment, SlidingWindow
from .timeline import Timeline
from .annotation import Annotation
from .transcription import Transcription
from .scores import Scores
from .feature import SlidingWindowFeature
try:
from .notebook import notebook
except ImportError as e:
pass
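# Hedged usage sketch (not part of the original package): it only exercises the
# Segment/Timeline/Annotation types re-exported above, and the "speech" label
# is a placeholder. The helper is never called at import time.
def _example_core_usage():
    segment = Segment(0.0, 2.5)
    timeline = Timeline([segment, Segment(3.0, 4.0)])
    annotation = Annotation()
    annotation[segment] = "speech"
    return segment, timeline, annotation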
| grantjenks/pyannote-core | pyannote/core/__init__.py | Python | mit | 1,810 |
#
# Copyright (c) Dariusz Biskup
#
# This file is part of Spotlight
#
# Spotlight is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of
# the License, or (at your option) any later version.
#
# Spotlight is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
from spotlight.service.util.SynchronizerMixin import SynchronizerMixin
from spotify.search import SearchCallbacks
from spotify import search
from spotlight.service.util import encode
class Search(SynchronizerMixin, SearchCallbacks):
def __init__(self, page, session):
self.page = page
self.session = session
def execute(self):
self.search_result = search.Search(
self.session, encode(self.page.identifier),
track_offset = self.page.start, track_count = self.page.offset,
callbacks = self)
return self.search_result
def search_complete(self, result):
self.done(result)
| oikarinen/plugin.audio.spotlight | spotlight/service/command/Search.py | Python | gpl-3.0 | 1,404 |
#!/usr/bin/env python
"""A simple example demonstrating how one can use numpy arrays
transparently with TVTK.
"""
# Author: Prabhu Ramachandran and Eric Jones
# Copyright (c) 2004-2007, Enthought, Inc.
# License: BSD Style.
from tvtk.api import tvtk
from tvtk.common import configure_input_data
from numpy import array
### DATA
data = array([[0,0,0,10],
[1,0,0,20],
[0,1,0,20],
[0,0,1,30]], 'f')
triangles = array([[0,1,3],
[0,3,2],
[1,2,3],
[0,2,1]])
points = data[:,:3]
temperature = data[:,-1]
### TVTK PIPELINE
# create a renderer
renderer = tvtk.Renderer()
# create a render window and hand it the renderer
render_window = tvtk.RenderWindow(size=(400,400))
render_window.add_renderer(renderer)
# create interactor and hand it the render window
# This handles mouse interaction with window.
interactor = tvtk.RenderWindowInteractor(render_window=render_window)
# Create a mesh from the data created above.
mesh = tvtk.PolyData(points=points, polys=triangles)
mesh.point_data.scalars = temperature
# Set the mapper to scale temperature range
# across the entire range of colors
mapper = tvtk.PolyDataMapper()
configure_input_data(mapper, mesh)
mapper.scalar_range = min(temperature), max(temperature)
# Create mesh actor for display
actor = tvtk.Actor(mapper=mapper)
# Create a scalar bar
scalar_bar = tvtk.ScalarBarActor(title="Temperature",
orientation='horizontal',
width=0.8, height=0.17,
lookup_table = mapper.lookup_table)
scalar_bar.position_coordinate.coordinate_system = 'normalized_viewport'
scalar_bar.position_coordinate.value = 0.1, 0.01, 0.0
# Use the ScalarBarWidget so we can drag the scalar bar around.
sc_bar_widget = tvtk.ScalarBarWidget(interactor=interactor,
scalar_bar_actor=scalar_bar)
# Now add the actors to the renderer and start the interaction.
renderer.add_actor(actor)
interactor.initialize()
# Enable the widget so the scalar bar can be seen. Press 'i' to
# disable the widget.
sc_bar_widget.enabled = True
interactor.start()
| nicjhan/mom-particles | vtk/tiny_mesh.py | Python | gpl-2.0 | 2,212 |
# Copyright 2022 The Kubric Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from kubric.core import cameras
def test_orthographic_camera_constructor():
cam = cameras.OrthographicCamera(orthographic_scale=7)
assert cam.orthographic_scale == 7
def test_perspective_camera_constructor():
cam = cameras.PerspectiveCamera(focal_length=22, sensor_width=33)
assert cam.focal_length == 22
assert cam.sensor_width == 33
def test_perspective_camera_field_of_view():
cam = cameras.PerspectiveCamera(focal_length=28, sensor_width=36)
assert cam.field_of_view == pytest.approx(1.1427, abs=1e-4) # ca 65.5°
| google-research/kubric | test/test_cameras.py | Python | apache-2.0 | 1,140 |
from __future__ import absolute_import
import itertools
import uuid
from textile import textile_restricted
from django.db import models
from django.dispatch import receiver
from django.urls import reverse
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from talks.events.models import Event, EventGroup
DEFAULT_COLLECTION_NAME = "My Collection"
COLLECTION_ROLES_OWNER = 'owner'
COLLECTION_ROLES_EDITOR = 'editor'
COLLECTION_ROLES_READER = 'reader'
COLLECTION_ROLES = (
(COLLECTION_ROLES_OWNER, 'Owner'),
(COLLECTION_ROLES_EDITOR, 'Collaborator'),
(COLLECTION_ROLES_READER, 'Viewer'),
)
class CollectedDepartment(models.Model):
department = models.TextField(default='')
class Collection(models.Model):
slug = models.SlugField()
title = models.CharField(max_length=250)
description = models.TextField(blank=True)
editor_set = models.ManyToManyField('TalksUser', through='TalksUserCollection', blank=True)
public = models.BooleanField(default=False)
def _get_items_by_model(self, model):
"""Used when selecting a particular type (specified in the `model` arg)
of objects from our Collection.
1) Get the ContentType for that `model`
2) Filter to the CollectionItems of that ContentType and get all
`object_id`s
3) Select these `object_id`s from the `model`
"""
content_type = ContentType.objects.get_for_model(model)
ids = self.collectionitem_set.filter(content_type=content_type
).values_list('object_id')
return model.objects.filter(id__in=itertools.chain.from_iterable(ids))
class ItemAlreadyInCollection(Exception):
pass
class InvalidItemType(Exception):
pass
def get_absolute_url(self):
return reverse('view-list', args=[str(self.slug)])
def get_api_url(self):
return reverse('api-collection', args=[str(self.slug)])
def get_ics_url(self):
return reverse('api-collection-ics', args=[str(self.slug)])
def save(self, *args, **kwargs):
if not self.slug:
# Newly created object, so set slug
self.slug = str(uuid.uuid4())
super(Collection, self).save(*args, **kwargs)
def add_item(self, item):
if isinstance(item, Event):
# Adding an event
content_type = ContentType.objects.get_for_model(Event)
elif isinstance(item, EventGroup):
# Adding event group
content_type = ContentType.objects.get_for_model(EventGroup)
elif isinstance(item, CollectedDepartment):
# Adding department
content_type = ContentType.objects.get_for_model(CollectedDepartment)
else:
raise self.InvalidItemType()
try:
self.collectionitem_set.get(content_type=content_type,
object_id=item.id)
raise self.ItemAlreadyInCollection()
except CollectionItem.DoesNotExist:
item = self.collectionitem_set.create(item=item)
return item
def remove_item(self, item):
if isinstance(item, Event):
content_type = ContentType.objects.get_for_model(Event)
elif isinstance(item, EventGroup):
content_type = ContentType.objects.get_for_model(EventGroup)
elif isinstance(item, CollectedDepartment):
content_type = ContentType.objects.get_for_model(CollectedDepartment)
else:
raise self.InvalidItemType()
try:
item = self.collectionitem_set.get(content_type=content_type,
object_id=item.id)
item.delete()
return True
except CollectionItem.DoesNotExist:
return False
def get_events(self):
return self._get_items_by_model(Event)
def get_event_groups(self):
return self._get_items_by_model(EventGroup)
def get_departments(self):
return self._get_items_by_model(CollectedDepartment)
def get_all_events(self):
"""
Returns all distinct events in this collections events, event groups, and departments:
"""
eventIDs = self.collectionitem_set.filter(content_type=ContentType.objects.get_for_model(Event)
).values_list('object_id')
eventGroupIDs = self.collectionitem_set.filter(content_type=ContentType.objects.get_for_model(EventGroup)
).values_list('object_id')
collectedDepartmentIDs = self.collectionitem_set.filter(content_type=ContentType.objects.get_for_model(CollectedDepartment)
).values_list('object_id')
events = Event.objects.filter(id__in=itertools.chain.from_iterable(eventIDs))
eventsInEventGroups = Event.objects.filter(group__in=eventGroupIDs)
# get all department ids
from talks.api.services import get_all_department_ids
departments = CollectedDepartment.objects.filter(id__in=itertools.chain.from_iterable(collectedDepartmentIDs)).values('department')
departmentIDs = [dep['department'] for dep in departments]
allDepartmentIDs = get_all_department_ids(departmentIDs, True)
departmentEvents = Event.objects.filter(department_organiser__in=allDepartmentIDs)
allEvents = events | eventsInEventGroups | departmentEvents
return allEvents.distinct().order_by('start')
def contains_item(self, item):
if isinstance(item, Event):
content_type = ContentType.objects.get_for_model(Event)
elif isinstance(item, EventGroup):
content_type = ContentType.objects.get_for_model(EventGroup)
elif isinstance(item, CollectedDepartment):
content_type = ContentType.objects.get_for_model(CollectedDepartment)
else:
raise self.InvalidItemType()
try:
self.collectionitem_set.get(content_type=content_type,
object_id=item.id)
return True
except CollectionItem.DoesNotExist:
return False
def contains_department(self, department_id):
try:
collectedDepartment = CollectedDepartment.objects.get(department=department_id)
except CollectedDepartment.DoesNotExist:
# This department hasn't been collected at all, so cannot be in any collection
return False
result = self.contains_item(collectedDepartment)
return result
def user_collection_permission(self, user):
"""
:param user: The user accessing the collection
:return: The role that user has on that collection (if any)
ie. owner, editor, reader or None.
"""
talksuser = user if isinstance(user, TalksUser) else user.talksuser
roles = self.talksusercollection_set.filter(user=talksuser).values_list('role', flat=True)
role = None
        if COLLECTION_ROLES_OWNER in roles:
role = COLLECTION_ROLES_OWNER
elif COLLECTION_ROLES_EDITOR in roles:
role = COLLECTION_ROLES_EDITOR
elif COLLECTION_ROLES_READER in roles:
role = COLLECTION_ROLES_READER
return role
@property
def description_html(self):
return textile_restricted(self.description, auto_link=True, lite=False)
def user_can_edit(self, user):
return self.editor_set.filter(user_id=user.id, talksusercollection__role=COLLECTION_ROLES_OWNER).exists()
def user_can_view(self, user):
return self.editor_set.filter(user_id=user.id, talksusercollection__role__in=[COLLECTION_ROLES_OWNER, COLLECTION_ROLES_EDITOR, COLLECTION_ROLES_READER]).exists()
def get_number_of_readers(self):
"""
If this collection is public, return the number of users who have subscribed to this collection
"""
return TalksUserCollection.objects.filter(collection=self, role=COLLECTION_ROLES_READER).count()
def __str__(self):
return self.title
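# Hedged usage sketch (not part of the original models): the title is a
# placeholder, ``talks_user`` and ``event`` stand for an existing TalksUser and
# talks.events Event, and the code assumes it runs where the Django ORM is set
# up (a view, a management command or the shell). The helper is never called.
def _example_collection_usage(talks_user, event):
    collection = Collection.objects.create(title="Reading group")
    TalksUserCollection.objects.create(
        user=talks_user, collection=collection, role=COLLECTION_ROLES_OWNER)
    collection.add_item(event)
    return collection.get_all_events()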
class TalksUserCollection(models.Model):
user = models.ForeignKey("TalksUser", on_delete=models.CASCADE)
collection = models.ForeignKey(Collection, on_delete=models.CASCADE)
role = models.TextField(choices=COLLECTION_ROLES, default=COLLECTION_ROLES_OWNER)
is_main = models.BooleanField(default=False)
class Meta:
# For the admin interface where we only expose owner relationships for public lists
verbose_name = "Public Collection Ownership"
verbose_name_plural = "Public Collection Ownerships"
def __str__(self):
return str(self.user)
class TalksUser(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
collections = models.ManyToManyField(Collection, through=TalksUserCollection, blank=True)
def save(self, *args, **kwargs):
super(TalksUser, self).save(*args, **kwargs)
if self.collections.count() == 0:
default_collection = Collection.objects.create(title=DEFAULT_COLLECTION_NAME)
# Link the collection to the user
TalksUserCollection.objects.create(user=self,
collection=default_collection,
role=COLLECTION_ROLES_OWNER,
is_main=True)
def __str__(self):
return str(self.user)
class CollectionItem(models.Model):
collection = models.ForeignKey(Collection, on_delete=models.CASCADE)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
# Currently item can be an Event or EventGroup, or a DepartmentCollection
item = GenericForeignKey('content_type', 'object_id')
class Meta:
unique_together = [('collection', 'content_type', 'object_id')]
class DepartmentFollow(models.Model):
pass
class LocationFollow(models.Model):
pass
@receiver(models.signals.post_save, sender=User)
def ensure_profile_exists(sender, instance, created, **kwargs):
"""If the User has just been created we use a signal to also create a TalksUser
"""
if created:
tuser, tuser_created = TalksUser.objects.get_or_create(user=instance)
| ox-it/talks.ox | talks/users/models.py | Python | apache-2.0 | 10,534 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def extract(text, start, end):
if start == 0:
sindex = 0
else:
s = text.find(start)
sindex = s + len(start)
if end == -1:
return text[sindex:]
else:
e = text.find(end, sindex)
return text[sindex:e]
def extract_all(text, start, end):
results = []
slen = len(start)
s = text.find(start)
while s != -1:
e = text.find(end, s+slen)
t = text[s+slen:e]
s = text.find(start, s+slen)
results.append(t)
return results
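# Hedged usage sketch (not part of the original module), using throwaway sample
# strings to show the expected return values of the helpers above.
def _example_extract():
    assert extract("a<b>c</b>d", "<b>", "</b>") == "c"
    assert extract_all("x[1]y[2]z", "[", "]") == ["1", "2"]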
| damonchen/chan | chan/utils/extract.py | Python | bsd-2-clause | 576 |
import os
import tempfile
from kripodb.hdf5 import SimilarityMatrix
from kripodb.frozen import FrozenSimilarityMatrix
def tmpname():
tmpf = tempfile.NamedTemporaryFile()
out_file = tmpf.name
tmpf.close()
return out_file
class SimilarityMatrixInMemory(object):
def __init__(self):
self.matrix_fn = tmpname()
self.matrix = SimilarityMatrix(self.matrix_fn, 'a', driver='H5FD_CORE', driver_core_backing_store=0)
def __enter__(self):
return self.matrix
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
self.matrix.close()
if os.path.isfile(self.matrix_fn):
os.remove(self.matrix_fn)
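# Hedged usage sketch (not part of the original helpers): it only shows the
# intended context-manager pattern; whatever is done with ``matrix`` inside the
# block is up to the individual test. The helper is never called.
def _example_in_memory_matrix():
    with SimilarityMatrixInMemory() as matrix:
        pass  # build or query the temporary in-memory matrix here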
class FrozenSimilarityMatrixInMemory(object):
def __init__(self):
self.matrix_fn = tmpname()
self.matrix = FrozenSimilarityMatrix(self.matrix_fn, 'a', driver='H5FD_CORE', driver_core_backing_store=0)
def __enter__(self):
return self.matrix
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
self.matrix.close()
if os.path.isfile(self.matrix_fn):
os.remove(self.matrix_fn)
| 3D-e-Chem/python-modified-tanimoto | tests/utils.py | Python | apache-2.0 | 1,185 |
# -*- coding: utf-8 -*-
# vim: ts=4
###
#
# Copyright (c) 2009-2011 J. Félix Ontañón
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors : J. Félix Ontañón <[email protected]>
#
###
import gettext
import locale
import __builtin__
__builtin__._ = gettext.gettext
import mapping
# i18n
gettext.install('wiican', '/usr/share/locale', unicode=1)
gettext.bindtextdomain('wiican', '/usr/share/locale')
if hasattr(gettext, 'bind_textdomain_codeset'):
gettext.bind_textdomain_codeset('wiican','UTF-8')
gettext.textdomain('wiican')
locale.bindtextdomain('wiican', '/usr/share/locale')
if hasattr(locale, 'bind_textdomain_codeset'):
locale.bind_textdomain_codeset('wiican','UTF-8')
locale.textdomain('wiican')
| GNOME/wiican | wiican/__init__.py | Python | gpl-3.0 | 1,252 |
#!/usr/bin/env python
# Copyright (C) 2013 Equinor ASA, Norway.
#
# The file 'test_options_enum.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from tests import ResTest
from res.analysis import AnalysisModuleOptionsEnum
class AnalysisOptionsEnumTest(ResTest):
def test_items(self):
source_file_path = "lib/include/ert/analysis/analysis_module.hpp"
self.assertEnumIsFullyDefined(AnalysisModuleOptionsEnum, "analysis_module_flag_enum", source_file_path)
| Statoil/libres | python/tests/res/analysis/test_options_enum.py | Python | gpl-3.0 | 1,013 |
import magic
import os
import re
from gzip import GzipFile
from tarfile import TarFile
from django.apps import apps
PACKAGE_INFO_REGISTRY = []
def infoDetect( target_file ):
magic_helper = magic.open( 0 )
magic_helper.load()
for info in PACKAGE_INFO_REGISTRY:
result = info.detect( target_file, magic_helper )
if result is not None:
return result
return None
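# Hedged usage sketch (not part of the original module): ``upload`` stands in
# for the Django file object the detectors above expect (it must expose .name
# and a .file with a real file descriptor). The helper is never called.
def _example_detect( upload ):
  info = infoDetect( upload )
  if info is not None:
    return ( info.package, info.version, info.arch, info.type )
  return None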
class PackageInfo():
def __init__( self, filename, package, arch, version, type ):
super().__init__()
self.filename = filename
self.package = package
self.arch = arch
self.version = version
self.type = type
@property
def distroversion_list( self ):
DistroVersion = apps.get_model( 'Repos.DistroVersion' )
full_list = []
match_list = []
for tmp in DistroVersion.objects.filter( file_type=self.type ):
for name in tmp.release_names.split( '\t' ):
full_list.append( name )
if name in self.version:
match_list.append( tmp.pk )
if match_list:
return match_list
else:
return full_list
class Deb( PackageInfo ):
@classmethod
def detect( cls, target_file, magic_helper ):
( filename, extension ) = os.path.splitext( os.path.basename( target_file.name ) )
if extension != '.deb':
return None
# TODO: get the package, version, arch from the changelog
try:
( package, version, arch ) = filename.split( '_' )
except ValueError:
raise ValueError( 'Unrecognized deb file name Format' )
if arch == 'amd64':
arch = 'x86_64'
elif arch not in ( 'i386', 'all' ):
raise ValueError( 'Unrecognized deb Arch' )
try:
magic_type = magic_helper.descriptor( os.dup( target_file.file.fileno() ) )
except Exception as e:
raise Exception( 'Error getting magic: %s' % e)
if magic_type != b'Debian binary package (format 2.0)':
return None
return cls( filename, package, arch, version, 'deb' )
PACKAGE_INFO_REGISTRY.append( Deb )
class RPM( PackageInfo ):
@classmethod
def detect( cls, target_file, magic_helper ):
( filename, extension ) = os.path.splitext( os.path.basename( target_file.name ) )
if extension != '.rpm':
return None
# <name>-<version>-<release>.<architecture>.rpm for binaries.
# TODO: get these from the rpm file
try:
      ( package, version, release, arch ) = re.match( r'(.+)-([^-]+)-([^-]+)\.(\w+)', filename ).groups()
except ValueError:
raise ValueError( 'Unrecognized rpm file name Format' )
if arch == 'src':
raise ValueError( 'Source rpms are not supported' )
if arch == 'noarch':
arch = 'all'
elif arch not in ( 'i386', 'x86_64' ):
raise ValueError( 'Unrecognized rpm Arch' )
try:
magic_type = magic_helper.descriptor( os.dup( target_file.file.fileno() ) )
except Exception as e:
raise Exception( 'Error getting magic: %s' % e)
if magic_type not in ( b'RPM v3.0 bin noarch', b'RPM v3.0 bin i386/x86_64' ):
return None
return cls( filename, package, arch, '%s-%s' % ( version, release ), 'rpm' )
PACKAGE_INFO_REGISTRY.append( RPM )
class Docker( PackageInfo ):
@classmethod
def detect( cls, target_file, magic_helper ):
( filename, extension ) = os.path.splitext( os.path.basename( target_file.name ) )
if extension != '.tar':
return None
# TODO: get the info from the manifest
try:
( package, version ) = filename.split( '_' )
except ValueError:
raise ValueError( 'Unrecognized Docker Container file name Format' )
try:
magic_type = magic_helper.descriptor( os.dup( target_file.file.fileno() ) )
except Exception as e:
raise Exception( 'Error getting magic: %s' % e)
if not magic_type.startswith( b'POSIX tar archive' ):
return None
tarfile = TarFile( fileobj=target_file.file, mode='r' )
info = tarfile.extractfile( 'manifest.json' )
tarfile.close()
target_file.file.seek( 0 )
if info is None:
return None
return cls( filename, package, 'all', version, 'docker' )
PACKAGE_INFO_REGISTRY.append( Docker )
class Python( PackageInfo ):
@classmethod
def detect( cls, target_file, magic_helper ):
filename = os.path.basename( target_file.name )
if not filename.endswith( '.tar.gz'):
return None
( filename, extension ) = os.path.splitext( filename ) # one for .gz
( filename, extension ) = os.path.splitext( filename ) # second for .tar
try:
( package, version ) = filename.split( '-' ) # ie: cinp-0.9.2.tar.gz
except ValueError:
raise ValueError( 'Unrecognized Python Container file name Format' )
try:
magic_type = magic_helper.descriptor( os.dup( target_file.file.fileno() ) )
except Exception as e:
raise Exception( 'Error getting magic: %s' % e)
if not magic_type.startswith( b'gzip compressed data' ):
return None
gzfile = GzipFile( fileobj=target_file.file, mode='r' )
tarfile = TarFile( fileobj=gzfile, mode='r' )
info = tarfile.extractfile( '{0}/PKG-INFO'.format( filename ) )
tarfile.close()
gzfile.close()
target_file.file.seek( 0 )
if info is None:
return None
return cls( filename, package, 'all', version, 'python' )
PACKAGE_INFO_REGISTRY.append( Python )
# Resource must be last, being it will catch anything with a '_' in the filename
class Resource( PackageInfo ): # This will take *anything* that has one (and only one) "_" in the file name to delinitate the package and version, we are not doing any type checking
@classmethod
def detect( cls, target_file, magic_helper ): # compare with packrat-agent/packratAgent/Json.py -> _splitFileName
filename = os.path.basename( target_file.name )
if filename.endswith( ( '.tar.gz', '.tar.bz2', '.tar.xz', 'img.gz', 'img.bz2', 'img.xz' ) ):
( filename, _, _ ) = filename.rsplit( '.', 2 )
else:
try:
( filename, _ ) = filename.rsplit( '.', 1 )
except ValueError:
pass
try:
( package, version ) = filename.split( '_' )
except ValueError:
return None
return cls( filename, package, 'all', version, 'rsc' )
PACKAGE_INFO_REGISTRY.append( Resource )
| Virtustream-OSS/packrat | packrat/Repos/PackageInfo.py | Python | apache-2.0 | 6,231 |
#!/usr/bin/env python
import rospy
import actionlib
from strands_executive_msgs.msg import Task
from strands_executive_msgs import task_utils
from task_executor.msg import *
from topological_navigation.msg import GotoNodeAction
from task_executor import task_routine
from datetime import *
from threading import Thread
import mongodb_store_msgs.srv as dc_srv
from mongodb_store_msgs.msg import StringPair
import mongodb_store.util as dc_util
from mongodb_store.message_store import MessageStoreProxy
from geometry_msgs.msg import Pose, Point, Quaternion
from dateutil.tz import tzlocal
def dummy_task():
"""
Create an example of a task which we'll copy for other tasks later.
This is a good example of creating a task with a variety of arguments.
"""
# need message store to pass objects around
msg_store = MessageStoreProxy()
# get the pose of a named object
pose_name = "my favourite pose"
# get the pose if it's there
message, meta = msg_store.query_named(pose_name, Pose._type)
# if it's not there, add it in
if message == None:
message = Pose(Point(0, 1, 2), Quaternion(3, 4, 5, 6))
pose_id = msg_store.insert_named(pose_name, message)
else:
pose_id = meta["_id"]
master_task = Task(action='test_task')
task_utils.add_string_argument(master_task, 'hello world')
task_utils.add_object_id_argument(master_task, pose_id, Pose)
task_utils.add_int_argument(master_task, 24)
task_utils.add_float_argument(master_task, 63.678)
return master_task
if __name__ == '__main__':
rospy.init_node("task_routine", log_level=rospy.INFO)
# wait for simulated time to kick in as rospy.get_rostime() is 0 until first clock message received
while not rospy.is_shutdown() and rospy.get_rostime().secs == 0:
pass
localtz = tzlocal()
start = time(8,30, tzinfo=localtz)
ten = time(10,00, tzinfo=localtz)
midday = time(12,00, tzinfo=localtz)
end = time(17,00, tzinfo=localtz)
morning = (start, midday)
afternoon = (midday, end)
task = dummy_task()
routine = task_routine.DailyRoutine(start, end)
routine.repeat_every_hour(task, hours=2)
# all these should except
# routine.add_tasks([task], midday, start)
# routine.add_tasks([task], start, start)
# task.max_duration = 60 * 60 * 12;
# routine.add_tasks([task], start, midday)
# routine = task_routine.DailyRoutineRunner(start, end)
# task.max_duration = rospy.Duration(timedelta(seconds=60).total_seconds());
# routine.add_tasks([([task, task], morning), ([task, task], afternoon)])
rospy.spin()
| bfalacerda/strands_executive | task_executor/scripts/task_routine_node.py | Python | mit | 2,689 |
from django.core.urlresolvers import reverse
from mappings.tests import MappingBaseTest
from mappings.validation_messages import OPENMRS_SINGLE_MAPPING_BETWEEN_TWO_CONCEPTS
from oclapi.models import CUSTOM_VALIDATION_SCHEMA_OPENMRS
from test_helper.base import create_user, create_source, create_concept
class OpenMRSMappingCreateTest(MappingBaseTest):
def test_create_mapping_duplicate_mapping_between_two_concepts(self):
source = create_source(self.user1, validation_schema=CUSTOM_VALIDATION_SCHEMA_OPENMRS)
(concept1, _) = create_concept(user=self.user1, source=source)
(concept2, _) = create_concept(user=self.user1, source=source)
self.client.login(username='user1', password='user1')
kwargs = {
'source': source.mnemonic
}
mapping1 = {
'from_concept_url': concept1.url,
'to_concept_url': concept2.url,
'map_type': 'Same As'
}
mapping2 = {
'from_concept_url': concept1.url,
'to_concept_url': concept2.url,
'map_type': 'Narrower Than'
}
self.client.post(reverse('mapping-list', kwargs=kwargs), mapping1)
response = self.client.post(reverse('mapping-list', kwargs=kwargs), mapping2)
self.assertEquals(response.status_code, 400)
self.assertEquals(response.data, {"errors": OPENMRS_SINGLE_MAPPING_BETWEEN_TWO_CONCEPTS}) | ayseyo/oclapi | django-nonrel/ocl/integration_tests/tests/openmrs_mapping_validation.py | Python | mpl-2.0 | 1,429 |
# -*- coding: utf-8 -*-
from .env import *
from amoco.arch.core import Formatter
def mnemo(i):
return i.mnemonic.lower().ljust(8, " ")
def op0(i):
return str(i.operands[0])
def rel0(i):
return "*%s" % i.misc["to"]
def op1(i):
return ", " + str(i.operands[1])
def dest1(i):
return ", %s" % str(i.dst)
def bitb(i):
return ", %d" % i.operands[1]
def accessbank(i):
return ", BANKED" if i.a == 1 else ", ACCESS"
def adr(i):
x = str(i.operands[0])
return x + ", fast" if i.misc["fast"] == True else x
def tblrw(i):
m = i.mnemonic.lower()
if i.misc["postinc"]:
return m + "*+"
elif i.misc["postdec"]:
return m + "*-"
elif i.misc["preinc"]:
return m + "+*"
return m
format_byte = [mnemo, op0, dest1, accessbank]
format_bit = [mnemo, op0, bitb, accessbank]
format_imm = [mnemo, op0]
format_rel = [mnemo, rel0]
format_call = [mnemo, adr]
PIC_full_formats = {
"byte_oriented": format_byte,
"bit_oriented": format_bit,
"control": format_imm,
"control_rel": format_rel,
"noop": [mnemo],
"tblrw": [tblrw],
"literal": format_imm,
"LFSR": [mnemo, op0, op1],
"decode_movff": [mnemo, op0, op1],
"CALL": format_call,
"MOVWF": [mnemo, op0, accessbank],
"CLRF": [mnemo, op0, accessbank],
"MULWF": [mnemo, op0, accessbank],
}
PIC_full = Formatter(PIC_full_formats)
| LRGH/amoco | amoco/arch/pic/F46K22/formats.py | Python | gpl-2.0 | 1,399 |
#!/usr/bin/env python
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dialogflow API Python sample showing how to manage Participants.
"""
from google.cloud import dialogflow_v2beta1 as dialogflow
ROLES = ["HUMAN_AGENT", "AUTOMATED_AGENT", "END_USER"]
# [START dialogflow_create_participant]
def create_participant(project_id, conversation_id, role):
"""Creates a participant in a given conversation.
Args:
project_id: The GCP project linked with the conversation profile.
conversation_id: Id of the conversation.
        role: the participant role to create; one of HUMAN_AGENT,
            AUTOMATED_AGENT or END_USER."""
client = dialogflow.ParticipantsClient()
conversation_path = dialogflow.ConversationsClient.conversation_path(
project_id, conversation_id
)
if role in ROLES:
response = client.create_participant(
parent=conversation_path, participant={"role": role}, timeout=600
)
print("Participant Created.")
print("Role: {}".format(response.role))
print("Name: {}".format(response.name))
return response
# [END dialogflow_create_participant]
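# Hedged usage sketch (not part of the original sample): the project and
# conversation IDs are placeholders, and the conversation must already exist
# (e.g. created via the Conversations API) before participants can be added.
def _example_create_end_user(project_id="my-project", conversation_id="my-conversation"):
    return create_participant(project_id, conversation_id, "END_USER")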
# [START dialogflow_analyze_content_text]
def analyze_content_text(project_id, conversation_id, participant_id, text):
"""Analyze text message content from a participant.
Args:
project_id: The GCP project linked with the conversation profile.
conversation_id: Id of the conversation.
participant_id: Id of the participant.
text: the text message that participant typed."""
client = dialogflow.ParticipantsClient()
participant_path = client.participant_path(
project_id, conversation_id, participant_id
)
text_input = {"text": text, "language_code": "en-US"}
response = client.analyze_content(
participant=participant_path, text_input=text_input
)
print("AnalyzeContent Response:")
print("Reply Text: {}".format(response.reply_text))
for suggestion_result in response.human_agent_suggestion_results:
if suggestion_result.error is not None:
print("Error: {}".format(suggestion_result.error.message))
if suggestion_result.suggest_articles_response:
for answer in suggestion_result.suggest_articles_response.article_answers:
print("Article Suggestion Answer: {}".format(answer.title))
print("Answer Record: {}".format(answer.answer_record))
if suggestion_result.suggest_faq_answers_response:
for answer in suggestion_result.suggest_faq_answers_response.faq_answers:
print("Faq Answer: {}".format(answer.answer))
print("Answer Record: {}".format(answer.answer_record))
if suggestion_result.suggest_smart_replies_response:
for (
answer
) in suggestion_result.suggest_smart_replies_response.smart_reply_answers:
print("Smart Reply: {}".format(answer.reply))
print("Answer Record: {}".format(answer.answer_record))
for suggestion_result in response.end_user_suggestion_results:
if suggestion_result.error:
print("Error: {}".format(suggestion_result.error.message))
if suggestion_result.suggest_articles_response:
for answer in suggestion_result.suggest_articles_response.article_answers:
print("Article Suggestion Answer: {}".format(answer.title))
print("Answer Record: {}".format(answer.answer_record))
if suggestion_result.suggest_faq_answers_response:
for answer in suggestion_result.suggest_faq_answers_response.faq_answers:
print("Faq Answer: {}".format(answer.answer))
print("Answer Record: {}".format(answer.answer_record))
if suggestion_result.suggest_smart_replies_response:
for (
answer
) in suggestion_result.suggest_smart_replies_response.smart_reply_answers:
print("Smart Reply: {}".format(answer.reply))
print("Answer Record: {}".format(answer.answer_record))
return response
# [END dialogflow_analyze_content_text]
| googleapis/python-dialogflow | samples/snippets/participant_management.py | Python | apache-2.0 | 4,681 |
"""
WSGI config for jmp project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "jmp.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jmp.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| exarch/massive-sansa | jmp/jmp/wsgi.py | Python | agpl-3.0 | 1,410 |
# -*- coding: utf-8 -*-
###############################################################################
#
# SearchUsers
# Allows you to search for users by name.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class SearchUsers(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the SearchUsers Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(SearchUsers, self).__init__(temboo_session, '/Library/Instagram/SearchUsers')
def new_input_set(self):
return SearchUsersInputSet()
def _make_result_set(self, result, path):
return SearchUsersResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return SearchUsersChoreographyExecution(session, exec_id, path)
class SearchUsersInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the SearchUsers
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((conditional, string) The access token retrieved during the OAuth 2.0 process. Required unless you provide the ClientID.)
"""
super(SearchUsersInputSet, self)._set_input('AccessToken', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Instagram after registering your application. Required unless you provide an AccessToken.)
"""
super(SearchUsersInputSet, self)._set_input('ClientID', value)
def set_Count(self, value):
"""
Set the value of the Count input for this Choreo. ((optional, integer) The number of results to return.)
"""
super(SearchUsersInputSet, self)._set_input('Count', value)
def set_Query(self, value):
"""
Set the value of the Query input for this Choreo. ((required, string) The query string to use for the user search.)
"""
super(SearchUsersInputSet, self)._set_input('Query', value)
class SearchUsersResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the SearchUsers Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Instagram.)
"""
return self._output.get('Response', None)
class SearchUsersChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return SearchUsersResultSet(response, path)
| jordanemedlock/psychtruths | temboo/core/Library/Instagram/SearchUsers.py | Python | apache-2.0 | 3,764 |
from __future__ import print_function
from sympy import symbols, sin, cos
from galgebra.ga import Ga
from galgebra.printer import Format, xpdf, Eprint, Print_Function, Get_Program
def main():
Print_Function()
(a, b, c) = abc = symbols('a,b,c',real=True)
(o3d, ea, eb, ec) = Ga.build('e_a e_b e_c', g=[1, 1, 1], coords=abc)
grad = o3d.grad
x = symbols('x',real=True)
    A = o3d.lt([[x*a*c**2, x**2*a*b*c, x**2*a**3*b**5],
                [x**3*a**2*b*c, x**4*a*b**2*c**5, 5*x**4*a*b**2*c],
                [x**4*a*b**2*c**4, 4*x**4*a*b**2*c**2, 4*x**4*a**5*b**2*c]])
print('A =',A)
v = a*ea+b*eb+c*ec
print('v =',v)
f = v|A(v)
print(r'%f = v\cdot \f{A}{v} =',f)
(grad * f).Fmt(3,r'%\nabla f')
Av = A(v)
print(r'%\f{A}{v} =', Av)
(grad * Av).Fmt(3,r'%\nabla \f{A}{v}')
return
def dummy():
return
if __name__ == "__main__":
#Eprint()
Format()
Get_Program()
main()
# xpdf()
xpdf(pdfprog=None)
| arsenovic/galgebra | examples/LaTeX/diffeq_sys.py | Python | bsd-3-clause | 989 |
from collections import defaultdict
import logging
from django.http import HttpResponse
from typepad import TypePadObject
from typepadapp.models.users import User
from makeaface.models import Favoriteface, Lastface
from makeaface.views import oops
log = logging.getLogger(__name__)
def all_items(source):
"""Generates all items of a TypePad API List, traversing through subsequent
pages as necessary."""
i = 1
log.debug("Should be %d results", source.total_results)
while True:
page = source.filter(start_index=i, max_results=50)
log.debug("Yay page starting at %d is %r", i, page._location)
i += 50
if len(page):
log.debug("Yay %d items in page (first is %r)", len(page), page[0])
for item in page:
yield item
else:
log.debug("Whee returning since there were no items in page?")
return
@oops
def backface(request):
TypePadObject.batch_requests = False
faces = list(all_items(request.group.events))
# Get all the faces.
author_faces = defaultdict(list)
for newface in faces:
if 'tag:api.typepad.com,2009:NewAsset' not in newface.verbs:
continue
face = newface.object
if face is None:
continue
au = face.author.xid
author_faces[au].append(face)
# Put each author's faces in published order (though they're probably already in that order).
for author, au_faces in author_faces.items():
author_faces[author] = sorted(au_faces, key=lambda x: x.published)
for author, au_faces in author_faces.items():
# Missing Lastface? Add one in.
try:
Lastface.objects.get(owner=author)
except Lastface.DoesNotExist:
# OHNOES MISSING LASTFACE
log.info("Filling in %r's Lastface", au_faces[-1].author.display_name)
au_face = au_faces[-1]
Lastface(owner=author, face=au_face.xid,
created=au_face.published).save()
# Go through the author's favorites, filling in any missing Favoritefaces.
events = User.get_by_url_id(author).events.filter(by_group=request.group.xid)
for fav in all_items(events):
if 'tag:api.typepad.com,2009:AddedFavorite' not in fav.verbs:
continue
# Is there a Favoriteface for that?
favoriter = fav.actor.xid
try:
Favoriteface.objects.get(favoriter=favoriter,
favorited=fav.object.xid)
except Favoriteface.DoesNotExist:
# NO FAVFACE OHNOES
log.info("Filling in %r's Favoriteface for %r", au_faces[-1].author.display_name, fav.object.xid)
when = fav.published
au_faces_before = [x for x in au_faces if x.published < when]
if not au_faces_before:
continue
au_face = au_faces_before[-1]
favface = Favoriteface(favoriter=favoriter, favorited=fav.object.xid)
favface.lastface = au_face.xid
favface.created = fav.published
favface.save()
return HttpResponse('OK', content_type='text/plain')
| markpasc/make-a-face | makeaface/makeaface/backface.py | Python | mit | 3,250 |
#!/usr/bin/python
import os
import koam
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class KoamMainWindow(QMainWindow):
def __init__(self, koamwidget, controller, parent=None):
QMainWindow.__init__(self, parent)
self.setCentralWidget(koamwidget)
self.controller = controller
koamwidget.msg("Startup")
self.resize(850,256)
self.setWindowTitle("koam on " + os.uname()[1])
self.show()
def closeEvent(self, event):
self.controller.stopAll()
event.accept()
| lmiphay/gentoo-koam | koam/mainwindow.py | Python | gpl-2.0 | 545 |
from django import forms
from .models import Reimbursement
class ReimbursementForm(forms.ModelForm):
class Meta:
model = Reimbursement
# Add CSS class 'form-control' to all fields to allow styling
widgets = {
'explanation': forms.TextInput(attrs={'class': 'form-control'}),
'amount': forms.NumberInput(attrs={'class': 'form-control'}),
'receipt': forms.ClearableFileInput(attrs={'class': 'form-control'}),
'purchase_date': forms.TextInput(attrs={'class': 'form-control date-picker'}),
}
fields = [
"explanation",
"amount",
"receipt",
"purchase_date"
]
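# Hedged usage sketch (not part of the original module): it assumes the form is
# bound inside a Django view handling a POST with an uploaded receipt;
# ``request`` is the usual HttpRequest. The helper is never called here.
def _example_handle_submission(request):
    form = ReimbursementForm(request.POST, request.FILES)
    if form.is_valid():
        return form.save()
    return None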
| amacnair/team-toolkit | clubbr/reimbursement/forms.py | Python | mit | 702 |
import random
"""
Recieve a hello command
"""
def recv(channel, person, arguments):
exclaim_responses = ["hey", "hi", "hello"]
question_responses = ["sup", "how are you", "how's it going"]
responses = []
punct = ""
if (random.choice([1, 2]) == 1):
punct = "!"
responses = exclaim_responses
else:
punct = "?"
responses = question_responses
response = "%s %s%s" %(random.choice(responses), person.name, punct)
channel.message(response)
| SkylarKelty/pyirc | src/commands/hello.py | Python | mit | 454 |
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from autobahn.twisted.websocket import WebSocketClientProtocol, \
WebSocketClientFactory
from twisted.internet.defer import Deferred, inlineCallbacks
def sleep(delay):
d = Deferred()
reactor.callLater(delay, d.callback, None)
return d
class MyClientProtocol(WebSocketClientProtocol):
def onConnect(self, response):
print("Server connected: {0}".format(response.peer))
@inlineCallbacks
def onOpen(self):
print("WebSocket connection open.")
# start sending messages every second ..
while True:
self.sendMessage(u"Hello, world!".encode('utf8'))
self.sendMessage(b"\x00\x01\x03\x04", isBinary=True)
yield sleep(1)
def onMessage(self, payload, isBinary):
if isBinary:
print("Binary message received: {0} bytes".format(len(payload)))
else:
print("Text message received: {0}".format(payload.decode('utf8')))
def onClose(self, wasClean, code, reason):
print("WebSocket connection closed: {0}".format(reason))
if __name__ == '__main__':
import sys
from twisted.python import log
from twisted.internet import reactor
log.startLogging(sys.stdout)
factory = WebSocketClientFactory(u"ws://127.0.0.1:9000")
factory.protocol = MyClientProtocol
reactor.connectTCP("127.0.0.1", 9000, factory)
reactor.run()
| meejah/AutobahnPython | examples/twisted/websocket/echo/client_coroutines.py | Python | mit | 2,668 |