text (string) | meta (dict)
---|---|
class Formatter(object):
"""
Base class for all formatter classes.
A formatter is an extension point (variation point) for the runner logic.
A formatter is called while processing model elements.
Processing Logic (simplified, without ScenarioOutline and skip logic)::
for feature in runner.features:
formatter = get_formatter(...)
formatter.uri(feature.filename)
formatter.feature(feature)
for scenario in feature.scenarios:
formatter.scenario(scenario)
for step in scenario.all_steps:
formatter.step(step)
step_match = step_registry.find_match(step)
formatter.match(step_match)
if step_match:
step_match.run()
else:
step.status = "undefined"
formatter.result(step.status)
# -- FEATURE-END
formatter.close()
"""
name = None
description = None
def __init__(self, stream, config):
self.stream = stream
self.config = config
def uri(self, uri):
"""
Called before processing a file (normally a feature file).
:param uri: URI or filename (as string).
"""
pass
def feature(self, feature):
"""
Called before a feature is executed.
:param feature: Feature object (as :class:`behave.model.Feature`)
"""
pass
def background(self, background):
"""
Called when a (Feature) Background is provided.
        Called after :meth:`feature()` is called.
Called before processing any scenarios or scenario outlines.
:param background: Background object (as
:class:`behave.model.Background`)
"""
pass
def scenario(self, scenario):
"""
Called before a scenario is executed (or an example of
ScenarioOutline).
:param scenario: Scenario object (as :class:`behave.model.Scenario`)
"""
pass
def scenario_outline(self, outline):
pass
def examples(self, examples):
pass
def step(self, step):
"""
Called before a step is executed (and matched).
:param step: Step object (as :class:`behave.model.Step`)
"""
def match(self, match):
"""
Called when a step was matched against its step implementation.
:param match: Registered step (as Match), undefined step (as NoMatch).
"""
pass
def result(self, step_result):
"""
Called after processing a step (when the step result is known).
:param step_result: Step result (as string-enum).
"""
pass
def eof(self):
"""
Called after processing a feature (or a feature file).
"""
pass
def close(self):
"""
Called before the formatter is no longer used (stream/io
compatibility).
"""
pass
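# A minimal sketch of a custom formatter built on the extension point above,
# mirroring the call sequence described in the Formatter docstring. The class
# name, "counting" metadata and summary format are illustrative, not part of
# behave itself; result() receives whatever status string the runner passes.
class CountingFormatter(Formatter):
    """Tallies step results and writes a summary when the run closes."""
    name = "counting"
    description = "Count step results per test run"
    def __init__(self, stream, config):
        super(CountingFormatter, self).__init__(stream, config)
        self.counts = {}
    def result(self, step_result):
        # step_result is the status string handed over by the runner
        self.counts[step_result] = self.counts.get(step_result, 0) + 1
    def close(self):
        for status, count in sorted(self.counts.items()):
            self.stream.write("%s: %d\n" % (status, count))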
| {
"content_hash": "fb7eae72e1e3803b0d25aa89a5f23948",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 79,
"avg_line_length": 28.19090909090909,
"alnum_prop": 0.5524024508223154,
"repo_name": "tokunbo/behave-parallel",
"id": "d7c61cfc1e0e6a480427fadfcdae3e9e697df8b1",
"size": "3127",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "behave/formatter/base.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "8799"
},
{
"name": "Python",
"bytes": "364307"
},
{
"name": "Shell",
"bytes": "272"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.fields
import wagtail.wagtailadmin.taggable
import modelcluster.fields
import modelcluster.contrib.taggit
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('wagtailcore', '0019_verbose_names_cleanup'),
('home', '0002_create_main'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='NewsEvent',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('date_start', models.DateField(verbose_name='Start date')),
('date_end', models.DateField(help_text='Not required if event is on a single day', null=True, verbose_name='End date', blank=True)),
('time_start', models.TimeField(null=True, verbose_name='Start time', blank=True)),
('time_end', models.TimeField(null=True, verbose_name='End time', blank=True)),
('location', models.CharField(max_length=255)),
('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('province', models.CharField(blank=True, max_length=255, choices=[('easterncape', 'Eastern Cape'), ('freestate', 'Free State'), ('gauteng', 'Gauteng'), ('kwazulunatal', 'Kwazulu Natal'), ('limpopo', 'Limpopo'), ('mpumalanga', 'Mpumalanga'), ('northwest', 'North West'), ('northerncape', 'Northern Cape'), ('westerncape', 'Western Cape')])),
],
options={
'abstract': False,
},
bases=('wagtailcore.page', wagtail.wagtailadmin.taggable.TagSearchable),
),
migrations.CreateModel(
name='NewsEventTag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content_object', modelcluster.fields.ParentalKey(related_name='tagged_items', to='home.NewsEvent')),
('tag', models.ForeignKey(related_name='home_newseventtag_items', to='taggit.Tag')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='newsevent',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(to='taggit.Tag', through='home.NewsEventTag', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'),
),
]
| {
"content_hash": "bf4fb4d0a1336499c9c4a3f7219f46e5",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 357,
"avg_line_length": 47.903225806451616,
"alnum_prop": 0.5892255892255892,
"repo_name": "makhadzi/nds",
"id": "ef108373eccf11713dfa7153f0d62b6e5c361515",
"size": "2994",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "home/migrations/0003_auto_20151024_0827.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "4388"
},
{
"name": "Python",
"bytes": "14395"
}
],
"symlink_target": ""
} |
"""
film.py: a simple tool to manage your movies review
Simon Rozet, http://atonie.org/
@@ :
- manage directors and writers
- manage actors
- handle non IMDB uri
- markdown support in comment
Requires download and import of Python imdb library from
http://imdbpy.sourceforge.net/ - (warning: installation
will trigger automatic installation of several other packages)
--
Usage:
film.py whoami "John Doe <[email protected]>"
Initialize the store and set your name and email.
film.py whoami
Tell you who you are
film.py http://www.imdb.com/title/tt0105236/
Review the movie "Reservoir Dogs"
"""
import datetime, os, sys, re, time
from rdflib import ConjunctiveGraph, Namespace, Literal
from rdflib.store import NO_STORE, VALID_STORE
import xml.etree.ElementTree as ET
from tempfile import mktemp
try:
import imdb
except ImportError:
imdb = None
from rdflib import BNode, Graph, URIRef, Literal, Namespace, RDF
from rdflib.namespace import FOAF, DC
import csv
import pprint
storefn = os.path.dirname(os.path.realpath(__file__)) + '/output/streetlight-stockport.rdf'
#storefn = '/home/simon/codes/film.dev/movies.n3'
storeuri = 'file://'+storefn
title = 'Movies viewed by %s'
r_who = re.compile('^(.*?) <([a-z0-9_-]+(\.[a-z0-9_-]+)*@[a-z0-9_-]+(\.[a-z0-9_-]+)+)>$')
OS = Namespace('http://data.ordnancesurvey.co.uk/ontology/admingeo/')
SPACIAL = Namespace('http://data.ordnancesurvey.co.uk/ontology/spatialrelations/')
RDFS = Namespace('http://www.w3.org/2000/01/rdf-schema#')
GEO = Namespace('http://www.w3.org/2003/01/geo/wgs84_pos#')
VCARD = Namespace('http://www.w3.org/2006/vcard/ns#')
SCHEMA = Namespace('http://schema.org/')
sl = Namespace('http://data.gmdsp.org.uk/id/stockport/street-lights/')
streetdef = Namespace('http://data.gmdsp.org.uk/def/council/streetlighting/')
class Store:
def __init__(self):
self.graph = Graph(identifier=URIRef('http://www.google.com'))
rt = self.graph.open(storeuri, create=False)
        if rt is None:
# There is no underlying Sleepycat infrastructure, create it
self.graph.open(storeuri, create=True)
else:
assert rt == VALID_STORE, 'The underlying store is corrupt'
self.graph.bind('os', OS)
self.graph.bind('rdfs', RDFS)
self.graph.bind('geo', GEO)
self.graph.bind('schema', SCHEMA)
self.graph.bind('spacial', SPACIAL)
self.graph.bind('streetlamp', streetdef)
def save(self):
print storeuri
self.graph.serialize(storeuri, format='pretty-xml')
def new_streetlight(self, height, easting, northing, street, objectId, lamptype, watt):
streetlamp = sl[objectId]
self.graph.add((streetlamp, RDF.type, URIRef('http://data.gmdsp.org.uk/def/council/streetlighting/Streetlight')))
self.graph.add((streetlamp, RDFS['label'], Literal(objectId)))
if height != 0:
self.graph.add((streetlamp, streetdef['columnHeight'], Literal(height)))
self.graph.add((streetlamp, SPACIAL['easting'], Literal(easting)))
self.graph.add((streetlamp, SPACIAL['northing'], Literal(northing)))
self.graph.add((streetlamp, streetdef['lampType'], Literal(lamptype)))
if watt != 0:
self.graph.add((streetlamp, streetdef['wattage'], Literal(watt)))
        self.graph.add((streetlamp, VCARD["hasAddress"], self.new_address(easting, northing, street)))
def new_address(self, easting, northing, street):
vcard = sl["address/"+street.replace(" ", "-").replace(",", "")]
self.graph.add((vcard, RDF.type, VCARD["Location"]))
self.graph.add((vcard, RDFS['label'], Literal(street)))
self.graph.add((vcard, VCARD['street-address'], Literal(street)))
return vcard
def help():
print(__doc__.split('--')[1])
def main(argv=None):
s = Store()
tree = ET.parse('./Data/streetlighting.xml')
root = tree.getroot()
for child in root:
#for each street light
for c in child:
            #for each attribute in street light
try:
print " -------- "
print c[0].text
print c[1].text
print c[2].text
print c[3].text
print c[4].text
print c[5].text
print c[6].text
print c[7].text
print c[8].text
print c[9].text
print c[10].text
s.new_streetlight(0, c[8].text, c[9].text, c[3].text, c[0].text, c[1].text, 0)
except:
print "Unexpected error:", sys.exc_info()[0]
s.save()
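# Not part of the original script: a self-contained sketch of the same
# triple-building pattern used in Store.new_streetlight() above, kept in an
# in-memory graph and never called. The lamp id and attribute values are
# made-up sample data.
def _sketch_build_one_lamp():
    g = Graph()
    g.bind('streetlamp', streetdef)
    lamp = sl['12345']
    g.add((lamp, RDF.type, streetdef['Streetlight']))
    g.add((lamp, RDFS['label'], Literal('12345')))
    g.add((lamp, streetdef['lampType'], Literal('SOX')))
    return g.serialize(format='pretty-xml')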
if __name__ == '__main__':
    main()
| {
"content_hash": "f1c06d1f1da3ff698f7c6897260b65cb",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 121,
"avg_line_length": 35.67424242424242,
"alnum_prop": 0.6211509874708006,
"repo_name": "GMDSP-Linked-Data/RDF-work-in-progress",
"id": "578f4b6e75ea8f21249f76ca425fe6771995c7be",
"size": "4731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stockport/StockPortStreetLights.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "111180"
}
],
"symlink_target": ""
} |
try:
from lxml import etree
from lxml.etree import ETCompatXMLParser as parser
def xml_fromstring(argument):
return etree.fromstring(argument, parser=parser())
def xml_frompath(path):
return etree.parse(path, parser=parser()).getroot()
except ImportError:
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
def xml_fromstring(argument):
return etree.fromstring(argument)
def xml_frompath(path):
return etree.parse(path).getroot()
from collections import defaultdict, OrderedDict
from contextlib import closing
from itertools import chain
import sys
import re
if sys.version_info >= (3, 0):
from urllib.request import urlopen
else:
from urllib2 import urlopen
_ARRAY_RE = re.compile(r'\[\d+\]')
class Spec(object):
API = 'https://cvs.khronos.org/svn/repos/ogl/trunk/doc/registry/public/api/'
NAME = ''
def __init__(self, root):
self.root = root
self._types = None
self._groups = None
self._enums = None
self._commands = None
self._features = None
self._extensions = None
@classmethod
def from_url(cls, url):
with closing(urlopen(url)) as f:
raw = f.read()
return cls(xml_fromstring(raw))
@classmethod
def from_svn(cls):
return cls.from_url(cls.API + cls.NAME + '.xml')
@classmethod
def fromstring(cls, string):
return cls(xml_fromstring(string))
@classmethod
def from_file(cls, path):
return cls(xml_frompath(path))
@property
def comment(self):
return self.root.find('comment').text
@property
def types(self):
if self._types is None:
self._types = [Type(element) for element in
self.root.find('types').iter('type')]
return self._types
@property
def groups(self):
if self._groups is None:
self._groups = dict([(element.attrib['name'], Group(element))
for element in self.root.find('groups')])
return self._groups
@property
def commands(self):
if self._commands is None:
self._commands = dict([(element.find('proto').find('name').text,
Command(element, self))
for element in self.root.find('commands')])
return self._commands
@property
def enums(self):
if self._enums is not None:
return self._enums
self._enums = dict()
for element in self.root.iter('enums'):
namespace = element.attrib['namespace']
type_ = element.get('type')
group = element.get('group')
vendor = element.get('vendor')
comment = element.get('comment', '')
for enum in element:
if enum.tag == 'unused':
continue
assert enum.tag == 'enum'
name = enum.attrib['name']
self._enums[name] = Enum(name, enum.attrib['value'], namespace,
type_, group, vendor, comment)
return self._enums
@property
def features(self):
if self._features is not None:
return self._features
self._features = defaultdict(OrderedDict)
for element in self.root.iter('feature'):
num = tuple(map(int, element.attrib['number'].split('.')))
self._features[element.attrib['api']][num] = Feature(element, self)
return self._features
@property
def extensions(self):
if self._extensions is not None:
return self._extensions
self._extensions = defaultdict(dict)
for element in self.root.find('extensions'):
for api in element.attrib['supported'].split('|'):
self._extensions[api][element.attrib['name']] = Extension(element, self)
return self._extensions
class Type(object):
def __init__(self, element):
apientry = element.find('apientry')
if apientry is not None:
apientry.text = 'APIENTRY'
self.raw = ''.join(element.itertext())
self.api = element.get('api')
self.name = element.get('name')
@property
def is_preprocessor(self):
return '#' in self.raw
class Group(object):
def __init__(self, element):
self.name = element.attrib['name']
self.enums = [enum.attrib['name'] for enum in element]
class Enum(object):
def __init__(self, name, value, namespace, type_=None,
group=None, vendor=None, comment=''):
self.name = name
self.value = value
self.namespace = namespace
self.type = type_
self.group = group
self.vendor = vendor
self.comment = comment
def __hash__(self):
return hash(self.name)
def __str__(self):
return self.name
__repr__ = __str__
class Command(object):
def __init__(self, element, spec):
self.proto = Proto(element.find('proto'))
self.params = [Param(ele, spec) for ele in element.iter('param')]
def __hash__(self):
return hash(self.proto.name)
def __str__(self):
return '{self.proto.name}'.format(self=self)
__repr__ = __str__
class Proto(object):
def __init__(self, element):
self.name = element.find('name').text
self.ret = OGLType(element)
def __str__(self):
return '{self.ret} {self.name}'.format(self=self)
class Param(object):
def __init__(self, element, spec):
self.group = element.get('group')
self.type = OGLType(element)
self.name = element.find('name').text
def __str__(self):
return '{0!r} {1}'.format(self.type, self.name)
class OGLType(object):
def __init__(self, element):
text = ''.join(element.itertext())
self.type = (text.replace('const', '').replace('unsigned', '')
.replace('struct', '').strip().split(None, 1)[0]
if element.find('ptype') is None else element.find('ptype').text)
# 0 if no pointer, 1 if *, 2 if **
self.is_pointer = 0 if text is None else text.count('*')
# it can be a pointer to an array, or just an array
self.is_pointer += len(_ARRAY_RE.findall(text))
self.is_const = False if text is None else 'const' in text
self.is_unsigned = False if text is None else 'unsigned' in text
if 'struct' in text and 'struct' not in self.type:
self.type = 'struct {}'.format(self.type)
def to_d(self):
if self.is_pointer > 1 and self.is_const:
s = 'const({}{}*)'.format('u' if self.is_unsigned else '', self.type)
s += '*' * (self.is_pointer - 1)
else:
t = '{}{}'.format('u' if self.is_unsigned else '', self.type)
s = 'const({})'.format(t) if self.is_const else t
s += '*' * self.is_pointer
return s.replace('struct ', '')
to_volt = to_d
def to_c(self):
ut = 'unsigned {}'.format(self.type) if self.is_unsigned else self.type
s = '{}const {}'.format('unsigned ' if self.is_unsigned else '', self.type) \
if self.is_const else ut
s += '*' * self.is_pointer
return s
__str__ = to_d
__repr__ = __str__
class Extension(object):
def __init__(self, element, spec):
self.name = element.attrib['name']
self.require = []
for required in chain.from_iterable(element.findall('require')):
if required.tag == 'type':
continue
data = {'enum': spec.enums, 'command': spec.commands}[required.tag]
try:
self.require.append(data[required.attrib['name']])
except KeyError:
pass # TODO
@property
def enums(self):
for r in self.require:
if isinstance(r, Enum):
yield r
@property
def functions(self):
for r in self.require:
if isinstance(r, Command):
yield r
def __hash__(self):
return hash(self.name)
def __str__(self):
return self.name
__repr__ = __str__
class Feature(Extension):
def __init__(self, element, spec):
Extension.__init__(self, element, spec)
self.spec = spec
# not every spec has a ._remove member, but there shouldn't be a remove
# tag without that member, if there is, blame me!
for removed in chain.from_iterable(element.findall('remove')):
if removed.tag == 'type':
continue
data = {'enum': spec.enums, 'command': spec.commands}[removed.tag]
try:
spec._remove.add(data[removed.attrib['name']])
except KeyError:
pass # TODO
self.number = tuple(map(int, element.attrib['number'].split('.')))
self.api = element.attrib['api']
def __str__(self):
return '{self.name}@{self.number!r}'.format(self=self)
@property
def enums(self):
for enum in super(Feature, self).enums:
if enum not in getattr(self.spec, 'removed', []):
yield enum
@property
def functions(self):
for func in super(Feature, self).functions:
if func not in getattr(self.spec, 'removed', []):
yield func
__repr__ = __str__
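# A hedged usage sketch for the Spec parser above. 'gl.xml' is a placeholder
# path to a local copy of the Khronos registry XML; the block only runs when
# this module is executed directly and is not part of the original file.
if __name__ == '__main__':
    spec = Spec.from_file('gl.xml')
    print(spec.comment)
    for name in sorted(spec.enums)[:5]:
        print('{0} = {1}'.format(name, spec.enums[name].value))
    for api, versions in spec.features.items():
        # versions maps (major, minor) tuples to Feature objects
        print('{0}: latest {1}'.format(api, sorted(versions)[-1]))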
| {
"content_hash": "410acb03d747b08ccd25e2a9d4c63ecd",
"timestamp": "",
"source": "github",
"line_count": 331,
"max_line_length": 88,
"avg_line_length": 28.930513595166165,
"alnum_prop": 0.5546157059314955,
"repo_name": "valeriog-crytek/glad",
"id": "35cba2b2b819fecfb5ed2f50888b2fb6de108e32",
"size": "9576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glad/parse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1661"
},
{
"name": "C++",
"bytes": "2445"
},
{
"name": "CMake",
"bytes": "1002"
},
{
"name": "Python",
"bytes": "93161"
},
{
"name": "Shell",
"bytes": "4406"
}
],
"symlink_target": ""
} |
"""
Copyright 2015 Josh Conant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from setuptools import setup
with open('README.rst') as f:
readme = f.read()
setup(
name='fibserv',
version='0.1',
author='Josh Conant',
author_email='[email protected]',
url='http://github.com/insequent/fibserv',
license='Apache License, Version 2.0',
description='A simple web service with swappable content and backend',
long_description=readme,
packages=['fibserv',
'fibserv.content',
'fibserv.engines'],
data_files=[('etc/fibserv', ['etc/fibserv/fibserv.conf'])],
classifiers=(
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Testing'
)
)
| {
"content_hash": "ce5354fd117f15c8a21d2d98cd264f20",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 74,
"avg_line_length": 32.62222222222222,
"alnum_prop": 0.6791553133514986,
"repo_name": "insequent/fibserv",
"id": "2f4767ef33de5afb54412e3444d60ec104bc3d0b",
"size": "1513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "230"
},
{
"name": "Python",
"bytes": "11902"
}
],
"symlink_target": ""
} |
import json
from datetime import datetime
from opentreewebapputil import (get_opentree_services_method_urls,
fetch_current_TNRS_context_names,
get_data_deposit_message,)
import bleach
from bleach.sanitizer import Cleaner
# Define a consistent cleaner to sanitize user input. We need a few
# elements that are common in our markdown but missing from the Bleach
# whitelist.
# N.B. HTML comments are stripped by default. Non-allowed tags will appear
# "naked" in output, so we can identify any bad actors.
common_version_notes_tags = [u'p', u'br',
u'h1', u'h2', u'h3', u'h4', u'h5', u'h6',
u'table', u'tbody', u'tr', u'td', u'th',
]
ot_markdown_tags = list(set( bleach.sanitizer.ALLOWED_TAGS + common_version_notes_tags))
common_version_notes_attributes={u'table': [u'class'],
}
ot_markdown_attributes = bleach.sanitizer.ALLOWED_ATTRIBUTES.copy()
ot_markdown_attributes.update(common_version_notes_attributes)
ot_cleaner = Cleaner(tags=ot_markdown_tags, attributes=ot_markdown_attributes)
### required - do not delete
def user(): return dict(form=auth())
def download(): return response.download(request,db)
def call(): return service()
### end requires
def index():
# bump to first About page in menu
redirect(URL('about', 'open-tree-of-life'))
# try grabbing shared data just once
default_view_dict = get_opentree_services_method_urls(request)
default_view_dict['taxonSearchContextNames'] = fetch_current_TNRS_context_names(request)
# NOTE that web2py should attempt to convert hyphens (dashes) in URLs into underscores
def open_tree_of_life():
# URL is /opentree/about/open-tree-of-life
return default_view_dict
def privacy_policy():
# URL is /opentree/about/privacy-policy
return default_view_dict
def the_synthetic_tree():
# URL is /opentree/about/the-synthetic-tree
return default_view_dict
def the_source_tree_manager():
# URL is /opentree/about/the-source-tree-manager
return default_view_dict
def developer_resources():
return default_view_dict
def credits():
return default_view_dict
def licenses():
return default_view_dict
def materials_and_methods():
return default_view_dict
def references():
view_dict = default_view_dict.copy()
view_dict['contributing_studies'] = fetch_current_synthesis_source_data()
return view_dict
def progress():
view_dict = default_view_dict.copy()
# Load each JSON document into a list or dict, so we can compile daily entries.
# NB: For simplicity and uniformity, filter these to use only simple dates
# with no time component!
# EXAMPLE u'2015-01-16T23Z' ==> u'2015-01-16'
raw = json.loads(fetch_local_synthesis_stats() or '{}')
    # Pre-sort its raw date strings, so we can discard all but the latest info
# for each date (e.g. we might toss the morning stats but keep the evening).
sorted_dates = sorted(raw.keys(), reverse=False)
synth = {}
for d in sorted_dates:
raw_data = raw[d]
simple_date = _force_to_simple_date_string(d)
synth[ simple_date ] = raw_data
# this should overwrite data from earlier in the day
# phylesystem stats also have mixed date formats
warnings = set()
phylesystem = {}
raw = json.loads(fetch_local_phylesystem_stats() or '{}')
sorted_dates = sorted(raw.keys(), reverse=False)
if len(sorted_dates) == 0:
warnings.add('No phylesystem data was found! Most stats below are probably incomplete.')
else:
# N.B. We only want to show monthly changes in phylesystem! For each month
# in this range, include just one record, ideally
# - the first day of the month (if found), OR
# - the nearest prior OR following record; if equally close, use prior
# Use the actual date found for this data, so that glitches in the "monthly"
# reporting of this data are apparent to the user.
# use a recurrence rule to find starting dates for each month for which we have data
import re
from dateutil import rrule
def string2date(s):
s = _force_to_simple_date_string(s)
return datetime.strptime(s, '%Y-%m-%d') # e.g. '2014-08-15'
first_date_string = sorted_dates[ 0 ]
# shift the first date found to the first of that month, e.g. '2014-08-15' => '2014-08-01'
first_date_string = re.sub(r'\d+$', '01', first_date_string)
first_date = string2date(first_date_string)
last_date_string = sorted_dates[ len(sorted_dates)-1 ]
last_date = string2date(last_date_string)
monthly_milestones = list(rrule.rrule(rrule.MONTHLY, dtstart=first_date, until=last_date))
def nearest_phylesystem_datestring(target_date):
# find the closest timestamp and return the corresponding date-string
from bisect import bisect_left
if (isinstance(target_date, str)):
try:
target_date = string2date(target_date)
except:
raise ValueError("Expected a date-string in the form '2014-08-15'!")
nearest_datestring = None
# build a list of timestamps from the pre-sorted date strings
sorted_timestamps = [string2date(ds) for ds in sorted_dates]
prior_position = bisect_left(sorted_timestamps, target_date) - 1
following_position = min(prior_position+1, len(sorted_timestamps)-1)
# fetch and compare the timestamps before and after; which is closer?
prior_timestamp = sorted_timestamps[prior_position]
following_timestamp = sorted_timestamps[following_position]
if abs(target_date - prior_timestamp) <= abs(target_date - following_timestamp):
# in the event of a tie, use the prior date
prior_datestring = sorted_dates[prior_position]
return prior_datestring
else:
following_datestring = sorted_dates[following_position]
return following_datestring
# adjust each "milestone" to the nearest date with phylesystem data, then remove duplicates
monthly_milestones = [nearest_phylesystem_datestring(m) for m in monthly_milestones]
monthly_milestones = sorted(list(set(monthly_milestones)))
for d in monthly_milestones:
raw_data = raw[d]
simple_date = _force_to_simple_date_string(d)
phylesystem[ simple_date ] = raw_data
# this should overwrite data from earlier in the day
# taxonomy stats should always use simple dates
ott = json.loads(fetch_local_ott_stats() or '[]')
# create some otu summary stats for each synthesis that we have info about...
by_date = {}
dates = set(synth.keys() + phylesystem.keys() + [ott_v.get('date') for ott_v in ott])
# Let's creep tallies up in our fake data, with starting values here
num_otu_in_ott = 0
num_phylo_otu_in_synth = 0
num_otu_in_studies = 0
num_otu_in_nominated_studies = 0
# Set initial (empty) values for synthesis and phylesystem stats; these will
# be "carried over" to a subsequent date that has no current data.
synth_v = {}
phyle_v = {}
for date in sorted(dates, reverse=False):
# carry over latest stats, if none found for this day
synth_v = synth.get(date, synth_v)
phyle_v = phylesystem.get(date, phyle_v)
# Note any taxonomy version released on this date, AND the version used
# in today's synthesis. (These are probably not the same)
ott_new_version_info = get_ott_version_info_by_date(date)
synth_ott_version = synth_v.get('OTT_version')
if synth_ott_version:
# If a draft was provided (eg, "ott2.9draft8"), truncate this
# to specify the main version (in this case, "ott2.9")
synth_ott_version = synth_ott_version.split('draft')[0]
ott_synth_version_info = get_ott_version_info(synth_ott_version)
if ott_synth_version_info is None:
warnings.add('specified version {v} of OTT not found!'.format(v=synth_ott_version))
else:
if synth_v:
# this should have specified an OTT version
warnings.add('No specified version of OTT for some synthesis releases; guessing OTT versions based on synth-date!')
else:
# No synthesis has occurred yet; pick up the closest prior taxonomy version
pass
ott_synth_version_info = get_latest_ott_version_info_by_date(date)
if ott_synth_version_info is None:
warnings.add('No version of OTT found on-or-before date {d}!'.format(d=date))
if ott_synth_version_info is None:
ott_synth_version_info = {}
warnings.add('OTT version info not found!')
elif ott is None:
warnings.add('OTT info not found!')
else:
if ott_synth_version_info is None:
warnings.add('OTT info for version {v} not found!'.format(v=ott_synth_version_info.get('version')))
else:
num_otu_in_ott = ott_synth_version_info.get('visible_taxon_count', 0)
# N.B. Some days (esp. early in history) might not have any synthesis data,
# or incomplete data (if synthesis was prior to gathering detailed stats)
if synth_v: # ignore empty dict (no data found)
if synth_v.get('total_OTU_count') is None:
#warnings.add('{d}: "total_OTU_count" info not found!'.format(d=date))
num_phylo_otu_in_synth = None
else:
num_phylo_otu_in_synth = synth_v.get('total_OTU_count')
if phyle_v: # ignore empty dict (no data found)
if phyle_v.get('unique_OTU_count') is None:
warnings.add('phylesystem.unique_OTU_count info not found!')
else:
num_otu_in_studies = phyle_v.get('unique_OTU_count')
if phyle_v.get('nominated_study_unique_OTU_count') is None:
warnings.add('phylesystem.nominated_study_unique_OTU_count info not found!')
else:
num_otu_in_nominated_studies = phyle_v.get('nominated_study_unique_OTU_count')
#print( date, ott_synth_version_info['date'], (ott_synth_version_info['date'] == date and "true" or "false") )
#print( date, (synth.get(date, None) and "true" or "false") )
by_date[date] = {'Unique OTUs in OTT': num_otu_in_ott,
'Unique OTUs in synthesis from studies': num_phylo_otu_in_synth,
'Unique OTUs in studies': num_otu_in_studies,
'Unique OTUs in nominated studies': num_otu_in_nominated_studies,
# TODO: Add pre-calculated stats where provided?
'Date has synthesis release': (synth.get(date, None) and "true" or "false"),
'Synthesis version released today': synth.get(date, None) and synth.get(date).get('version').encode("utf8") or '',
'Date has taxonomy version': (ott_new_version_info and "true" or "false"),
'Date has phylesystem info': (phylesystem.get(date, None) and "true" or "false"),
'OTT version released today': ott_new_version_info and ott_new_version_info.get('version','').encode("utf8") or '',
'OTT version used in synthesis': ott_synth_version_info.get('version','').encode("utf8"),
'Date': str(date)}
# sort by date (allowing for different date formats)
#dk = [(datetime.strptime(i, "%Y-%m-%d"), i) for i in by_date.keys() if i]
dk = []
for i in by_date.keys():
if i:
# remove any time (intra-day) component for uniform dates!
# EXAMPLE u'2015-01-16T23Z' ==> u'2015-01-16'
i = i.split('T')[0]
converted_date = datetime.strptime(i, "%Y-%m-%d")
dk.append((converted_date, i,))
dk.sort()
ks = [i[1] for i in dk]
# create the list of stat objects to return
stat_list = [by_date[i] for i in ks]
view_dict['otu_stats'] = stat_list
view_dict['warnings'] = list(warnings)
view_dict['warnings'].sort()
return view_dict
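# A standalone sketch (not called anywhere) of the bisect-based nearest-date
# lookup used by nearest_phylesystem_datestring() inside progress() above.
# The example dates in the docstring are made up; sorted_dates must be
# pre-sorted 'YYYY-MM-DD' strings, and ties prefer the earlier record.
def _nearest_datestring_sketch(sorted_dates, target):
    """e.g. _nearest_datestring_sketch(['2014-08-15', '2014-09-02',
    '2014-10-01'], '2014-09-20') returns '2014-10-01'."""
    from bisect import bisect_left
    timestamps = [datetime.strptime(d, '%Y-%m-%d') for d in sorted_dates]
    target = datetime.strptime(target, '%Y-%m-%d')
    prior = max(bisect_left(timestamps, target) - 1, 0)
    following = min(prior + 1, len(timestamps) - 1)
    if abs(target - timestamps[prior]) <= abs(target - timestamps[following]):
        return sorted_dates[prior]
    return sorted_dates[following]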
def _force_to_simple_date_string( date_string ):
# remove any time (intra-day) component for uniform dates!
# EXAMPLE u'2015-01-16T23Z' ==> u'2015-01-16'
return date_string.split('T')[0]
def synthesis_release():
view_dict = default_view_dict.copy()
# Load each JSON document into a list or dict, so we can compile daily entries.
# NB: For simplicity and uniformity, filter these to use only simple dates
# with no time component!
# EXAMPLE u'2015-01-16T23Z' ==> u'2015-01-16'
raw = json.loads(fetch_local_synthesis_stats() or '{}')
    # Pre-sort its raw date strings, so we can discard all but the latest info
# for each date (e.g. we might toss the morning stats but keep the evening).
sorted_dates = sorted(raw.keys(), reverse=False)
synth = {}
for d in sorted_dates:
raw_data = raw[d]
simple_date = _force_to_simple_date_string(d)
synth[ simple_date ] = raw_data
# this should overwrite data from earlier in the day
if len(synth.keys()) == 0:
# report this error on the page
view_dict['release_version'] = 'NO RELEASES FOUND'
view_dict['synthesis_stats'] = synth
return view_dict
# Get date or version from URL, or bounce to the latest release by default
if len(request.args) == 0:
release_date = sorted(synth.keys(), reverse=False)[-1]
release_version = synth[release_date].get('version')
redirect(URL('opentree', 'about', 'synthesis_release',
vars={},
args=[release_version]))
synth_release_version = request.args[0]
view_dict['release_version'] = synth_release_version
view_dict['synthesis_stats'] = synth
# fetch and render Markdown release notes as HTML
import requests
from gluon.contrib.markdown.markdown2 import markdown
fetch_url = 'https://raw.githubusercontent.com/OpenTreeOfLife/germinator/master/doc/ot-synthesis-{v}.md'.format(v=synth_release_version)
try:
version_notes_response = requests.get(url=fetch_url).text
# N.B. We assume here that any hyperlinks have the usual Markdown braces!
version_notes_html = markdown(version_notes_response).encode('utf-8')
# scrub HTML output with bleach
version_notes_html = ot_cleaner.clean(version_notes_html)
except:
version_notes_html = None
view_dict['synthesis_release_notes'] = version_notes_html
return view_dict
def taxonomy_version():
view_dict = default_view_dict.copy()
# load taxonomy-version history and basic stats
ott = json.loads(fetch_local_ott_stats() or '[]')
if len(ott) == 0:
# report this error on the page
view_dict['taxonomy_version'] = 'NO VERSIONS FOUND'
view_dict['taxonomy_stats'] = ott
return view_dict
# Get OTT version from URL, or bounce to the latest version by default
if len(request.args) == 0:
# safer to sort by date-strings [yyyy-mm-dd] than version strings
sorted_ott = sorted(ott, key=lambda v: v['date'], reverse=False)
taxonomy_version = sorted_ott[-1].get('version')
redirect(URL('opentree', 'about', 'taxonomy_version',
vars={},
args=[taxonomy_version]))
taxo_version = request.args[0]
view_dict['taxonomy_version'] = taxo_version
view_dict['taxonomy_stats'] = ott
# fetch and render Markdown release notes as HTML
import requests
from gluon.contrib.markdown.markdown2 import markdown
fetch_url = 'https://raw.githubusercontent.com/OpenTreeOfLife/reference-taxonomy/master/doc/{v}.md'.format(v=taxo_version)
try:
version_notes_response = requests.get(url=fetch_url).text
# N.B. We assume here that any hyperlinks have the usual Markdown braces!
version_notes_html = markdown(version_notes_response).encode('utf-8')
# scrub HTML output with bleach
version_notes_html = ot_cleaner.clean(version_notes_html)
except:
version_notes_html = None
view_dict['taxonomy_version_notes'] = version_notes_html
# List all synthesis releases that used this OTT version
synth = json.loads(fetch_local_synthesis_stats() or '{}')
related_releases = []
for date in synth:
synth_ott_version = synth[date]['OTT_version']
if synth_ott_version:
# If a draft was provided (eg, "ott2.9draft8"), truncate this
# to specify the main version (in this case, "ott2.9")
synth_ott_version = synth_ott_version.split('draft')[0]
if synth_ott_version == taxo_version:
related_releases.append(synth[date]['version'])
view_dict['related_synth_releases'] = related_releases
return view_dict
def fetch_local_synthesis_stats():
try:
stats = open("applications/%s/static/statistics/synthesis.json" % request.application).read().strip()
return stats
except Exception, e:
return None
def fetch_local_phylesystem_stats():
try:
stats = open("applications/%s/static/statistics/phylesystem.json" % request.application).read().strip()
return stats
except Exception, e:
return None
def fetch_local_ott_stats():
try:
stats = open("applications/%s/static/statistics/ott.json" % request.application).read().strip()
return stats
except:
return None
_sorted_ott_versions = None
def get_sorted_ott_versions():
global _sorted_ott_versions
if not _sorted_ott_versions:
_sorted_ott_versions = json.loads(fetch_local_ott_stats() or '[]')
# make sure these are sorted by date (chronological order)
_sorted_ott_versions.sort(key = lambda x: x.get('date'))
return _sorted_ott_versions
def get_ott_version_info(specified_version):
for version in get_sorted_ott_versions():
if version.get('version') == specified_version:
return version
return None
def get_ott_version_info_by_date(date):
for version in get_sorted_ott_versions():
try:
v_date = version.get('date')
except:
raise Exception('Missing OTT version date')
if v_date == date:
return version
return None
def get_latest_ott_version_info_by_date(date):
closest_previous_version = None
for version in get_sorted_ott_versions():
try:
#v_date = datetime.strptime(version.get('date'), "%Y-%m-%dT%HZ")
v_date = version.get('date')
except:
raise Exception('Missing OTT version date')
if v_date <= date:
closest_previous_version = version
if closest_previous_version is None:
raise Exception('No OTT version before this date: %s' % date)
return closest_previous_version
def fetch_current_synthesis_source_data():
json_headers = {
'content-type' : 'application/json',
'accept' : 'application/json',
}
try:
import requests
import json
method_dict = get_opentree_services_method_urls(request)
# fetch a list of all studies that contribute to synthesis
fetch_url = method_dict['getSynthesisSourceList_url']
if fetch_url.startswith('//'):
# Prepend scheme to a scheme-relative URL
fetch_url = "https:%s" % fetch_url
# as usual, this needs to be a POST (pass empty fetch_args)
source_list_response = requests.post(fetch_url, data=json.dumps({'include_source_list':True}), headers=json_headers)
source_data = source_list_response.json()
source_id_list = source_data.get('source_list', [ ])
source_id_map = source_data.get('source_id_map')
# split these source descriptions, which are in the form '{STUDY_ID_PREFIX}_{STUDY_NUMERIC_ID}_{TREE_ID}_{COMMIT_SHA}'
contributing_study_info = { } # store (unique) study IDs as keys, commit SHAs as values
for source_id in source_id_list:
source_details = source_id_map.get( source_id )
if 'taxonomy' in source_details:
continue
study_id = source_details.get('study_id')
# N.B. assume that all study IDs have a two-letter prefix!
tree_id = source_details.get('tree_id')
commit_SHA_in_synthesis = source_details.get('git_sha')
# N.B. assume that any listed study has been used!
if study_id in contributing_study_info.keys():
contributing_study_info[ study_id ]['tree_ids'].append( tree_id )
else:
contributing_study_info[ study_id ] = {
'tree_ids': [ tree_id, ],
'commit_SHA_in_synthesis': commit_SHA_in_synthesis
}
# fetch the oti metadata (esp. DOI and full reference text) for each
fetch_url = method_dict['findAllStudies_url']
if fetch_url.startswith('//'):
# Prepend scheme to a scheme-relative URL
fetch_url = "https:%s" % fetch_url
# as usual, this needs to be a POST (pass empty fetch_args)
study_metadata_response = requests.post(fetch_url, data=json.dumps({"verbose": True}), headers=json_headers)
# TODO: add more friendly label to tree metadata? if so, add "includeTreeMetadata":True above
study_metadata = study_metadata_response.json()
# filter just the metadata for studies contributing to synthesis
contributing_studies = [ ]
for study in study_metadata['matched_studies']:
# Add any missing study-ID prefixes (assume 'pg') so we can compare
# with the prefixed IDs provided by getSynthesisSourceList.
id_parts = study['ot:studyId'].split('_')
if len(id_parts) == 1:
prefixed_study_id = 'pg_%s' % study['ot:studyId']
else:
prefixed_study_id = study['ot:studyId']
if prefixed_study_id in contributing_study_info.keys():
contrib_info = contributing_study_info[ prefixed_study_id ]
# and commit SHA to support retrieval of *exact* Nexson from synthesis
study['commit_SHA_in_synthesis'] = contrib_info['commit_SHA_in_synthesis']
# add contributing tree ID(s) so we can directly link to (or download) them
study['tree_ids'] = contrib_info['tree_ids']
contributing_studies.append( study )
# sort these alphabetically by first author, then render in the page
contributing_studies.sort(key = lambda x: x.get('ot:studyPublicationReference'))
# TODO: encode data to utf-8?
## context_names += [n.encode('utf-8') for n in contextnames_json[gname] ]
# translate data-deposit DOIs/URLs into friendlier forms
for study in contributing_studies:
raw_deposit_doi = study.get('ot:dataDeposit', None)
if raw_deposit_doi:
study['friendlyDepositMessage'] = get_data_deposit_message(raw_deposit_doi)
return contributing_studies
except Exception, e:
# throw 403 or 500 or just leave it
return ('ERROR', e.message)
| {
"content_hash": "e9117917c433987050dadd0ed24bdad5",
"timestamp": "",
"source": "github",
"line_count": 521,
"max_line_length": 140,
"avg_line_length": 45.46257197696737,
"alnum_prop": 0.6258549354048806,
"repo_name": "OpenTreeOfLife/opentree",
"id": "7f82feae10291384237d51559718dbabf2640abe",
"size": "23710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapp/controllers/about.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "91586"
},
{
"name": "Go",
"bytes": "6808"
},
{
"name": "HTML",
"bytes": "728798"
},
{
"name": "JavaScript",
"bytes": "2145214"
},
{
"name": "Less",
"bytes": "159886"
},
{
"name": "Makefile",
"bytes": "613"
},
{
"name": "PHP",
"bytes": "52477"
},
{
"name": "Python",
"bytes": "750874"
},
{
"name": "Shell",
"bytes": "4890"
}
],
"symlink_target": ""
} |
"""
Test the OOB sensor command plugin.
"""
from .. import OobSensorGetCommand
from .test_oob_sensor_command import TestOobSensorCommand
class TestOobSensorGetCommand(TestOobSensorCommand):
def setUp(self):
super(TestOobSensorGetCommand, self).setUp()
self.oob_sensor_get = OobSensorGetCommand(self.node_name, self.configuration_manager,
self.mock_plugin_manager, None, 'temp')
self.oob_sensor_get_empty = OobSensorGetCommand(self.node_name, self.configuration_manager,
self.mock_plugin_manager, None, '')
self.oob_sensor_get_all = OobSensorGetCommand(self.node_name, self.configuration_manager,
self.mock_plugin_manager, None, 'all')
self.oob_sensor_get1 = OobSensorGetCommand(self.node_name, self.configuration_manager,
self.mock_plugin_manager, None, 'temp,ivoc_voltage')
def test_ret_msg(self):
try:
self.oob_sensor_get_empty.execute()
self.fail("no error")
except RuntimeError:
pass
self.assertEqual(self.oob_sensor_get_all.execute()[0].return_code, 0)
self.assertEqual(self.oob_sensor_get1.execute()[0].return_code, 0)
self.assertEqual(self.oob_sensor_get.execute()[0].return_code, 0)
self.oob_manager_mock.get_sensor_value.return_value = {"node": {'temp': [0.88765444]}}
self.assertEqual(self.oob_sensor_get.execute()[0].return_code, 0)
self.oob_manager_mock.get_sensor_value.side_effect = RuntimeError
self.assertEqual(self.oob_sensor_get.execute()[0].return_code, 255)
| {
"content_hash": "560caa39a72b99e52e5da97264d9e6cd",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 103,
"avg_line_length": 54.625,
"alnum_prop": 0.6149885583524027,
"repo_name": "intel-ctrlsys/actsys",
"id": "54ee9c45190428df03bcd153bbb6020168146772",
"size": "1810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "actsys/control/commands/oob_sensors/tests/test_oob_sensor_get.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "11641"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1048209"
}
],
"symlink_target": ""
} |
from build_template import *
from optparse import OptionParser
import json
import os
def generate_enum_classfile(json_file, model):
filename = os.path.basename(json_file)
p = os.path.dirname(os.path.dirname(json_file))
enum_name = filename[:-5]
enum_file = os.path.join(p, enum_name + '.php')
print("$")
d = {
'enum_name': enum_name,
'array': model
}
create_file_from_template('enum.tpl.php', d, enum_file)
"""
"""
if __name__ == '__main__':
# sys.argv = ['build_enum.py', '--json=D:\Projects\Badmin\www\defines\const\A.json']
parser = OptionParser()
parser.add_option("-j", "--json", action="store",
dest="json_file", help="Provide JSON file name")
options, args = parser.parse_args()
json_file = options.json_file
with open(json_file, 'r', encoding='utf-8') as f:
content = f.read()
model = json.loads(content)
r = generate_enum_classfile(json_file, model)
print(json.dumps(r))
| {
"content_hash": "8d1825a2ef09c3b7e7f02c1a6500f9c9",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 88,
"avg_line_length": 27.18918918918919,
"alnum_prop": 0.6043737574552683,
"repo_name": "healerkx/AdminBuildr",
"id": "3933957b8fee2dfec2b9e2de3d4b23a597402e4c",
"size": "1007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/build_enum.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "407827"
},
{
"name": "HTML",
"bytes": "172750"
},
{
"name": "JavaScript",
"bytes": "3274521"
},
{
"name": "PHP",
"bytes": "98933"
},
{
"name": "Python",
"bytes": "42831"
}
],
"symlink_target": ""
} |
from .imputer import Imputer
| {
"content_hash": "416cbfc60cf92832a8d890bccc6e896d",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 28,
"avg_line_length": 29,
"alnum_prop": 0.8275862068965517,
"repo_name": "robinsonkwame/Imputer.py",
"id": "783c449359c8a6133fae459cc099f9196274b16a",
"size": "29",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imputer/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "57034"
},
{
"name": "Python",
"bytes": "4284"
}
],
"symlink_target": ""
} |
"""
The MIT License (MIT)
Copyright (c) 2015-2017 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from discord.errors import DiscordException
__all__ = [ 'CommandError', 'MissingRequiredArgument', 'BadArgument',
'NoPrivateMessage', 'CheckFailure', 'CommandNotFound',
'DisabledCommand', 'CommandInvokeError', 'TooManyArguments',
'UserInputError', 'CommandOnCooldown', 'NotOwner' ]
class CommandError(DiscordException):
"""The base exception type for all command related errors.
This inherits from :exc:`discord.DiscordException`.
This exception and exceptions derived from it are handled
in a special way as they are caught and passed into a special event
from :class:`.Bot`\, :func:`on_command_error`.
"""
def __init__(self, message=None, *args):
if message is not None:
# clean-up @everyone and @here mentions
m = message.replace('@everyone', '@\u200beveryone').replace('@here', '@\u200bhere')
super().__init__(m, *args)
else:
super().__init__(*args)
class UserInputError(CommandError):
"""The base exception type for errors that involve errors
regarding user input.
This inherits from :exc:`.CommandError`.
"""
pass
class CommandNotFound(CommandError):
"""Exception raised when a command is attempted to be invoked
but no command under that name is found.
This is not raised for invalid subcommands, rather just the
initial main command that is attempted to be invoked.
"""
pass
class MissingRequiredArgument(UserInputError):
"""Exception raised when parsing a command and a parameter
that is required is not encountered.
Attributes
-----------
param: str
The argument that is missing.
"""
def __init__(self, param):
self.param = param.name
super().__init__('{0.name} is a required argument that is missing.'.format(param))
class TooManyArguments(UserInputError):
"""Exception raised when the command was passed too many arguments and its
:attr:`.Command.ignore_extra` attribute was not set to ``True``.
"""
pass
class BadArgument(UserInputError):
"""Exception raised when a parsing or conversion failure is encountered
on an argument to pass into a command.
"""
pass
class CheckFailure(CommandError):
"""Exception raised when the predicates in :attr:`.Command.checks` have failed."""
pass
class NoPrivateMessage(CheckFailure):
"""Exception raised when an operation does not work in private message
contexts.
"""
pass
class NotOwner(CheckFailure):
"""Exception raised when the message author is not the owner of the bot."""
pass
class DisabledCommand(CommandError):
"""Exception raised when the command being invoked is disabled."""
pass
class CommandInvokeError(CommandError):
"""Exception raised when the command being invoked raised an exception.
Attributes
-----------
original
The original exception that was raised. You can also get this via
the ``__cause__`` attribute.
"""
def __init__(self, e):
self.original = e
super().__init__('Command raised an exception: {0.__class__.__name__}: {0}'.format(e))
class CommandOnCooldown(CommandError):
"""Exception raised when the command being invoked is on cooldown.
Attributes
-----------
cooldown: Cooldown
A class with attributes ``rate``, ``per``, and ``type`` similar to
the :func:`.cooldown` decorator.
retry_after: float
The amount of seconds to wait before you can retry again.
"""
def __init__(self, cooldown, retry_after):
self.cooldown = cooldown
self.retry_after = retry_after
super().__init__('You are on cooldown. Try again in {:.2f}s'.format(retry_after))
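# A hedged sketch of a handler that consumes the exception types above; it
# belongs in a bot script rather than in this module, and assumes the
# rewrite-era event signature on_command_error(ctx, error). The prefix and
# reply messages are illustrative.
from discord.ext import commands
bot = commands.Bot(command_prefix='!')
@bot.event
async def on_command_error(ctx, error):
    if isinstance(error, commands.CommandNotFound):
        return  # silently ignore unknown commands
    if isinstance(error, commands.MissingRequiredArgument):
        await ctx.send('Missing required argument: {}'.format(error.param))
    elif isinstance(error, commands.CommandOnCooldown):
        await ctx.send('On cooldown, retry in {:.2f}s'.format(error.retry_after))
    elif isinstance(error, commands.CommandInvokeError):
        raise error.original  # surface the wrapped exception
    else:
        await ctx.send(str(error))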
| {
"content_hash": "528f417ffeeaf9ceb443257412964be8",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 95,
"avg_line_length": 35.44525547445255,
"alnum_prop": 0.6904859967051071,
"repo_name": "mgardne8/discord.py",
"id": "555bfd99403a964ca76377c5f1a2b835d93fbbf6",
"size": "4880",
"binary": false,
"copies": "1",
"ref": "refs/heads/rewrite",
"path": "discord/ext/commands/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "599866"
}
],
"symlink_target": ""
} |
class AutoApiMissingAdminConfig(Exception):
pass
class Message(Exception):
def __init__(self, message):
super(Message, self).__init__()
self.message = message
| {
"content_hash": "e270f2e4ca707fbb487a97d59fa8f329",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 43,
"avg_line_length": 20.666666666666668,
"alnum_prop": 0.6451612903225806,
"repo_name": "fvalverd/AutoApi",
"id": "fe7c17e841b3fce44846743c95f3b0c93df7b586",
"size": "212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "auto_api/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "80970"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Gradient(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattersmith.marker"
_path_str = "scattersmith.marker.gradient"
_valid_props = {"color", "colorsrc", "type", "typesrc"}
# color
# -----
@property
def color(self):
"""
Sets the final color of the gradient fill: the center color for
radial, the right for horizontal, or the bottom for vertical.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# type
# ----
@property
def type(self):
"""
Sets the type of gradient used to fill the markers
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['radial', 'horizontal', 'vertical', 'none']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
# typesrc
# -------
@property
def typesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `type`.
The 'typesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["typesrc"]
@typesrc.setter
def typesrc(self, val):
self["typesrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the final color of the gradient fill: the center
color for radial, the right for horizontal, or the
bottom for vertical.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
type
Sets the type of gradient used to fill the markers
typesrc
Sets the source reference on Chart Studio Cloud for
`type`.
"""
def __init__(
self, arg=None, color=None, colorsrc=None, type=None, typesrc=None, **kwargs
):
"""
Construct a new Gradient object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattersmith.marker.Gradient`
color
Sets the final color of the gradient fill: the center
color for radial, the right for horizontal, or the
bottom for vertical.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
type
Sets the type of gradient used to fill the markers
typesrc
Sets the source reference on Chart Studio Cloud for
`type`.
Returns
-------
Gradient
"""
super(Gradient, self).__init__("gradient")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattersmith.marker.Gradient
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattersmith.marker.Gradient`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("type", None)
_v = type if type is not None else _v
if _v is not None:
self["type"] = _v
_v = arg.pop("typesrc", None)
_v = typesrc if typesrc is not None else _v
if _v is not None:
self["typesrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| {
"content_hash": "8993ee6bc5579fbda6b375e91662f2ea",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 84,
"avg_line_length": 33.42372881355932,
"alnum_prop": 0.5463995943204868,
"repo_name": "plotly/plotly.py",
"id": "e1e91ba222fcb53f7bebfb9d1da9010102fbf460",
"size": "7888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/scattersmith/marker/_gradient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import bee
from bee.segments import *
import libcontext
from libcontext.pluginclasses import *
from libcontext.socketclasses import *
from .. import chessboard as comp_chessboard
class pandachessboard(comp_chessboard.chessboard):
def __init__(self, boardparent):
self.boardparent = boardparent
comp_chessboard.chessboard.__init__(self)
def move(self, move):
self.boardparent.move(move)
class chessboard(bee.worker):
player = variable("str")
parameter(player, "Both")
turn = antenna("pull", "str")
v_turn = buffer("pull", "str")
connect(turn, v_turn)
trig_turn = triggerfunc(v_turn)
make_move = antenna("push", ("str", "chess"))
v_make_move = variable(("str", "chess"))
connect(make_move, v_make_move)
@modifier
def do_make_move(self):
self.board.make_move(self.v_make_move)
trigger(v_make_move, do_make_move, "update")
get_move = output("push", ("str", "chess"))
v_move = variable("str")
t_move = transistor("str")
connect(v_move, t_move)
connect(t_move, get_move)
trig_move = triggerfunc(t_move)
def move(self, move):
try:
self.eventhandler_lock()
if self.player != "Both":
self.trig_turn()
if self.v_turn != self.player or self.player is None: raise ValueError
self.v_move = move
self.trig_move()
finally:
self.eventhandler_unlock()
def set_eventhandler_lock(self, eventhandler_lock):
self.eventhandler_lock = eventhandler_lock
def set_eventhandler_unlock(self, eventhandler_unlock):
self.eventhandler_unlock = eventhandler_unlock
def place(self):
assert self.player in ("White", "Black", "Both", None)
self.board = pandachessboard(self)
libcontext.socket(("eventhandler", "lock"),
socket_single_required(self.set_eventhandler_lock))
libcontext.socket(("eventhandler", "unlock"),
socket_single_required(self.set_eventhandler_unlock))
| {
"content_hash": "e4f87b07ceffddb16526f37f0c7f061a",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 86,
"avg_line_length": 29.84285714285714,
"alnum_prop": 0.6203925323121111,
"repo_name": "agoose77/hivesystem",
"id": "ca7c7286a8597f35f3246db42dddb8a2aad101d1",
"size": "2089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manual/chess/components/workers/chessboard.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "2491478"
},
{
"name": "Shell",
"bytes": "1164"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from .compat import reduce
def build_pipe(*args):
'''builds a pipe (a processing function) of the list of functions'''
return reduce(lambda g, f: (lambda elem: f(g(elem))), args)
def compose(*args):
    '''composes the list of functions into one (applied right-to-left)'''
    return build_pipe(*reversed(args))
def identity(elem):
'''identity function'''
return elem
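# Minimal usage sketch (added for illustration; not part of the original
# module). build_pipe applies functions left-to-right, compose right-to-left.
# Runnable via ``python -m jira_worklog.utils.functools`` thanks to the guard.
if __name__ == '__main__':  # pragma: no cover
    _inc = lambda x: x + 1
    _dbl = lambda x: x * 2
    assert build_pipe(_inc, _dbl)(3) == 8  # _dbl(_inc(3))
    assert compose(_inc, _dbl)(3) == 7     # _inc(_dbl(3))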
| {
"content_hash": "f5d33ef35f56b77ea46dc30eb7ac256d",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 72,
"avg_line_length": 24.764705882352942,
"alnum_prop": 0.6817102137767221,
"repo_name": "szopu/git-jira-worklog",
"id": "d576025a3a8dd2ddc429e9385e4f755977b4c77c",
"size": "421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jira_worklog/utils/functools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22327"
}
],
"symlink_target": ""
} |
__author__ = 'jdaniel'
from math import sqrt
from GaiaSolve.model import Model
class ZDT1(Model):
def __init__(self):
super(ZDT1, self).__init__()
def evaluate(self):
g = 1.0 + 9.0*sum(self.x[1:])/(len(self.x) - 1)
f1 = self.x[0]
f2 = g * (1.0 - sqrt(f1/g))
self.obj = [f1, f2]
self.eqcon = []
self.neqcon = []
def number_of_design_variables(self):
return 2
def lower_bound(self):
return [0.0]*2
def upper_bound(self):
return [1.0]*2
def number_of_objectives(self):
return 2
def has_equality_constraints(self):
return False
def number_of_equality_constraints(self):
return 0
def has_inequality_constraints(self):
return False
def number_of_inequality_constraints(self):
return 0
def decision_variable_names(self):
x_names = []
for i in range(2):
x_names.append('x' + str(i))
return x_names
def objective_variable_names(self):
return ['f1', 'f2']
def equality_constraint_variable_names(self):
return []
def inequality_constraint_variable_names(self):
return []
| {
"content_hash": "b7486fb6e4ba8969f3fdd0e5aeeddff2",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 55,
"avg_line_length": 20.266666666666666,
"alnum_prop": 0.5600328947368421,
"repo_name": "jldaniel/Gaia",
"id": "5d25095dbfeee98ba5e69747acc1dc911d92254a",
"size": "1216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Models/zdt1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "127670"
}
],
"symlink_target": ""
} |
from datetime import datetime
from typing import Dict, Sequence, Union
import humanreadable as hr
TimeArg = Union[hr.Time, int, str, None]
IcmpReplies = Sequence[Dict[str, Union[str, bool, float, int, datetime]]]
PingAddOpts = Union[str, Sequence[str]]
| {
"content_hash": "a72a6122bec0ce1e33097bcb073f5103",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 73,
"avg_line_length": 28.444444444444443,
"alnum_prop": 0.7578125,
"repo_name": "thombashi/pingparsing",
"id": "dfe8d7a7127270da49a374972a845e3d07749815",
"size": "256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pingparsing/_typing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "509"
},
{
"name": "Python",
"bytes": "100698"
},
{
"name": "Shell",
"bytes": "265"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
eraser = orm.Brush.objects.get(canonical_name='paintbrush')
eraser.ordinal = 999
eraser.save()
paintbrush = orm.Brush.objects.get(canonical_name='paintbrush')
paintbrush.ordinal = 3
paintbrush.save()
orm.Brush.objects.create(
canonical_name='crayon',
ordinal=4,
label='Carlos la Crayon',
iphone_label='Carlos la Crayon',
description='Use Carlos to add some flair to your drawings. He also makes the best enchiladas.',
is_for_sale=False,
is_new=True,
iap_product_id='as.canv.drawquest.products.brushes.crayon',
cost=50,
red=254,
green=120,
blue=143,
)
orm.Brush.objects.create(
canonical_name='paint_bucket',
ordinal=5,
label='Patty the Paint Can',
iphone_label='Patty the Paint Can',
description='Need to color the whole canvas? Patty is always happy to help you out.',
is_for_sale=False,
is_new=True,
iap_product_id='as.canv.drawquest.products.brushes.paint_bucket',
cost=50,
red=254,
green=120,
blue=143,
)
def backwards(self, orm):
"Write your backwards methods here."
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'brushes.brush': {
'Meta': {'ordering': "['ordinal']", 'object_name': 'Brush'},
'blue': ('django.db.models.fields.IntegerField', [], {}),
'canonical_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'cost': ('django.db.models.fields.IntegerField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'green': ('django.db.models.fields.IntegerField', [], {}),
'iap_product_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iphone_label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'is_for_sale': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_new': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'ordinal': ('django.db.models.fields.IntegerField', [], {}),
'owned_by_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'owners': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['canvas_auth.User']", 'symmetrical': 'False'}),
'red': ('django.db.models.fields.IntegerField', [], {})
},
u'canvas_auth.user': {
'Meta': {'object_name': 'User', 'db_table': "u'auth_user'", '_ormbases': [u'auth.User'], 'proxy': 'True'}
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['brushes']
symmetrical = True
| {
"content_hash": "4a94056e393158ac6066de04033a122e",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 187,
"avg_line_length": 57.108108108108105,
"alnum_prop": 0.549297996529421,
"repo_name": "drawquest/drawquest-web",
"id": "f302485351be4ec2a33dbd8b16b3f05454af0573",
"size": "6363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/drawquest/apps/brushes/migrations/0011_add_crayon_and_paint_bucket.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "57"
},
{
"name": "C",
"bytes": "547"
},
{
"name": "CSS",
"bytes": "634659"
},
{
"name": "CoffeeScript",
"bytes": "8968"
},
{
"name": "HTML",
"bytes": "898627"
},
{
"name": "JavaScript",
"bytes": "1507053"
},
{
"name": "Makefile",
"bytes": "258"
},
{
"name": "PHP",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "7220727"
},
{
"name": "Ruby",
"bytes": "876"
},
{
"name": "Shell",
"bytes": "3700"
}
],
"symlink_target": ""
} |
"""
Copyright 2012 Ali Ok (aliokATapacheDOTorg)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from bson.objectid import ObjectId
from trnltk.morphology.learner.controller.parseresultcorrectmarkercontroller import ParseResultCorrectMarkerController
from trnltk.morphology.learner.controller.sessionmanager import SessionManager
from trnltk.morphology.learner.requesthandler.sessionawarerequesthandler import SessionAwareRequestHandler
from trnltk.morphology.learner.ui import applicationcontext
class ParseResultCorrectMarkerHandler(SessionAwareRequestHandler):
def get(self):
# check word id
param_word_id = self.request.get('wordId')
assert param_word_id
# get parse result
param_parse_result_uuid = self.request.get('parseResultUUID')
if not param_parse_result_uuid:
raise Exception(u"Missing parameter : parseResultUUID")
word_id = ObjectId(param_word_id)
# run controller, which will save the result in the db
dbmanager = applicationcontext.application_context_instance.dbmanager
sessionmanager = SessionManager(self.session)
controller = ParseResultCorrectMarkerController(dbmanager, sessionmanager)
controller.save_parse_result_for_word(word_id, param_parse_result_uuid)
# get word index to go from request
param_next_word_id = self.request.get('nextWordId') or param_word_id
next_word_id = ObjectId(param_next_word_id)
        # redirect to "/learner?wordId=<next_word_id>"
return self.redirect("/learner?wordId={}".format(next_word_id))
| {
"content_hash": "3db73afd2e76bc938fde52ae1f9d0a30",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 118,
"avg_line_length": 39.60377358490566,
"alnum_prop": 0.753692234397332,
"repo_name": "aliok/trnltk",
"id": "8c723987e5bb0af2b8bb0a85d6ef60100ecd839a",
"size": "2099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trnltk/morphology/learner/requesthandler/parseresultcorrectmarkerhandler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "60232"
},
{
"name": "Python",
"bytes": "1320401"
},
{
"name": "Shell",
"bytes": "2191"
}
],
"symlink_target": ""
} |
from openerp import fields, models
class Partner(models.Model):
_inherit = 'res.partner'
instructor = fields.Boolean("Instructor", default=False)
session_ids = fields.Many2many('openacademy.session', string="Session as attendee",
readonly=True)
| {
"content_hash": "1216dfa37518b9152acdedd9df83efc3",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 87,
"avg_line_length": 33.22222222222222,
"alnum_prop": 0.6421404682274248,
"repo_name": "ITTANNER/openacademy-project",
"id": "97ef600094fed7ee4edd5bf0fd54cd6fcf31035b",
"size": "323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openacademy/model/partner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9387"
}
],
"symlink_target": ""
} |
from base import *
try:
from local_settings import *
except ImportError:
raise ImportError('Add settings/local_settings.py to hold secret info ')
| {
"content_hash": "6657ab5b73fca87ac31bcbed7491858b",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 76,
"avg_line_length": 25.833333333333332,
"alnum_prop": 0.7419354838709677,
"repo_name": "rmwdeveloper/webhack",
"id": "95f8651d1aa427f22a73d448f0e7572b7c6291a1",
"size": "155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webhack/settings/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1998"
},
{
"name": "HTML",
"bytes": "516"
},
{
"name": "JavaScript",
"bytes": "9888"
},
{
"name": "Python",
"bytes": "3317"
},
{
"name": "Shell",
"bytes": "218"
}
],
"symlink_target": ""
} |
"""This module includes classes used for annotating trace information.
This includes the base StateAnnotation class, as well as an adaption,
which will not be copied on every new state.
"""
class StateAnnotation:
"""The StateAnnotation class is used to persist information over traces.
This allows modules to reason about traces without the need to
traverse the state space themselves.
"""
# TODO: Remove this? It seems to be used only in the MutationPruner, and
# we could simply use world state annotations if we want them to be persisted.
@property
def persist_to_world_state(self) -> bool:
"""If this function returns true then laser will also annotate the
world state.
If you want annotations to persist through different user initiated message call transactions
then this should be enabled.
The default is set to False
"""
return False
class NoCopyAnnotation(StateAnnotation):
"""This class provides a base annotation class for annotations that
shouldn't be copied on every new state.
Rather the same object should be propagated. This is very useful if
you are looking to analyze a property over multiple substates
"""
def __copy__(self):
return self
def __deepcopy__(self, _):
return self
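# Illustrative sketch (added for clarity; not part of mythril): a hypothetical
# annotation built on NoCopyAnnotation. Because both copy hooks return the
# same object, every substate derived from an annotated state shares (and
# mutates) the single counter instance.
class _ExampleVisitCounter(NoCopyAnnotation):
    """Hypothetical shared counter, used only to illustrate NoCopyAnnotation."""
    def __init__(self):
        self.visits = 0
    def increment(self):
        self.visits += 1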
| {
"content_hash": "d8e9aab7af0f17006476a92321d17066",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 101,
"avg_line_length": 31.904761904761905,
"alnum_prop": 0.7029850746268657,
"repo_name": "b-mueller/mythril",
"id": "0f25a31118791ef787c5ea0bb9c8b8c6792d0ec5",
"size": "1340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mythril/laser/ethereum/state/annotation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "688"
},
{
"name": "HTML",
"bytes": "3582"
},
{
"name": "JavaScript",
"bytes": "531"
},
{
"name": "Python",
"bytes": "329678"
},
{
"name": "Shell",
"bytes": "1766"
}
],
"symlink_target": ""
} |
"""Example differentially private trainer and evaluator for MNIST.
"""
from __future__ import division
import json
import os
import sys
import time
import numpy as np
from six.moves import xrange
import tensorflow as tf
from differential_privacy.dp_sgd.dp_optimizer import dp_optimizer
from differential_privacy.dp_sgd.dp_optimizer import dp_pca
from differential_privacy.dp_sgd.dp_optimizer import sanitizer
from differential_privacy.dp_sgd.dp_optimizer import utils
from differential_privacy.privacy_accountant.tf import accountant
# parameters for the training
tf.flags.DEFINE_integer("batch_size", 600,
"The training batch size.")
tf.flags.DEFINE_integer("batches_per_lot", 1,
"Number of batches per lot.")
# Together, batch_size and batches_per_lot determine lot_size.
tf.flags.DEFINE_integer("num_training_steps", 50000,
"The number of training steps."
"This counts number of lots.")
tf.flags.DEFINE_bool("randomize", True,
"If true, randomize the input data; otherwise use a fixed "
"seed and non-randomized input.")
tf.flags.DEFINE_bool("freeze_bottom_layers", False,
"If true, only train on the logit layer.")
tf.flags.DEFINE_bool("save_mistakes", False,
"If true, save the mistakes made during testing.")
tf.flags.DEFINE_float("lr", 0.05, "start learning rate")
tf.flags.DEFINE_float("end_lr", 0.05, "end learning rate")
tf.flags.DEFINE_float("lr_saturate_epochs", 0,
"learning rate saturate epochs; set to 0 for a constant "
"learning rate of --lr.")
# For searching parameters
tf.flags.DEFINE_integer("projection_dimensions", 60,
"PCA projection dimensions, or 0 for no projection.")
tf.flags.DEFINE_integer("num_hidden_layers", 1,
"Number of hidden layers in the network")
tf.flags.DEFINE_integer("hidden_layer_num_units", 1000,
"Number of units per hidden layer")
tf.flags.DEFINE_float("default_gradient_l2norm_bound", 4.0, "norm clipping")
tf.flags.DEFINE_integer("num_conv_layers", 0,
"Number of convolutional layers to use.")
tf.flags.DEFINE_string("training_data_path",
"/tmp/mnist/mnist_train.tfrecord",
"Location of the training data.")
tf.flags.DEFINE_string("eval_data_path",
"/tmp/mnist/mnist_test.tfrecord",
"Location of the eval data.")
tf.flags.DEFINE_integer("eval_steps", 10,
"Evaluate the model every eval_steps")
# Parameters for privacy spending. We allow linearly varying eps during
# training.
tf.flags.DEFINE_string("accountant_type", "Moments", "Moments, Amortized.")
# Flags that control privacy spending during training.
tf.flags.DEFINE_float("eps", 1.0,
"Start privacy spending for one epoch of training, "
"used if accountant_type is Amortized.")
tf.flags.DEFINE_float("end_eps", 1.0,
"End privacy spending for one epoch of training, "
"used if accountant_type is Amortized.")
tf.flags.DEFINE_float("eps_saturate_epochs", 0,
"Stop varying epsilon after eps_saturate_epochs. Set to "
"0 for constant eps of --eps. "
"Used if accountant_type is Amortized.")
tf.flags.DEFINE_float("delta", 1e-5,
"Privacy spending for training. Constant through "
"training, used if accountant_type is Amortized.")
tf.flags.DEFINE_float("sigma", 4.0,
"Noise sigma, used only if accountant_type is Moments")
# Flags that control privacy spending for the pca projection
# (only used if --projection_dimensions > 0).
tf.flags.DEFINE_float("pca_eps", 0.5,
"Privacy spending for PCA, used if accountant_type is "
"Amortized.")
tf.flags.DEFINE_float("pca_delta", 0.005,
"Privacy spending for PCA, used if accountant_type is "
"Amortized.")
tf.flags.DEFINE_float("pca_sigma", 7.0,
"Noise sigma for PCA, used if accountant_type is Moments")
tf.flags.DEFINE_string("target_eps", "0.125,0.25,0.5,1,2,4,8",
"Log the privacy loss for the target epsilon's. Only "
"used when accountant_type is Moments.")
tf.flags.DEFINE_float("target_delta", 1e-5,
"Maximum delta for --terminate_based_on_privacy.")
tf.flags.DEFINE_bool("terminate_based_on_privacy", False,
"Stop training if privacy spent exceeds "
"(max(--target_eps), --target_delta), even "
"if --num_training_steps have not yet been completed.")
tf.flags.DEFINE_string("save_path", "/tmp/mnist_dir",
"Directory for saving model outputs.")
FLAGS = tf.flags.FLAGS
NUM_TRAINING_IMAGES = 60000
NUM_TESTING_IMAGES = 10000
IMAGE_SIZE = 28
def MnistInput(mnist_data_file, batch_size, randomize):
"""Create operations to read the MNIST input file.
Args:
mnist_data_file: Path of a file containing the MNIST images to process.
batch_size: size of the mini batches to generate.
randomize: If true, randomize the dataset.
Returns:
images: A tensor with the formatted image data. shape [batch_size, 28*28]
labels: A tensor with the labels for each image. shape [batch_size]
"""
file_queue = tf.train.string_input_producer([mnist_data_file])
reader = tf.TFRecordReader()
_, value = reader.read(file_queue)
example = tf.parse_single_example(
value,
features={"image/encoded": tf.FixedLenFeature(shape=(), dtype=tf.string),
"image/class/label": tf.FixedLenFeature([1], tf.int64)})
image = tf.cast(tf.image.decode_png(example["image/encoded"], channels=1),
tf.float32)
image = tf.reshape(image, [IMAGE_SIZE * IMAGE_SIZE])
image /= 255
label = tf.cast(example["image/class/label"], dtype=tf.int32)
label = tf.reshape(label, [])
if randomize:
images, labels = tf.train.shuffle_batch(
[image, label], batch_size=batch_size,
capacity=(batch_size * 100),
min_after_dequeue=(batch_size * 10))
else:
images, labels = tf.train.batch([image, label], batch_size=batch_size)
return images, labels
def Eval(mnist_data_file, network_parameters, num_testing_images,
randomize, load_path, save_mistakes=False):
"""Evaluate MNIST for a number of steps.
Args:
mnist_data_file: Path of a file containing the MNIST images to process.
network_parameters: parameters for defining and training the network.
num_testing_images: the number of images we will evaluate on.
    randomize: if True, randomize the order of the testing images; otherwise,
      read them sequentially.
load_path: path where to load trained parameters from.
save_mistakes: save the mistakes if True.
Returns:
The evaluation accuracy as a float.
"""
batch_size = 100
# Like for training, we need a session for executing the TensorFlow graph.
with tf.Graph().as_default(), tf.Session() as sess:
# Create the basic Mnist model.
images, labels = MnistInput(mnist_data_file, batch_size, randomize)
logits, _, _ = utils.BuildNetwork(images, network_parameters)
softmax = tf.nn.softmax(logits)
# Load the variables.
ckpt_state = tf.train.get_checkpoint_state(load_path)
if not (ckpt_state and ckpt_state.model_checkpoint_path):
raise ValueError("No model checkpoint to eval at %s\n" % load_path)
saver = tf.train.Saver()
saver.restore(sess, ckpt_state.model_checkpoint_path)
coord = tf.train.Coordinator()
_ = tf.train.start_queue_runners(sess=sess, coord=coord)
total_examples = 0
correct_predictions = 0
image_index = 0
mistakes = []
for _ in xrange((num_testing_images + batch_size - 1) // batch_size):
predictions, label_values = sess.run([softmax, labels])
# Count how many were predicted correctly.
for prediction, label_value in zip(predictions, label_values):
total_examples += 1
if np.argmax(prediction) == label_value:
correct_predictions += 1
elif save_mistakes:
mistakes.append({"index": image_index,
"label": label_value,
"pred": np.argmax(prediction)})
image_index += 1
return (correct_predictions / total_examples,
mistakes if save_mistakes else None)
def Train(mnist_train_file, mnist_test_file, network_parameters, num_steps,
save_path, eval_steps=0):
"""Train MNIST for a number of steps.
Args:
mnist_train_file: path of MNIST train data file.
mnist_test_file: path of MNIST test data file.
network_parameters: parameters for defining and training the network.
num_steps: number of steps to run. Here steps = lots
save_path: path where to save trained parameters.
eval_steps: evaluate the model every eval_steps.
Returns:
the result after the final training step.
Raises:
ValueError: if the accountant_type is not supported.
"""
batch_size = FLAGS.batch_size
params = {"accountant_type": FLAGS.accountant_type,
"task_id": 0,
"batch_size": FLAGS.batch_size,
"projection_dimensions": FLAGS.projection_dimensions,
"default_gradient_l2norm_bound":
network_parameters.default_gradient_l2norm_bound,
"num_hidden_layers": FLAGS.num_hidden_layers,
"hidden_layer_num_units": FLAGS.hidden_layer_num_units,
"num_examples": NUM_TRAINING_IMAGES,
"learning_rate": FLAGS.lr,
"end_learning_rate": FLAGS.end_lr,
"learning_rate_saturate_epochs": FLAGS.lr_saturate_epochs
}
# Log different privacy parameters dependent on the accountant type.
if FLAGS.accountant_type == "Amortized":
params.update({"flag_eps": FLAGS.eps,
"flag_delta": FLAGS.delta,
"flag_pca_eps": FLAGS.pca_eps,
"flag_pca_delta": FLAGS.pca_delta,
})
elif FLAGS.accountant_type == "Moments":
params.update({"sigma": FLAGS.sigma,
"pca_sigma": FLAGS.pca_sigma,
})
with tf.Graph().as_default(), tf.Session() as sess, tf.device('/cpu:0'):
# Create the basic Mnist model.
images, labels = MnistInput(mnist_train_file, batch_size, FLAGS.randomize)
logits, projection, training_params = utils.BuildNetwork(
images, network_parameters)
cost = tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=tf.one_hot(labels, 10))
# The actual cost is the average across the examples.
cost = tf.reduce_sum(cost, [0]) / batch_size
if FLAGS.accountant_type == "Amortized":
priv_accountant = accountant.AmortizedAccountant(NUM_TRAINING_IMAGES)
sigma = None
pca_sigma = None
with_privacy = FLAGS.eps > 0
elif FLAGS.accountant_type == "Moments":
priv_accountant = accountant.GaussianMomentsAccountant(
NUM_TRAINING_IMAGES)
sigma = FLAGS.sigma
pca_sigma = FLAGS.pca_sigma
with_privacy = FLAGS.sigma > 0
else:
raise ValueError("Undefined accountant type, needs to be "
"Amortized or Moments, but got %s" % FLAGS.accountant)
# Note: Here and below, we scale down the l2norm_bound by
# batch_size. This is because per_example_gradients computes the
# gradient of the minibatch loss with respect to each individual
# example, and the minibatch loss (for our model) is the *average*
# loss over examples in the minibatch. Hence, the scale of the
# per-example gradients goes like 1 / batch_size.
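    # For example, with the default flags above (batch_size=600,
    # default_gradient_l2norm_bound=4.0) each per-example gradient is clipped
    # to roughly 4.0 / 600 (about 0.0067) before noise is added. (Added note;
    # the numbers are just the flag defaults, not measured values.)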
gaussian_sanitizer = sanitizer.AmortizedGaussianSanitizer(
priv_accountant,
[network_parameters.default_gradient_l2norm_bound / batch_size, True])
for var in training_params:
if "gradient_l2norm_bound" in training_params[var]:
l2bound = training_params[var]["gradient_l2norm_bound"] / batch_size
gaussian_sanitizer.set_option(var,
sanitizer.ClipOption(l2bound, True))
lr = tf.placeholder(tf.float32)
eps = tf.placeholder(tf.float32)
delta = tf.placeholder(tf.float32)
init_ops = []
if network_parameters.projection_type == "PCA":
with tf.variable_scope("pca"):
# Compute differentially private PCA.
all_data, _ = MnistInput(mnist_train_file, NUM_TRAINING_IMAGES, False)
pca_projection = dp_pca.ComputeDPPrincipalProjection(
all_data, network_parameters.projection_dimensions,
gaussian_sanitizer, [FLAGS.pca_eps, FLAGS.pca_delta], pca_sigma)
assign_pca_proj = tf.assign(projection, pca_projection)
init_ops.append(assign_pca_proj)
# Add global_step
global_step = tf.Variable(0, dtype=tf.int32, trainable=False,
name="global_step")
if with_privacy:
gd_op = dp_optimizer.DPGradientDescentOptimizer(
lr,
[eps, delta],
gaussian_sanitizer,
sigma=sigma,
batches_per_lot=FLAGS.batches_per_lot).minimize(
cost, global_step=global_step)
else:
gd_op = tf.train.GradientDescentOptimizer(lr).minimize(cost)
saver = tf.train.Saver()
coord = tf.train.Coordinator()
_ = tf.train.start_queue_runners(sess=sess, coord=coord)
    # We need to maintain the initialization sequence.
for v in tf.trainable_variables():
sess.run(tf.variables_initializer([v]))
sess.run(tf.global_variables_initializer())
sess.run(init_ops)
results = []
start_time = time.time()
prev_time = start_time
filename = "results-0.json"
log_path = os.path.join(save_path, filename)
target_eps = [float(s) for s in FLAGS.target_eps.split(",")]
if FLAGS.accountant_type == "Amortized":
# Only matters if --terminate_based_on_privacy is true.
target_eps = [max(target_eps)]
max_target_eps = max(target_eps)
lot_size = FLAGS.batches_per_lot * FLAGS.batch_size
lots_per_epoch = NUM_TRAINING_IMAGES / lot_size
for step in xrange(num_steps):
epoch = step / lots_per_epoch
curr_lr = utils.VaryRate(FLAGS.lr, FLAGS.end_lr,
FLAGS.lr_saturate_epochs, epoch)
curr_eps = utils.VaryRate(FLAGS.eps, FLAGS.end_eps,
FLAGS.eps_saturate_epochs, epoch)
for _ in xrange(FLAGS.batches_per_lot):
_ = sess.run(
[gd_op], feed_dict={lr: curr_lr, eps: curr_eps, delta: FLAGS.delta})
sys.stderr.write("step: %d\n" % step)
# See if we should stop training due to exceeded privacy budget:
should_terminate = False
terminate_spent_eps_delta = None
if with_privacy and FLAGS.terminate_based_on_privacy:
terminate_spent_eps_delta = priv_accountant.get_privacy_spent(
sess, target_eps=[max_target_eps])[0]
# For the Moments accountant, we should always have
# spent_eps == max_target_eps.
if (terminate_spent_eps_delta.spent_delta > FLAGS.target_delta or
terminate_spent_eps_delta.spent_eps > max_target_eps):
should_terminate = True
if (eval_steps > 0 and (step + 1) % eval_steps == 0) or should_terminate:
if with_privacy:
spent_eps_deltas = priv_accountant.get_privacy_spent(
sess, target_eps=target_eps)
else:
spent_eps_deltas = [accountant.EpsDelta(0, 0)]
for spent_eps, spent_delta in spent_eps_deltas:
sys.stderr.write("spent privacy: eps %.4f delta %.5g\n" % (
spent_eps, spent_delta))
saver.save(sess, save_path=save_path + "/ckpt")
train_accuracy, _ = Eval(mnist_train_file, network_parameters,
num_testing_images=NUM_TESTING_IMAGES,
randomize=True, load_path=save_path)
sys.stderr.write("train_accuracy: %.2f\n" % train_accuracy)
test_accuracy, mistakes = Eval(mnist_test_file, network_parameters,
num_testing_images=NUM_TESTING_IMAGES,
randomize=False, load_path=save_path,
save_mistakes=FLAGS.save_mistakes)
sys.stderr.write("eval_accuracy: %.2f\n" % test_accuracy)
curr_time = time.time()
elapsed_time = curr_time - prev_time
prev_time = curr_time
results.append({"step": step+1, # Number of lots trained so far.
"elapsed_secs": elapsed_time,
"spent_eps_deltas": spent_eps_deltas,
"train_accuracy": train_accuracy,
"test_accuracy": test_accuracy,
"mistakes": mistakes})
loginfo = {"elapsed_secs": curr_time-start_time,
"spent_eps_deltas": spent_eps_deltas,
"train_accuracy": train_accuracy,
"test_accuracy": test_accuracy,
"num_training_steps": step+1, # Steps so far.
"mistakes": mistakes,
"result_series": results}
loginfo.update(params)
if log_path:
with tf.gfile.Open(log_path, "w") as f:
json.dump(loginfo, f, indent=2)
f.write("\n")
f.close()
if should_terminate:
break
def main(_):
network_parameters = utils.NetworkParameters()
# If the ASCII proto isn't specified, then construct a config protobuf based
# on 3 flags.
network_parameters.input_size = IMAGE_SIZE ** 2
network_parameters.default_gradient_l2norm_bound = (
FLAGS.default_gradient_l2norm_bound)
if FLAGS.projection_dimensions > 0 and FLAGS.num_conv_layers > 0:
raise ValueError("Currently you can't do PCA and have convolutions"
"at the same time. Pick one")
# could add support for PCA after convolutions.
# Currently BuildNetwork can build the network with conv followed by
# projection, but the PCA training works on data, rather than data run
# through a few layers. Will need to init the convs before running the
# PCA, and need to change the PCA subroutine to take a network and perhaps
# allow for batched inputs, to handle larger datasets.
if FLAGS.num_conv_layers > 0:
conv = utils.ConvParameters()
conv.name = "conv1"
conv.in_channels = 1
conv.out_channels = 128
conv.num_outputs = 128 * 14 * 14
network_parameters.conv_parameters.append(conv)
# defaults for the rest: 5x5,stride 1, relu, maxpool 2x2,stride 2.
# insize 28x28, bias, stddev 0.1, non-trainable.
if FLAGS.num_conv_layers > 1:
conv = network_parameters.ConvParameters()
conv.name = "conv2"
conv.in_channels = 128
conv.out_channels = 128
conv.num_outputs = 128 * 7 * 7
conv.in_size = 14
# defaults for the rest: 5x5,stride 1, relu, maxpool 2x2,stride 2.
# bias, stddev 0.1, non-trainable.
network_parameters.conv_parameters.append(conv)
if FLAGS.num_conv_layers > 2:
raise ValueError("Currently --num_conv_layers must be 0,1 or 2."
"Manually create a network_parameters proto for more.")
if FLAGS.projection_dimensions > 0:
network_parameters.projection_type = "PCA"
network_parameters.projection_dimensions = FLAGS.projection_dimensions
for i in xrange(FLAGS.num_hidden_layers):
hidden = utils.LayerParameters()
hidden.name = "hidden%d" % i
hidden.num_units = FLAGS.hidden_layer_num_units
hidden.relu = True
hidden.with_bias = False
hidden.trainable = not FLAGS.freeze_bottom_layers
network_parameters.layer_parameters.append(hidden)
logits = utils.LayerParameters()
logits.name = "logits"
logits.num_units = 10
logits.relu = False
logits.with_bias = False
network_parameters.layer_parameters.append(logits)
Train(FLAGS.training_data_path,
FLAGS.eval_data_path,
network_parameters,
FLAGS.num_training_steps,
FLAGS.save_path,
eval_steps=FLAGS.eval_steps)
if __name__ == "__main__":
tf.app.run()
| {
"content_hash": "ff8d31ce5afc37c4eb322e4f0723254c",
"timestamp": "",
"source": "github",
"line_count": 493,
"max_line_length": 80,
"avg_line_length": 41.488843813387426,
"alnum_prop": 0.6301456927740295,
"repo_name": "jiaphuan/models",
"id": "d9343974dab6cc1c519672ff2fbb932e546d1db0",
"size": "21143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "research/differential_privacy/dp_sgd/dp_mnist/dp_mnist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1353"
},
{
"name": "C++",
"bytes": "1224262"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "71060"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Protocol Buffer",
"bytes": "72897"
},
{
"name": "Python",
"bytes": "5957505"
},
{
"name": "Shell",
"bytes": "76858"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from . import lexer, parser
from . import type_equation_solver
from .coretypes import *
from .typesets import *
from .type_symbol_table import *
from .util import *
from .coercion import coercion_cost
from .error import (DataShapeSyntaxError, OverloadError, UnificationError,
CoercionError)
__version__ = '0.1.1-dev'
def test(verbosity=1, xunitfile=None, exit=False):
"""
Runs the full Datashape test suite, outputting
the results of the tests to sys.stdout.
This uses nose tests to discover which tests to
run, and runs tests in any 'tests' subdirectory
within the Datashape module.
Parameters
----------
verbosity : int, optional
Value 0 prints very little, 1 prints a little bit,
and 2 prints the test names while testing.
xunitfile : string, optional
If provided, writes the test results to an xunit
style xml file. This is useful for running the tests
in a CI server such as Jenkins.
exit : bool, optional
If True, the function will call sys.exit with an
error code after the tests are finished.
"""
import nose
import os
import sys
argv = ['nosetests', '--verbosity=%d' % verbosity]
# Output an xunit file if requested
if xunitfile:
argv.extend(['--with-xunit', '--xunit-file=%s' % xunitfile])
# Set the logging level to warn
argv.extend(['--logging-level=WARN'])
# Add all 'tests' subdirectories to the options
rootdir = os.path.dirname(__file__)
for root, dirs, files in os.walk(rootdir):
if 'tests' in dirs:
testsdir = os.path.join(root, 'tests')
argv.append(testsdir)
print('Test dir: %s' % testsdir[len(rootdir)+1:])
# print versions (handy when reporting problems)
print('Datashape version: %s' % __version__)
sys.stdout.flush()
# Ask nose to do its thing
return nose.main(argv=argv, exit=exit)
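# Illustrative usage (added note; assumes nose is installed):
#
#     import datashape
#     datashape.test(verbosity=2, xunitfile="test-results.xml")
#
# This runs every test found under the package's 'tests' subdirectories.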
| {
"content_hash": "66c79e2f29f17ce602bc8ffc727e0ef0",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 74,
"avg_line_length": 34.92982456140351,
"alnum_prop": 0.6534404821697639,
"repo_name": "FrancescAlted/datashape",
"id": "316dce7c8163937d4362ad9c4ffb923608a43eb5",
"size": "1991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datashape/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "184883"
},
{
"name": "Shell",
"bytes": "5111"
}
],
"symlink_target": ""
} |
from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# Read permissions are allowed to any request,
# so we'll always allow GET, HEAD, or OPTIONS request.
if request.method in permissions.SAFE_METHODS:
return True
# Write permissions are only allowed to the owner of the snippet.
return obj.owner == request.user
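# Illustrative usage sketch (added note; SnippetDetail and the generics import
# are hypothetical and not part of this module):
#
#     class SnippetDetail(generics.RetrieveUpdateDestroyAPIView):
#         permission_classes = (permissions.IsAuthenticatedOrReadOnly,
#                               IsOwnerOrReadOnly)
#
# With this pairing, anonymous users get read-only access and only the owner
# of an object may update or delete it.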
| {
"content_hash": "a87ce7eae84022ec5e51caa253f19ce7",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 73,
"avg_line_length": 36.53333333333333,
"alnum_prop": 0.6843065693430657,
"repo_name": "perryhook/django-rest-tutorial",
"id": "6af03476b030609a074069c2f83c0e8dc7fd8ba9",
"size": "548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tutorial/snippets/permissions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8315"
}
],
"symlink_target": ""
} |
import csv
import sys
from collections import defaultdict
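# Usage sketch (added note, inferred from the argv handling below):
#   python cre.database_merge.py <annotated_variants.csv> <merged_output.csv>
# The script counts how often each Position-Ref-Alt variant occurs across
# samples and writes one row per variant with its frequency and sample list.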
fieldnames = ['Position', 'Ref', 'Alt', 'Variation', 'Zygosity', 'Refseq_change', 'Gene', 'Conserved_in_20_mammals', 'Sift_score', 'Polyphen_score', 'Cadd_score', 'Gnomad_af']
# 'Frequency' and 'Samples' columns are appended when writing the output below.
frequencies = defaultdict(list)
samples = defaultdict(list)
annotations = defaultdict(list)
with open(sys.argv[1],'r') as f_csv:
reader = csv.DictReader(f_csv)
for row in reader:
superkey = row['Position']+'-'+row['Ref']+'-'+row['Alt']
if superkey in frequencies:
frequencies[superkey] += 1
samples[superkey].append(row['Sample'])
else:
frequencies[superkey] = 1
            l = []
            for key in fieldnames:
                if key == 'Gnomad_af':
                    # keep Gnomad_af quoted so spreadsheet tools treat it as text
                    l.append('"' + row[key] + '"')
                else:
                    l.append(row[key])
            annotations[superkey] = ','.join(l)
ll = []
ll.append(row['Sample'])
samples[superkey] = ll
with open(sys.argv[2],'w') as f_out:
f_out.write(','.join(fieldnames)+',Frequency,Samples')
f_out.write('\n')
for key in sorted(frequencies.keys()):
f_out.write(annotations[key]+','+str(frequencies[key])+','+';'.join(samples[key]))
f_out.write('\n')
f_out.close()
f_csv.close()
| {
"content_hash": "ac5de52b4f42205f94fa0183382ab9dc",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 175,
"avg_line_length": 30.42105263157895,
"alnum_prop": 0.6375432525951558,
"repo_name": "naumenko-sa/cre",
"id": "aeb9cdda28f98a29ef55cedc93705fc4acc4ffa5",
"size": "1226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cre.database_merge.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "3568"
},
{
"name": "Perl",
"bytes": "978"
},
{
"name": "Python",
"bytes": "32280"
},
{
"name": "R",
"bytes": "52356"
},
{
"name": "Shell",
"bytes": "61460"
}
],
"symlink_target": ""
} |
import logging
from django.core.urlresolvers import reverse
from django.db import transaction
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from peavy_demo.forms import QuoteForm
from peavy_demo.models import Quote
@transaction.commit_on_success
def home(request):
logger = logging.getLogger('peavy_demo.views.home')
if request.method == 'POST':
form = QuoteForm(request.POST)
if form.is_valid():
quote = form.save()
logger.info('Quote from {0} in {1} submitted by {2}'.format(quote.character, quote.show, quote.submitter))
return HttpResponseRedirect(reverse('home'))
else:
form = QuoteForm()
data = {
'form': form,
'quotes': Quote.objects.all()
}
return render_to_response('home.html', data, context_instance=RequestContext(request))
| {
"content_hash": "f3754fd57eb05239cc89a4e3878fc447",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 118,
"avg_line_length": 30.096774193548388,
"alnum_prop": 0.6934619506966774,
"repo_name": "fairview/django-peavy",
"id": "ea5048dfbc2ee72365f4af5c9487e5fdc0cca6bc",
"size": "933",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "peavy_demo/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40989"
}
],
"symlink_target": ""
} |
import requests
from requests.auth import HTTPBasicAuth
import json
import uuid
url = "http://127.0.0.1:8000"
auth = HTTPBasicAuth('admin', 'admin001')
def call_api(entity, verb='GET', params=None):
headers = {"Content-Type": "application/json"}
# TODO complete with all verbs
if verb == 'POST':
response = requests.post(
url + entity, data=json.dumps(params), auth=auth, headers=headers)
else:
response = requests.get(url + entity, auth=auth, headers=headers)
if response.ok:
return response.json()
else:
response.raise_for_status()
# users list
users = call_api('/users/')
print 'users', len(users)
for user in users:
print user['username']
print
# comment create
new_sku = uuid.uuid1()
print "creating", new_sku
call_api('/comments/', 'POST', {
'sku': str(new_sku),
'content': 'This is an incredibly positive message!'
})
# comments list
comments = call_api('/comments/')
print 'comments', len(comments)
for comment in comments:
print comment['sku'], comment['content'], comment['tone_is_positive']
| {
"content_hash": "8f6322bb91a67b5ade000b4063e8b236",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 24.333333333333332,
"alnum_prop": 0.6602739726027397,
"repo_name": "dcaravana/interview_restapi",
"id": "ef0536496f4fe40ab306e9fa2985f7b7219356db",
"size": "1095",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clients/py/requests_sku_comment_tone_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1836"
},
{
"name": "Java",
"bytes": "4851"
},
{
"name": "JavaScript",
"bytes": "284"
},
{
"name": "PHP",
"bytes": "169816"
},
{
"name": "Python",
"bytes": "17750"
},
{
"name": "Shell",
"bytes": "2243"
}
],
"symlink_target": ""
} |
import json
import requests
def main(query):
#get the data
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)'
                             ' Chrome/53.0.2785.116 Safari/537.36'}
raw_data = requests.get("https://www.musixmatch.com/search/%s/lyrics" % query, headers=headers).text.encode('utf-8')
#raw_data is now a HTML dump, parse it to make it perfect json
raw_data = raw_data[raw_data.find('{', raw_data.find('mxmProps')) : raw_data.find('</script>', raw_data.find('mxmProps'))]
data = json.loads(raw_data) # Data ready to be queried
total_results = data['lyricsTracks']['length']
print "Top 5 results are:\n"
for x in xrange(5):
song = data['lyricsTracks'][str(x)]['attributes']['track_name']
artist = data['lyricsTracks'][str(x)]['attributes']['artist_name']
album = data['lyricsTracks'][str(x)]['attributes']['album_name']
print "\n\nSong Name:", song
print "\nArtist:", artist
print "\nAlbum:", album
main(str(raw_input("\nEnter the lyrics: \n\n")))
| {
"content_hash": "651c0e00bf4c9231ed3c4ab344239725",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 123,
"avg_line_length": 40,
"alnum_prop": 0.6557692307692308,
"repo_name": "anshuman73/lyric-matcher",
"id": "0c0da528c729a66ca822a5899c09d1e7643aad9d",
"size": "1163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "get_song.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1163"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "mesh3d.colorbar"
_path_str = "mesh3d.colorbar.tickfont"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.mesh3d.colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Tickfont
"""
super(Tickfont, self).__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.mesh3d.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.mesh3d.colorbar.Tickfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| {
"content_hash": "a100e49ac4ca74dea0e391803f52964d",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 82,
"avg_line_length": 37.29955947136564,
"alnum_prop": 0.5588756348175269,
"repo_name": "plotly/python-api",
"id": "e700b3d9adbf9c236069d2303a29658f4dc6e9f4",
"size": "8467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/mesh3d/colorbar/_tickfont.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import pytest
import sitchlib
import os
this_file_dirpath = os.path.dirname(os.path.abspath(__file__))
project_basepath = os.path.join(this_file_dirpath, "../../")
fixtures_path = os.path.join(this_file_dirpath, "../fixtures/")
csv_fixture_file = os.path.join(fixtures_path, "testdata.csv.gz")
class TestIntegrationConfigHelper:
def test_create_config_object(self):
os.environ['OCID_KEY'] = '123456'
os.environ['TWILIO_SID'] = 'asdf3456'
os.environ['TWILIO_TOKEN'] = 'asdflnasgin'
config_obj = sitchlib.ConfigHelper()
assert config_obj.ocid_key == '123456'
assert config_obj.twilio_sid == 'asdf3456'
assert config_obj.twilio_token == 'asdflnasgin'
def test_get_from_env_fail(self):
with pytest.raises(KeyError) as excinfo:
sitchlib.ConfigHelper.get_from_env('nonexist')
assert excinfo
| {
"content_hash": "ac815a3f601b8bc7bc0646776182e9db",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 65,
"avg_line_length": 36.708333333333336,
"alnum_prop": 0.6674233825198638,
"repo_name": "sitch-io/feed_builder",
"id": "f5ba01598dd49eba3508077f6bdab5097023e442",
"size": "881",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sitch/test/integration/test_integration_config_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "667"
},
{
"name": "Python",
"bytes": "50646"
}
],
"symlink_target": ""
} |
"""Test various net timeouts.
- Create three iond nodes:
no_verack_node - we never send a verack in response to their version
no_version_node - we never send a version (only a ping)
no_send_node - we never send any P2P message.
- Start all three nodes
- Wait 1 second
- Assert that we're connected
- Send a ping to no_verack_node and no_version_node
- Wait 30 seconds
- Assert that we're still connected
- Send a ping to no_verack_node and no_version_node
- Wait 31 seconds
- Assert that we're no longer connected (timeout to receive version/verack is 60 seconds)
"""
from time import sleep
from test_framework.mininode import *
from test_framework.test_framework import IonTestFramework
from test_framework.util import *
class TestNode(NodeConnCB):
def on_version(self, conn, message):
# Don't send a verack in response
pass
class TimeoutsTest(IonTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
# Setup the p2p connections and start up the network thread.
self.no_verack_node = TestNode() # never send verack
self.no_version_node = TestNode() # never send version (just ping)
self.no_send_node = TestNode() # never send anything
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_verack_node))
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_version_node, send_version=False))
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_send_node, send_version=False))
self.no_verack_node.add_connection(connections[0])
self.no_version_node.add_connection(connections[1])
self.no_send_node.add_connection(connections[2])
NetworkThread().start() # Start up network handling in another thread
sleep(1)
assert(self.no_verack_node.connected)
assert(self.no_version_node.connected)
assert(self.no_send_node.connected)
ping_msg = msg_ping()
connections[0].send_message(ping_msg)
connections[1].send_message(ping_msg)
sleep(30)
assert "version" in self.no_verack_node.last_message
assert(self.no_verack_node.connected)
assert(self.no_version_node.connected)
assert(self.no_send_node.connected)
connections[0].send_message(ping_msg)
connections[1].send_message(ping_msg)
sleep(31)
assert(not self.no_verack_node.connected)
assert(not self.no_version_node.connected)
assert(not self.no_send_node.connected)
if __name__ == '__main__':
TimeoutsTest().main()
| {
"content_hash": "b08c3772527f1f57a9f034ec0736bcdd",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 119,
"avg_line_length": 33.851851851851855,
"alnum_prop": 0.6684901531728665,
"repo_name": "aspaas/ion",
"id": "34d0a7272ef5cd08ab65b78407faa16056e979bb",
"size": "2951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/p2p-timeouts.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "616463"
},
{
"name": "C++",
"bytes": "4560754"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "18274"
},
{
"name": "Makefile",
"bytes": "16792"
},
{
"name": "NSIS",
"bytes": "5917"
},
{
"name": "Objective-C++",
"bytes": "6205"
},
{
"name": "Python",
"bytes": "96149"
},
{
"name": "QMake",
"bytes": "20721"
},
{
"name": "Shell",
"bytes": "391146"
}
],
"symlink_target": ""
} |
import RPi.GPIO as GPIO
import logging
import Adafruit_PCA9685
import time
# Logging config
logging.basicConfig(filename='servo.log', level=logging.WARNING)
# System variables
pi = 3.14159
servo = 0
arm_angle = 0 # degrees
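# Pulse length at which the continuous-rotation servo is stationary, plus a factor
# compensating for the speed difference between the two rotation directions.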
zero_motion_pl = 410
compensation_factor = 1.027
ccw_full_rot_time = 1.1 # s / 360 deg
base_angle_data_filename = "base_angle.dat" # External file storing base angle value
# GPIO setup function
def GPIO_set(pin, dc):
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin, GPIO.OUT)
servo = GPIO.PWM(pin, 50)
servo.start(dc)
def GPIO_clear(servo):
servo.stop()
GPIO.cleanup()
# Adafruit setup
pwm = Adafruit_PCA9685.PCA9685()
pwm.set_pwm_freq(60)
# Funcs
def determine_zero_pl(pl_min, pl_max, channel):
mid_range_pl = (pl_min + pl_max)/2
initial_pl = mid_range_pl - 100
pl = int(initial_pl)
while pl < (mid_range_pl+100):
pwm.set_pwm(channel, 0, pl)
print(pl)
pl += 10
time.sleep(1)
def speed_loop(pl_min, pl_max, channel):
pwm.set_pwm(channel,0,410)
time.sleep(2)
pwm.set_pwm(channel,0,440)
time.sleep(1.1)
pwm.set_pwm(channel,0,410)
time.sleep(5)
pwm.set_pwm(channel,0,220)
time.sleep(1.1297)
pwm.set_pwm(channel,0,410)
time.sleep(5)
def rotational_positioning(desired_angle, channel):
if desired_angle > 45 or desired_angle < -45:
raise ValueError("Desired angle exceeds current configuration range: min = -45 deg; max \
= 45 deg")
current_angle = 0
with open(base_angle_data_filename, 'r') as f:
current_angle_str = f.read()
print("current_angle_str: '" + current_angle_str + "'")
current_angle = int(current_angle_str)
print(current_angle)
perc_full_rot = 100 * abs((desired_angle - current_angle)) / 360
print(perc_full_rot)
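    # Drive the servo above or below the zero-motion pulse length depending on the
    # direction of travel, for a time proportional to the required angular change.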
if desired_angle < current_angle:
rot_time = ccw_full_rot_time * perc_full_rot / 100
print(rot_time)
pwm.set_pwm(channel, 0, 440)
time.sleep(rot_time)
pwm.set_pwm(channel, 0, 410)
with open(base_angle_data_filename, 'w') as f:
f.write(str(desired_angle))
time.sleep(100000)
elif desired_angle > current_angle:
rot_time = ccw_full_rot_time * compensation_factor * perc_full_rot / 100
pwm.set_pwm(channel, 0, 220)
time.sleep(rot_time)
pwm.set_pwm(channel, 0, 410)
with open(base_angle_data_filename, 'w') as f:
f.write(str(desired_angle))
time.sleep(1000000)
else:
pwm.set_pwm(channel, 0, 410)
time.sleep(100000)
    # current angle must be equal to the desired angle
if __name__ == "__main__":
while True:
import argparse
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument("angle", help="Angle of turn (deg)")
# Arguments for direct GPIO control from Pi
# parser.add_argument("dc_min", help="Minimum Duty Cycle")
# parser.add_argument("dc_max", help="Maximum Duty Cycle")
# parser.add_argument("pin", help="PWM Pin No. (BCM)")
# Arguments for Adafruit PWM hat control
parser.add_argument("pl_min", help="Minimum Pulse Length")
parser.add_argument("pl_max", help="Maximum Pulse Length")
parser.add_argument("channel", help="Channel No. (Adafruit PWM Hat)")
# Parse arguments
args = parser.parse_args()
angle = int(args.angle)
# dc_min = float(args.dc_min)
# dc_max = float(args.dc_max)
# pin = int(args.pin)
pl_min = float(args.pl_min)
pl_max = float(args.pl_max)
channel = int(args.channel)
logging.warning("Channel: %s", channel)
# Actuate servo
# GPIO_set(pin, dc)
# GPIO_clear()
rotational_positioning(angle, channel)
| {
"content_hash": "d939e80aa97f5d6891e1d8b68c78bf64",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 98,
"avg_line_length": 29.3206106870229,
"alnum_prop": 0.614162978391044,
"repo_name": "MANSEDS/Lunar-Rover-2017-Robotics",
"id": "783f0632d27d49fdd04835995017d55ebe5a25ab",
"size": "4035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Continuous_servo_controller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45695"
}
],
"symlink_target": ""
} |
class BinaryTree(object):
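    # Simple binary tree node: rootid stores the value, left/right hold child subtrees.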
def __init__(self, rootid=None):
self.left = None
self.right = None
self.rootid = rootid
self.parents = [rootid]
def getLeftChild(self):
return self.left
def getRightChild(self):
return self.right
def setNodeValue(self, value):
self.rootid = value
def getNodeValue(self):
return self.rootid
    def insert(self, newNode, parent):
        if parent in self.parents:
            # attach the new node to this node instead of a throwaway local tree
            if self.right is None:
                self.right = BinaryTree(newNode)
            else:
                self.left = BinaryTree(newNode)
        else:
            self.parents.append(parent)
def insertLeft(self, newNode):
if self.left is None:
self.left = BinaryTree(newNode)
else:
Tree = BinaryTree(newNode)
Tree.left = self.left
self.left = Tree
def preorder(tree):
    if tree:
        print(tree.getNodeValue())
        preorder(tree.getLeftChild())
        preorder(tree.getRightChild())
b = BinaryTree(0)
b.insert(1,0)
b.insert(3,2)
print(b.getLeftChild())
preorder(b) | {
"content_hash": "711af24e522bd628ecce1124b464a9d2",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 48,
"avg_line_length": 23.51851851851852,
"alnum_prop": 0.5464566929133858,
"repo_name": "Faraaz54/python_training_problems",
"id": "381eb0b5476a4a804b9db23c3781a7ae024bce0f",
"size": "1270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hacker_earth/data_structures/ancenstor_sibling_count.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "191608"
},
{
"name": "HTML",
"bytes": "650319"
},
{
"name": "Python",
"bytes": "138166"
}
],
"symlink_target": ""
} |
from src.settings import Colors
def league_color(league: str) -> Colors:
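    """Return the color bucket configured for the given league name; unknown leagues map to Colors.EMPTY."""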
if league in [
'1 HNL (Croatia)',
'A-League (Australia)',
'Bundesliga 2 (Germany)',
'Coppa Italia (Italy)',
'First Division (Cyprus)',
'Jupiler League (Belgium)',
'League Two (England)',
'Liga 1 (Romania)',
"Ligat hal'Al (Israel)",
'Ligue 2 (France)',
'Major League Soccer (United States)',
'Primera A (Colombia)',
'Primera Division (Argentina)',
'Primera Division (Peru)',
'Prva Liga (Slovenia)',
'Segunda Division (Spain)',
'Segunda Liga (Portugal)',
'Serie B (Brazil)',
'Serie B (Italy)',
'Super Liga (Slovakia)',
'Superliga (Serbia)',
'Tippeligaen (Norway)',
'Vushaya Liga (Russia)',
]:
return Colors.GREEN
if league in [
'1 CFL(Montenegro)',
'A Grupa(Bulgaria)',
'A Lyga(Lithuania)',
'Erste Liga(Austria)',
'J - League 2(Japan)',
'K - League(South Korea)',
'Kubok (Ukraine)',
'OTP Bank Liga (Hungary)',
'Primeira Liga (Portugal)',
'Serie A (Italy)',
'Superettan (Sweden)',
'Veikkausliiga (Finland)',
'Virsliga (Latvia)',
'Vyscha Liga (Ukraine)',
]:
return Colors.RED
if league in [
'1. Division (Russia)',
'Adeccoligaen (Norway)',
'Allsvenskan (Sweden)',
'Bundesliga (Austria)',
'Bundesliga 1 (Germany)',
'Campeonato (Ecuador)',
'Cempionat (Belarus)',
'Challenge League (Switzerland)',
'Championship (England)',
'Championship (Scotland)',
'Conference (England)',
'Ekstraklasa (Poland)',
'Eredivisie (Netherlands)',
'Gambrinus League (Czech Republic)',
'I Liga (Poland)',
'J-League (Japan)',
'Kubok (Belarus)',
'League One (England)',
'Ligue 1 (France)',
'National (France)',
'OFB Cup (Austria)',
'Premier Division (Ireland)',
'Premier League (Scotland)',
'Premier League (Wales)',
'Primera Division (Chile)',
'Primera Division (Mexico)',
'Primera Division (Spain)',
'Primera Division (Uruguay)',
'Proximus League (Belgium)',
'Serie A (Brazil)',
'Super Lig (Turkey)',
'Superleague (Greece)',
'Superligaen (Denmark)',
'Taca da Liga (Portugal)',
'TFF Lig A (Turkey)',
'Viasat Sport Divisionen (Denmark)',
'Ykkonen (Finland)',
]:
return Colors.YELLOW
return Colors.EMPTY
| {
"content_hash": "cf17c8fbe5ddbba98be91f25061a4692",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 46,
"avg_line_length": 30.579545454545453,
"alnum_prop": 0.5247120029728726,
"repo_name": "vapkarian/soccer-analyzer",
"id": "0bf5dd99ab9fed207e6cc05787f89ad27322b3ca",
"size": "2691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/colors/v20/a.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8543"
},
{
"name": "HTML",
"bytes": "182212"
},
{
"name": "Python",
"bytes": "432305"
},
{
"name": "Shell",
"bytes": "1712"
}
],
"symlink_target": ""
} |
import collections
import copy
import uuid
from keystoneauth1.fixture import V2Token
from keystoneauth1 import loading as ks_loading
import mock
from mox3 import mox
import netaddr
from neutronclient.common import exceptions
from neutronclient.v2_0 import client
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_policy import policy as oslo_policy
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import requests_mock
import six
from six.moves import range
from nova.compute import flavors
from nova import context
from nova import exception
from nova.network import model
from nova.network.neutronv2 import api as neutronapi
from nova.network.neutronv2 import constants
from nova import objects
from nova.objects import network_request as net_req_obj
from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
from nova.pci import whitelist as pci_whitelist
from nova import policy
from nova import test
from nova.tests.unit import fake_instance
from nova.tests import uuidsentinel as uuids
CONF = cfg.CONF
# NOTE: Neutron client raises Exception which is discouraged by HACKING.
# We set this variable here and use it for assertions below to avoid
# the hacking checks until we can make neutron client throw a custom
# exception class instead.
NEUTRON_CLIENT_EXCEPTION = Exception
fake_info_cache = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'instance_uuid': uuids.instance,
'network_info': '[]',
}
class MyComparator(mox.Comparator):
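    # Recursively compares dicts, lists and tuples by content so mox can match
    # complex request bodies passed to the Neutron client.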
def __init__(self, lhs):
self.lhs = lhs
def _com_dict(self, lhs, rhs):
if len(lhs) != len(rhs):
return False
for key, value in six.iteritems(lhs):
if key not in rhs:
return False
rhs_value = rhs[key]
if not self._com(value, rhs_value):
return False
return True
def _com_list(self, lhs, rhs):
if len(lhs) != len(rhs):
return False
for lhs_value in lhs:
if lhs_value not in rhs:
return False
return True
def _com(self, lhs, rhs):
if lhs is None:
return rhs is None
if isinstance(lhs, dict):
if not isinstance(rhs, dict):
return False
return self._com_dict(lhs, rhs)
if isinstance(lhs, list):
if not isinstance(rhs, list):
return False
return self._com_list(lhs, rhs)
if isinstance(lhs, tuple):
if not isinstance(rhs, tuple):
return False
return self._com_list(lhs, rhs)
return lhs == rhs
def equals(self, rhs):
return self._com(self.lhs, rhs)
def __repr__(self):
return str(self.lhs)
class TestNeutronClient(test.NoDBTestCase):
def setUp(self):
super(TestNeutronClient, self).setUp()
neutronapi.reset_state()
def test_withtoken(self):
self.flags(url='http://anyhost/', group='neutron')
self.flags(timeout=30, group='neutron')
my_context = context.RequestContext('userid',
uuids.my_tenant,
auth_token='token')
cl = neutronapi.get_client(my_context)
self.assertEqual(CONF.neutron.url, cl.httpclient.endpoint_override)
self.assertEqual(my_context.auth_token,
cl.httpclient.auth.auth_token)
self.assertEqual(CONF.neutron.timeout, cl.httpclient.session.timeout)
def test_withouttoken(self):
my_context = context.RequestContext('userid', uuids.my_tenant)
self.assertRaises(exception.Unauthorized,
neutronapi.get_client,
my_context)
@mock.patch.object(client.Client, "list_networks",
side_effect=exceptions.Unauthorized())
def test_Unauthorized_user(self, mock_list_networks):
my_context = context.RequestContext('userid', uuids.my_tenant,
auth_token='token',
is_admin=False)
client = neutronapi.get_client(my_context)
self.assertRaises(
exception.Unauthorized,
client.list_networks)
@mock.patch.object(client.Client, "list_networks",
side_effect=exceptions.Unauthorized())
def test_Unauthorized_admin(self, mock_list_networks):
my_context = context.RequestContext('userid', uuids.my_tenant,
auth_token='token',
is_admin=True)
client = neutronapi.get_client(my_context)
self.assertRaises(
exception.NeutronAdminCredentialConfigurationInvalid,
client.list_networks)
@mock.patch.object(client.Client, "create_port",
side_effect=exceptions.Forbidden())
def test_Forbidden(self, mock_create_port):
my_context = context.RequestContext('userid', uuids.my_tenant,
auth_token='token',
is_admin=False)
client = neutronapi.get_client(my_context)
self.assertRaises(
exception.Forbidden,
client.create_port)
def test_withtoken_context_is_admin(self):
self.flags(url='http://anyhost/', group='neutron')
self.flags(timeout=30, group='neutron')
my_context = context.RequestContext('userid',
uuids.my_tenant,
auth_token='token',
is_admin=True)
cl = neutronapi.get_client(my_context)
self.assertEqual(CONF.neutron.url, cl.httpclient.endpoint_override)
self.assertEqual(my_context.auth_token,
cl.httpclient.auth.auth_token)
self.assertEqual(CONF.neutron.timeout, cl.httpclient.session.timeout)
def test_withouttoken_keystone_connection_error(self):
self.flags(url='http://anyhost/', group='neutron')
my_context = context.RequestContext('userid', uuids.my_tenant)
self.assertRaises(NEUTRON_CLIENT_EXCEPTION,
neutronapi.get_client,
my_context)
@mock.patch('nova.network.neutronv2.api._ADMIN_AUTH')
@mock.patch.object(client.Client, "list_networks", new=mock.Mock())
def test_reuse_admin_token(self, m):
self.flags(url='http://anyhost/', group='neutron')
my_context = context.RequestContext('userid', uuids.my_tenant,
auth_token='token')
tokens = ['new_token2', 'new_token1']
def token_vals(*args, **kwargs):
return tokens.pop()
m.get_token.side_effect = token_vals
client1 = neutronapi.get_client(my_context, True)
client1.list_networks(retrieve_all=False)
self.assertEqual('new_token1', client1.httpclient.auth.get_token(None))
client1 = neutronapi.get_client(my_context, True)
client1.list_networks(retrieve_all=False)
self.assertEqual('new_token2', client1.httpclient.auth.get_token(None))
@mock.patch.object(ks_loading, 'load_auth_from_conf_options')
def test_load_auth_plugin_failed(self, mock_load_from_conf):
mock_load_from_conf.return_value = None
from neutronclient.common import exceptions as neutron_client_exc
self.assertRaises(neutron_client_exc.Unauthorized,
neutronapi._load_auth_plugin, CONF)
@mock.patch.object(client.Client, "list_networks",
side_effect=exceptions.Unauthorized())
def test_wrapper_exception_translation(self, m):
my_context = context.RequestContext('userid', 'my_tenantid',
auth_token='token')
client = neutronapi.get_client(my_context)
self.assertRaises(
exception.Unauthorized,
client.list_networks)
class TestNeutronv2Base(test.TestCase):
def setUp(self):
super(TestNeutronv2Base, self).setUp()
self.context = context.RequestContext('userid', uuids.my_tenant)
setattr(self.context,
'auth_token',
'bff4a5a6b9eb4ea2a6efec6eefb77936')
self.tenant_id = '9d049e4b60b64716978ab415e6fbd5c0'
self.instance = {'project_id': self.tenant_id,
'uuid': uuids.fake,
'display_name': 'test_instance',
'hostname': 'test-instance',
'availability_zone': 'nova',
'host': 'some_host',
'info_cache': {'network_info': []},
'security_groups': []}
self.instance2 = {'project_id': self.tenant_id,
'uuid': uuids.fake,
'display_name': 'test_instance2',
'availability_zone': 'nova',
'info_cache': {'network_info': []},
'security_groups': []}
self.nets1 = [{'id': uuids.my_netid1,
'name': 'my_netname1',
'subnets': ['mysubnid1'],
'tenant_id': uuids.my_tenant}]
self.nets2 = []
self.nets2.append(self.nets1[0])
self.nets2.append({'id': uuids.my_netid2,
'name': 'my_netname2',
'subnets': ['mysubnid2'],
'tenant_id': uuids.my_tenant})
self.nets3 = self.nets2 + [{'id': uuids.my_netid3,
'name': 'my_netname3',
'subnets': ['mysubnid3'],
'tenant_id': uuids.my_tenant}]
self.nets4 = [{'id': 'his_netid4',
'name': 'his_netname4',
'tenant_id': 'his_tenantid'}]
# A network request with external networks
self.nets5 = self.nets1 + [{'id': 'the-external-one',
'name': 'out-of-this-world',
'subnets': ['mysubnid5'],
'router:external': True,
'tenant_id': 'should-be-an-admin'}]
# A network request with a duplicate
self.nets6 = []
self.nets6.append(self.nets1[0])
self.nets6.append(self.nets1[0])
# A network request with a combo
self.nets7 = []
self.nets7.append(self.nets2[1])
self.nets7.append(self.nets1[0])
self.nets7.append(self.nets2[1])
self.nets7.append(self.nets1[0])
# A network request with only external network
self.nets8 = [self.nets5[1]]
# An empty network
self.nets9 = []
# A network that is both shared and external
self.nets10 = [{'id': 'net_id', 'name': 'net_name',
'router:external': True, 'shared': True,
'subnets': ['mysubnid10']}]
# A network with non-blank dns_domain to test _update_port_dns_name
self.nets11 = [{'id': uuids.my_netid1,
'name': 'my_netname1',
'subnets': ['mysubnid1'],
'tenant_id': uuids.my_tenant,
'dns_domain': 'my-domain.org.'}]
self.nets = [self.nets1, self.nets2, self.nets3, self.nets4,
self.nets5, self.nets6, self.nets7, self.nets8,
self.nets9, self.nets10, self.nets11]
self.port_address = '10.0.1.2'
self.port_data1 = [{'network_id': uuids.my_netid1,
'device_id': self.instance2['uuid'],
'tenant_id': self.tenant_id,
'device_owner': 'compute:nova',
'id': uuids.portid_1,
'binding:vnic_type': model.VNIC_TYPE_NORMAL,
'status': 'DOWN',
'admin_state_up': True,
'fixed_ips': [{'ip_address': self.port_address,
'subnet_id': 'my_subid1'}],
'mac_address': 'my_mac1', }]
self.float_data1 = [{'port_id': uuids.portid_1,
'fixed_ip_address': self.port_address,
'floating_ip_address': '172.0.1.2'}]
self.dhcp_port_data1 = [{'fixed_ips': [{'ip_address': '10.0.1.9',
'subnet_id': 'my_subid1'}],
'status': 'ACTIVE',
'admin_state_up': True}]
self.port_address2 = '10.0.2.2'
self.port_data2 = []
self.port_data2.append(self.port_data1[0])
self.port_data2.append({'network_id': uuids.my_netid2,
'device_id': self.instance['uuid'],
'tenant_id': self.tenant_id,
'admin_state_up': True,
'status': 'ACTIVE',
'device_owner': 'compute:nova',
'id': uuids.portid_2,
'binding:vnic_type': model.VNIC_TYPE_NORMAL,
'fixed_ips':
[{'ip_address': self.port_address2,
'subnet_id': 'my_subid2'}],
'mac_address': 'my_mac2', })
self.float_data2 = []
self.float_data2.append(self.float_data1[0])
self.float_data2.append({'port_id': uuids.portid_2,
'fixed_ip_address': '10.0.2.2',
'floating_ip_address': '172.0.2.2'})
self.port_data3 = [{'network_id': uuids.my_netid1,
'device_id': 'device_id3',
'tenant_id': self.tenant_id,
'status': 'DOWN',
'admin_state_up': True,
'device_owner': 'compute:nova',
'id': uuids.portid_3,
'binding:vnic_type': model.VNIC_TYPE_NORMAL,
'fixed_ips': [], # no fixed ip
'mac_address': 'my_mac3', }]
self.subnet_data1 = [{'id': 'my_subid1',
'cidr': '10.0.1.0/24',
'network_id': uuids.my_netid1,
'gateway_ip': '10.0.1.1',
'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
self.subnet_data2 = []
self.subnet_data_n = [{'id': 'my_subid1',
'cidr': '10.0.1.0/24',
'network_id': uuids.my_netid1,
'gateway_ip': '10.0.1.1',
'dns_nameservers': ['8.8.1.1', '8.8.1.2']},
{'id': 'my_subid2',
'cidr': '20.0.1.0/24',
'network_id': uuids.my_netid2,
'gateway_ip': '20.0.1.1',
'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
self.subnet_data2.append({'id': 'my_subid2',
'cidr': '10.0.2.0/24',
'network_id': uuids.my_netid2,
'gateway_ip': '10.0.2.1',
'dns_nameservers': ['8.8.2.1', '8.8.2.2']})
self.fip_pool = {'id': '4fdbfd74-eaf8-4884-90d9-00bd6f10c2d3',
'name': 'ext_net',
'router:external': True,
'tenant_id': 'admin_tenantid'}
self.fip_pool_nova = {'id': '435e20c3-d9f1-4f1b-bee5-4611a1dd07db',
'name': 'nova',
'router:external': True,
'tenant_id': 'admin_tenantid'}
self.fip_unassociated = {'tenant_id': uuids.my_tenant,
'id': uuids.fip_id1,
'floating_ip_address': '172.24.4.227',
'floating_network_id': self.fip_pool['id'],
'port_id': None,
'fixed_ip_address': None,
'router_id': None}
fixed_ip_address = self.port_data2[1]['fixed_ips'][0]['ip_address']
self.fip_associated = {'tenant_id': uuids.my_tenant,
'id': uuids.fip_id2,
'floating_ip_address': '172.24.4.228',
'floating_network_id': self.fip_pool['id'],
'port_id': self.port_data2[1]['id'],
'fixed_ip_address': fixed_ip_address,
'router_id': 'router_id1'}
self._returned_nw_info = []
self.mox.StubOutWithMock(neutronapi, 'get_client')
self.moxed_client = self.mox.CreateMock(client.Client)
self.addCleanup(CONF.reset)
self.addCleanup(self.mox.VerifyAll)
self.addCleanup(self.mox.UnsetStubs)
self.addCleanup(self.stubs.UnsetAll)
def _fake_instance_object(self, instance):
return fake_instance.fake_instance_obj(self.context, **instance)
def _fake_instance_info_cache(self, nw_info, instance_uuid=None):
info_cache = {}
if instance_uuid is None:
info_cache['instance_uuid'] = uuids.fake
else:
info_cache['instance_uuid'] = instance_uuid
info_cache['deleted'] = False
info_cache['created_at'] = timeutils.utcnow()
info_cache['deleted_at'] = timeutils.utcnow()
info_cache['updated_at'] = timeutils.utcnow()
info_cache['network_info'] = model.NetworkInfo.hydrate(six.text_type(
jsonutils.dumps(nw_info)))
return info_cache
def _fake_instance_object_with_info_cache(self, instance):
expected_attrs = ['info_cache']
instance = objects.Instance._from_db_object(self.context,
objects.Instance(), fake_instance.fake_db_instance(**instance),
expected_attrs=expected_attrs)
return instance
def _stub_allocate_for_instance(self, net_idx=1, **kwargs):
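        # Set up the mox expectations for an allocate_for_instance() call; the
        # optional _break kwarg stops the recording early at a named checkpoint.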
self.instance = self._fake_instance_object(self.instance)
self.instance2 = self._fake_instance_object(self.instance2)
api = neutronapi.API()
self.mox.StubOutWithMock(api, 'get_instance_nw_info')
has_extra_dhcp_opts = False
dhcp_options = kwargs.get('dhcp_options')
if dhcp_options is not None:
has_extra_dhcp_opts = True
has_dns_extension = False
if kwargs.get('dns_extension'):
has_dns_extension = True
api.extensions[constants.DNS_INTEGRATION] = 1
# Net idx is 1-based for compatibility with existing unit tests
nets = self.nets[net_idx - 1]
ports = {}
fixed_ips = {}
macs = kwargs.pop('macs', None)
if macs:
macs = set(macs)
req_net_ids = []
ordered_networks = []
self._stub_allocate_for_instance_show_port(nets, ports, fixed_ips,
macs, req_net_ids, ordered_networks, **kwargs)
if kwargs.get('_break') == 'pre_list_networks':
self.mox.ReplayAll()
return api
self._stub_allocate_for_instance_list_networks(req_net_ids, nets)
if kwargs.get('_break') == 'post_list_networks':
self.mox.ReplayAll()
return api
if (('requested_networks' not in kwargs or
kwargs['requested_networks'].as_tuples() == [(None, None, None)])
and len(nets) > 1):
self.mox.ReplayAll()
return api
if kwargs.get('_break') == 'post_list_extensions':
self.mox.ReplayAll()
return api
self._stub_allocate_for_instance_create_port(
ordered_networks, fixed_ips, nets)
has_portbinding = self._stub_allocate_for_instance_port_binding(
api, kwargs.get('portbinding'), has_dns_extension)
preexisting_port_ids = []
ports_in_requested_net_order = []
nets_in_requested_net_order = []
index = 0
for request in ordered_networks:
index += 1
port_req_body = {
'port': {
'device_id': self.instance.uuid,
'device_owner': 'compute:nova',
},
}
# Network lookup for available network_id
network = None
for net in nets:
if net['id'] == request.network_id:
network = net
break
            # If the net_id did not pass validate_networks() and is not
            # available here, skip it safely rather than continuing with a
            # None network.
else:
continue
if has_portbinding:
port_req_body['port'][neutronapi.BINDING_HOST_ID] = (
self.instance.get('host'))
if has_dns_extension and not network.get('dns_domain'):
port_req_body['port']['dns_name'] = self.instance.hostname
if not has_portbinding and not has_dns_extension:
api._populate_neutron_extension_values(mox.IgnoreArg(),
self.instance, mox.IgnoreArg(),
mox.IgnoreArg(), network=network,
neutron=self.moxed_client,
bind_host_id=None).AndReturn(None)
elif has_portbinding:
# since _populate_neutron_extension_values() will call
# _has_port_binding_extension()
api._has_port_binding_extension(mox.IgnoreArg(),
neutron=self.moxed_client).\
AndReturn(has_portbinding)
else:
api._refresh_neutron_extensions_cache(mox.IgnoreArg(),
neutron=self.moxed_client)
if macs:
port_req_body['port']['mac_address'] = macs.pop()
if has_extra_dhcp_opts:
port_req_body['port']['extra_dhcp_opts'] = dhcp_options
if not request.port_id:
port_id = uuids.fake
update_port_res = {'port': {
'id': port_id,
'mac_address': 'fakemac%i' % index}}
ports_in_requested_net_order.append(port_id)
if kwargs.get('_break') == 'mac' + request.network_id:
self.mox.ReplayAll()
return api
else:
ports_in_requested_net_order.append(request.port_id)
preexisting_port_ids.append(request.port_id)
port_id = request.port_id
update_port_res = {'port': ports[port_id]}
new_mac = port_req_body['port'].get('mac_address')
if new_mac:
update_port_res['port']['mac_address'] = new_mac
self.moxed_client.update_port(port_id,
MyComparator(port_req_body)
).AndReturn(update_port_res)
if has_portbinding and has_dns_extension:
api._has_port_binding_extension(mox.IgnoreArg()).\
AndReturn(has_portbinding)
if net_idx == 11:
port_req_body_dns = {
'port': {
'dns_name': self.instance.hostname
}
}
res_port_dns = {
'port': {
'id': ports_in_requested_net_order[-1]
}
}
self.moxed_client.update_port(
ports_in_requested_net_order[-1],
MyComparator(port_req_body_dns)
).AndReturn(res_port_dns)
nets_in_requested_net_order.append(network)
api.get_instance_nw_info(mox.IgnoreArg(),
self.instance,
networks=nets_in_requested_net_order,
port_ids=ports_in_requested_net_order,
admin_client=self.moxed_client,
preexisting_port_ids=preexisting_port_ids,
update_cells=True
).AndReturn(self._returned_nw_info)
self.mox.ReplayAll()
return api
def _stub_allocate_for_instance_port_binding(self, api, portbinding,
has_dns_extension):
if portbinding:
neutronapi.get_client(mox.IgnoreArg()).AndReturn(
self.moxed_client)
neutronapi.get_client(
mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
has_portbinding = False
if portbinding:
has_portbinding = True
api.extensions[constants.PORTBINDING_EXT] = 1
self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache')
api._refresh_neutron_extensions_cache(mox.IgnoreArg(),
neutron=self.moxed_client)
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(mox.IgnoreArg(),
neutron=self.moxed_client,
refresh_cache=True).AndReturn(has_portbinding)
elif has_dns_extension:
self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache')
api._refresh_neutron_extensions_cache(mox.IgnoreArg(),
neutron=self.moxed_client)
else:
self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache')
api._refresh_neutron_extensions_cache(mox.IgnoreArg(),
neutron=self.moxed_client)
self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
return has_portbinding
def _stub_allocate_for_instance_show_port(self, nets, ports, fixed_ips,
macs, req_net_ids, ordered_networks, **kwargs):
if 'requested_networks' in kwargs:
for request in kwargs['requested_networks']:
if request.port_id:
if request.port_id == uuids.portid_3:
self.moxed_client.show_port(request.port_id
).AndReturn(
{'port': {'id': uuids.portid_3,
'network_id': uuids.my_netid1,
'tenant_id': self.tenant_id,
'mac_address': 'my_mac1',
'device_id': kwargs.get('_device') and
self.instance2.uuid or
''}})
ports[uuids.my_netid1] = [self.port_data1[0],
self.port_data3[0]]
ports[request.port_id] = self.port_data3[0]
request.network_id = uuids.my_netid1
if macs is not None:
macs.discard('my_mac1')
elif request.port_id == uuids.non_existent_uuid:
PortNotFound = exceptions.PortNotFoundClient(
status_code=404)
self.moxed_client.show_port(request.port_id
).AndRaise(PortNotFound)
else:
self.moxed_client.show_port(request.port_id).AndReturn(
{'port': {'id': uuids.portid_1,
'network_id': uuids.my_netid1,
'tenant_id': self.tenant_id,
'mac_address': 'my_mac1',
'device_id': kwargs.get('_device') and
self.instance2.uuid or
'',
'dns_name': kwargs.get('_dns_name') or
''}})
ports[request.port_id] = self.port_data1[0]
request.network_id = uuids.my_netid1
if macs is not None:
macs.discard('my_mac1')
else:
fixed_ips[request.network_id] = request.address
req_net_ids.append(request.network_id)
ordered_networks.append(request)
else:
for n in nets:
ordered_networks.append(
objects.NetworkRequest(network_id=n['id']))
def _stub_allocate_for_instance_list_networks(self, req_net_ids, nets):
# search all req_net_ids as in api.py
search_ids = req_net_ids
if search_ids:
mox_list_params = {'id': mox.SameElementsAs(search_ids)}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': nets})
else:
mox_list_params = {'tenant_id': self.instance.project_id,
'shared': False}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': nets})
mox_list_params = {'shared': True}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': []})
def _stub_allocate_for_instance_create_port(self, ordered_networks,
fixed_ips, nets):
for request in ordered_networks:
if not request.port_id:
# Check network is available, skip if not
network = None
for net in nets:
if net['id'] == request.network_id:
network = net
break
if network is None:
continue
port_req_body_create = {'port': {}}
request.address = fixed_ips.get(request.network_id)
if request.address:
port_req_body_create['port']['fixed_ips'] = [
{'ip_address': str(request.address)}]
port_req_body_create['port']['network_id'] = \
request.network_id
port_req_body_create['port']['admin_state_up'] = True
port_req_body_create['port']['tenant_id'] = \
self.instance.project_id
res_port = {'port': {'id': uuids.fake}}
self.moxed_client.create_port(
MyComparator(port_req_body_create)).AndReturn(res_port)
def _verify_nw_info(self, nw_inf, index=0):
id_suffix = index + 1
self.assertEqual('10.0.%s.2' % id_suffix,
nw_inf.fixed_ips()[index]['address'])
self.assertEqual('172.0.%s.2' % id_suffix,
nw_inf.fixed_ips()[index].floating_ip_addresses()[0])
self.assertEqual('my_netname%s' % id_suffix,
nw_inf[index]['network']['label'])
self.assertEqual(getattr(uuids, 'portid_%s' % id_suffix),
nw_inf[index]['id'])
self.assertEqual('my_mac%s' % id_suffix, nw_inf[index]['address'])
self.assertEqual('10.0.%s.0/24' % id_suffix,
nw_inf[index]['network']['subnets'][0]['cidr'])
ip_addr = model.IP(address='8.8.%s.1' % id_suffix,
version=4, type='dns')
self.assertIn(ip_addr, nw_inf[index]['network']['subnets'][0]['dns'])
def _get_instance_nw_info(self, number):
api = neutronapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(mox.IgnoreArg(),
self.instance['uuid'],
mox.IgnoreArg()).AndReturn(
fake_info_cache)
port_data = number == 1 and self.port_data1 or self.port_data2
net_info_cache = []
for port in port_data:
net_info_cache.append({"network": {"id": port['network_id']},
"id": port['id']})
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': port_data})
net_ids = [port['network_id'] for port in port_data]
nets = number == 1 and self.nets1 or self.nets2
self.moxed_client.list_networks(
id=net_ids).AndReturn({'networks': nets})
for i in range(1, number + 1):
float_data = number == 1 and self.float_data1 or self.float_data2
for ip in port_data[i - 1]['fixed_ips']:
float_data = [x for x in float_data
if x['fixed_ip_address'] == ip['ip_address']]
self.moxed_client.list_floatingips(
fixed_ip_address=ip['ip_address'],
port_id=port_data[i - 1]['id']).AndReturn(
{'floatingips': float_data})
subnet_data = i == 1 and self.subnet_data1 or self.subnet_data2
self.moxed_client.list_subnets(
id=mox.SameElementsAs(['my_subid%s' % i])).AndReturn(
{'subnets': subnet_data})
self.moxed_client.list_ports(
network_id=subnet_data[0]['network_id'],
device_owner='network:dhcp').AndReturn(
{'ports': []})
self.instance['info_cache'] = self._fake_instance_info_cache(
net_info_cache, self.instance['uuid'])
self.mox.StubOutWithMock(api.db, 'instance_info_cache_get')
api.db.instance_info_cache_get(mox.IgnoreArg(),
self.instance['uuid']).AndReturn(
self.instance['info_cache'])
self.mox.ReplayAll()
instance = self._fake_instance_object_with_info_cache(self.instance)
nw_inf = api.get_instance_nw_info(self.context, instance)
for i in range(0, number):
self._verify_nw_info(nw_inf, i)
def _allocate_for_instance(self, net_idx=1, **kwargs):
api = self._stub_allocate_for_instance(net_idx, **kwargs)
self._vifs_created = []
def _new_vif(*args):
m = mock.MagicMock()
self._vifs_created.append(m)
return m
with mock.patch('nova.objects.VirtualInterface') as mock_vif:
mock_vif.side_effect = _new_vif
requested_networks = kwargs.get("requested_networks", None)
allowed_keys = ["macs", "security_groups",
"dhcp_options", "bind_host_id"]
afi_kwargs = {}
for key in kwargs.keys():
if key in allowed_keys:
afi_kwargs[key] = kwargs[key]
return api.allocate_for_instance(self.context, self.instance,
False, requested_networks, **afi_kwargs)
class TestNeutronv2(TestNeutronv2Base):
def test_get_instance_nw_info_1(self):
# Test to get one port in one network and subnet.
neutronapi.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self._get_instance_nw_info(1)
def test_get_instance_nw_info_2(self):
# Test to get one port in each of two networks and subnets.
neutronapi.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self._get_instance_nw_info(2)
def test_get_instance_nw_info_with_nets_add_interface(self):
        # This tests that adding an interface to an instance does not
        # remove the first interface from the instance.
network_model = model.Network(id='network_id',
bridge='br-int',
injected='injected',
label='fake_network',
tenant_id='fake_tenant')
network_cache = {'info_cache': {
'network_info': [{'id': self.port_data2[0]['id'],
'address': 'mac_address',
'network': network_model,
'type': 'ovs',
'ovs_interfaceid': 'ovs_interfaceid',
'devname': 'devname'}]}}
self._fake_get_instance_nw_info_helper(network_cache,
self.port_data2,
self.nets2,
[self.port_data2[1]['id']])
def test_get_instance_nw_info_remove_ports_from_neutron(self):
        # This tests that when a port is removed in neutron it
        # is also removed from the nova network cache.
network_model = model.Network(id=self.port_data2[0]['network_id'],
bridge='br-int',
injected='injected',
label='fake_network',
tenant_id='fake_tenant')
network_cache = {'info_cache': {
'network_info': [{'id': 'network_id',
'address': 'mac_address',
'network': network_model,
'type': 'ovs',
'ovs_interfaceid': 'ovs_interfaceid',
'devname': 'devname'}]}}
self._fake_get_instance_nw_info_helper(network_cache,
self.port_data2,
None,
None)
def test_get_instance_nw_info_ignores_neutron_ports(self):
# Tests that only ports in the network_cache are updated
# and ports returned from neutron that match the same
# instance_id/device_id are ignored.
port_data2 = copy.copy(self.port_data2)
# set device_id on the ports to be the same.
port_data2[1]['device_id'] = port_data2[0]['device_id']
network_model = model.Network(id='network_id',
bridge='br-int',
injected='injected',
label='fake_network',
tenant_id='fake_tenant')
network_cache = {'info_cache': {
'network_info': [{'id': 'network_id',
'address': 'mac_address',
'network': network_model,
'type': 'ovs',
'ovs_interfaceid': 'ovs_interfaceid',
'devname': 'devname'}]}}
self._fake_get_instance_nw_info_helper(network_cache,
port_data2,
None,
None)
def test_get_instance_nw_info_ignores_neutron_ports_empty_cache(self):
# Tests that ports returned from neutron that match the same
# instance_id/device_id are ignored when the instance info cache is
# empty.
port_data2 = copy.copy(self.port_data2)
# set device_id on the ports to be the same.
port_data2[1]['device_id'] = port_data2[0]['device_id']
network_cache = {'info_cache': {'network_info': []}}
self._fake_get_instance_nw_info_helper(network_cache,
port_data2,
None,
None)
def _fake_get_instance_nw_info_helper(self, network_cache,
current_neutron_ports,
networks=None, port_ids=None):
"""Helper function to test get_instance_nw_info.
:param network_cache - data already in the nova network cache.
:param current_neutron_ports - updated list of ports from neutron.
:param networks - networks of ports being added to instance.
:param port_ids - new ports being added to instance.
"""
# keep a copy of the original ports/networks to pass to
# get_instance_nw_info() as the code below changes them.
original_port_ids = copy.copy(port_ids)
original_networks = copy.copy(networks)
api = neutronapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(
mox.IgnoreArg(),
self.instance['uuid'], mox.IgnoreArg()).AndReturn(fake_info_cache)
neutronapi.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': current_neutron_ports})
ifaces = network_cache['info_cache']['network_info']
if port_ids is None:
port_ids = [iface['id'] for iface in ifaces]
net_ids = [iface['network']['id'] for iface in ifaces]
nets = [{'id': iface['network']['id'],
'name': iface['network']['label'],
'tenant_id': iface['network']['meta']['tenant_id']}
for iface in ifaces]
if networks is None:
if ifaces:
self.moxed_client.list_networks(
id=net_ids).AndReturn({'networks': nets})
else:
non_shared_nets = [
{'id': iface['network']['id'],
'name': iface['network']['label'],
'tenant_id': iface['network']['meta']['tenant_id']}
for iface in ifaces if not iface['shared']]
shared_nets = [
{'id': iface['network']['id'],
'name': iface['network']['label'],
'tenant_id': iface['network']['meta']['tenant_id']}
for iface in ifaces if iface['shared']]
self.moxed_client.list_networks(
shared=False,
tenant_id=self.instance['project_id']
).AndReturn({'networks': non_shared_nets})
self.moxed_client.list_networks(
shared=True).AndReturn({'networks': shared_nets})
else:
networks = networks + [
dict(id=iface['network']['id'],
name=iface['network']['label'],
tenant_id=iface['network']['meta']['tenant_id'])
for iface in ifaces]
port_ids = [iface['id'] for iface in ifaces] + port_ids
index = 0
current_neutron_port_map = {}
for current_neutron_port in current_neutron_ports:
current_neutron_port_map[current_neutron_port['id']] = (
current_neutron_port)
for port_id in port_ids:
current_neutron_port = current_neutron_port_map.get(port_id)
if current_neutron_port:
for ip in current_neutron_port['fixed_ips']:
self.moxed_client.list_floatingips(
fixed_ip_address=ip['ip_address'],
port_id=current_neutron_port['id']).AndReturn(
{'floatingips': [self.float_data2[index]]})
self.moxed_client.list_subnets(
id=mox.SameElementsAs([ip['subnet_id']])
).AndReturn(
{'subnets': [self.subnet_data_n[index]]})
self.moxed_client.list_ports(
network_id=current_neutron_port['network_id'],
device_owner='network:dhcp').AndReturn(
{'ports': self.dhcp_port_data1})
index += 1
self.instance['info_cache'] = self._fake_instance_info_cache(
network_cache['info_cache']['network_info'], self.instance['uuid'])
self.mox.StubOutWithMock(api.db, 'instance_info_cache_get')
api.db.instance_info_cache_get(
mox.IgnoreArg(),
self.instance['uuid']).MultipleTimes().AndReturn(
self.instance['info_cache'])
self.mox.ReplayAll()
instance = self._fake_instance_object_with_info_cache(self.instance)
nw_infs = api.get_instance_nw_info(self.context,
instance,
networks=original_networks,
port_ids=original_port_ids)
self.assertEqual(index, len(nw_infs))
# ensure that nic ordering is preserved
for iface_index in range(index):
self.assertEqual(port_ids[iface_index],
nw_infs[iface_index]['id'])
def test_get_instance_nw_info_without_subnet(self):
# Test get instance_nw_info for a port without subnet.
api = neutronapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(
mox.IgnoreArg(),
self.instance['uuid'], mox.IgnoreArg()).AndReturn(fake_info_cache)
neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': self.port_data3})
self.moxed_client.list_networks(
id=[self.port_data1[0]['network_id']]).AndReturn(
{'networks': self.nets1})
net_info_cache = []
for port in self.port_data3:
net_info_cache.append({"network": {"id": port['network_id']},
"id": port['id']})
self.instance['info_cache'] = self._fake_instance_info_cache(
net_info_cache, self.instance['uuid'])
self.mox.StubOutWithMock(api.db, 'instance_info_cache_get')
api.db.instance_info_cache_get(
mox.IgnoreArg(),
self.instance['uuid']).AndReturn(self.instance['info_cache'])
self.mox.ReplayAll()
instance = self._fake_instance_object_with_info_cache(self.instance)
nw_inf = api.get_instance_nw_info(self.context,
instance)
id_suffix = 3
self.assertEqual(0, len(nw_inf.fixed_ips()))
self.assertEqual('my_netname1', nw_inf[0]['network']['label'])
self.assertEqual(uuids.portid_3, nw_inf[0]['id'])
self.assertEqual('my_mac%s' % id_suffix, nw_inf[0]['address'])
self.assertEqual(0, len(nw_inf[0]['network']['subnets']))
def test_refresh_neutron_extensions_cache(self):
api = neutronapi.API()
# Note: Don't want the default get_client from setUp()
self.mox.ResetAll()
neutronapi.get_client(mox.IgnoreArg()).AndReturn(
self.moxed_client)
self.moxed_client.list_extensions().AndReturn(
{'extensions': [{'name': constants.QOS_QUEUE}]})
self.mox.ReplayAll()
api._refresh_neutron_extensions_cache(mox.IgnoreArg())
self.assertEqual(
{constants.QOS_QUEUE: {'name': constants.QOS_QUEUE}},
api.extensions)
def test_populate_neutron_extension_values_rxtx_factor(self):
api = neutronapi.API()
# Note: Don't want the default get_client from setUp()
self.mox.ResetAll()
neutronapi.get_client(mox.IgnoreArg()).AndReturn(
self.moxed_client)
self.moxed_client.list_extensions().AndReturn(
{'extensions': [{'name': constants.QOS_QUEUE}]})
self.mox.ReplayAll()
flavor = flavors.get_default_flavor()
flavor['rxtx_factor'] = 1
instance = objects.Instance(system_metadata={})
instance.flavor = flavor
port_req_body = {'port': {}}
api._populate_neutron_extension_values(self.context, instance,
None, port_req_body)
self.assertEqual(1, port_req_body['port']['rxtx_factor'])
def test_allocate_for_instance_1(self):
# Allocate one port in one network env.
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self._allocate_for_instance(1)
def test_allocate_for_instance_2(self):
# Allocate one port in two networks env.
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
api = self._stub_allocate_for_instance(net_idx=2)
self.assertRaises(exception.NetworkAmbiguous,
api.allocate_for_instance,
self.context, self.instance, False, None)
def test_allocate_for_instance_accepts_macs_kwargs_None(self):
# The macs kwarg should be accepted as None.
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self._allocate_for_instance(1, macs=None)
def test_allocate_for_instance_accepts_macs_kwargs_set(self):
        # The macs kwarg should be accepted as a set; the
        # _allocate_for_instance helper checks that the mac is used to create
        # a port.
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self._allocate_for_instance(1, macs=set(['ab:cd:ef:01:23:45']))
def test_allocate_for_instance_with_mac_added_to_port(self):
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=uuids.portid_1)])
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
# NOTE(johngarbutt) we override the provided mac with a new one
self._allocate_for_instance(net_idx=1,
requested_networks=requested_networks,
macs=set(['ab:cd:ef:01:23:45']))
self.assertEqual('ab:cd:ef:01:23:45/%s' % uuids.portid_1,
self._vifs_created[0].address)
def test_allocate_for_instance_accepts_only_portid(self):
# Make sure allocate_for_instance works when only a portid is provided
self._returned_nw_info = self.port_data1
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
result = self._allocate_for_instance(
requested_networks=objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=uuids.portid_1,
tag='test')]))
self.assertEqual(self.port_data1, result)
self.assertEqual(1, len(self._vifs_created))
self.assertEqual('test', self._vifs_created[0].tag)
self.assertEqual(self.instance.uuid,
self._vifs_created[0].instance_uuid)
self.assertEqual(uuids.portid_1, self._vifs_created[0].uuid)
self.assertEqual('%s/%s' % (self.port_data1[0]['mac_address'],
self.port_data1[0]['id']),
self._vifs_created[0].address)
@mock.patch('nova.network.neutronv2.api.API._unbind_ports')
def test_allocate_for_instance_not_enough_macs_via_ports(self,
mock_unbind):
# using a hypervisor MAC via a pre-created port will stop it being
# used to dynamically create a port on a network. We put the network
# first in requested_networks so that if the code were to not pre-check
# requested ports, it would incorrectly assign the mac and not fail.
requested_networks = objects.NetworkRequestList(
objects = [
objects.NetworkRequest(network_id=self.nets2[1]['id']),
objects.NetworkRequest(port_id=uuids.portid_1)])
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
api = self._stub_allocate_for_instance(
net_idx=2, requested_networks=requested_networks,
macs=set(['my_mac1']),
_break='mac' + self.nets2[1]['id'])
self.assertRaises(exception.PortNotFree,
api.allocate_for_instance, self.context,
self.instance, False, requested_networks,
macs=set(['my_mac1']))
mock_unbind.assert_called_once_with(self.context, [],
self.moxed_client, mock.ANY)
@mock.patch('nova.network.neutronv2.api.API._unbind_ports')
def test_allocate_for_instance_not_enough_macs(self, mock_unbind):
# If not enough MAC addresses are available to allocate to networks, an
# error should be raised.
# We could pass in macs=set(), but that wouldn't tell us that
# allocate_for_instance tracks used macs properly, so we pass in one
# mac, and ask for two networks.
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=self.nets2[1]['id']),
objects.NetworkRequest(network_id=self.nets2[0]['id'])])
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
api = self._stub_allocate_for_instance(
net_idx=2, requested_networks=requested_networks,
macs=set(['my_mac2']),
_break='mac' + self.nets2[0]['id'])
with mock.patch.object(api, '_delete_ports'):
self.assertRaises(exception.PortNotFree,
api.allocate_for_instance, self.context,
self.instance, False,
requested_networks=requested_networks,
macs=set(['my_mac2']))
mock_unbind.assert_called_once_with(self.context, [],
self.moxed_client, mock.ANY)
def test_allocate_for_instance_two_macs_two_networks(self):
# If two MACs are available and two networks requested, two new ports
# get made and no exceptions raised.
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=self.nets2[1]['id']),
objects.NetworkRequest(network_id=self.nets2[0]['id'])])
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self._allocate_for_instance(
net_idx=2, requested_networks=requested_networks,
macs=set(['my_mac2', 'my_mac1']))
def test_allocate_for_instance_without_requested_networks(self):
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
api = self._stub_allocate_for_instance(net_idx=3)
self.assertRaises(exception.NetworkAmbiguous,
api.allocate_for_instance,
self.context, self.instance, False, None)
def test_allocate_for_instance_with_requested_non_available_network(self):
"""verify that a non available network is ignored.
self.nets2 (net_idx=2) is composed of self.nets3[0] and self.nets3[1]
Do not create a port on a non available network self.nets3[2].
"""
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=net['id'])
for net in (self.nets3[0], self.nets3[2], self.nets3[1])])
requested_networks[0].tag = 'foo'
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self._allocate_for_instance(net_idx=2,
requested_networks=requested_networks)
self.assertEqual(2, len(self._vifs_created))
# NOTE(danms) nets3[2] is chosen above as one that won't validate,
# so we never actually run create() on the VIF.
vifs_really_created = [vif for vif in self._vifs_created
if vif.create.called]
self.assertEqual(2, len(vifs_really_created))
self.assertEqual([('foo', 'fakemac1/%s' % uuids.fake),
(None, 'fakemac3/%s' % uuids.fake)],
[(vif.tag, vif.address)
for vif in vifs_really_created])
def test_allocate_for_instance_with_requested_networks(self):
        # request the networks in a specific (non-default) order
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=net['id'])
for net in (self.nets3[1], self.nets3[0], self.nets3[2])])
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self._allocate_for_instance(net_idx=3,
requested_networks=requested_networks)
def test_allocate_for_instance_with_no_subnet_defined(self):
# net_id=4 does not specify subnet and does not set the option
        # port_security_disabled to True, so Neutron will not be
# able to associate the default security group to the port
# requested to be created. We expect an exception to be
# raised.
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.assertRaises(exception.SecurityGroupCannotBeApplied,
self._allocate_for_instance, net_idx=4,
_break='post_list_extensions')
def test_allocate_for_instance_with_invalid_network_id(self):
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(
network_id=uuids.non_existent_uuid)])
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
api = self._stub_allocate_for_instance(net_idx=9,
requested_networks=requested_networks,
_break='post_list_networks')
self.assertRaises(exception.NetworkNotFound,
api.allocate_for_instance,
self.context, self.instance, False,
requested_networks=requested_networks)
def test_allocate_for_instance_with_requested_networks_with_fixedip(self):
        # specify only the first network, with a fixed IP address
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=self.nets1[0]['id'],
address='10.0.1.0')])
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self._allocate_for_instance(net_idx=1,
requested_networks=requested_networks)
def test_allocate_for_instance_with_requested_networks_with_port(self):
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=uuids.portid_1)])
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self._allocate_for_instance(net_idx=1,
requested_networks=requested_networks)
def test_allocate_for_instance_no_networks(self):
"""verify the exception thrown when there are no networks defined."""
self.instance = fake_instance.fake_instance_obj(self.context,
**self.instance)
api = neutronapi.API()
self.moxed_client.list_networks(
tenant_id=self.instance.project_id,
shared=False).AndReturn(
{'networks': model.NetworkInfo([])})
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_networks(shared=True).AndReturn(
{'networks': model.NetworkInfo([])})
self.mox.ReplayAll()
nwinfo = api.allocate_for_instance(self.context, self.instance,
False, None)
self.assertEqual(0, len(nwinfo))
@mock.patch(
'nova.network.neutronv2.api.API._populate_neutron_extension_values')
@mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension')
@mock.patch('nova.network.neutronv2.api.API._create_ports_for_instance')
@mock.patch('nova.network.neutronv2.api.API._unbind_ports')
def test_allocate_for_instance_ex1(self, mock_unbind, mock_create_ports,
mock_has_port_binding, mock_populate):
"""verify we will delete created ports
if we fail to allocate all net resources.
Mox to raise exception when creating a second port.
In this case, the code should delete the first created port.
"""
self.instance = fake_instance.fake_instance_obj(self.context,
**self.instance)
api = neutronapi.API()
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=net['id'])
for net in (self.nets2[0], self.nets2[1])])
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_networks(
id=[uuids.my_netid1, uuids.my_netid2]).AndReturn(
{'networks': self.nets2})
mock_has_port_binding.return_value = False
mock_create_ports.return_value = [
(request, (getattr(uuids, 'portid_%s' % request.network_id)))
for request in requested_networks
]
neutronapi.get_client(
mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
index = 0
for network in self.nets2:
binding_port_req_body = {
'port': {
'device_id': self.instance.uuid,
'device_owner': 'compute:nova',
},
}
port_req_body = {
'port': {
'network_id': network['id'],
'admin_state_up': True,
'tenant_id': self.instance.project_id,
},
}
port_req_body['port'].update(binding_port_req_body['port'])
port_id = getattr(uuids, 'portid_%s' % network['id'])
port = {'id': port_id, 'mac_address': 'foo'}
if index == 0:
self.moxed_client.update_port(port_id,
MyComparator(binding_port_req_body)).AndReturn(
{'port': port})
else:
NeutronOverQuota = exceptions.MacAddressInUseClient()
self.moxed_client.update_port(port_id,
MyComparator(binding_port_req_body)).AndRaise(
NeutronOverQuota)
index += 1
self.moxed_client.delete_port(
getattr(uuids, 'portid_%s' % self.nets2[0]['id']))
self.moxed_client.delete_port(
getattr(uuids, 'portid_%s' % self.nets2[1]['id']))
self.mox.ReplayAll()
self.assertRaises(exception.PortInUse,
api.allocate_for_instance,
self.context, self.instance, False,
requested_networks=requested_networks)
mock_unbind.assert_called_once_with(self.context, [],
self.moxed_client, mock.ANY)
def test_allocate_for_instance_ex2(self):
"""verify we have no port to delete
if we fail to allocate the first net resource.
Mox to raise exception when creating the first port.
In this case, the code should not delete any ports.
"""
self.instance = fake_instance.fake_instance_obj(self.context,
**self.instance)
api = neutronapi.API()
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=net['id'])
for net in (self.nets2[0], self.nets2[1])])
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_networks(
id=[uuids.my_netid1, uuids.my_netid2]).AndReturn(
{'networks': self.nets2})
port_req_body = {
'port': {
'network_id': self.nets2[0]['id'],
'admin_state_up': True,
'device_id': self.instance.uuid,
'tenant_id': self.instance.project_id,
},
}
self.moxed_client.create_port(
MyComparator(port_req_body)).AndRaise(
Exception("fail to create port"))
self.mox.ReplayAll()
self.assertRaises(NEUTRON_CLIENT_EXCEPTION, api.allocate_for_instance,
self.context, self.instance, False,
requested_networks=requested_networks)
def test_allocate_for_instance_no_port_or_network(self):
class BailOutEarly(Exception):
pass
self.instance = fake_instance.fake_instance_obj(self.context,
**self.instance)
api = neutronapi.API()
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.mox.StubOutWithMock(api, '_get_available_networks')
# Make sure we get an empty list and then bail out of the rest
# of the function
api._get_available_networks(self.context, self.instance.project_id,
[],
neutron=self.moxed_client,
auto_allocate=False).\
AndRaise(BailOutEarly)
self.mox.ReplayAll()
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest()])
self.assertRaises(BailOutEarly,
api.allocate_for_instance, self.context,
self.instance, False, requested_networks)
def test_allocate_for_instance_second_time(self):
# Make sure that allocate_for_instance only returns ports that it
# allocated during _that_ run.
new_port = {'id': uuids.fake}
self._returned_nw_info = self.port_data1 + [new_port]
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
nw_info = self._allocate_for_instance()
self.assertEqual([new_port], nw_info)
def test_allocate_for_instance_port_in_use(self):
# If a port is already in use, an exception should be raised.
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=uuids.portid_1)])
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
api = self._stub_allocate_for_instance(
requested_networks=requested_networks,
_break='pre_list_networks',
_device=True)
self.assertRaises(exception.PortInUse,
api.allocate_for_instance, self.context,
self.instance, False, requested_networks)
def test_allocate_for_instance_port_not_found(self):
# If a port is not found, an exception should be raised.
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=uuids.non_existent_uuid)])
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
api = self._stub_allocate_for_instance(
requested_networks=requested_networks,
_break='pre_list_networks')
self.assertRaises(exception.PortNotFound,
api.allocate_for_instance, self.context,
self.instance, False, requested_networks)
def test_allocate_for_instance_port_invalid_tenantid(self):
self.tenant_id = 'invalid_id'
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=uuids.portid_1)])
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
api = self._stub_allocate_for_instance(
requested_networks=requested_networks,
_break='pre_list_networks')
self.assertRaises(exception.PortNotUsable,
api.allocate_for_instance, self.context,
self.instance, False, requested_networks)
def test_allocate_for_instance_with_externalnet_forbidden(self):
"""Only one network is available, it's external, and the client
is unauthorized to use it.
"""
self.instance = fake_instance.fake_instance_obj(self.context,
**self.instance)
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
# no networks in the tenant
self.moxed_client.list_networks(
tenant_id=self.instance.project_id,
shared=False).AndReturn(
{'networks': model.NetworkInfo([])})
# external network is shared
self.moxed_client.list_networks(shared=True).AndReturn(
{'networks': self.nets8})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.ExternalNetworkAttachForbidden,
api.allocate_for_instance, self.context,
self.instance, False, None)
def test_allocate_for_instance_with_externalnet_multiple(self):
"""Multiple networks are available, one the client is authorized
to use, and an external one the client is unauthorized to use.
"""
self.instance = fake_instance.fake_instance_obj(self.context,
**self.instance)
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
# network found in the tenant
self.moxed_client.list_networks(
tenant_id=self.instance.project_id,
shared=False).AndReturn(
{'networks': self.nets1})
# external network is shared
self.moxed_client.list_networks(shared=True).AndReturn(
{'networks': self.nets8})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(
exception.NetworkAmbiguous,
api.allocate_for_instance,
self.context, self.instance, False, None)
def test_allocate_for_instance_with_externalnet_admin_ctx(self):
"""Only one network is available, it's external, and the client
is authorized.
"""
admin_ctx = context.RequestContext('userid', uuids.my_tenant,
is_admin=True)
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
api = self._stub_allocate_for_instance(net_idx=8)
api.allocate_for_instance(admin_ctx, self.instance, False, None)
def test_allocate_for_instance_with_external_shared_net(self):
"""Only one network is available, it's external and shared."""
ctx = context.RequestContext('userid', uuids.my_tenant)
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
api = self._stub_allocate_for_instance(net_idx=10)
api.allocate_for_instance(ctx, self.instance, False, None)
def _deallocate_for_instance(self, number, requested_networks=None):
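        # Helper: mock listing the instance's ports (plus any explicitly
        # requested, preexisting ports), expect the requested ports to be
        # unbound via update_port and the rest to be deleted, then verify
        # that deallocate_for_instance empties the instance info cache.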
# TODO(mriedem): Remove this conversion when all neutronv2 APIs are
# converted to handling instance objects.
self.instance = fake_instance.fake_instance_obj(self.context,
**self.instance)
api = neutronapi.API()
        port_data = self.port_data1 if number == 1 else self.port_data2
ports = {port['id'] for port in port_data}
ret_data = copy.deepcopy(port_data)
if requested_networks:
if isinstance(requested_networks, objects.NetworkRequestList):
# NOTE(danms): Temporary and transitional
with mock.patch('nova.utils.is_neutron', return_value=True):
requested_networks = requested_networks.as_tuples()
for net, fip, port, request_id in requested_networks:
ret_data.append({'network_id': net,
'device_id': self.instance.uuid,
'device_owner': 'compute:nova',
'id': port,
'status': 'DOWN',
'admin_state_up': True,
'fixed_ips': [],
'mac_address': 'fake_mac', })
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_ports(
device_id=self.instance.uuid).AndReturn(
{'ports': ret_data})
self.moxed_client.list_extensions().AndReturn({'extensions': []})
if requested_networks:
for net, fip, port, request_id in requested_networks:
self.moxed_client.update_port(port)
for port in ports:
self.moxed_client.delete_port(port).InAnyOrder("delete_port_group")
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(self.context,
self.instance.uuid,
{'network_info': '[]'}).AndReturn(
fake_info_cache)
self.mox.ReplayAll()
api = neutronapi.API()
api.deallocate_for_instance(self.context, self.instance,
requested_networks=requested_networks)
@mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids')
def test_deallocate_for_instance_1_with_requested(self, mock_preexisting):
mock_preexisting.return_value = []
requested = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id='fake-net',
address='1.2.3.4',
port_id=uuids.portid_5)])
# Test to deallocate in one port env.
self._deallocate_for_instance(1, requested_networks=requested)
@mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids')
def test_deallocate_for_instance_2_with_requested(self, mock_preexisting):
mock_preexisting.return_value = []
requested = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id='fake-net',
address='1.2.3.4',
port_id=uuids.portid_6)])
        # Test to deallocate in two ports env.
self._deallocate_for_instance(2, requested_networks=requested)
@mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids')
def test_deallocate_for_instance_1(self, mock_preexisting):
mock_preexisting.return_value = []
# Test to deallocate in one port env.
self._deallocate_for_instance(1)
@mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids')
def test_deallocate_for_instance_2(self, mock_preexisting):
mock_preexisting.return_value = []
# Test to deallocate in two ports env.
self._deallocate_for_instance(2)
@mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids')
def test_deallocate_for_instance_port_not_found(self,
mock_preexisting):
# TODO(mriedem): Remove this conversion when all neutronv2 APIs are
# converted to handling instance objects.
self.instance = fake_instance.fake_instance_obj(self.context,
**self.instance)
mock_preexisting.return_value = []
port_data = self.port_data1
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_ports(
device_id=self.instance.uuid).AndReturn(
{'ports': port_data})
self.moxed_client.list_extensions().AndReturn({'extensions': []})
NeutronNotFound = exceptions.NeutronClientException(status_code=404)
for port in reversed(port_data):
self.moxed_client.delete_port(port['id']).AndRaise(
NeutronNotFound)
self.mox.ReplayAll()
api = neutronapi.API()
api.deallocate_for_instance(self.context, self.instance)
def _test_deallocate_port_for_instance(self, number):
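        # Helper: deallocate the first of `number` ports and verify that
        # the returned network info only contains the remaining ports.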
        port_data = self.port_data1 if number == 1 else self.port_data2
        nets = self.nets1 if number == 1 else self.nets2
self.moxed_client.delete_port(port_data[0]['id'])
net_info_cache = []
for port in port_data:
net_info_cache.append({"network": {"id": port['network_id']},
"id": port['id']})
self.instance['info_cache'] = self._fake_instance_info_cache(
net_info_cache, self.instance['uuid'])
api = neutronapi.API()
neutronapi.get_client(mox.IgnoreArg()).AndReturn(
self.moxed_client)
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': port_data[1:]})
net_ids = [port['network_id'] for port in port_data]
neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
self.moxed_client.list_networks(id=net_ids).AndReturn(
{'networks': nets})
        float_data = self.float_data1 if number == 1 else self.float_data2
for data in port_data[1:]:
for ip in data['fixed_ips']:
self.moxed_client.list_floatingips(
fixed_ip_address=ip['ip_address'],
port_id=data['id']).AndReturn(
{'floatingips': float_data[1:]})
for port in port_data[1:]:
self.moxed_client.list_subnets(id=['my_subid2']).AndReturn({})
self.mox.StubOutWithMock(api.db, 'instance_info_cache_get')
api.db.instance_info_cache_get(mox.IgnoreArg(),
self.instance['uuid']).AndReturn(
self.instance['info_cache'])
self.mox.ReplayAll()
instance = self._fake_instance_object_with_info_cache(self.instance)
nwinfo = api.deallocate_port_for_instance(self.context, instance,
port_data[0]['id'])
self.assertEqual(len(port_data[1:]), len(nwinfo))
if len(port_data) > 1:
self.assertEqual(uuids.my_netid2, nwinfo[0]['network']['id'])
def test_deallocate_port_for_instance_1(self):
# Test to deallocate the first and only port
self._test_deallocate_port_for_instance(1)
def test_deallocate_port_for_instance_2(self):
# Test to deallocate the first port of two
self._test_deallocate_port_for_instance(2)
def test_list_ports(self):
search_opts = {'parm': 'value'}
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_ports(**search_opts)
self.mox.ReplayAll()
neutronapi.API().list_ports(self.context, **search_opts)
def test_show_port(self):
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.show_port('foo').AndReturn(
{'port': self.port_data1[0]})
self.mox.ReplayAll()
neutronapi.API().show_port(self.context, 'foo')
def test_validate_networks(self):
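        # Two requested networks exist and the port quota leaves room for
        # the new ports, so validation passes.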
requested_networks = [(uuids.my_netid1, None, None, None),
(uuids.my_netid2, None, None, None)]
ids = [uuids.my_netid1, uuids.my_netid2]
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets2})
self.moxed_client.show_quota(
uuids.my_tenant).AndReturn(
{'quota': {'port': 50}})
self.moxed_client.list_ports(
tenant_id=uuids.my_tenant, fields=['id']).AndReturn(
{'ports': []})
self.mox.ReplayAll()
api = neutronapi.API()
api.validate_networks(self.context, requested_networks, 1)
def test_validate_networks_without_port_quota_on_network_side(self):
requested_networks = [(uuids.my_netid1, None, None, None),
(uuids.my_netid2, None, None, None)]
ids = [uuids.my_netid1, uuids.my_netid2]
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets2})
self.moxed_client.show_quota(
uuids.my_tenant).AndReturn(
{'quota': {}})
self.mox.ReplayAll()
api = neutronapi.API()
api.validate_networks(self.context, requested_networks, 1)
def test_validate_networks_ex_1(self):
requested_networks = [(uuids.my_netid1, None, None, None)]
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_networks(
id=mox.SameElementsAs([uuids.my_netid1])).AndReturn(
{'networks': self.nets1})
self.moxed_client.show_quota(
uuids.my_tenant).AndReturn(
{'quota': {'port': 50}})
self.moxed_client.list_ports(
tenant_id=uuids.my_tenant, fields=['id']).AndReturn(
{'ports': []})
self.mox.ReplayAll()
api = neutronapi.API()
try:
api.validate_networks(self.context, requested_networks, 1)
except exception.NetworkNotFound as ex:
self.assertIn("my_netid2", six.text_type(ex))
def test_validate_networks_ex_2(self):
requested_networks = [(uuids.my_netid1, None, None, None),
(uuids.my_netid2, None, None, None),
(uuids.my_netid3, None, None, None)]
ids = [uuids.my_netid1, uuids.my_netid2, uuids.my_netid3]
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets1})
self.mox.ReplayAll()
api = neutronapi.API()
try:
api.validate_networks(self.context, requested_networks, 1)
except exception.NetworkNotFound as ex:
self.assertIn(uuids.my_netid2, six.text_type(ex))
self.assertIn(uuids.my_netid3, six.text_type(ex))
def test_validate_networks_duplicate_enable(self):
        # Verify that no duplicate-network exception is raised when
        # duplicate network ids are passed to validate_networks.
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=uuids.my_netid1),
objects.NetworkRequest(network_id=uuids.my_netid1)])
ids = [uuids.my_netid1, uuids.my_netid1]
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets1})
self.moxed_client.show_quota(
uuids.my_tenant).AndReturn(
{'quota': {'port': 50}})
self.moxed_client.list_ports(
tenant_id=uuids.my_tenant, fields=['id']).AndReturn(
{'ports': []})
self.mox.ReplayAll()
api = neutronapi.API()
api.validate_networks(self.context, requested_networks, 1)
def test_allocate_for_instance_with_requested_networks_duplicates(self):
# specify a duplicate network to allocate to instance
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=net['id'])
for net in (self.nets6[0], self.nets6[1])])
neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
self._allocate_for_instance(net_idx=6,
requested_networks=requested_networks)
def test_allocate_for_instance_requested_networks_duplicates_port(self):
        # specify the first and last ports, which are in the same network
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=port['id'])
for port in (self.port_data1[0], self.port_data3[0])])
neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
self._allocate_for_instance(net_idx=6,
requested_networks=requested_networks)
def test_allocate_for_instance_requested_networks_duplicates_combo(self):
# specify a combo net_idx=7 : net2, port in net1, net2, port in net1
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=uuids.my_netid2),
objects.NetworkRequest(port_id=self.port_data1[0]['id']),
objects.NetworkRequest(network_id=uuids.my_netid2),
objects.NetworkRequest(port_id=self.port_data3[0]['id'])])
neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
self._allocate_for_instance(net_idx=7,
requested_networks=requested_networks)
def test_validate_networks_not_specified(self):
requested_networks = objects.NetworkRequestList(objects=[])
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_networks(
tenant_id=self.context.project_id,
shared=False).AndReturn(
{'networks': self.nets1})
self.moxed_client.list_networks(
shared=True).AndReturn(
{'networks': self.nets2})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.NetworkAmbiguous,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_port_not_found(self):
        # Verify that PortNotFound is raised when a non-existent port is
        # passed to validate_networks.
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(
network_id=uuids.my_netid1,
port_id=uuids.portid_1)])
PortNotFound = exceptions.PortNotFoundClient()
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.show_port(requested_networks[0].port_id).AndRaise(
PortNotFound)
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.PortNotFound,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_port_show_raises_non404(self):
        # Verify that a NovaException is raised when showing a port fails
        # with an error other than 404 during validate_networks.
fake_port_id = uuids.portid_1
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(
network_id=uuids.my_netid1,
port_id=fake_port_id)])
        NeutronError = exceptions.NeutronClientException(status_code=0)
        neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
        self.moxed_client.show_port(requested_networks[0].port_id).AndRaise(
            NeutronError)
self.mox.ReplayAll()
api = neutronapi.API()
exc = self.assertRaises(exception.NovaException,
api.validate_networks,
self.context, requested_networks, 1)
expected_exception_message = ('Failed to access port %(port_id)s: '
'An unknown exception occurred.' %
{'port_id': fake_port_id})
self.assertEqual(expected_exception_message, str(exc))
def test_validate_networks_port_in_use(self):
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=self.port_data3[0]['id'])])
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.show_port(self.port_data3[0]['id']).\
AndReturn({'port': self.port_data3[0]})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.PortInUse,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_port_no_subnet_id(self):
port_a = self.port_data3[0]
port_a['device_id'] = None
port_a['device_owner'] = None
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=port_a['id'])])
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.PortRequiresFixedIP,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_no_subnet_id(self):
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id='his_netid4')])
ids = ['his_netid4']
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets4})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.NetworkRequiresSubnet,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_ports_in_same_network_enable(self):
        # Verify that no duplicate-network exception is raised when ports
        # on the same network are passed to validate_networks.
port_a = self.port_data3[0]
port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
'subnet_id': 'subnet_id'}
port_b = self.port_data1[0]
self.assertEqual(port_a['network_id'], port_b['network_id'])
for port in [port_a, port_b]:
port['device_id'] = None
port['device_owner'] = None
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=port_a['id']),
objects.NetworkRequest(port_id=port_b['id'])])
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.show_port(port_a['id']).AndReturn(
{'port': port_a})
self.moxed_client.show_port(port_b['id']).AndReturn(
{'port': port_b})
self.mox.ReplayAll()
api = neutronapi.API()
api.validate_networks(self.context, requested_networks, 1)
def test_validate_networks_ports_not_in_same_network(self):
port_a = self.port_data3[0]
port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
'subnet_id': 'subnet_id'}
port_b = self.port_data2[1]
self.assertNotEqual(port_a['network_id'], port_b['network_id'])
for port in [port_a, port_b]:
port['device_id'] = None
port['device_owner'] = None
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=port_a['id']),
objects.NetworkRequest(port_id=port_b['id'])])
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
self.mox.ReplayAll()
api = neutronapi.API()
api.validate_networks(self.context, requested_networks, 1)
def test_validate_networks_no_quota(self):
# Test validation for a request for one instance needing
# two ports, where the quota is 2 and 2 ports are in use
# => instances which can be created = 0
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=uuids.my_netid1),
objects.NetworkRequest(network_id=uuids.my_netid2)])
ids = [uuids.my_netid1, uuids.my_netid2]
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets2})
self.moxed_client.show_quota(
uuids.my_tenant).AndReturn(
{'quota': {'port': 2}})
self.moxed_client.list_ports(
tenant_id=uuids.my_tenant, fields=['id']).AndReturn(
{'ports': self.port_data2})
self.mox.ReplayAll()
api = neutronapi.API()
max_count = api.validate_networks(self.context,
requested_networks, 1)
self.assertEqual(0, max_count)
def test_validate_networks_with_ports_and_networks(self):
# Test validation for a request for one instance needing
# one port allocated via nova with another port being passed in.
port_b = self.port_data2[1]
port_b['device_id'] = None
port_b['device_owner'] = None
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=uuids.my_netid1),
objects.NetworkRequest(port_id=port_b['id'])])
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
ids = [uuids.my_netid1]
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets1})
self.moxed_client.show_quota(
uuids.my_tenant).AndReturn(
{'quota': {'port': 5}})
self.moxed_client.list_ports(
tenant_id=uuids.my_tenant, fields=['id']).AndReturn(
{'ports': self.port_data2})
self.mox.ReplayAll()
api = neutronapi.API()
max_count = api.validate_networks(self.context,
requested_networks, 1)
self.assertEqual(1, max_count)
def test_validate_networks_one_port_and_no_networks(self):
        # Test that show_quota is not called when only ports (and no
        # networks) are passed in.
port_b = self.port_data2[1]
port_b['device_id'] = None
port_b['device_owner'] = None
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=port_b['id'])])
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
self.mox.ReplayAll()
api = neutronapi.API()
max_count = api.validate_networks(self.context,
requested_networks, 1)
self.assertEqual(1, max_count)
def test_validate_networks_some_quota(self):
        # Test validation for a request for two instances needing
        # two ports each, where the quota is 5 and 2 ports are in use
        # => instances which can be created = 1
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=uuids.my_netid1),
objects.NetworkRequest(network_id=uuids.my_netid2)])
ids = [uuids.my_netid1, uuids.my_netid2]
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets2})
self.moxed_client.show_quota(
uuids.my_tenant).AndReturn(
{'quota': {'port': 5}})
self.moxed_client.list_ports(
tenant_id=uuids.my_tenant, fields=['id']).AndReturn(
{'ports': self.port_data2})
self.mox.ReplayAll()
api = neutronapi.API()
max_count = api.validate_networks(self.context,
requested_networks, 2)
self.assertEqual(1, max_count)
def test_validate_networks_unlimited_quota(self):
        # Test validation for a request for two instances needing
        # two ports each, where the quota is -1 (unlimited)
        # => instances which can be created = 2
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=uuids.my_netid1),
objects.NetworkRequest(network_id=uuids.my_netid2)])
ids = [uuids.my_netid1, uuids.my_netid2]
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets2})
self.moxed_client.show_quota(
uuids.my_tenant).AndReturn(
{'quota': {'port': -1}})
self.mox.ReplayAll()
api = neutronapi.API()
max_count = api.validate_networks(self.context,
requested_networks, 2)
self.assertEqual(2, max_count)
def test_validate_networks_no_quota_but_ports_supplied(self):
port_a = self.port_data3[0]
port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
'subnet_id': 'subnet_id'}
port_b = self.port_data2[1]
self.assertNotEqual(port_a['network_id'], port_b['network_id'])
for port in [port_a, port_b]:
port['device_id'] = None
port['device_owner'] = None
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=port_a['id']),
objects.NetworkRequest(port_id=port_b['id'])])
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
self.mox.ReplayAll()
api = neutronapi.API()
max_count = api.validate_networks(self.context,
requested_networks, 1)
self.assertEqual(1, max_count)
def _mock_list_ports(self, port_data=None):
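        # Helper: mock a list_ports call filtered by the fixture's port
        # address and return that address for the caller to look up.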
if port_data is None:
port_data = self.port_data2
address = self.port_address
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_ports(
fixed_ips=MyComparator('ip_address=%s' % address)).AndReturn(
{'ports': port_data})
self.mox.ReplayAll()
return address
def test_get_fixed_ip_by_address_fails_for_no_ports(self):
address = self._mock_list_ports(port_data=[])
api = neutronapi.API()
self.assertRaises(exception.FixedIpNotFoundForAddress,
api.get_fixed_ip_by_address,
self.context, address)
def test_get_fixed_ip_by_address_succeeds_for_1_port(self):
address = self._mock_list_ports(port_data=self.port_data1)
api = neutronapi.API()
result = api.get_fixed_ip_by_address(self.context, address)
self.assertEqual(self.instance2['uuid'], result['instance_uuid'])
def test_get_fixed_ip_by_address_fails_for_more_than_1_port(self):
address = self._mock_list_ports()
api = neutronapi.API()
self.assertRaises(exception.FixedIpAssociatedWithMultipleInstances,
api.get_fixed_ip_by_address,
self.context, address)
def _get_available_networks(self, prv_nets, pub_nets,
req_ids=None, context=None):
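        # Helper: mock the network listing (either by explicit ids, or by
        # the tenant's own networks plus shared ones) and check that
        # _get_available_networks returns the combined list.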
api = neutronapi.API()
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
nets = prv_nets + pub_nets
if req_ids:
mox_list_params = {'id': req_ids}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': nets})
else:
mox_list_params = {'tenant_id': self.instance['project_id'],
'shared': False}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': prv_nets})
mox_list_params = {'shared': True}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': pub_nets})
self.mox.ReplayAll()
rets = api._get_available_networks(
context if context else self.context,
self.instance['project_id'],
req_ids)
self.assertEqual(nets, rets)
def test_get_available_networks_all_private(self):
self._get_available_networks(prv_nets=self.nets2, pub_nets=[])
def test_get_available_networks_all_public(self):
self._get_available_networks(prv_nets=[], pub_nets=self.nets2)
def test_get_available_networks_private_and_public(self):
self._get_available_networks(prv_nets=self.nets1, pub_nets=self.nets4)
def test_get_available_networks_with_network_ids(self):
prv_nets = [self.nets3[0]]
pub_nets = [self.nets3[-1]]
# specify only first and last network
req_ids = [net['id'] for net in (self.nets3[0], self.nets3[-1])]
self._get_available_networks(prv_nets, pub_nets, req_ids)
def test_get_available_networks_with_custom_policy(self):
rules = {'network:attach_external_network': ''}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
req_ids = [net['id'] for net in self.nets5]
self._get_available_networks(self.nets5, pub_nets=[], req_ids=req_ids)
def test_get_floating_ip_pools(self):
api = neutronapi.API()
search_opts = {'router:external': True}
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]})
self.mox.ReplayAll()
pools = api.get_floating_ip_pools(self.context)
expected = [self.fip_pool['name'], self.fip_pool_nova['name']]
self.assertEqual(expected, pools)
def _get_expected_fip_model(self, fip_data, idx=0):
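        # Build the floating IP dict the Nova model is expected to
        # contain for the given Neutron floating IP data; idx selects
        # which port fixture the floating IP is associated with.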
expected = {'id': fip_data['id'],
'address': fip_data['floating_ip_address'],
'pool': self.fip_pool['name'],
'project_id': fip_data['tenant_id'],
'fixed_ip': None,
'instance': ({'uuid': self.port_data2[idx]['device_id']}
if fip_data['port_id']
else None)}
if fip_data['fixed_ip_address']:
expected['fixed_ip'] = {'address': fip_data['fixed_ip_address']}
return expected
def _compare(self, obj, dic):
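        # Recursively compare a model object against a plain dict,
        # stringifying netaddr.IPAddress values before comparing.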
for key, value in dic.items():
objvalue = obj[key]
if isinstance(value, dict):
self._compare(objvalue, value)
elif isinstance(objvalue, netaddr.IPAddress):
self.assertEqual(value, str(objvalue))
else:
self.assertEqual(value, objvalue)
def _test_get_floating_ip(self, fip_data, idx=0, by_address=False):
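        # Helper: mock the floating IP lookup (by id or by address), the
        # pool network and, if associated, the port, then compare the
        # returned floating IP against the expected model.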
api = neutronapi.API()
fip_id = fip_data['id']
net_id = fip_data['floating_network_id']
address = fip_data['floating_ip_address']
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
if by_address:
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [fip_data]})
else:
self.moxed_client.show_floatingip(fip_id).\
AndReturn({'floatingip': fip_data})
self.moxed_client.show_network(net_id).\
AndReturn({'network': self.fip_pool})
if fip_data['port_id']:
self.moxed_client.show_port(fip_data['port_id']).\
AndReturn({'port': self.port_data2[idx]})
self.mox.ReplayAll()
expected = self._get_expected_fip_model(fip_data, idx)
if by_address:
fip = api.get_floating_ip_by_address(self.context, address)
else:
fip = api.get_floating_ip(self.context, fip_id)
self._compare(fip, expected)
def test_get_floating_ip_unassociated(self):
self._test_get_floating_ip(self.fip_unassociated, idx=0)
def test_get_floating_ip_associated(self):
self._test_get_floating_ip(self.fip_associated, idx=1)
def test_get_floating_ip_by_address(self):
self._test_get_floating_ip(self.fip_unassociated, idx=0,
by_address=True)
def test_get_floating_ip_by_address_associated(self):
self._test_get_floating_ip(self.fip_associated, idx=1,
by_address=True)
def test_get_floating_ip_by_address_not_found(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': []})
self.mox.ReplayAll()
self.assertRaises(exception.FloatingIpNotFoundForAddress,
api.get_floating_ip_by_address,
self.context, address)
def test_get_floating_ip_by_id_not_found(self):
api = neutronapi.API()
NeutronNotFound = exceptions.NeutronClientException(status_code=404)
floating_ip_id = self.fip_unassociated['id']
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.show_floatingip(floating_ip_id).\
AndRaise(NeutronNotFound)
self.mox.ReplayAll()
self.assertRaises(exception.FloatingIpNotFound,
api.get_floating_ip,
self.context, floating_ip_id)
def test_get_floating_ip_raises_non404(self):
api = neutronapi.API()
        NeutronError = exceptions.NeutronClientException(status_code=0)
        floating_ip_id = self.fip_unassociated['id']
        neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
        self.moxed_client.show_floatingip(floating_ip_id).\
            AndRaise(NeutronError)
self.mox.ReplayAll()
self.assertRaises(exceptions.NeutronClientException,
api.get_floating_ip,
self.context, floating_ip_id)
def test_get_floating_ip_by_address_multiple_found(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_unassociated] * 2})
self.mox.ReplayAll()
self.assertRaises(exception.FloatingIpMultipleFoundForAddress,
api.get_floating_ip_by_address,
self.context, address)
def test_get_floating_ips_by_project(self):
api = neutronapi.API()
project_id = self.context.project_id
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_floatingips(tenant_id=project_id).\
AndReturn({'floatingips': [self.fip_unassociated,
self.fip_associated]})
search_opts = {'router:external': True}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]})
self.moxed_client.list_ports(tenant_id=project_id).\
AndReturn({'ports': self.port_data2})
self.mox.ReplayAll()
expected = [self._get_expected_fip_model(self.fip_unassociated),
self._get_expected_fip_model(self.fip_associated, idx=1)]
fips = api.get_floating_ips_by_project(self.context)
self.assertEqual(len(expected), len(fips))
for i, expected_value in enumerate(expected):
self._compare(fips[i], expected_value)
def _test_get_instance_id_by_floating_address(self, fip_data,
associated=False):
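        # Helper: look up the instance id owning a floating IP address;
        # expect the port's device_id when associated, otherwise None.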
api = neutronapi.API()
address = fip_data['floating_ip_address']
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [fip_data]})
if associated:
self.moxed_client.show_port(fip_data['port_id']).\
AndReturn({'port': self.port_data2[1]})
self.mox.ReplayAll()
if associated:
expected = self.port_data2[1]['device_id']
else:
expected = None
fip = api.get_instance_id_by_floating_address(self.context, address)
self.assertEqual(expected, fip)
def test_get_instance_id_by_floating_address(self):
self._test_get_instance_id_by_floating_address(self.fip_unassociated)
def test_get_instance_id_by_floating_address_associated(self):
self._test_get_instance_id_by_floating_address(self.fip_associated,
associated=True)
def test_allocate_floating_ip(self):
api = neutronapi.API()
pool_name = self.fip_pool['name']
pool_id = self.fip_pool['id']
search_opts = {'router:external': True,
'fields': 'id',
'name': pool_name}
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool]})
self.moxed_client.create_floatingip(
{'floatingip': {'floating_network_id': pool_id}}).\
AndReturn({'floatingip': self.fip_unassociated})
self.mox.ReplayAll()
fip = api.allocate_floating_ip(self.context, 'ext_net')
self.assertEqual(self.fip_unassociated['floating_ip_address'], fip)
def test_allocate_floating_ip_addr_gen_fail(self):
api = neutronapi.API()
pool_name = self.fip_pool['name']
pool_id = self.fip_pool['id']
search_opts = {'router:external': True,
'fields': 'id',
'name': pool_name}
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool]})
self.moxed_client.create_floatingip(
{'floatingip': {'floating_network_id': pool_id}}).\
AndRaise(exceptions.IpAddressGenerationFailureClient)
self.mox.ReplayAll()
self.assertRaises(exception.NoMoreFloatingIps,
api.allocate_floating_ip, self.context, 'ext_net')
def test_allocate_floating_ip_exhausted_fail(self):
api = neutronapi.API()
pool_name = self.fip_pool['name']
pool_id = self.fip_pool['id']
search_opts = {'router:external': True,
'fields': 'id',
'name': pool_name}
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool]})
self.moxed_client.create_floatingip(
{'floatingip': {'floating_network_id': pool_id}}).\
AndRaise(exceptions.ExternalIpAddressExhaustedClient)
self.mox.ReplayAll()
self.assertRaises(exception.NoMoreFloatingIps,
api.allocate_floating_ip, self.context, 'ext_net')
def test_allocate_floating_ip_with_pool_id(self):
api = neutronapi.API()
pool_id = self.fip_pool['id']
search_opts = {'router:external': True,
'fields': 'id',
'id': pool_id}
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool]})
self.moxed_client.create_floatingip(
{'floatingip': {'floating_network_id': pool_id}}).\
AndReturn({'floatingip': self.fip_unassociated})
self.mox.ReplayAll()
fip = api.allocate_floating_ip(self.context, pool_id)
self.assertEqual(self.fip_unassociated['floating_ip_address'], fip)
def test_allocate_floating_ip_with_default_pool(self):
api = neutronapi.API()
pool_name = self.fip_pool_nova['name']
pool_id = self.fip_pool_nova['id']
search_opts = {'router:external': True,
'fields': 'id',
'name': pool_name}
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool_nova]})
self.moxed_client.create_floatingip(
{'floatingip': {'floating_network_id': pool_id}}).\
AndReturn({'floatingip': self.fip_unassociated})
self.mox.ReplayAll()
fip = api.allocate_floating_ip(self.context)
self.assertEqual(self.fip_unassociated['floating_ip_address'], fip)
def test_release_floating_ip(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
fip_id = self.fip_unassociated['id']
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_unassociated]})
self.moxed_client.delete_floatingip(fip_id)
self.mox.ReplayAll()
api.release_floating_ip(self.context, address)
def test_disassociate_and_release_floating_ip(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
fip_id = self.fip_unassociated['id']
floating_ip = {'address': address}
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_unassociated]})
self.moxed_client.delete_floatingip(fip_id)
self.mox.ReplayAll()
api.disassociate_and_release_floating_ip(self.context, None,
floating_ip)
def test_disassociate_and_release_floating_ip_with_instance(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
fip_id = self.fip_unassociated['id']
floating_ip = {'address': address}
instance = self._fake_instance_object(self.instance)
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_unassociated]})
self.moxed_client.delete_floatingip(fip_id)
self._setup_mock_for_refresh_cache(api, [instance])
self.mox.ReplayAll()
api.disassociate_and_release_floating_ip(self.context, instance,
floating_ip)
def test_release_floating_ip_associated(self):
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_associated]})
self.mox.ReplayAll()
self.assertRaises(exception.FloatingIpAssociated,
api.release_floating_ip, self.context, address)
def _setup_mock_for_refresh_cache(self, api, instances):
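        # Stub out the network info refresh: each instance gets an empty
        # NetworkInfo and one info cache DB update.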
nw_info = model.NetworkInfo()
self.mox.StubOutWithMock(api, '_get_instance_nw_info')
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
for instance in instances:
api._get_instance_nw_info(mox.IgnoreArg(), instance).\
AndReturn(nw_info)
api.db.instance_info_cache_update(mox.IgnoreArg(),
instance['uuid'],
mox.IgnoreArg()).AndReturn(
fake_info_cache)
def test_associate_floating_ip(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
fixed_address = self.port_address2
fip_id = self.fip_unassociated['id']
instance = self._fake_instance_object(self.instance)
search_opts = {'device_owner': 'compute:nova',
'device_id': instance.uuid}
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_ports(**search_opts).\
AndReturn({'ports': [self.port_data2[1]]})
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_unassociated]})
self.moxed_client.update_floatingip(
fip_id, {'floatingip': {'port_id': self.fip_associated['port_id'],
'fixed_ip_address': fixed_address}})
self._setup_mock_for_refresh_cache(api, [instance])
self.mox.ReplayAll()
api.associate_floating_ip(self.context, instance,
address, fixed_address)
@mock.patch('nova.objects.Instance.get_by_uuid')
def test_reassociate_floating_ip(self, mock_get):
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
new_fixed_address = self.port_address
fip_id = self.fip_associated['id']
search_opts = {'device_owner': 'compute:nova',
'device_id': self.instance2['uuid']}
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_ports(**search_opts).\
AndReturn({'ports': [self.port_data2[0]]})
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_associated]})
self.moxed_client.update_floatingip(
fip_id, {'floatingip': {'port_id': uuids.portid_1,
'fixed_ip_address': new_fixed_address}})
self.moxed_client.show_port(self.fip_associated['port_id']).\
AndReturn({'port': self.port_data2[1]})
mock_get.return_value = fake_instance.fake_instance_obj(
self.context, **self.instance)
instance2 = self._fake_instance_object(self.instance2)
self._setup_mock_for_refresh_cache(api, [mock_get.return_value,
instance2])
self.mox.ReplayAll()
api.associate_floating_ip(self.context, instance2,
address, new_fixed_address)
def test_associate_floating_ip_not_found_fixed_ip(self):
instance = self._fake_instance_object(self.instance)
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
fixed_address = self.fip_associated['fixed_ip_address']
search_opts = {'device_owner': 'compute:nova',
'device_id': self.instance['uuid']}
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_ports(**search_opts).\
AndReturn({'ports': [self.port_data2[0]]})
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpNotFoundForAddress,
api.associate_floating_ip, self.context,
instance, address, fixed_address)
def test_disassociate_floating_ip(self):
instance = self._fake_instance_object(self.instance)
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
fip_id = self.fip_associated['id']
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_associated]})
self.moxed_client.update_floatingip(
fip_id, {'floatingip': {'port_id': None}})
self._setup_mock_for_refresh_cache(api, [instance])
self.mox.ReplayAll()
api.disassociate_floating_ip(self.context, instance, address)
def test_add_fixed_ip_to_instance(self):
instance = self._fake_instance_object(self.instance)
api = neutronapi.API()
self._setup_mock_for_refresh_cache(api, [instance])
network_id = uuids.my_netid1
search_opts = {'network_id': network_id}
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_subnets(
**search_opts).AndReturn({'subnets': self.subnet_data_n})
search_opts = {'device_id': instance.uuid,
'device_owner': 'compute:nova',
'network_id': network_id}
self.moxed_client.list_ports(
**search_opts).AndReturn({'ports': self.port_data1})
port_req_body = {
'port': {
'fixed_ips': [{'subnet_id': 'my_subid1'},
{'subnet_id': 'my_subid1'}],
},
}
port = self.port_data1[0]
port['fixed_ips'] = [{'subnet_id': 'my_subid1'}]
self.moxed_client.update_port(uuids.portid_1,
MyComparator(port_req_body)).AndReturn({'port': port})
self.mox.ReplayAll()
api.add_fixed_ip_to_instance(self.context,
instance,
network_id)
def test_remove_fixed_ip_from_instance(self):
instance = self._fake_instance_object(self.instance)
api = neutronapi.API()
self._setup_mock_for_refresh_cache(api, [instance])
address = '10.0.0.3'
zone = 'compute:%s' % self.instance['availability_zone']
search_opts = {'device_id': self.instance['uuid'],
'device_owner': zone,
'fixed_ips': 'ip_address=%s' % address}
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_ports(
**search_opts).AndReturn({'ports': self.port_data1})
port_req_body = {
'port': {
'fixed_ips': [],
},
}
port = self.port_data1[0]
port['fixed_ips'] = []
self.moxed_client.update_port(uuids.portid_1,
MyComparator(port_req_body)).AndReturn({'port': port})
self.mox.ReplayAll()
api.remove_fixed_ip_from_instance(self.context, instance,
address)
def test_list_floating_ips_without_l3_support(self):
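        # If the L3 extension is missing, listing floating IPs raises
        # NotFound; _get_floating_ips_by_fixed_and_port should swallow it
        # and return an empty list.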
api = neutronapi.API()
NeutronNotFound = exceptions.NotFound()
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_floatingips(
fixed_ip_address='1.1.1.1', port_id=1).AndRaise(NeutronNotFound)
self.mox.ReplayAll()
neutronapi.get_client(uuids.fake)
floatingips = api._get_floating_ips_by_fixed_and_port(
self.moxed_client, '1.1.1.1', 1)
self.assertEqual([], floatingips)
def test_nw_info_get_ips(self):
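        # A port's fixed IP should be returned as an IP model with its
        # floating IPs attached.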
fake_port = {
'fixed_ips': [
{'ip_address': '1.1.1.1'}],
'id': 'port-id',
}
api = neutronapi.API()
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port')
api._get_floating_ips_by_fixed_and_port(
self.moxed_client, '1.1.1.1', 'port-id').AndReturn(
[{'floating_ip_address': '10.0.0.1'}])
self.mox.ReplayAll()
neutronapi.get_client(uuids.fake)
result = api._nw_info_get_ips(self.moxed_client, fake_port)
self.assertEqual(1, len(result))
self.assertEqual('1.1.1.1', result[0]['address'])
self.assertEqual('10.0.0.1', result[0]['floating_ips'][0]['address'])
def test_nw_info_get_subnets(self):
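        # Fixed IPs should be grouped onto the subnet that contains them;
        # the second address is outside the 1.0.0.0/8 subnet, so only one
        # IP ends up on the returned subnet.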
fake_port = {
'fixed_ips': [
{'ip_address': '1.1.1.1'},
{'ip_address': '2.2.2.2'}],
'id': 'port-id',
}
fake_subnet = model.Subnet(cidr='1.0.0.0/8')
fake_ips = [model.IP(x['ip_address']) for x in fake_port['fixed_ips']]
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_get_subnets_from_port')
api._get_subnets_from_port(
self.context, fake_port, None).AndReturn(
[fake_subnet])
self.mox.ReplayAll()
subnets = api._nw_info_get_subnets(self.context, fake_port, fake_ips)
self.assertEqual(1, len(subnets))
self.assertEqual(1, len(subnets[0]['ips']))
self.assertEqual('1.1.1.1', subnets[0]['ips'][0]['address'])
def _test_nw_info_build_network(self, vif_type):
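        # Helper: build a network model from a fake port/net/subnet for
        # the given vif_type and verify the common fields; vif-type
        # specific assertions are done by the callers.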
fake_port = {
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'id': 'port-id',
'network_id': 'net-id',
'binding:vif_type': vif_type,
}
fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant',
'mtu': 9000}]
api = neutronapi.API()
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.mox.ReplayAll()
neutronapi.get_client(uuids.fake)
net, iid = api._nw_info_build_network(fake_port, fake_nets,
fake_subnets)
self.assertEqual(fake_subnets, net['subnets'])
self.assertEqual('net-id', net['id'])
self.assertEqual('foo', net['label'])
self.assertEqual('tenant', net.get_meta('tenant_id'))
self.assertEqual(9000, net.get_meta('mtu'))
self.assertEqual(CONF.flat_injected, net.get_meta('injected'))
return net, iid
def test_nw_info_build_network_ovs(self):
net, iid = self._test_nw_info_build_network(model.VIF_TYPE_OVS)
self.assertEqual(CONF.neutron.ovs_bridge, net['bridge'])
self.assertNotIn('should_create_bridge', net)
self.assertEqual('port-id', iid)
def test_nw_info_build_network_dvs(self):
net, iid = self._test_nw_info_build_network(model.VIF_TYPE_DVS)
self.assertEqual('net-id', net['bridge'])
self.assertNotIn('should_create_bridge', net)
self.assertNotIn('ovs_interfaceid', net)
self.assertIsNone(iid)
def test_nw_info_build_network_bridge(self):
net, iid = self._test_nw_info_build_network(model.VIF_TYPE_BRIDGE)
self.assertEqual('brqnet-id', net['bridge'])
self.assertTrue(net['should_create_bridge'])
self.assertIsNone(iid)
def test_nw_info_build_network_tap(self):
net, iid = self._test_nw_info_build_network(model.VIF_TYPE_TAP)
self.assertIsNone(net['bridge'])
self.assertNotIn('should_create_bridge', net)
self.assertIsNone(iid)
def test_nw_info_build_network_other(self):
net, iid = self._test_nw_info_build_network(None)
self.assertIsNone(net['bridge'])
self.assertNotIn('should_create_bridge', net)
self.assertIsNone(iid)
def test_nw_info_build_no_match(self):
fake_port = {
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'id': 'port-id',
'network_id': 'net-id1',
'tenant_id': 'tenant',
'binding:vif_type': model.VIF_TYPE_OVS,
}
fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
fake_nets = [{'id': 'net-id2', 'name': 'foo', 'tenant_id': 'tenant'}]
api = neutronapi.API()
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.mox.ReplayAll()
neutronapi.get_client(uuids.fake)
net, iid = api._nw_info_build_network(fake_port, fake_nets,
fake_subnets)
self.assertEqual(fake_subnets, net['subnets'])
self.assertEqual('net-id1', net['id'])
self.assertEqual('tenant', net['meta']['tenant_id'])
def test_nw_info_build_network_vhostuser(self):
fake_port = {
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'id': 'port-id',
'network_id': 'net-id',
'binding:vif_type': model.VIF_TYPE_VHOSTUSER,
'binding:vif_details': {
model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True
}
}
fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant'}]
api = neutronapi.API()
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.mox.ReplayAll()
neutronapi.get_client(uuids.fake)
net, iid = api._nw_info_build_network(fake_port, fake_nets,
fake_subnets)
self.assertEqual(fake_subnets, net['subnets'])
self.assertEqual('net-id', net['id'])
self.assertEqual('foo', net['label'])
self.assertEqual('tenant', net.get_meta('tenant_id'))
self.assertEqual(CONF.flat_injected, net.get_meta('injected'))
self.assertEqual(CONF.neutron.ovs_bridge, net['bridge'])
self.assertNotIn('should_create_bridge', net)
self.assertEqual('port-id', iid)
def _test_nw_info_build_custom_bridge(self, vif_type, extra_details=None):
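        # Helper: when the port's binding:vif_details carries a bridge
        # name, that name should be used instead of the configured OVS
        # bridge.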
fake_port = {
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'id': 'port-id',
'network_id': 'net-id',
'binding:vif_type': vif_type,
'binding:vif_details': {
model.VIF_DETAILS_BRIDGE_NAME: 'custom-bridge',
}
}
if extra_details:
fake_port['binding:vif_details'].update(extra_details)
fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant'}]
api = neutronapi.API()
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.mox.ReplayAll()
neutronapi.get_client(uuids.fake)
net, iid = api._nw_info_build_network(fake_port, fake_nets,
fake_subnets)
self.assertNotEqual(CONF.neutron.ovs_bridge, net['bridge'])
self.assertEqual('custom-bridge', net['bridge'])
def test_nw_info_build_custom_ovs_bridge(self):
self._test_nw_info_build_custom_bridge(model.VIF_TYPE_OVS)
def test_nw_info_build_custom_ovs_bridge_vhostuser(self):
self._test_nw_info_build_custom_bridge(model.VIF_TYPE_VHOSTUSER,
{model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True})
def test_nw_info_build_custom_lb_bridge(self):
self._test_nw_info_build_custom_bridge(model.VIF_TYPE_BRIDGE)
def test_build_network_info_model(self):
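        # Build network info for an instance with six ports on the known
        # network, covering normal, direct and macvtap vnic types; a
        # seventh port on another network must be ignored.  Per-VIF
        # fields, the active flag and preserve_on_delete are all checked.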
api = neutronapi.API()
fake_inst = objects.Instance()
fake_inst.project_id = uuids.fake
fake_inst.uuid = uuids.instance
fake_inst.info_cache = objects.InstanceInfoCache()
fake_inst.info_cache.network_info = model.NetworkInfo()
fake_ports = [
# admin_state_up=True and status='ACTIVE' thus vif.active=True
{'id': 'port1',
'network_id': 'net-id',
'admin_state_up': True,
'status': 'ACTIVE',
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': 'de:ad:be:ef:00:01',
'binding:vif_type': model.VIF_TYPE_BRIDGE,
'binding:vnic_type': model.VNIC_TYPE_NORMAL,
'binding:vif_details': {},
},
# admin_state_up=False and status='DOWN' thus vif.active=True
{'id': 'port2',
'network_id': 'net-id',
'admin_state_up': False,
'status': 'DOWN',
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': 'de:ad:be:ef:00:02',
'binding:vif_type': model.VIF_TYPE_BRIDGE,
'binding:vnic_type': model.VNIC_TYPE_NORMAL,
'binding:vif_details': {},
},
# admin_state_up=True and status='DOWN' thus vif.active=False
{'id': 'port0',
'network_id': 'net-id',
'admin_state_up': True,
'status': 'DOWN',
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': 'de:ad:be:ef:00:03',
'binding:vif_type': model.VIF_TYPE_BRIDGE,
'binding:vnic_type': model.VNIC_TYPE_NORMAL,
'binding:vif_details': {},
},
# admin_state_up=True and status='ACTIVE' thus vif.active=True
{'id': 'port3',
'network_id': 'net-id',
'admin_state_up': True,
'status': 'ACTIVE',
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': 'de:ad:be:ef:00:04',
'binding:vif_type': model.VIF_TYPE_HW_VEB,
'binding:vnic_type': model.VNIC_TYPE_DIRECT,
neutronapi.BINDING_PROFILE: {'pci_vendor_info': '1137:0047',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'},
'binding:vif_details': {model.VIF_DETAILS_PROFILEID: 'pfid'},
},
# admin_state_up=True and status='ACTIVE' thus vif.active=True
{'id': 'port4',
'network_id': 'net-id',
'admin_state_up': True,
'status': 'ACTIVE',
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': 'de:ad:be:ef:00:05',
'binding:vif_type': model.VIF_TYPE_802_QBH,
'binding:vnic_type': model.VNIC_TYPE_MACVTAP,
neutronapi.BINDING_PROFILE: {'pci_vendor_info': '1137:0047',
'pci_slot': '0000:0a:00.2',
'physical_network': 'phynet1'},
'binding:vif_details': {model.VIF_DETAILS_PROFILEID: 'pfid'},
},
# admin_state_up=True and status='ACTIVE' thus vif.active=True
            # This port has no binding:vnic_type, to verify that the
            # default vnic type is assumed
{'id': 'port5',
'network_id': 'net-id',
'admin_state_up': True,
'status': 'ACTIVE',
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': 'de:ad:be:ef:00:06',
'binding:vif_type': model.VIF_TYPE_BRIDGE,
# No binding:vnic_type
'binding:vif_details': {},
},
# This does not match the networks we provide below,
# so it should be ignored (and is here to verify that)
{'id': 'port6',
'network_id': 'other-net-id',
'admin_state_up': True,
'status': 'DOWN',
'binding:vnic_type': model.VNIC_TYPE_NORMAL,
},
]
fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
fake_nets = [
{'id': 'net-id',
'name': 'foo',
'tenant_id': uuids.fake,
}
]
neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
self.moxed_client.list_ports(
tenant_id=uuids.fake, device_id=uuids.instance).AndReturn(
{'ports': fake_ports})
self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port')
self.mox.StubOutWithMock(api, '_get_subnets_from_port')
requested_ports = [fake_ports[2], fake_ports[0], fake_ports[1],
fake_ports[3], fake_ports[4], fake_ports[5]]
for requested_port in requested_ports:
api._get_floating_ips_by_fixed_and_port(
self.moxed_client, '1.1.1.1', requested_port['id']).AndReturn(
[{'floating_ip_address': '10.0.0.1'}])
for requested_port in requested_ports:
api._get_subnets_from_port(self.context, requested_port,
self.moxed_client).AndReturn(
fake_subnets)
self.mox.StubOutWithMock(api, '_get_preexisting_port_ids')
api._get_preexisting_port_ids(fake_inst).AndReturn(['port5'])
self.mox.ReplayAll()
fake_inst.info_cache = objects.InstanceInfoCache.new(
self.context, uuids.instance)
fake_inst.info_cache.network_info = model.NetworkInfo.hydrate([])
nw_infos = api._build_network_info_model(
self.context, fake_inst,
fake_nets,
[fake_ports[2]['id'],
fake_ports[0]['id'],
fake_ports[1]['id'],
fake_ports[3]['id'],
fake_ports[4]['id'],
fake_ports[5]['id']],
preexisting_port_ids=['port3'])
self.assertEqual(6, len(nw_infos))
index = 0
for nw_info in nw_infos:
self.assertEqual(requested_ports[index]['mac_address'],
nw_info['address'])
self.assertEqual('tapport' + str(index), nw_info['devname'])
self.assertIsNone(nw_info['ovs_interfaceid'])
self.assertEqual(requested_ports[index]['binding:vif_type'],
nw_info['type'])
if nw_info['type'] == model.VIF_TYPE_BRIDGE:
self.assertEqual('brqnet-id', nw_info['network']['bridge'])
self.assertEqual(requested_ports[index].get('binding:vnic_type',
model.VNIC_TYPE_NORMAL), nw_info['vnic_type'])
self.assertEqual(requested_ports[index].get('binding:vif_details'),
nw_info.get('details'))
self.assertEqual(
requested_ports[index].get(neutronapi.BINDING_PROFILE),
nw_info.get('profile'))
index += 1
self.assertFalse(nw_infos[0]['active'])
self.assertTrue(nw_infos[1]['active'])
self.assertTrue(nw_infos[2]['active'])
self.assertTrue(nw_infos[3]['active'])
self.assertTrue(nw_infos[4]['active'])
self.assertTrue(nw_infos[5]['active'])
self.assertEqual('port0', nw_infos[0]['id'])
self.assertEqual('port1', nw_infos[1]['id'])
self.assertEqual('port2', nw_infos[2]['id'])
self.assertEqual('port3', nw_infos[3]['id'])
self.assertEqual('port4', nw_infos[4]['id'])
self.assertEqual('port5', nw_infos[5]['id'])
self.assertFalse(nw_infos[0]['preserve_on_delete'])
self.assertFalse(nw_infos[1]['preserve_on_delete'])
self.assertFalse(nw_infos[2]['preserve_on_delete'])
self.assertTrue(nw_infos[3]['preserve_on_delete'])
self.assertFalse(nw_infos[4]['preserve_on_delete'])
self.assertTrue(nw_infos[5]['preserve_on_delete'])
@mock.patch('nova.network.neutronv2.api.API._nw_info_get_subnets')
@mock.patch('nova.network.neutronv2.api.API._nw_info_get_ips')
@mock.patch('nova.network.neutronv2.api.API._nw_info_build_network')
@mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids')
@mock.patch('nova.network.neutronv2.api.API._gather_port_ids_and_networks')
def test_build_network_info_model_empty(
self, mock_gather_port_ids_and_networks,
mock_get_preexisting_port_ids,
mock_nw_info_build_network,
mock_nw_info_get_ips,
mock_nw_info_get_subnets):
# An empty instance info network cache should not be populated from
# ports found in Neutron.
api = neutronapi.API()
fake_inst = objects.Instance()
fake_inst.project_id = uuids.fake
fake_inst.uuid = uuids.instance
fake_inst.info_cache = objects.InstanceInfoCache()
fake_inst.info_cache.network_info = model.NetworkInfo()
fake_ports = [
# admin_state_up=True and status='ACTIVE' thus vif.active=True
{'id': 'port1',
'network_id': 'net-id',
'admin_state_up': True,
'status': 'ACTIVE',
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': 'de:ad:be:ef:00:01',
'binding:vif_type': model.VIF_TYPE_BRIDGE,
'binding:vnic_type': model.VNIC_TYPE_NORMAL,
'binding:vif_details': {},
},
]
fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
self.moxed_client.list_ports(
tenant_id=uuids.fake, device_id=uuids.instance).AndReturn(
{'ports': fake_ports})
mock_gather_port_ids_and_networks.return_value = ([], [])
mock_get_preexisting_port_ids.return_value = []
mock_nw_info_build_network.return_value = (None, None)
mock_nw_info_get_ips.return_value = []
mock_nw_info_get_subnets.return_value = fake_subnets
self.mox.ReplayAll()
nw_infos = api._build_network_info_model(
self.context, fake_inst)
self.assertEqual(0, len(nw_infos))
def test_get_subnets_from_port(self):
api = neutronapi.API()
port_data = copy.copy(self.port_data1[0])
subnet_data1 = copy.copy(self.subnet_data1)
subnet_data1[0]['host_routes'] = [
{'destination': '192.168.0.0/24', 'nexthop': '1.0.0.10'}
]
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_subnets(
id=[port_data['fixed_ips'][0]['subnet_id']]
).AndReturn({'subnets': subnet_data1})
self.moxed_client.list_ports(
network_id=subnet_data1[0]['network_id'],
device_owner='network:dhcp').AndReturn({'ports': []})
self.mox.ReplayAll()
subnets = api._get_subnets_from_port(self.context, port_data)
self.assertEqual(1, len(subnets))
self.assertEqual(1, len(subnets[0]['routes']))
self.assertEqual(subnet_data1[0]['host_routes'][0]['destination'],
subnets[0]['routes'][0]['cidr'])
self.assertEqual(subnet_data1[0]['host_routes'][0]['nexthop'],
subnets[0]['routes'][0]['gateway']['address'])
def test_get_all_empty_list_networks(self):
api = neutronapi.API()
neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
self.moxed_client.list_networks().AndReturn({'networks': []})
self.mox.ReplayAll()
networks = api.get_all(self.context)
self.assertIsInstance(networks, objects.NetworkList)
self.assertEqual(0, len(networks))
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_get_port_vnic_info_1(self, mock_get_client):
api = neutronapi.API()
self.mox.ResetAll()
test_port = {
'port': {'id': 'my_port_id1',
'network_id': 'net-id',
'binding:vnic_type': model.VNIC_TYPE_DIRECT,
},
}
test_net = {'network': {'provider:physical_network': 'phynet1'}}
mock_client = mock_get_client()
mock_client.show_port.return_value = test_port
mock_client.show_network.return_value = test_net
vnic_type, phynet_name = api._get_port_vnic_info(
self.context, mock_client, test_port['port']['id'])
mock_client.show_port.assert_called_once_with(test_port['port']['id'],
fields=['binding:vnic_type', 'network_id'])
mock_client.show_network.assert_called_once_with(
test_port['port']['network_id'],
fields='provider:physical_network')
self.assertEqual(model.VNIC_TYPE_DIRECT, vnic_type)
self.assertEqual('phynet1', phynet_name)
def _test_get_port_vnic_info(self, mock_get_client,
binding_vnic_type=None):
api = neutronapi.API()
self.mox.ResetAll()
test_port = {
'port': {'id': 'my_port_id2',
'network_id': 'net-id',
},
}
if binding_vnic_type:
test_port['port']['binding:vnic_type'] = binding_vnic_type
mock_get_client.reset_mock()
mock_client = mock_get_client()
mock_client.show_port.return_value = test_port
vnic_type, phynet_name = api._get_port_vnic_info(
self.context, mock_client, test_port['port']['id'])
mock_client.show_port.assert_called_once_with(test_port['port']['id'],
fields=['binding:vnic_type', 'network_id'])
self.assertEqual(model.VNIC_TYPE_NORMAL, vnic_type)
self.assertFalse(phynet_name)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_get_port_vnic_info_2(self, mock_get_client):
self._test_get_port_vnic_info(mock_get_client,
binding_vnic_type=model.VNIC_TYPE_NORMAL)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_get_port_vnic_info_3(self, mock_get_client):
self._test_get_port_vnic_info(mock_get_client)
@mock.patch.object(neutronapi.API, "_get_port_vnic_info")
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_create_pci_requests_for_sriov_ports(self, mock_get_client,
mock_get_port_vnic_info):
api = neutronapi.API()
self.mox.ResetAll()
requested_networks = objects.NetworkRequestList(
objects = [
objects.NetworkRequest(port_id=uuids.portid_1),
objects.NetworkRequest(network_id='net1'),
objects.NetworkRequest(port_id=uuids.portid_2),
objects.NetworkRequest(port_id=uuids.portid_3),
objects.NetworkRequest(port_id=uuids.portid_4),
objects.NetworkRequest(port_id=uuids.portid_5)])
pci_requests = objects.InstancePCIRequests(requests=[])
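        # _get_port_vnic_info is only called for the port requests; each
        # call returns a (vnic_type, physical_network) tuple. Only the
        # non-normal vnic types should yield a PCI request, and the
        # direct-physical port should produce a type-PF request.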
mock_get_port_vnic_info.side_effect = [
(model.VNIC_TYPE_DIRECT, 'phynet1'),
(model.VNIC_TYPE_NORMAL, ''),
(model.VNIC_TYPE_MACVTAP, 'phynet1'),
(model.VNIC_TYPE_MACVTAP, 'phynet2'),
(model.VNIC_TYPE_DIRECT_PHYSICAL, 'phynet3')
]
api.create_pci_requests_for_sriov_ports(
None, pci_requests, requested_networks)
self.assertEqual(4, len(pci_requests.requests))
has_pci_request_id = [net.pci_request_id is not None for net in
requested_networks.objects]
self.assertEqual(pci_requests.requests[3].spec[0]["dev_type"],
"type-PF")
expected_results = [True, False, False, True, True, True]
self.assertEqual(expected_results, has_pci_request_id)
class TestNeutronv2WithMock(test.TestCase):
"""Used to test Neutron V2 API with mock."""
def setUp(self):
super(TestNeutronv2WithMock, self).setUp()
self.api = neutronapi.API()
self.context = context.RequestContext(
'fake-user', 'fake-project',
auth_token='bff4a5a6b9eb4ea2a6efec6eefb77936')
@mock.patch('nova.network.neutronv2.api.API._show_port')
def test_deferred_ip_port_immediate_allocation(self, mock_show):
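        # A port with no fixed IPs and 'immediate' IP allocation still
        # requires a fixed IP, so validation should fail.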
port = {'network_id': 'my_netid1',
'device_id': None,
'id': uuids.port,
'fixed_ips': [], # no fixed ip
'ip_allocation': 'immediate', }
mock_show.return_value = port
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=port['id'])])
self.assertRaises(exception.PortRequiresFixedIP,
self.api.validate_networks,
self.context, requested_networks, 1)
@mock.patch('nova.network.neutronv2.api.API._show_port')
def test_deferred_ip_port_deferred_allocation(self, mock_show):
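        # A port with no fixed IPs is acceptable when its IP allocation
        # is 'deferred'.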
port = {'network_id': 'my_netid1',
'device_id': None,
'id': uuids.port,
'fixed_ips': [], # no fixed ip
'ip_allocation': 'deferred', }
mock_show.return_value = port
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=port['id'])])
count = self.api.validate_networks(self.context, requested_networks, 1)
self.assertEqual(1, count)
@mock.patch('oslo_concurrency.lockutils.lock')
def test_get_instance_nw_info_locks_per_instance(self, mock_lock):
instance = objects.Instance(uuid=uuids.fake)
api = neutronapi.API()
mock_lock.side_effect = test.TestingException
self.assertRaises(test.TestingException,
api.get_instance_nw_info, 'context', instance)
mock_lock.assert_called_once_with('refresh_cache-%s' % instance.uuid)
@mock.patch('nova.network.neutronv2.api.LOG')
def test_get_instance_nw_info_verify_duplicates_ignored(self, mock_log):
"""test that the returned networks & port_ids from
_gather_port_ids_and_networks doesn't contain any duplicates
The test fakes an instance with two ports connected to two networks.
The _gather_port_ids_and_networks method will be called with the
instance and a list of port ids of which one port id is configured
already to the instance (== duplicate #1) and a list of
networks that already contains a network to which an instance port
is connected (== duplicate #2).
All-in-all, we expect the resulting port ids list to contain 3 items
(["instance_port_1", "port_1", "port_2"]) and the resulting networks
list to contain 3 items (["net_1", "net_2", "instance_network_1"])
        while the warning message for duplicate items is logged twice
(due to "duplicate #1" & "duplicate #2")
"""
networks = [model.Network(id="net_1"),
model.Network(id="net_2")]
port_ids = ["port_1", "port_2"]
instance_networks = [{"id": "instance_network_1",
"name": "fake_network",
"tenant_id": "fake_tenant_id"}]
instance_port_ids = ["instance_port_1"]
network_info = model.NetworkInfo(
[{'id': port_ids[0],
'network': networks[0]},
{'id': instance_port_ids[0],
'network': model.Network(
id=instance_networks[0]["id"],
label=instance_networks[0]["name"],
meta={"tenant_id": instance_networks[0]["tenant_id"]})}]
)
instance_uuid = uuids.fake
instance = objects.Instance(uuid=instance_uuid,
info_cache=objects.InstanceInfoCache(
context=self.context,
instance_uuid=instance_uuid,
network_info=network_info))
new_networks, new_port_ids = self.api._gather_port_ids_and_networks(
self.context, instance, networks, port_ids)
self.assertEqual(new_networks, networks + instance_networks)
self.assertEqual(new_port_ids, instance_port_ids + port_ids)
self.assertEqual(2, mock_log.warning.call_count)
@mock.patch('oslo_concurrency.lockutils.lock')
@mock.patch.object(neutronapi.API, '_get_instance_nw_info')
@mock.patch('nova.network.base_api.update_instance_cache_with_nw_info')
def test_get_instance_nw_info(self, mock_update, mock_get, mock_lock):
fake_result = mock.sentinel.get_nw_info_result
mock_get.return_value = fake_result
instance = fake_instance.fake_instance_obj(self.context)
result = self.api.get_instance_nw_info(self.context, instance)
mock_get.assert_called_once_with(self.context, instance)
mock_update.assert_called_once_with(self.api, self.context, instance,
nw_info=fake_result,
update_cells=False)
self.assertEqual(fake_result, result)
def _test_validate_networks_fixed_ip_no_dup(self, nets, requested_networks,
ids, list_port_values):
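        # list_port_values maps the expected list_ports() search_opts to
        # the canned responses returned by the fake below.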
def _fake_list_ports(**search_opts):
for args, return_value in list_port_values:
if args == search_opts:
return return_value
self.fail('Unexpected call to list_ports %s' % search_opts)
with test.nested(
mock.patch.object(client.Client, 'list_ports',
side_effect=_fake_list_ports),
mock.patch.object(client.Client, 'list_networks',
return_value={'networks': nets}),
mock.patch.object(client.Client, 'show_quota',
return_value={'quota': {'port': 50}})) as (
list_ports_mock, list_networks_mock, show_quota_mock):
self.api.validate_networks(self.context, requested_networks, 1)
self.assertEqual(len(list_port_values),
len(list_ports_mock.call_args_list))
list_networks_mock.assert_called_once_with(id=ids)
show_quota_mock.assert_called_once_with('fake-project')
def test_validate_networks_over_limit_quota(self):
"""Test validates that a relevant exception is being raised when
there are more ports defined, than there is a quota for it.
"""
requested_networks = [(uuids.my_netid1, '10.0.1.2', None, None),
(uuids.my_netid2, '10.0.1.3', None, None)]
list_port_values = [({'network_id': uuids.my_netid1,
'fixed_ips': 'ip_address=10.0.1.2',
'fields': 'device_id'},
{'ports': []}),
({'network_id': uuids.my_netid2,
'fixed_ips': 'ip_address=10.0.1.3',
'fields': 'device_id'},
{'ports': []}),
({'tenant_id': 'fake-project', 'fields': ['id']},
{'ports': [1, 2, 3, 4, 5]})]
nets = [{'subnets': '1'}, {'subnets': '2'}]
def _fake_list_ports(**search_opts):
for args, return_value in list_port_values:
if args == search_opts:
return return_value
with test.nested(
mock.patch.object(self.api, '_get_available_networks',
return_value=nets),
mock.patch.object(client.Client, 'list_ports',
side_effect=_fake_list_ports),
mock.patch.object(client.Client, 'show_quota',
return_value={'quota': {'port': 1}})):
exc = self.assertRaises(exception.PortLimitExceeded,
self.api.validate_networks,
self.context, requested_networks, 1)
expected_exception_msg = ('The number of defined ports: '
'%(ports)d is over the limit: '
'%(quota)d' %
{'ports': 5,
'quota': 1})
self.assertEqual(expected_exception_msg, str(exc))
def test_validate_networks_fixed_ip_no_dup1(self):
        # Test validation for a request for a network with a fixed IP
        # that is not already in use because no fixed IPs are in use.
nets1 = [{'id': uuids.my_netid1,
'name': 'my_netname1',
'subnets': ['mysubnid1'],
'tenant_id': 'fake-project'}]
requested_networks = [(uuids.my_netid1, '10.0.1.2', None, None)]
ids = [uuids.my_netid1]
list_port_values = [({'network_id': uuids.my_netid1,
'fixed_ips': 'ip_address=10.0.1.2',
'fields': 'device_id'},
{'ports': []}),
({'tenant_id': 'fake-project', 'fields': ['id']},
{'ports': []})]
self._test_validate_networks_fixed_ip_no_dup(nets1, requested_networks,
ids, list_port_values)
def test_validate_networks_fixed_ip_no_dup2(self):
        # Test validation for a request for a network with a fixed IP
        # that is not already in use because it is not used on this net id.
nets2 = [{'id': uuids.my_netid1,
'name': 'my_netname1',
'subnets': ['mysubnid1'],
'tenant_id': 'fake-project'},
{'id': uuids.my_netid2,
'name': 'my_netname2',
'subnets': ['mysubnid2'],
'tenant_id': 'fake-project'}]
requested_networks = [(uuids.my_netid1, '10.0.1.2', None, None),
(uuids.my_netid2, '10.0.1.3', None, None)]
ids = [uuids.my_netid1, uuids.my_netid2]
list_port_values = [({'network_id': uuids.my_netid1,
'fixed_ips': 'ip_address=10.0.1.2',
'fields': 'device_id'},
{'ports': []}),
({'network_id': uuids.my_netid2,
'fixed_ips': 'ip_address=10.0.1.3',
'fields': 'device_id'},
{'ports': []}),
({'tenant_id': 'fake-project', 'fields': ['id']},
{'ports': []})]
self._test_validate_networks_fixed_ip_no_dup(nets2, requested_networks,
ids, list_port_values)
def test_validate_networks_fixed_ip_dup(self):
# Test validation for a request for a network with a
# fixed ip that is already in use
requested_networks = [(uuids.my_netid1, '10.0.1.2', None, None)]
list_port_mock_params = {'network_id': uuids.my_netid1,
'fixed_ips': 'ip_address=10.0.1.2',
'fields': 'device_id'}
list_port_mock_return = {'ports': [({'device_id': 'my_deviceid'})]}
with mock.patch.object(client.Client, 'list_ports',
return_value=list_port_mock_return) as (
list_ports_mock):
self.assertRaises(exception.FixedIpAlreadyInUse,
self.api.validate_networks,
self.context, requested_networks, 1)
list_ports_mock.assert_called_once_with(**list_port_mock_params)
def test_allocate_floating_ip_exceed_limit(self):
        # Verify the correct exception is raised when the quota is exceeded.
pool_name = 'dummy'
api = neutronapi.API()
with test.nested(
mock.patch.object(client.Client, 'create_floatingip'),
mock.patch.object(api,
'_get_floating_ip_pool_id_by_name_or_id')) as (
create_mock, get_mock):
create_mock.side_effect = exceptions.OverQuotaClient()
self.assertRaises(exception.FloatingIpLimitExceeded,
api.allocate_floating_ip,
self.context, pool_name)
def test_allocate_floating_ip_no_ipv4_subnet(self):
api = neutronapi.API()
net_id = uuids.fake
error_msg = ('Bad floatingip request: Network %s does not contain '
'any IPv4 subnet' % net_id)
with test.nested(
mock.patch.object(client.Client, 'create_floatingip'),
mock.patch.object(api,
'_get_floating_ip_pool_id_by_name_or_id')) as (
create_mock, get_mock):
create_mock.side_effect = exceptions.BadRequest(error_msg)
self.assertRaises(exception.FloatingIpBadRequest,
api.allocate_floating_ip, self.context,
'ext_net')
@mock.patch.object(client.Client, 'create_port')
def test_create_port_minimal_raise_no_more_ip(self, create_port_mock):
instance = fake_instance.fake_instance_obj(self.context)
create_port_mock.side_effect = \
exceptions.IpAddressGenerationFailureClient()
self.assertRaises(exception.NoMoreFixedIps,
self.api._create_port_minimal,
neutronapi.get_client(self.context),
instance, uuids.my_netid1)
self.assertTrue(create_port_mock.called)
@mock.patch.object(client.Client, 'update_port',
side_effect=exceptions.MacAddressInUseClient())
def test_update_port_for_instance_mac_address_in_use(self,
update_port_mock):
port_uuid = uuids.port
instance = objects.Instance(uuid=uuids.instance)
port_req_body = {'port': {
'id': port_uuid,
'mac_address': 'XX:XX:XX:XX:XX:XX',
'network_id': uuids.network_id}}
self.assertRaises(exception.PortInUse,
self.api._update_port,
neutronapi.get_client(self.context),
instance, port_uuid, port_req_body)
update_port_mock.assert_called_once_with(port_uuid, port_req_body)
@mock.patch.object(client.Client, 'update_port',
side_effect=exceptions.HostNotCompatibleWithFixedIpsClient())
def test_update_port_for_instance_fixed_ips_invalid(self,
update_port_mock):
port_uuid = uuids.port
instance = objects.Instance(uuid=uuids.instance)
port_req_body = {'port': {
'id': port_uuid,
'mac_address': 'XX:XX:XX:XX:XX:XX',
'network_id': uuids.network_id}}
self.assertRaises(exception.FixedIpInvalidOnHost,
self.api._update_port,
neutronapi.get_client(self.context),
instance, port_uuid, port_req_body)
update_port_mock.assert_called_once_with(port_uuid, port_req_body)
@mock.patch.object(client.Client, 'update_port')
def test_update_port_for_instance_binding_failure(self,
update_port_mock):
port_uuid = uuids.port
instance = objects.Instance(uuid=uuids.instance)
port_req_body = {'port': {
'id': port_uuid,
'mac_address': 'XX:XX:XX:XX:XX:XX',
'network_id': uuids.network_id}}
update_port_mock.return_value = {'port': {
'id': port_uuid,
'binding:vif_type': model.VIF_TYPE_BINDING_FAILED
}}
self.assertRaises(exception.PortBindingFailed,
self.api._update_port,
neutronapi.get_client(self.context),
instance, port_uuid, port_req_body)
@mock.patch.object(client.Client, 'create_port',
side_effect=exceptions.IpAddressInUseClient())
def test_create_port_minimal_raise_ip_in_use(self, create_port_mock):
instance = fake_instance.fake_instance_obj(self.context)
fake_ip = '1.1.1.1'
self.assertRaises(exception.FixedIpAlreadyInUse,
self.api._create_port_minimal,
neutronapi.get_client(self.context),
instance, uuids.my_netid1, fixed_ip=fake_ip)
self.assertTrue(create_port_mock.called)
@mock.patch.object(client.Client, 'create_port',
side_effect=exceptions.InvalidIpForNetworkClient())
def test_create_port_minimal_raise_invalid_ip(self, create_port_mock):
instance = fake_instance.fake_instance_obj(self.context)
fake_ip = '1.1.1.1'
exc = self.assertRaises(exception.InvalidInput,
self.api._create_port_minimal,
neutronapi.get_client(self.context),
instance, uuids.my_netid1, fixed_ip=fake_ip)
expected_exception_msg = ('Invalid input received: Fixed IP %(ip)s is '
'not a valid ip address for network '
'%(net_id)s.' %
{'ip': fake_ip, 'net_id': uuids.my_netid1})
self.assertEqual(expected_exception_msg, str(exc))
self.assertTrue(create_port_mock.called)
def test_get_network_detail_not_found(self):
api = neutronapi.API()
expected_exc = exceptions.NetworkNotFoundClient()
network_uuid = '02cacbca-7d48-4a2c-8011-43eecf8a9786'
with mock.patch.object(client.Client, 'show_network',
side_effect=expected_exc) as (
fake_show_network):
self.assertRaises(exception.NetworkNotFound,
api.get,
self.context,
network_uuid)
fake_show_network.assert_called_once_with(network_uuid)
@mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids')
@mock.patch('nova.network.neutronv2.api.API.'
'_refresh_neutron_extensions_cache')
def test_deallocate_for_instance_uses_delete_helper(self,
mock_refresh,
mock_preexisting):
# setup fake data
instance = fake_instance.fake_instance_obj(self.context)
mock_preexisting.return_value = []
port_data = {'ports': [{'id': uuids.fake}]}
ports = set([port['id'] for port in port_data.get('ports')])
api = neutronapi.API()
# setup mocks
mock_client = mock.Mock()
mock_client.list_ports.return_value = port_data
with test.nested(
mock.patch.object(neutronapi, 'get_client',
return_value=mock_client),
mock.patch.object(api, '_delete_ports')
) as (
mock_get_client, mock_delete
):
# run the code
api.deallocate_for_instance(self.context, instance)
# assert the calls
mock_client.list_ports.assert_called_once_with(
device_id=instance.uuid)
mock_delete.assert_called_once_with(
mock_client, instance, ports, raise_if_fail=True)
def _test_delete_ports(self, expect_raise):
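        # The first delete_port call raises and the second succeeds; the
        # exception only propagates when raise_if_fail is True.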
results = [exceptions.NeutronClientException, None]
mock_client = mock.Mock()
with mock.patch.object(mock_client, 'delete_port',
side_effect=results):
api = neutronapi.API()
api._delete_ports(mock_client, {'uuid': 'foo'}, ['port1', 'port2'],
raise_if_fail=expect_raise)
def test_delete_ports_raise(self):
self.assertRaises(exceptions.NeutronClientException,
self._test_delete_ports, True)
def test_delete_ports_no_raise(self):
self._test_delete_ports(False)
def test_delete_ports_never_raise_404(self):
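        # A 404 (PortNotFoundClient) is always swallowed, even with
        # raise_if_fail=True.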
mock_client = mock.Mock()
mock_client.delete_port.side_effect = exceptions.PortNotFoundClient
api = neutronapi.API()
api._delete_ports(mock_client, {'uuid': 'foo'}, ['port1'],
raise_if_fail=True)
mock_client.delete_port.assert_called_once_with('port1')
@mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids')
def test_deallocate_port_for_instance_fails(self, mock_preexisting):
mock_preexisting.return_value = []
mock_client = mock.Mock()
api = neutronapi.API()
with test.nested(
mock.patch.object(neutronapi, 'get_client',
return_value=mock_client),
mock.patch.object(api, '_delete_ports',
side_effect=exceptions.Unauthorized),
mock.patch.object(api, 'get_instance_nw_info')
) as (
get_client, delete_ports, get_nw_info
):
self.assertRaises(exceptions.Unauthorized,
api.deallocate_port_for_instance,
self.context, instance={'uuid': uuids.fake},
port_id=uuids.fake)
# make sure that we didn't try to reload nw info
self.assertFalse(get_nw_info.called)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def _test_show_port_exceptions(self, client_exc, expected_nova_exc,
get_client_mock):
show_port_mock = mock.Mock(side_effect=client_exc)
get_client_mock.return_value.show_port = show_port_mock
self.assertRaises(expected_nova_exc, self.api.show_port,
self.context, 'fake_port_id')
def test_show_port_not_found(self):
self._test_show_port_exceptions(exceptions.PortNotFoundClient,
exception.PortNotFound)
def test_show_port_forbidden(self):
self._test_show_port_exceptions(exceptions.Unauthorized,
exception.Forbidden)
def test_show_port_unknown_exception(self):
self._test_show_port_exceptions(exceptions.NeutronClientException,
exception.NovaException)
def test_get_network(self):
api = neutronapi.API()
with mock.patch.object(client.Client, 'show_network') as mock_show:
mock_show.return_value = {
'network': {'id': uuids.instance, 'name': 'fake-network'}
}
net_obj = api.get(self.context, uuids.instance)
self.assertEqual('fake-network', net_obj.label)
self.assertEqual('fake-network', net_obj.name)
self.assertEqual(uuids.instance, net_obj.uuid)
def test_get_all_networks(self):
api = neutronapi.API()
with mock.patch.object(client.Client, 'list_networks') as mock_list:
mock_list.return_value = {
'networks': [
{'id': uuids.network_1, 'name': 'fake-network1'},
{'id': uuids.network_2, 'name': 'fake-network2'},
]}
net_objs = api.get_all(self.context)
self.assertIsInstance(net_objs, objects.NetworkList)
self.assertEqual(2, len(net_objs))
self.assertEqual((uuids.network_1, 'fake-network1'),
(net_objs[0].uuid, net_objs[0].name))
self.assertEqual((uuids.network_2, 'fake-network2'),
(net_objs[1].uuid, net_objs[1].name))
@mock.patch.object(neutronapi.API, "_refresh_neutron_extensions_cache")
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_instance_vnic_index(self, mock_get_client,
mock_refresh_extensions):
api = neutronapi.API()
api.extensions = set([constants.VNIC_INDEX_EXT])
mock_client = mock_get_client()
mock_client.update_port.return_value = 'port'
instance = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0',
'uuid': uuids.fake,
'display_name': 'test_instance',
'availability_zone': 'nova',
'host': 'some_host'}
instance = objects.Instance(**instance)
vif = {'id': 'fake-port-id'}
api.update_instance_vnic_index(self.context, instance, vif, 7)
port_req_body = {'port': {'vnic_index': 7}}
mock_client.update_port.assert_called_once_with('fake-port-id',
port_req_body)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_port_bindings_for_instance_with_migration_profile(
self, get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
self.api._has_port_binding_extension = mock.Mock(return_value=True)
        # We pass in a port profile which has a migration attribute and
        # also a second port profile attribute 'fake_profile'. This could
        # be an SR-IOV port profile attribute or a pci_slot attribute, but
        # for now we are just using a fake one to show that the code does
        # not remove the binding profile if there is one.
binding_profile = {'fake_profile': 'fake_data',
neutronapi.MIGRATING_ATTR: 'my-dest-host'}
fake_ports = {'ports': [
{'id': 'fake-port-1',
neutronapi.BINDING_PROFILE: binding_profile,
neutronapi.BINDING_HOST_ID: instance.host}]}
list_ports_mock = mock.Mock(return_value=fake_ports)
get_client_mock.return_value.list_ports = list_ports_mock
update_port_mock = mock.Mock()
get_client_mock.return_value.update_port = update_port_mock
self.api._update_port_binding_for_instance(self.context, instance,
'my-host')
        # Assert that update_port was called on the port with a different
        # host and also that the migration profile is removed from the
        # port since it does not match the current host.
update_port_mock.assert_called_once_with(
'fake-port-1', {'port': {neutronapi.BINDING_HOST_ID: 'my-host',
neutronapi.BINDING_PROFILE: {
'fake_profile': 'fake_data'}}})
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_port_bindings_for_instance_same_host(self,
get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
self.api._has_port_binding_extension = mock.Mock(return_value=True)
# We test two ports, one with the same host as the host passed in and
# one where binding:host_id isn't set, so we update that port.
fake_ports = {'ports': [
{'id': 'fake-port-1',
neutronapi.BINDING_HOST_ID: instance.host},
{'id': 'fake-port-2'}]}
list_ports_mock = mock.Mock(return_value=fake_ports)
get_client_mock.return_value.list_ports = list_ports_mock
update_port_mock = mock.Mock()
get_client_mock.return_value.update_port = update_port_mock
self.api._update_port_binding_for_instance(self.context, instance,
instance.host)
# Assert that update_port was only called on the port without a host.
update_port_mock.assert_called_once_with(
'fake-port-2',
{'port': {neutronapi.BINDING_HOST_ID: instance.host}})
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_port_bindings_for_instance_with_pci(self,
get_client_mock,
get_pci_device_devspec_mock):
self.api._has_port_binding_extension = mock.Mock(return_value=True)
devspec = mock.Mock()
devspec.get_tags.return_value = {'physical_network': 'physnet1'}
get_pci_device_devspec_mock.return_value = devspec
instance = fake_instance.fake_instance_obj(self.context)
instance.migration_context = objects.MigrationContext()
instance.migration_context.old_pci_devices = objects.PciDeviceList(
objects=[objects.PciDevice(vendor_id='1377',
product_id='0047',
address='0000:0a:00.1',
compute_node_id=1,
request_id='1234567890')])
instance.migration_context.new_pci_devices = objects.PciDeviceList(
objects=[objects.PciDevice(vendor_id='1377',
product_id='0047',
address='0000:0b:00.1',
compute_node_id=2,
request_id='1234567890')])
instance.pci_devices = instance.migration_context.old_pci_devices
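        # The migration context maps the old device at 0000:0a:00.1 to the
        # new device at 0000:0b:00.1, so the direct port's binding profile
        # should be rewritten with the new PCI slot.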
        # Validate that non-direct ports aren't updated (fake-port-2).
fake_ports = {'ports': [
{'id': 'fake-port-1',
'binding:vnic_type': 'direct',
neutronapi.BINDING_HOST_ID: 'fake-host-old',
neutronapi.BINDING_PROFILE:
{'pci_slot': '0000:0a:00.1',
'physical_network': 'old_phys_net',
'pci_vendor_info': 'old_pci_vendor_info'}},
{'id': 'fake-port-2',
neutronapi.BINDING_HOST_ID: instance.host}]}
list_ports_mock = mock.Mock(return_value=fake_ports)
get_client_mock.return_value.list_ports = list_ports_mock
update_port_mock = mock.Mock()
get_client_mock.return_value.update_port = update_port_mock
self.api._update_port_binding_for_instance(self.context, instance,
instance.host)
# Assert that update_port is called with the binding:profile
# corresponding to the PCI device specified.
update_port_mock.assert_called_once_with(
'fake-port-1',
{'port':
{neutronapi.BINDING_HOST_ID: 'fake-host',
neutronapi.BINDING_PROFILE:
{'pci_slot': '0000:0b:00.1',
'physical_network': 'physnet1',
'pci_vendor_info': '1377:0047'}}})
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_port_bindings_for_instance_with_pci_fail(self,
get_client_mock,
get_pci_device_devspec_mock):
self.api._has_port_binding_extension = mock.Mock(return_value=True)
devspec = mock.Mock()
devspec.get_tags.return_value = {'physical_network': 'physnet1'}
get_pci_device_devspec_mock.return_value = devspec
instance = fake_instance.fake_instance_obj(self.context)
instance.migration_context = objects.MigrationContext()
instance.migration_context.old_pci_devices = objects.PciDeviceList(
objects=[objects.PciDevice(vendor_id='1377',
product_id='0047',
address='0000:0c:00.1',
compute_node_id=1,
request_id='1234567890')])
instance.migration_context.new_pci_devices = objects.PciDeviceList(
objects=[objects.PciDevice(vendor_id='1377',
product_id='0047',
address='0000:0d:00.1',
compute_node_id=2,
request_id='1234567890')])
instance.pci_devices = instance.migration_context.old_pci_devices
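        # The port's pci_slot (0000:0a:00.1) is not in the migration
        # context (0000:0c:00.1 -> 0000:0d:00.1), so no mapping exists and
        # the update should fail.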
fake_ports = {'ports': [
{'id': 'fake-port-1',
'binding:vnic_type': 'direct',
neutronapi.BINDING_HOST_ID: 'fake-host-old',
neutronapi.BINDING_PROFILE:
{'pci_slot': '0000:0a:00.1',
'physical_network': 'old_phys_net',
'pci_vendor_info': 'old_pci_vendor_info'}}]}
list_ports_mock = mock.Mock(return_value=fake_ports)
get_client_mock.return_value.list_ports = list_ports_mock
# Assert exception is raised if the mapping is wrong.
self.assertRaises(exception.PortUpdateFailed,
self.api._update_port_binding_for_instance,
self.context,
instance,
instance.host)
def test_get_pci_mapping_for_migration(self):
instance = fake_instance.fake_instance_obj(self.context)
instance.migration_context = objects.MigrationContext()
old_pci_devices = objects.PciDeviceList(
objects=[objects.PciDevice(vendor_id='1377',
product_id='0047',
address='0000:0a:00.1',
compute_node_id=1,
request_id='1234567890')])
new_pci_devices = objects.PciDeviceList(
objects=[objects.PciDevice(vendor_id='1377',
product_id='0047',
address='0000:0b:00.1',
compute_node_id=2,
request_id='1234567890')])
instance.migration_context.old_pci_devices = old_pci_devices
instance.migration_context.new_pci_devices = new_pci_devices
instance.pci_devices = instance.migration_context.old_pci_devices
migration = {'status': 'confirmed'}
pci_mapping = self.api._get_pci_mapping_for_migration(
self.context, instance, migration)
self.assertEqual(
{old_pci_devices[0].address: new_pci_devices[0]}, pci_mapping)
def test_get_pci_mapping_for_migration_reverted(self):
instance = fake_instance.fake_instance_obj(self.context)
instance.migration_context = objects.MigrationContext()
old_pci_devices = objects.PciDeviceList(
objects=[objects.PciDevice(vendor_id='1377',
product_id='0047',
address='0000:0a:00.1',
compute_node_id=1,
request_id='1234567890')])
new_pci_devices = objects.PciDeviceList(
objects=[objects.PciDevice(vendor_id='1377',
product_id='0047',
address='0000:0b:00.1',
compute_node_id=2,
request_id='1234567890')])
instance.migration_context.old_pci_devices = old_pci_devices
instance.migration_context.new_pci_devices = new_pci_devices
instance.pci_devices = instance.migration_context.old_pci_devices
migration = {'status': 'reverted'}
pci_mapping = self.api._get_pci_mapping_for_migration(
self.context, instance, migration)
self.assertEqual(
{new_pci_devices[0].address: old_pci_devices[0]}, pci_mapping)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_port_profile_for_migration_teardown_false(
self, get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
self.api._has_port_binding_extension = mock.Mock(return_value=True)
        # We test with an instance host and a destination host to which
        # the port will be moving.
get_ports = {'ports': [
{'id': uuids.port_id,
neutronapi.BINDING_HOST_ID: instance.host}]}
self.api.list_ports = mock.Mock(return_value=get_ports)
update_port_mock = mock.Mock()
get_client_mock.return_value.update_port = update_port_mock
migrate_profile = {neutronapi.MIGRATING_ATTR: 'my-new-host'}
port_data = {'port':
{neutronapi.BINDING_PROFILE: migrate_profile}}
self.api.setup_networks_on_host(self.context,
instance,
host='my-new-host',
teardown=False)
update_port_mock.assert_called_once_with(
uuids.port_id,
port_data)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test__setup_migration_port_profile_called_on_teardown_false(
self, get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
self.api._has_port_binding_extension = mock.Mock(return_value=True)
port_id = uuids.port_id
get_ports = {'ports': [
{'id': port_id,
neutronapi.BINDING_HOST_ID: instance.host}]}
self.api.list_ports = mock.Mock(return_value=get_ports)
self.api._setup_migration_port_profile = mock.Mock()
self.api.setup_networks_on_host(self.context,
instance,
host='my-new-host',
teardown=False)
self.api._setup_migration_port_profile.assert_called_once_with(
self.context, instance, 'my-new-host',
mock.ANY, get_ports['ports'])
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test__setup_migration_port_profile_not_called_with_host_match(
self, get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
self.api._has_port_binding_extension = mock.Mock(return_value=True)
get_ports = {'ports': [
{'id': uuids.port_id,
neutronapi.BINDING_HOST_ID: instance.host}]}
self.api.list_ports = mock.Mock(return_value=get_ports)
self.api._setup_migration_port_profile = mock.Mock()
self.api._clear_migration_port_profile = mock.Mock()
self.api.setup_networks_on_host(self.context,
instance,
host=instance.host,
teardown=False)
self.api._setup_migration_port_profile.assert_not_called()
self.api._clear_migration_port_profile.assert_not_called()
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_port_profile_for_migration_teardown_true_with_profile(
self, get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
self.api._has_port_binding_extension = mock.Mock(return_value=True)
migrate_profile = {neutronapi.MIGRATING_ATTR: instance.host}
        # Pass a port with a migration profile attribute.
port_id = uuids.port_id
get_ports = {'ports': [
{'id': port_id,
neutronapi.BINDING_PROFILE: migrate_profile,
neutronapi.BINDING_HOST_ID: instance.host}]}
self.api.list_ports = mock.Mock(return_value=get_ports)
update_port_mock = mock.Mock()
get_client_mock.return_value.update_port = update_port_mock
self.api.setup_networks_on_host(self.context,
instance,
host=instance.host,
teardown=True)
update_port_mock.assert_called_once_with(
port_id, {'port': {neutronapi.BINDING_PROFILE: migrate_profile}})
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_port_profile_for_migration_teardown_true_no_profile(
self, get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
self.api._has_port_binding_extension = mock.Mock(return_value=True)
        # Pass a port without any migration profile attribute.
get_ports = {'ports': [
{'id': uuids.port_id,
neutronapi.BINDING_HOST_ID: instance.host}]}
self.api.list_ports = mock.Mock(return_value=get_ports)
update_port_mock = mock.Mock()
get_client_mock.return_value.update_port = update_port_mock
self.api.setup_networks_on_host(self.context,
instance,
host=instance.host,
teardown=True)
update_port_mock.assert_not_called()
def test__update_port_with_migration_profile_raise_exception(self):
instance = fake_instance.fake_instance_obj(self.context)
port_id = uuids.port_id
migrate_profile = {'fake-attribute': 'my-new-host'}
port_profile = {'port': {neutronapi.BINDING_PROFILE: migrate_profile}}
update_port_mock = mock.Mock(side_effect=test.TestingException())
admin_client = mock.Mock(update_port=update_port_mock)
self.assertRaises(test.TestingException,
self.api._update_port_with_migration_profile,
instance, port_id, migrate_profile, admin_client)
update_port_mock.assert_called_once_with(port_id, port_profile)
@mock.patch('nova.network.neutronv2.api.compute_utils')
def test_get_preexisting_port_ids(self, mocked_comp_utils):
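        # Only VIFs with preserve_on_delete=True count as preexisting
        # ports.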
mocked_comp_utils.get_nw_info_for_instance.return_value = [model.VIF(
id='1', preserve_on_delete=False), model.VIF(
id='2', preserve_on_delete=True), model.VIF(
id='3', preserve_on_delete=True)]
result = self.api._get_preexisting_port_ids(None)
self.assertEqual(['2', '3'], result, "Invalid preexisting ports")
def _test_unbind_ports_get_client(self, mock_neutron,
mock_has_ext, has_ext=False):
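        # When the port binding extension is present _unbind_ports fetches
        # an admin client via get_client(..., admin=True); otherwise it
        # reuses the client passed in and get_client is not called.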
mock_ctx = mock.Mock(is_admin=False)
mock_has_ext.return_value = has_ext
ports = ["1", "2", "3"]
self.api._unbind_ports(mock_ctx, ports, mock_neutron)
get_client_calls = []
get_client_calls.append(mock.call(mock_ctx)
if not has_ext else
mock.call(mock_ctx, admin=True))
if has_ext:
self.assertEqual(1, mock_neutron.call_count)
mock_neutron.assert_has_calls(get_client_calls, True)
else:
self.assertEqual(0, mock_neutron.call_count)
@mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension')
@mock.patch('nova.network.neutronv2.api.get_client')
def test_unbind_ports_get_client_binding_extension(self,
mock_neutron,
mock_has_ext):
self._test_unbind_ports_get_client(mock_neutron, mock_has_ext, True)
@mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension')
@mock.patch('nova.network.neutronv2.api.get_client')
def test_unbind_ports_get_client(self, mock_neutron, mock_has_ext):
self._test_unbind_ports_get_client(mock_neutron, mock_has_ext)
def _test_unbind_ports(self, mock_neutron, mock_has_ext, has_ext=False):
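        # Unbinding clears device_id/device_owner on every port; with the
        # port binding extension the binding host and profile are cleared
        # as well.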
mock_client = mock.Mock()
mock_update_port = mock.Mock()
mock_client.update_port = mock_update_port
mock_ctx = mock.Mock(is_admin=False)
mock_has_ext.return_value = has_ext
mock_neutron.return_value = mock_client
ports = ["1", "2", "3"]
api = neutronapi.API()
api._unbind_ports(mock_ctx, ports, mock_client)
body = {'port': {'device_id': '', 'device_owner': ''}}
if has_ext:
body['port'][neutronapi.BINDING_HOST_ID] = None
body['port'][neutronapi.BINDING_PROFILE] = {}
update_port_calls = []
for p in ports:
update_port_calls.append(mock.call(p, body))
self.assertEqual(3, mock_update_port.call_count)
mock_update_port.assert_has_calls(update_port_calls)
@mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension')
@mock.patch('nova.network.neutronv2.api.get_client')
def test_unbind_ports_binding_ext(self, mock_neutron, mock_has_ext):
self._test_unbind_ports(mock_neutron, mock_has_ext, True)
@mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension')
@mock.patch('nova.network.neutronv2.api.get_client')
def test_unbind_ports(self, mock_neutron, mock_has_ext):
self._test_unbind_ports(mock_neutron, mock_has_ext, False)
@mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension')
def test_unbind_ports_no_port_ids(self, mock_has_ext):
# Tests that None entries in the ports list are filtered out.
mock_client = mock.Mock()
mock_update_port = mock.Mock()
mock_client.update_port = mock_update_port
mock_ctx = mock.Mock(is_admin=False)
mock_has_ext.return_value = True
api = neutronapi.API()
api._unbind_ports(mock_ctx, [None], mock_client, mock_client)
self.assertFalse(mock_update_port.called)
@mock.patch('nova.network.neutronv2.api.API.get_instance_nw_info')
@mock.patch('nova.network.neutronv2.api.excutils')
@mock.patch('nova.network.neutronv2.api.API._delete_ports')
@mock.patch('nova.network.neutronv2.api.API.'
'_check_external_network_attach')
@mock.patch('nova.network.neutronv2.api.LOG')
@mock.patch('nova.network.neutronv2.api.API._unbind_ports')
@mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension')
@mock.patch('nova.network.neutronv2.api.API.'
'_populate_neutron_extension_values')
@mock.patch('nova.network.neutronv2.api.API._get_available_networks')
@mock.patch('nova.network.neutronv2.api.get_client')
@mock.patch('nova.objects.VirtualInterface')
def test_allocate_for_instance_unbind(self, mock_vif,
mock_ntrn,
mock_avail_nets,
mock_ext_vals,
mock_has_pbe,
mock_unbind,
mock_log,
mock_cena,
mock_del_ports,
mock_exeu,
mock_giwn):
mock_nc = mock.Mock()
def show_port(port_id):
return {'port': {'network_id': 'net-1', 'id': port_id,
'mac_address': 'fakemac',
'tenant_id': 'proj-1'}}
mock_nc.show_port = show_port
mock_ntrn.return_value = mock_nc
def update_port(port_id, body):
if port_id == uuids.fail_port_id:
raise Exception
return {"port": {'mac_address': 'fakemac',
'id': port_id}}
mock_nc.update_port.side_effect = update_port
mock_inst = mock.Mock(project_id="proj-1",
availability_zone='zone-1',
uuid='inst-1')
mock_has_pbe.return_value = False
nw_req = objects.NetworkRequestList(
objects = [objects.NetworkRequest(port_id=uuids.portid_1),
objects.NetworkRequest(port_id=uuids.portid_2),
objects.NetworkRequest(port_id=uuids.fail_port_id)])
mock_avail_nets.return_value = [{'id': 'net-1',
'subnets': ['subnet1']}]
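        # Updating the third requested port (fail_port_id) raises, so the
        # two ports that were already bound should be unbound on cleanup.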
self.api.allocate_for_instance(mock.sentinel.ctx, mock_inst, False,
requested_networks=nw_req)
mock_unbind.assert_called_once_with(mock.sentinel.ctx,
[uuids.portid_1, uuids.portid_2],
mock.ANY,
mock.ANY)
@mock.patch('nova.network.neutronv2.api._filter_hypervisor_macs')
@mock.patch('nova.network.neutronv2.api.API._validate_requested_port_ids')
@mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension')
@mock.patch('nova.network.neutronv2.api.API._get_available_networks')
@mock.patch('nova.network.neutronv2.api.get_client')
def test_allocate_port_for_instance_no_networks(self,
mock_getclient,
mock_avail_nets,
mock_has_pbe,
mock_validate_port_ids,
mock_filter_macs):
"""Tests that if no networks are requested and no networks are
available, we fail with InterfaceAttachFailedNoNetwork.
"""
instance = fake_instance.fake_instance_obj(self.context)
mock_has_pbe.return_value = False
mock_validate_port_ids.return_value = ({}, [])
mock_filter_macs.return_value = None
mock_avail_nets.return_value = []
api = neutronapi.API()
ex = self.assertRaises(exception.InterfaceAttachFailedNoNetwork,
api.allocate_port_for_instance,
self.context, instance, port_id=None)
self.assertEqual(
"No specific network was requested and none are available for "
"project 'fake-project'.", six.text_type(ex))
@mock.patch('nova.objects.network_request.utils')
@mock.patch('nova.network.neutronv2.api.LOG')
@mock.patch('nova.network.neutronv2.api.base_api')
@mock.patch('nova.network.neutronv2.api.API._delete_ports')
@mock.patch('nova.network.neutronv2.api.API._unbind_ports')
@mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids')
@mock.patch('nova.network.neutronv2.api.get_client')
@mock.patch.object(objects.VirtualInterface, 'delete_by_instance_uuid')
def test_preexisting_deallocate_for_instance(self, mock_delete_vifs,
mock_ntrn,
mock_gppids,
mock_unbind,
mock_deletep,
mock_baseapi,
mock_log,
req_utils):
req_utils.is_neutron.return_value = True
mock_inst = mock.Mock(project_id="proj-1",
availability_zone='zone-1',
uuid='inst-1')
mock_nc = mock.Mock()
mock_ntrn.return_value = mock_nc
mock_nc.list_ports.return_value = {'ports': [
{'id': uuids.portid_1}, {'id': uuids.portid_2},
{'id': uuids.portid_3}
]}
nw_req = objects.NetworkRequestList(
objects = [objects.NetworkRequest(network_id='net-1',
address='192.168.0.3',
port_id=uuids.portid_1,
pci_request_id=uuids.pci_1)])
mock_gppids.return_value = [uuids.portid_3]
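        # portid_1 was explicitly requested and portid_3 is preexisting,
        # so both are only unbound, while portid_2 (created by Nova) is
        # deleted.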
self.api.deallocate_for_instance(mock.sentinel.ctx, mock_inst,
requested_networks=nw_req)
mock_unbind.assert_called_once_with(mock.sentinel.ctx,
set([uuids.portid_1,
uuids.portid_3]),
mock.ANY)
mock_deletep.assert_called_once_with(mock_nc,
mock_inst,
set([uuids.portid_2]),
raise_if_fail=True)
mock_delete_vifs.assert_called_once_with(mock.sentinel.ctx, 'inst-1')
@mock.patch('nova.network.neutronv2.api.API.get_instance_nw_info')
@mock.patch('nova.network.neutronv2.api.API._unbind_ports')
@mock.patch('nova.network.neutronv2.api.compute_utils')
@mock.patch('nova.network.neutronv2.api.get_client')
@mock.patch.object(objects.VirtualInterface, 'get_by_uuid')
def test_preexisting_deallocate_port_for_instance(self,
mock_get_vif_by_uuid,
mock_ntrn,
mock_comp_utils,
mock_unbind,
mock_netinfo):
mock_comp_utils.get_nw_info_for_instance.return_value = [model.VIF(
id='1', preserve_on_delete=False), model.VIF(
id='2', preserve_on_delete=True), model.VIF(
id='3', preserve_on_delete=True)]
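        # Port '2' has preserve_on_delete=True, so it is unbound rather
        # than deleted and its VirtualInterface record is destroyed.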
mock_inst = mock.Mock(project_id="proj-1",
availability_zone='zone-1',
uuid='inst-1')
mock_client = mock.Mock()
mock_ntrn.return_value = mock_client
mock_vif = mock.MagicMock(spec=objects.VirtualInterface)
mock_get_vif_by_uuid.return_value = mock_vif
self.api.deallocate_port_for_instance(mock.sentinel.ctx,
mock_inst, '2')
mock_unbind.assert_called_once_with(mock.sentinel.ctx, ['2'],
mock_client)
mock_get_vif_by_uuid.assert_called_once_with(mock.sentinel.ctx, '2')
mock_vif.destroy.assert_called_once_with()
@mock.patch('nova.network.neutronv2.api.API.'
'_check_external_network_attach')
@mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension')
@mock.patch('nova.network.neutronv2.api.API.'
'_populate_neutron_extension_values')
@mock.patch('nova.network.neutronv2.api.API._get_available_networks')
@mock.patch('nova.network.neutronv2.api.get_client')
def test_port_binding_failed_created_port(self, mock_ntrn,
mock_avail_nets,
mock_ext_vals,
mock_has_pbe,
mock_cena):
mock_has_pbe.return_value = True
mock_nc = mock.Mock()
mock_ntrn.return_value = mock_nc
mock_inst = mock.Mock(project_id="proj-1",
availability_zone='zone-1',
uuid=uuids.inst_1)
mock_avail_nets.return_value = [{'id': 'net-1',
'subnets': ['subnet1']}]
mock_nc.create_port.return_value = {'port': {'id': uuids.portid_1}}
port_response = {'port': {'id': uuids.portid_1,
'tenant_id': mock_inst.project_id,
'binding:vif_type': 'binding_failed'}}
mock_nc.update_port.return_value = port_response
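        # The freshly created port comes back with a failed binding, so
        # allocation raises PortBindingFailed and the port is deleted.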
self.assertRaises(exception.PortBindingFailed,
self.api.allocate_for_instance,
mock.sentinel.ctx,
mock_inst, False, None)
mock_nc.delete_port.assert_called_once_with(uuids.portid_1)
@mock.patch('nova.network.neutronv2.api.API._show_port')
@mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension')
@mock.patch('nova.network.neutronv2.api.get_client')
def test_port_binding_failed_with_request(self, mock_ntrn,
mock_has_pbe,
mock_show_port):
mock_has_pbe.return_value = True
mock_nc = mock.Mock()
mock_ntrn.return_value = mock_nc
mock_inst = mock.Mock(project_id="proj-1",
availability_zone='zone-1',
uuid='inst-1')
mock_show_port.return_value = {
'id': uuids.portid_1,
'tenant_id': mock_inst.project_id,
'binding:vif_type': 'binding_failed'}
nw_req = objects.NetworkRequestList(
objects = [objects.NetworkRequest(port_id=uuids.portid_1)])
self.assertRaises(exception.PortBindingFailed,
self.api.allocate_for_instance,
mock.sentinel.ctx, mock_inst, False,
requested_networks=nw_req)
@mock.patch('nova.network.neutronv2.api.get_client')
def test_get_floating_ip_by_address_not_found_neutron_not_found(self,
mock_ntrn):
mock_nc = mock.Mock()
mock_ntrn.return_value = mock_nc
mock_nc.list_floatingips.side_effect = exceptions.NotFound()
address = '172.24.4.227'
self.assertRaises(exception.FloatingIpNotFoundForAddress,
self.api.get_floating_ip_by_address,
self.context, address)
@mock.patch('nova.network.neutronv2.api.get_client')
def test_get_floating_ip_by_address_not_found_neutron_raises_non404(self,
mock_ntrn):
mock_nc = mock.Mock()
mock_ntrn.return_value = mock_nc
mock_nc.list_floatingips.side_effect = exceptions.InternalServerError()
address = '172.24.4.227'
self.assertRaises(exceptions.InternalServerError,
self.api.get_floating_ip_by_address,
self.context, address)
@mock.patch('nova.network.neutronv2.api.get_client')
def test_get_floating_ips_by_project_not_found(self, mock_ntrn):
mock_nc = mock.Mock()
mock_ntrn.return_value = mock_nc
mock_nc.list_floatingips.side_effect = exceptions.NotFound()
fips = self.api.get_floating_ips_by_project(self.context)
self.assertEqual([], fips)
@mock.patch('nova.network.neutronv2.api.get_client')
def test_get_floating_ips_by_project_not_found_legacy(self, mock_ntrn):
# FIXME(danms): Remove this test along with the code path it tests
# when bug 1513879 is fixed.
mock_nc = mock.Mock()
mock_ntrn.return_value = mock_nc
# neutronclient doesn't raise NotFound in this scenario, it raises a
# NeutronClientException with status_code=404
notfound = exceptions.NeutronClientException(status_code=404)
mock_nc.list_floatingips.side_effect = notfound
fips = self.api.get_floating_ips_by_project(self.context)
self.assertEqual([], fips)
@mock.patch('nova.network.neutronv2.api.get_client')
def test_get_floating_ips_by_project_raises_non404(self, mock_ntrn):
mock_nc = mock.Mock()
mock_ntrn.return_value = mock_nc
mock_nc.list_floatingips.side_effect = exceptions.InternalServerError()
self.assertRaises(exceptions.InternalServerError,
self.api.get_floating_ips_by_project,
self.context)
def test_unbind_ports_reset_dns_name(self):
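        # With the DNS integration extension enabled, unbinding also
        # resets the port's dns_name.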
neutron = mock.Mock()
port_client = mock.Mock()
with mock.patch.object(self.api, '_has_port_binding_extension',
return_value=False):
self.api.extensions = [constants.DNS_INTEGRATION]
ports = [uuids.port_id]
self.api._unbind_ports(self.context, ports, neutron, port_client)
port_req_body = {'port': {'device_id': '',
'device_owner': '',
'dns_name': ''}}
port_client.update_port.assert_called_once_with(
uuids.port_id, port_req_body)
def test_make_floating_ip_obj(self):
self._test_make_floating_ip_obj()
def test_make_floating_ip_obj_pool_id(self):
self._test_make_floating_ip_obj(set_pool_name=False)
def test_make_floating_ip_obj_no_fixed_ip_address(self):
self._test_make_floating_ip_obj(set_fixed_ip=False)
def test_make_floating_ip_obj_no_port_id(self):
self._test_make_floating_ip_obj(set_port=False)
def _test_make_floating_ip_obj(self, set_port=True, set_fixed_ip=True,
set_pool_name=True):
net_id = '6cd58996-001a-11e6-86aa-5e5517507c66'
float_id = 'ea474936-0016-11e6-86aa-5e5517507c66'
tenant_id = '310b1db6-0017-11e6-86aa-5e5517507c66'
port_id = '40cfc710-0017-11e6-86aa-5e5517507c66' if set_port else None
device_id = '6b892334-0017-11e6-86aa-5e5517507c66'
floating_ip_address = '10.0.0.1'
fixed_ip_address = '192.168.100.100' if set_fixed_ip else None
pool_name = 'my_pool' if set_pool_name else None
pool_id = 'd7f7150e-001b-11e6-86aa-5e5517507c66'
fip = {'id': float_id,
'floating_ip_address': floating_ip_address,
'tenant_id': tenant_id,
'port_id': port_id,
'fixed_ip_address': fixed_ip_address,
'floating_network_id': net_id
}
pool_dict = {net_id: {'name': pool_name, 'id': pool_id}}
port_dict = {port_id: {'device_id': device_id}}
actual_obj = self.api._make_floating_ip_obj(self.context, fip,
pool_dict, port_dict)
expected_pool = pool_name if set_pool_name else pool_id
if set_fixed_ip:
if set_port:
expected_fixed = objects.FixedIP(address=fixed_ip_address,
instance_uuid=device_id)
else:
expected_fixed = objects.FixedIP(address=fixed_ip_address)
else:
expected_fixed = None
if set_port:
expected_instance = objects.Instance(context=context,
uuid=device_id)
else:
expected_instance = None
expected_floating = objects.floating_ip.NeutronFloatingIP(
id=float_id, address=floating_ip_address, pool=expected_pool,
project_id=tenant_id, fixed_ip_id=port_id,
fixed_ip=expected_fixed, instance=expected_instance)
self.assertEqual(expected_floating.obj_to_primitive(),
actual_obj.obj_to_primitive())
@mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension',
return_value=True)
@mock.patch('nova.network.neutronv2.api.API.'
'_populate_neutron_extension_values')
@mock.patch('nova.network.neutronv2.api.API._update_port',
# called twice, fails on the 2nd call and triggers the cleanup
side_effect=(mock.MagicMock(),
exception.PortInUse(
port_id=uuids.created_port_id)))
@mock.patch.object(objects.VirtualInterface, 'create')
@mock.patch.object(objects.VirtualInterface, 'destroy')
@mock.patch('nova.network.neutronv2.api.API._unbind_ports')
@mock.patch('nova.network.neutronv2.api.API._delete_ports')
def test_update_ports_for_instance_fails_rollback_ports_and_vifs(self,
mock_delete_ports,
mock_unbind_ports,
mock_vif_destroy,
mock_vif_create,
mock_update_port,
mock_populate_ext_values,
mock_has_port_binding_extension):
"""Makes sure we rollback ports and VIFs if we fail updating ports"""
instance = fake_instance.fake_instance_obj(self.context)
ntrn = mock.Mock(spec='neutronclient.v2_0.client.Client')
# we have two requests, one with a preexisting port and one where nova
# created the port (on the same network)
requests_and_created_ports = [
(objects.NetworkRequest(network_id=uuids.network_id,
port_id=uuids.preexisting_port_id),
None), # None means Nova didn't create this port
(objects.NetworkRequest(network_id=uuids.network_id,
port_id=uuids.created_port_id),
uuids.created_port_id),
]
network = {'id': uuids.network_id}
nets = {uuids.network_id: network}
self.assertRaises(exception.PortInUse,
self.api._update_ports_for_instance,
self.context, instance, ntrn, ntrn,
requests_and_created_ports, nets, bind_host_id=None,
dhcp_opts=None, available_macs=None)
# assert the calls
mock_update_port.assert_has_calls([
mock.call(ntrn, instance, uuids.preexisting_port_id, mock.ANY),
mock.call(ntrn, instance, uuids.created_port_id, mock.ANY)
])
# we only got to create one vif since the 2nd _update_port call fails
mock_vif_create.assert_called_once_with()
# we only destroy one vif since we only created one
mock_vif_destroy.assert_called_once_with()
# we unbind the pre-existing port
mock_unbind_ports.assert_called_once_with(
self.context, [uuids.preexisting_port_id], ntrn, ntrn)
# we delete the created port
mock_delete_ports.assert_called_once_with(
ntrn, instance, [uuids.created_port_id])
@mock.patch('nova.network.neutronv2.api.API._get_floating_ip_by_address',
return_value={"port_id": "1"})
@mock.patch('nova.network.neutronv2.api.API._show_port',
side_effect=exception.PortNotFound(port_id='1'))
def test_get_instance_id_by_floating_address_port_not_found(self,
mock_show,
mock_get):
api = neutronapi.API()
fip = api.get_instance_id_by_floating_address(self.context,
'172.24.4.227')
self.assertIsNone(fip)
@mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension',
return_value=False)
@mock.patch.object(neutronapi.LOG, 'exception')
def test_unbind_ports_portnotfound(self, mock_log, mock_ext):
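        # A 404 from Neutron while unbinding is ignored and not logged
        # as an exception.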
api = neutronapi.API()
neutron_client = mock.Mock()
neutron_client.update_port = mock.Mock(
side_effect=exceptions.PortNotFoundClient)
api._unbind_ports(self.context, [uuids.port_id], neutron_client)
neutron_client.update_port.assert_called_once_with(
uuids.port_id, {'port': {'device_id': '', 'device_owner': ''}})
mock_log.assert_not_called()
@mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension',
return_value=False)
@mock.patch.object(neutronapi.LOG, 'exception')
def test_unbind_ports_unexpected_error(self, mock_log, mock_ext):
api = neutronapi.API()
neutron_client = mock.Mock()
neutron_client.update_port = mock.Mock(
side_effect=test.TestingException)
api._unbind_ports(self.context, [uuids.port_id], neutron_client)
neutron_client.update_port.assert_called_once_with(
uuids.port_id, {'port': {'device_id': '', 'device_owner': ''}})
self.assertTrue(mock_log.called)
@mock.patch.object(neutronapi, 'get_client')
def test_create_pci_requests_for_sriov_ports_no_allocate(self, getclient):
"""Tests that create_pci_requests_for_sriov_ports is a noop if
networks are specifically requested to not be allocated.
"""
requested_networks = objects.NetworkRequestList(objects=[
objects.NetworkRequest(network_id=net_req_obj.NETWORK_ID_NONE)
])
pci_requests = objects.InstancePCIRequests()
api = neutronapi.API()
api.create_pci_requests_for_sriov_ports(
self.context, pci_requests, requested_networks)
self.assertFalse(getclient.called)
class TestNeutronv2ModuleMethods(test.NoDBTestCase):
def test_gather_port_ids_and_networks_wrong_params(self):
api = neutronapi.API()
# Test with networks not None and port_ids is None
self.assertRaises(exception.NovaException,
api._gather_port_ids_and_networks,
'fake_context', 'fake_instance',
[{'network': {'name': 'foo'}}], None)
# Test with networks is None and port_ids not None
self.assertRaises(exception.NovaException,
api._gather_port_ids_and_networks,
'fake_context', 'fake_instance',
None, ['list', 'of', 'port_ids'])
def test_ensure_requested_network_ordering_no_preference_ids(self):
l = [1, 2, 3]
neutronapi._ensure_requested_network_ordering(
lambda x: x,
l,
None)
def test_ensure_requested_network_ordering_no_preference_hashes(self):
l = [{'id': 3}, {'id': 1}, {'id': 2}]
neutronapi._ensure_requested_network_ordering(
lambda x: x['id'],
l,
None)
self.assertEqual(l, [{'id': 3}, {'id': 1}, {'id': 2}])
def test_ensure_requested_network_ordering_with_preference(self):
l = [{'id': 3}, {'id': 1}, {'id': 2}]
neutronapi._ensure_requested_network_ordering(
lambda x: x['id'],
l,
[1, 2, 3])
self.assertEqual(l, [{'id': 1}, {'id': 2}, {'id': 3}])
class TestNeutronv2Portbinding(TestNeutronv2Base):
def test_allocate_for_instance_portbinding(self):
self._allocate_for_instance(1, portbinding=True,
bind_host_id=self.instance.get('host'))
def test_populate_neutron_extension_values_binding(self):
api = neutronapi.API()
neutronapi.get_client(mox.IgnoreArg()).AndReturn(
self.moxed_client)
self.moxed_client.list_extensions().AndReturn(
{'extensions': [{'name': constants.PORTBINDING_EXT}]})
self.mox.ReplayAll()
host_id = 'my_host_id'
instance = {'host': host_id}
port_req_body = {'port': {}}
api._populate_neutron_extension_values(self.context, instance,
None, port_req_body,
bind_host_id=host_id)
self.assertEqual(host_id,
port_req_body['port'][neutronapi.BINDING_HOST_ID])
self.assertFalse(port_req_body['port'].get(neutronapi.BINDING_PROFILE))
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
def test_populate_neutron_extension_values_binding_sriov(self,
mock_get_instance_pci_devs,
mock_get_pci_device_devspec):
api = neutronapi.API()
host_id = 'my_host_id'
instance = {'host': host_id}
port_req_body = {'port': {}}
pci_req_id = 'my_req_id'
pci_dev = {'vendor_id': '1377',
'product_id': '0047',
'address': '0000:0a:00.1',
}
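        # Lightweight stand-in for objects.PciDevice exposing only the fields
        # the binding profile code reads.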
PciDevice = collections.namedtuple('PciDevice',
['vendor_id', 'product_id', 'address'])
mydev = PciDevice(**pci_dev)
profile = {'pci_vendor_info': '1377:0047',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1',
}
mock_get_instance_pci_devs.return_value = [mydev]
devspec = mock.Mock()
devspec.get_tags.return_value = {'physical_network': 'phynet1'}
mock_get_pci_device_devspec.return_value = devspec
api._populate_neutron_binding_profile(instance,
pci_req_id, port_req_body)
self.assertEqual(profile,
port_req_body['port'][neutronapi.BINDING_PROFILE])
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
def test_populate_neutron_extension_values_binding_sriov_fail(
self, mock_get_instance_pci_devs, mock_get_pci_device_devspec):
api = neutronapi.API()
host_id = 'my_host_id'
instance = {'host': host_id}
port_req_body = {'port': {}}
pci_req_id = 'my_req_id'
pci_objs = [objects.PciDevice(vendor_id='1377',
product_id='0047',
address='0000:0a:00.1',
compute_node_id=1,
request_id='1234567890')]
mock_get_instance_pci_devs.return_value = pci_objs
mock_get_pci_device_devspec.return_value = None
self.assertRaises(
exception.PciDeviceNotFound, api._populate_neutron_binding_profile,
instance, pci_req_id, port_req_body)
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
def test_pci_parse_whitelist_called_once(self,
mock_get_instance_pci_devs):
white_list = [
'{"address":"0000:0a:00.1","physical_network":"default"}']
cfg.CONF.set_override('passthrough_whitelist', white_list, 'pci')
api = neutronapi.API()
host_id = 'my_host_id'
instance = {'host': host_id}
pci_req_id = 'my_req_id'
port_req_body = {'port': {}}
pci_dev = {'vendor_id': '1377',
'product_id': '0047',
'address': '0000:0a:00.1',
}
whitelist = pci_whitelist.Whitelist(CONF.pci.passthrough_whitelist)
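        # The whitelist should be parsed only once (before the patch below);
        # populating the binding profile twice must not re-parse it.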
with mock.patch.object(pci_whitelist.Whitelist,
'_parse_white_list_from_config',
wraps=whitelist._parse_white_list_from_config
) as mock_parse_whitelist:
for i in range(2):
mydev = objects.PciDevice.create(None, pci_dev)
mock_get_instance_pci_devs.return_value = [mydev]
api._populate_neutron_binding_profile(instance,
pci_req_id, port_req_body)
self.assertEqual(0, mock_parse_whitelist.call_count)
def _populate_pci_mac_address_fakes(self):
instance = fake_instance.fake_instance_obj(self.context)
pci_dev = {'vendor_id': '1377',
'product_id': '0047',
'address': '0000:0a:00.1',
'dev_type': 'type-PF'}
pf = objects.PciDevice()
vf = objects.PciDevice()
pf.update_device(pci_dev)
pci_dev['dev_type'] = 'type-VF'
vf.update_device(pci_dev)
return instance, pf, vf
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
@mock.patch.object(pci_utils, 'get_mac_by_pci_address')
def test_populate_pci_mac_address_pf(self, mock_get_mac_by_pci_address,
mock_get_instance_pci_devs):
api = neutronapi.API()
instance, pf, vf = self._populate_pci_mac_address_fakes()
port_req_body = {'port': {}}
mock_get_instance_pci_devs.return_value = [pf]
mock_get_mac_by_pci_address.return_value = 'fake-mac-address'
expected_port_req_body = {'port': {'mac_address': 'fake-mac-address'}}
req = port_req_body.copy()
api._populate_pci_mac_address(instance, 0, req)
self.assertEqual(expected_port_req_body, req)
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
@mock.patch.object(pci_utils, 'get_mac_by_pci_address')
def test_populate_pci_mac_address_vf(self, mock_get_mac_by_pci_address,
mock_get_instance_pci_devs):
api = neutronapi.API()
instance, pf, vf = self._populate_pci_mac_address_fakes()
port_req_body = {'port': {}}
mock_get_instance_pci_devs.return_value = [vf]
req = port_req_body.copy()
api._populate_pci_mac_address(instance, 42, port_req_body)
self.assertEqual(port_req_body, req)
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
@mock.patch.object(pci_utils, 'get_mac_by_pci_address')
def test_populate_pci_mac_address_vf_fail(self,
mock_get_mac_by_pci_address,
mock_get_instance_pci_devs):
api = neutronapi.API()
instance, pf, vf = self._populate_pci_mac_address_fakes()
port_req_body = {'port': {}}
mock_get_instance_pci_devs.return_value = [vf]
mock_get_mac_by_pci_address.side_effect = (
exception.PciDeviceNotFoundById)
req = port_req_body.copy()
api._populate_pci_mac_address(instance, 42, port_req_body)
self.assertEqual(port_req_body, req)
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
def test_populate_pci_mac_address_no_device(self,
mock_get_instance_pci_devs):
api = neutronapi.API()
instance, pf, vf = self._populate_pci_mac_address_fakes()
port_req_body = {'port': {}}
mock_get_instance_pci_devs.return_value = []
req = port_req_body.copy()
api._populate_pci_mac_address(instance, 42, port_req_body)
self.assertEqual(port_req_body, req)
def _test_update_port_binding_false(self, func_name, *args):
api = neutronapi.API()
func = getattr(api, func_name)
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(mox.IgnoreArg(),
refresh_cache=True).AndReturn(False)
self.mox.ReplayAll()
func(*args)
def _test_update_port_binding_true(self, expected_bind_host,
func_name, *args):
api = neutronapi.API()
func = getattr(api, func_name)
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(mox.IgnoreArg(),
refresh_cache=True).AndReturn(True)
neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
search_opts = {'device_id': self.instance['uuid'],
'tenant_id': self.instance['project_id']}
ports = {'ports': [{'id': 'test1'}]}
self.moxed_client.list_ports(**search_opts).AndReturn(ports)
port_req_body = {'port':
{neutronapi.BINDING_HOST_ID: expected_bind_host}}
self.moxed_client.update_port('test1',
port_req_body).AndReturn(None)
self.mox.ReplayAll()
func(*args)
def _test_update_port_true_exception(self, expected_bind_host,
func_name, *args):
api = neutronapi.API()
func = getattr(api, func_name)
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(mox.IgnoreArg(),
refresh_cache=True).AndReturn(True)
neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
search_opts = {'device_id': self.instance['uuid'],
'tenant_id': self.instance['project_id']}
ports = {'ports': [{'id': 'test1'}]}
self.moxed_client.list_ports(**search_opts).AndReturn(ports)
port_req_body = {'port':
{neutronapi.BINDING_HOST_ID: expected_bind_host}}
self.moxed_client.update_port('test1',
port_req_body).AndRaise(
Exception("fail to update port"))
self.mox.ReplayAll()
self.assertRaises(NEUTRON_CLIENT_EXCEPTION,
func,
*args)
def test_migrate_instance_finish_binding_false(self):
instance = fake_instance.fake_instance_obj(self.context)
self._test_update_port_binding_false('migrate_instance_finish',
self.context, instance,
{'dest_compute': uuids.fake})
def test_migrate_instance_finish_binding_true(self):
migration = {'source_compute': self.instance.get('host'),
'dest_compute': 'dest_host'}
instance = self._fake_instance_object(self.instance)
self._test_update_port_binding_true('dest_host',
'migrate_instance_finish',
self.context,
instance,
migration)
def test_migrate_instance_finish_binding_true_exception(self):
migration = {'source_compute': self.instance.get('host'),
'dest_compute': 'dest_host'}
instance = self._fake_instance_object(self.instance)
self._test_update_port_true_exception('dest_host',
'migrate_instance_finish',
self.context,
instance,
migration)
def test_setup_instance_network_on_host_false(self):
instance = fake_instance.fake_instance_obj(self.context)
self._test_update_port_binding_false(
'setup_instance_network_on_host', self.context, instance,
'fake_host')
def test_setup_instance_network_on_host_true(self):
instance = self._fake_instance_object(self.instance)
self._test_update_port_binding_true('fake_host',
'setup_instance_network_on_host',
self.context,
instance,
'fake_host')
def test_setup_instance_network_on_host_exception(self):
instance = self._fake_instance_object(self.instance)
self._test_update_port_true_exception(
'fake_host', 'setup_instance_network_on_host',
self.context, instance, 'fake_host')
def test_associate_not_implemented(self):
api = neutronapi.API()
self.assertRaises(NotImplementedError,
api.associate,
self.context, 'id')
class TestNeutronv2ExtraDhcpOpts(TestNeutronv2Base):
def setUp(self):
super(TestNeutronv2ExtraDhcpOpts, self).setUp()
neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
def test_allocate_for_instance_1_with_extra_dhcp_opts_turned_off(self):
self._allocate_for_instance(1, extra_dhcp_opts=False)
def test_allocate_for_instance_extradhcpopts(self):
dhcp_opts = [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'}]
self._allocate_for_instance(1, dhcp_options=dhcp_opts)
def test_allocate_for_instance_extradhcpopts_update(self):
dhcp_opts = [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'}]
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=uuids.portid_1)])
self._allocate_for_instance(net_idx=1,
requested_networks=requested_networks,
dhcp_options=dhcp_opts)
class TestAllocateForInstance(test.NoDBTestCase):
def setUp(self):
super(TestAllocateForInstance, self).setUp()
self.context = context.RequestContext('userid', uuids.my_tenant)
self.instance = objects.Instance(uuid=uuids.instance,
project_id=uuids.tenant_id, hostname="host")
def test_allocate_for_instance_raises_invalid_input(self):
api = neutronapi.API()
self.instance.project_id = ""
self.assertRaises(exception.InvalidInput,
api.allocate_for_instance, self.context, self.instance,
False, None)
@mock.patch.object(neutronapi.API, 'get_instance_nw_info')
@mock.patch.object(neutronapi.API, '_update_ports_for_instance')
@mock.patch.object(neutronapi, '_filter_hypervisor_macs')
@mock.patch.object(neutronapi.API, '_create_ports_for_instance')
@mock.patch.object(neutronapi.API, '_process_security_groups')
@mock.patch.object(neutronapi.API, '_clean_security_groups')
@mock.patch.object(neutronapi.API, '_validate_requested_network_ids')
@mock.patch.object(neutronapi.API, '_validate_requested_port_ids')
@mock.patch.object(neutronapi, 'get_client')
def test_allocate_for_instance_minimal_args(self, mock_get_client,
mock_validate_ports, mock_validate_nets, mock_clean_sg, mock_sg,
mock_create_ports, mock_filter_macs, mock_update_ports, mock_gni):
api = neutronapi.API()
mock_get_client.side_effect = ["user", "admin"]
mock_validate_ports.return_value = ("ports", "ordered_nets")
mock_validate_nets.return_value = "nets"
mock_clean_sg.return_value = "security_groups"
mock_sg.return_value = "security_group_ids"
mock_create_ports.return_value = "requests_and_created_ports"
mock_filter_macs.return_value = "available_macs"
mock_update_ports.return_value = (
"nets", "ports", [uuids.preexist], [uuids.created])
mock_gni.return_value = [
{"id": uuids.created}, {"id": uuids.preexist}, {"id": "foo"}
]
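        # the "foo" vif is neither created nor pre-existing, so it should be
        # filtered out of the returned network info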
result = api.allocate_for_instance(self.context, self.instance,
False, None)
# TODO(johngarbutt) we need to replace the old mox coverage
# with new tests that can build on this very poor test
self.assertEqual(len(result), 2)
self.assertEqual(result[0], {"id": uuids.created})
self.assertEqual(result[1], {"id": uuids.preexist})
def test_populate_mac_address_skip_if_none(self):
api = neutronapi.API()
port_req_body = {}
api._populate_mac_address(None, port_req_body, None)
self.assertEqual({}, port_req_body)
def test_populate_mac_address_raise_if_empty(self):
api = neutronapi.API()
port_req_body = {}
instance = objects.Instance(uuid=uuids.instance)
self.assertRaises(exception.PortNotFree,
api._populate_mac_address,
instance, port_req_body, [])
def test_populate_mac_address_adds_last(self):
api = neutronapi.API()
port_req_body = {'port': {"foo": "bar"}}
api._populate_mac_address(None, port_req_body, ["a", "b"])
expected_port = {"foo": "bar", "mac_address": "b"}
self.assertEqual(expected_port, port_req_body["port"])
def test_ensure_no_port_binding_failure_raises(self):
port = {
'id': uuids.port_id,
'binding:vif_type': model.VIF_TYPE_BINDING_FAILED
}
self.assertRaises(exception.PortBindingFailed,
neutronapi._ensure_no_port_binding_failure, port)
def test_ensure_no_port_binding_failure_passes_if_no_binding(self):
port = {'id': uuids.port_id}
neutronapi._ensure_no_port_binding_failure(port)
def test_filter_hypervisor_macs(self):
ports = {
'id1': {'id': 'id1', 'mac_address': 'a'},
'id2': {'id': 'id2', 'mac_address': 'c'}}
macs = set(['a', 'b'])
instance = objects.Instance(uuid=uuids.instance)
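        # 'a' is already claimed by a requested port, so only 'b' remains free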
result = neutronapi._filter_hypervisor_macs(instance, ports, macs)
result = list(result)
self.assertEqual(['b'], result)
def test_validate_requested_port_ids_no_ports(self):
api = neutronapi.API()
mock_client = mock.Mock()
network_list = [objects.NetworkRequest(network_id='net-1')]
requested_networks = objects.NetworkRequestList(objects=network_list)
ports, ordered_networks = api._validate_requested_port_ids(
self.context, self.instance, mock_client, requested_networks)
self.assertEqual({}, ports)
self.assertEqual(network_list, ordered_networks)
def test_validate_requested_port_ids_success(self):
api = neutronapi.API()
mock_client = mock.Mock()
requested_networks = objects.NetworkRequestList(objects=[
objects.NetworkRequest(network_id='net-1'),
objects.NetworkRequest(port_id=uuids.port_id)])
port = {
"id": uuids.port_id,
"tenant_id": uuids.tenant_id,
"network_id": 'net-2'
}
mock_client.show_port.return_value = {"port": port}
ports, ordered_networks = api._validate_requested_port_ids(
self.context, self.instance, mock_client, requested_networks)
mock_client.show_port.assert_called_once_with(uuids.port_id)
self.assertEqual({uuids.port_id: port}, ports)
self.assertEqual(2, len(ordered_networks))
self.assertEqual(requested_networks[0], ordered_networks[0])
self.assertEqual('net-2', ordered_networks[1].network_id)
def _assert_validate_requested_port_ids_raises(self, exception, extras):
api = neutronapi.API()
mock_client = mock.Mock()
requested_networks = objects.NetworkRequestList(objects=[
objects.NetworkRequest(port_id=uuids.port_id)])
port = {
"id": uuids.port_id,
"tenant_id": uuids.tenant_id,
"network_id": 'net-2'
}
port.update(extras)
mock_client.show_port.return_value = {"port": port}
self.assertRaises(exception, api._validate_requested_port_ids,
self.context, self.instance, mock_client, requested_networks)
def test_validate_requested_port_ids_raise_not_usable(self):
self._assert_validate_requested_port_ids_raises(
exception.PortNotUsable,
{"tenant_id": "foo"})
def test_validate_requested_port_ids_raise_in_use(self):
self._assert_validate_requested_port_ids_raises(
exception.PortInUse,
{"device_id": "foo"})
def test_validate_requested_port_ids_raise_dns(self):
self._assert_validate_requested_port_ids_raises(
exception.PortNotUsableDNS,
{"dns_name": "foo"})
def test_validate_requested_port_ids_raise_binding(self):
self._assert_validate_requested_port_ids_raises(
exception.PortBindingFailed,
{"binding:vif_type": model.VIF_TYPE_BINDING_FAILED})
def test_validate_requested_network_ids_success_auto_net(self):
requested_networks = []
ordered_networks = []
api = neutronapi.API()
mock_client = mock.Mock()
nets = [{'id': "net1"}]
mock_client.list_networks.side_effect = [{}, {"networks": nets}]
result = api._validate_requested_network_ids(self.context,
self.instance, mock_client, requested_networks, ordered_networks)
self.assertEqual(nets, list(result.values()))
expected_call_list = [
mock.call(shared=False, tenant_id=uuids.tenant_id),
mock.call(shared=True)
]
self.assertEqual(expected_call_list,
mock_client.list_networks.call_args_list)
def test_validate_requested_network_ids_success_found_net(self):
ordered_networks = [objects.NetworkRequest(network_id="net1")]
requested_networks = objects.NetworkRequestList(ordered_networks)
api = neutronapi.API()
mock_client = mock.Mock()
nets = [{'id': "net1"}]
mock_client.list_networks.return_value = {"networks": nets}
result = api._validate_requested_network_ids(self.context,
self.instance, mock_client, requested_networks, ordered_networks)
self.assertEqual(nets, list(result.values()))
mock_client.list_networks.assert_called_once_with(id=['net1'])
def test_validate_requested_network_ids_success_no_nets(self):
requested_networks = []
ordered_networks = []
api = neutronapi.API()
mock_client = mock.Mock()
mock_client.list_networks.side_effect = [{}, {"networks": []}]
result = api._validate_requested_network_ids(self.context,
self.instance, mock_client, requested_networks, ordered_networks)
self.assertEqual({}, result)
expected_call_list = [
mock.call(shared=False, tenant_id=uuids.tenant_id),
mock.call(shared=True)
]
self.assertEqual(expected_call_list,
mock_client.list_networks.call_args_list)
def _assert_validate_requested_network_ids_raises(self, exception, nets,
requested_networks=None):
ordered_networks = []
if requested_networks is None:
requested_networks = objects.NetworkRequestList()
api = neutronapi.API()
mock_client = mock.Mock()
mock_client.list_networks.side_effect = [{}, {"networks": nets}]
self.assertRaises(exception, api._validate_requested_network_ids,
self.context, self.instance, mock_client,
requested_networks, ordered_networks)
def test_validate_requested_network_ids_raises_forbidden(self):
self._assert_validate_requested_network_ids_raises(
exception.ExternalNetworkAttachForbidden,
[{'id': "net1", 'router:external': True, 'shared': False}])
def test_validate_requested_network_ids_raises_net_not_found(self):
requested_networks = objects.NetworkRequestList(objects=[
objects.NetworkRequest(network_id="1")])
self._assert_validate_requested_network_ids_raises(
exception.NetworkNotFound,
[], requested_networks=requested_networks)
def test_validate_requested_network_ids_raises_too_many_nets(self):
self._assert_validate_requested_network_ids_raises(
exception.NetworkAmbiguous,
[{'id': "net1"}, {'id': "net2"}])
def test_create_ports_for_instance_no_security(self):
api = neutronapi.API()
ordered_networks = [objects.NetworkRequest(network_id=uuids.net)]
nets = {uuids.net: {"id": uuids.net, "port_security_enabled": False}}
mock_client = mock.Mock()
mock_client.create_port.return_value = {"port": {"id": uuids.port}}
result = api._create_ports_for_instance(self.context, self.instance,
ordered_networks, nets, mock_client, None)
self.assertEqual([(ordered_networks[0], uuids.port)], result)
mock_client.create_port.assert_called_once_with(
{'port': {
'network_id': uuids.net, 'tenant_id': uuids.tenant_id,
'admin_state_up': True}})
def test_create_ports_for_instance_with_security_groups(self):
api = neutronapi.API()
ordered_networks = [objects.NetworkRequest(network_id=uuids.net)]
nets = {uuids.net: {"id": uuids.net, "subnets": [uuids.subnet]}}
mock_client = mock.Mock()
mock_client.create_port.return_value = {"port": {"id": uuids.port}}
security_groups = [uuids.sg]
result = api._create_ports_for_instance(self.context, self.instance,
ordered_networks, nets, mock_client, security_groups)
self.assertEqual([(ordered_networks[0], uuids.port)], result)
mock_client.create_port.assert_called_once_with(
{'port': {
'network_id': uuids.net, 'tenant_id': uuids.tenant_id,
'admin_state_up': True, 'security_groups': security_groups}})
def test_create_ports_for_instance_with_cleanup_after_pc_failure(self):
api = neutronapi.API()
ordered_networks = [
objects.NetworkRequest(network_id=uuids.net1),
objects.NetworkRequest(network_id=uuids.net2),
objects.NetworkRequest(network_id=uuids.net3),
objects.NetworkRequest(network_id=uuids.net4)
]
nets = {
uuids.net1: {"id": uuids.net1, "port_security_enabled": False},
uuids.net2: {"id": uuids.net2, "port_security_enabled": False},
uuids.net3: {"id": uuids.net3, "port_security_enabled": False},
uuids.net4: {"id": uuids.net4, "port_security_enabled": False}
}
error = exception.PortLimitExceeded()
mock_client = mock.Mock()
mock_client.create_port.side_effect = [
{"port": {"id": uuids.port1}},
{"port": {"id": uuids.port2}},
error
]
self.assertRaises(exception.PortLimitExceeded,
api._create_ports_for_instance,
self.context, self.instance, ordered_networks, nets,
mock_client, None)
self.assertEqual([mock.call(uuids.port1), mock.call(uuids.port2)],
mock_client.delete_port.call_args_list)
self.assertEqual(3, mock_client.create_port.call_count)
def test_create_ports_for_instance_with_cleanup_after_sg_failure(self):
api = neutronapi.API()
ordered_networks = [
objects.NetworkRequest(network_id=uuids.net1),
objects.NetworkRequest(network_id=uuids.net2),
objects.NetworkRequest(network_id=uuids.net3)
]
nets = {
uuids.net1: {"id": uuids.net1, "port_security_enabled": False},
uuids.net2: {"id": uuids.net2, "port_security_enabled": False},
uuids.net3: {"id": uuids.net3, "port_security_enabled": True}
}
mock_client = mock.Mock()
mock_client.create_port.side_effect = [
{"port": {"id": uuids.port1}},
{"port": {"id": uuids.port2}}
]
self.assertRaises(exception.SecurityGroupCannotBeApplied,
api._create_ports_for_instance,
self.context, self.instance, ordered_networks, nets,
mock_client, None)
self.assertEqual([mock.call(uuids.port1), mock.call(uuids.port2)],
mock_client.delete_port.call_args_list)
self.assertEqual(2, mock_client.create_port.call_count)
def test_create_ports_for_instance_raises_subnets_missing(self):
api = neutronapi.API()
ordered_networks = [objects.NetworkRequest(network_id=uuids.net)]
nets = {uuids.net: {"id": uuids.net, "port_security_enabled": True}}
mock_client = mock.Mock()
self.assertRaises(exception.SecurityGroupCannotBeApplied,
api._create_ports_for_instance,
self.context, self.instance,
ordered_networks, nets, mock_client, None)
self.assertFalse(mock_client.create_port.called)
def test_create_ports_for_instance_raises_security_off(self):
api = neutronapi.API()
ordered_networks = [objects.NetworkRequest(network_id=uuids.net)]
nets = {uuids.net: {
"id": uuids.net,
"port_security_enabled": False}}
mock_client = mock.Mock()
self.assertRaises(exception.SecurityGroupCannotBeApplied,
api._create_ports_for_instance,
self.context, self.instance,
ordered_networks, nets, mock_client, [uuids.sg])
self.assertFalse(mock_client.create_port.called)
@mock.patch.object(objects.VirtualInterface, "create")
def test_update_ports_for_instance_with_portbinding(self, mock_create):
api = neutronapi.API()
self.instance.availability_zone = "test_az"
mock_neutron = mock.Mock()
mock_admin = mock.Mock()
requests_and_created_ports = [
(objects.NetworkRequest(
network_id=uuids.net1), uuids.port1),
(objects.NetworkRequest(
network_id=uuids.net2, port_id=uuids.port2), None)]
net1 = {"id": uuids.net1}
net2 = {"id": uuids.net2}
nets = {uuids.net1: net1, uuids.net2: net2}
bind_host_id = "bind_host_id"
dhcp_opts = [{'opt_name': 'tftp-server', 'opt_value': '1.2.3.4'}]
available_macs = ["mac1", "mac2"]
mock_neutron.list_extensions.return_value = {"extensions": [
{"name": "asdf"}, {"name": constants.PORTBINDING_EXT}]}
port1 = {"port": {"id": uuids.port1, "mac_address": "mac1r"}}
port2 = {"port": {"id": uuids.port2, "mac_address": "mac2r"}}
mock_admin.update_port.side_effect = [port1, port2]
ordered_nets, ordered_ports, preexisting_port_ids, \
created_port_ids = api._update_ports_for_instance(
self.context, self.instance,
mock_neutron, mock_admin, requests_and_created_ports, nets,
bind_host_id, dhcp_opts, available_macs)
# TODO(johngarbutt) need to build on this test so we can replace
# all the mox based tests
self.assertEqual([net1, net2], ordered_nets, "ordered_nets")
self.assertEqual([uuids.port1, uuids.port2], ordered_ports,
"ordered_ports")
self.assertEqual([uuids.port2], preexisting_port_ids, "preexisting")
self.assertEqual([uuids.port1], created_port_ids, "created")
mock_admin.update_port.assert_called_with(uuids.port2,
{'port': {
'device_owner': 'compute:test_az',
'mac_address': 'mac1',
neutronapi.BINDING_HOST_ID: bind_host_id,
'extra_dhcp_opts': dhcp_opts,
'device_id': self.instance.uuid}})
class TestNeutronv2NeutronHostnameDNS(TestNeutronv2Base):
def setUp(self):
super(TestNeutronv2NeutronHostnameDNS, self).setUp()
neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
def test_allocate_for_instance_create_port(self):
# The port's dns_name attribute should be set by the port create
# request in allocate_for_instance
self._allocate_for_instance(1, dns_extension=True)
def test_allocate_for_instance_with_requested_port(self):
# The port's dns_name attribute should be set by the port update
# request in allocate_for_instance
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=uuids.portid_1)])
self._allocate_for_instance(net_idx=1, dns_extension=True,
requested_networks=requested_networks)
def test_allocate_for_instance_port_dns_name_preset_equal_hostname(self):
# The port's dns_name attribute should be set by the port update
# request in allocate_for_instance. The port's dns_name was preset by
# the user with a value equal to the instance's hostname
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=uuids.portid_1)])
self._allocate_for_instance(net_idx=1, dns_extension=True,
requested_networks=requested_networks,
_dns_name='test-instance')
def test_allocate_for_instance_port_dns_name_preset_noteq_hostname(self):
# If a pre-existing port has dns_name set, an exception should be
# raised if dns_name is not equal to the instance's hostname
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=uuids.portid_1)])
api = self._stub_allocate_for_instance(
requested_networks=requested_networks,
dns_extension=True,
_break='pre_list_networks',
_dns_name='my-instance')
self.assertRaises(exception.PortNotUsableDNS,
api.allocate_for_instance, self.context,
self.instance, False, requested_networks)
class TestNeutronv2NeutronHostnameDNSPortbinding(TestNeutronv2Base):
def test_allocate_for_instance_create_port(self):
# The port's dns_name attribute should be set by the port create
# request in allocate_for_instance
self._allocate_for_instance(1, portbinding=True, dns_extension=True,
bind_host_id=self.instance.get('host'))
def test_allocate_for_instance_with_requested_port(self):
# The port's dns_name attribute should be set by the port update
# request in allocate_for_instance
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=uuids.portid_1)])
self._allocate_for_instance(net_idx=1, dns_extension=True,
portbinding=True,
bind_host_id=self.instance.get('host'),
requested_networks=requested_networks)
def test_allocate_for_instance_create_port_with_dns_domain(self):
# The port's dns_name attribute should be set by the port update
# request in _update_port_dns_name. This should happen only when the
# port binding extension is enabled and the port's network has a
# non-blank dns_domain attribute
self._allocate_for_instance(11, portbinding=True, dns_extension=True,
bind_host_id=self.instance.get('host'))
def test_allocate_for_instance_with_requested_port_with_dns_domain(self):
# The port's dns_name attribute should be set by the port update
# request in _update_port_dns_name. This should happen only when the
# port binding extension is enabled and the port's network has a
# non-blank dns_domain attribute
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=uuids.portid_1)])
self._allocate_for_instance(net_idx=11, dns_extension=True,
portbinding=True,
bind_host_id=self.instance.get('host'),
requested_networks=requested_networks)
class TestNeutronClientForAdminScenarios(test.NoDBTestCase):
def setUp(self):
super(TestNeutronClientForAdminScenarios, self).setUp()
# NOTE(morganfainberg): The real configuration fixture here is used
        # instead of the already existing fixtures to ensure that the new
# config options are automatically deregistered at the end of the
# test run. Without the use of this fixture, the config options
# from the plugin(s) would persist for all subsequent tests from when
        # these are run (due to global conf object) and not be fully
# representative of a "clean" slate at the start of a test.
self.config_fixture = self.useFixture(config_fixture.Config())
oslo_opts = ks_loading.get_auth_plugin_conf_options('v2password')
self.config_fixture.register_opts(oslo_opts, 'neutron')
@requests_mock.mock()
def _test_get_client_for_admin(self, req_mock,
use_id=False, admin_context=False):
token_value = uuid.uuid4().hex
auth_url = 'http://anyhost/auth'
token_resp = V2Token(token_id=token_value)
req_mock.post(auth_url + '/tokens', json=token_resp)
self.flags(url='http://anyhost/', group='neutron')
self.flags(auth_type='v2password', group='neutron')
self.flags(auth_url=auth_url, group='neutron')
self.flags(timeout=30, group='neutron')
if use_id:
self.flags(tenant_id='tenant_id', group='neutron')
self.flags(user_id='user_id', group='neutron')
if admin_context:
my_context = context.get_admin_context()
else:
my_context = context.RequestContext('userid', uuids.my_tenant,
auth_token='token')
# clean global
neutronapi.reset_state()
if admin_context:
# Note that the context does not contain a token but is
# an admin context which will force an elevation to admin
# credentials.
context_client = neutronapi.get_client(my_context)
else:
# Note that the context is not elevated, but the True is passed in
# which will force an elevation to admin credentials even though
# the context has an auth_token.
context_client = neutronapi.get_client(my_context, True)
admin_auth = neutronapi._ADMIN_AUTH
self.assertEqual(CONF.neutron.auth_url, admin_auth.auth_url)
self.assertEqual(CONF.neutron.password, admin_auth.password)
if use_id:
self.assertEqual(CONF.neutron.tenant_id,
admin_auth.tenant_id)
self.assertEqual(CONF.neutron.user_id, admin_auth.user_id)
self.assertIsNone(admin_auth.tenant_name)
self.assertIsNone(admin_auth.username)
else:
self.assertEqual(CONF.neutron.username, admin_auth.username)
self.assertIsNone(admin_auth.tenant_id)
self.assertIsNone(admin_auth.user_id)
self.assertEqual(CONF.neutron.timeout,
neutronapi._SESSION.timeout)
self.assertEqual(
token_value,
context_client.httpclient.auth.get_token(neutronapi._SESSION))
self.assertEqual(
CONF.neutron.url,
context_client.httpclient.get_endpoint())
def test_get_client_for_admin(self):
self._test_get_client_for_admin()
def test_get_client_for_admin_with_id(self):
self._test_get_client_for_admin(use_id=True)
def test_get_client_for_admin_context(self):
self._test_get_client_for_admin(admin_context=True)
def test_get_client_for_admin_context_with_id(self):
self._test_get_client_for_admin(use_id=True, admin_context=True)
class TestNeutronPortSecurity(test.NoDBTestCase):
@mock.patch.object(neutronapi.API, 'get_instance_nw_info')
@mock.patch.object(neutronapi.API, '_update_port_dns_name')
@mock.patch.object(neutronapi.API, '_create_port_minimal')
@mock.patch.object(neutronapi.API, '_populate_neutron_extension_values')
@mock.patch.object(neutronapi.API, '_check_external_network_attach')
@mock.patch.object(neutronapi.API, '_process_security_groups')
@mock.patch.object(neutronapi.API, '_get_available_networks')
@mock.patch.object(neutronapi, '_filter_hypervisor_macs')
@mock.patch.object(neutronapi.API, '_validate_requested_port_ids')
@mock.patch.object(neutronapi.API, '_has_port_binding_extension')
@mock.patch.object(neutronapi, 'get_client')
@mock.patch('nova.objects.VirtualInterface')
def test_no_security_groups_requested(
self, mock_vif, mock_get_client, mock_has_port_binding_extension,
mock_validate_requested_port_ids, mock_filter_macs,
mock_get_available_networks, mock_process_security_groups,
mock_check_external_network_attach,
mock_populate_neutron_extension_values, mock_create_port,
mock_update_port_dns_name, mock_get_instance_nw_info):
nets = [
{'id': 'net1',
'name': 'net_name1',
'subnets': ['mysubnid1'],
'port_security_enabled': True},
{'id': 'net2',
'name': 'net_name2',
'subnets': ['mysubnid2'],
'port_security_enabled': True}]
onets = objects.NetworkRequestList(objects=[
objects.NetworkRequest(network_id='net1'),
objects.NetworkRequest(network_id='net2')])
instance = objects.Instance(
project_id=1, availability_zone='nova', uuid=uuids.instance)
secgroups = ['default'] # Nova API provides the 'default'
mock_validate_requested_port_ids.return_value = [None, onets]
mock_filter_macs.return_value = None
mock_get_available_networks.return_value = nets
mock_process_security_groups.return_value = []
api = neutronapi.API()
mock_create_port.return_value = {'id': 'foo', 'mac_address': 'bar'}
api.allocate_for_instance(
'context', instance, False, requested_networks=onets,
security_groups=secgroups)
mock_process_security_groups.assert_called_once_with(
instance, mock.ANY, [])
mock_create_port.assert_has_calls([
mock.call(mock.ANY, instance, u'net1', None, []),
mock.call(mock.ANY, instance, u'net2', None, [])],
any_order=True)
@mock.patch.object(neutronapi.API, 'get_instance_nw_info')
@mock.patch.object(neutronapi.API, '_update_port_dns_name')
@mock.patch.object(neutronapi.API, '_create_port_minimal')
@mock.patch.object(neutronapi.API, '_populate_neutron_extension_values')
@mock.patch.object(neutronapi.API, '_check_external_network_attach')
@mock.patch.object(neutronapi.API, '_process_security_groups')
@mock.patch.object(neutronapi.API, '_get_available_networks')
@mock.patch.object(neutronapi, '_filter_hypervisor_macs')
@mock.patch.object(neutronapi.API, '_validate_requested_port_ids')
@mock.patch.object(neutronapi.API, '_has_port_binding_extension')
@mock.patch.object(neutronapi, 'get_client')
@mock.patch('nova.objects.VirtualInterface')
def test_security_groups_requested(
self, mock_vif, mock_get_client, mock_has_port_binding_extension,
mock_validate_requested_port_ids, mock_filter_macs,
mock_get_available_networks, mock_process_security_groups,
mock_check_external_network_attach,
mock_populate_neutron_extension_values, mock_create_port,
mock_update_port_dns_name, mock_get_instance_nw_info):
nets = [
{'id': 'net1',
'name': 'net_name1',
'subnets': ['mysubnid1'],
'port_security_enabled': True},
{'id': 'net2',
'name': 'net_name2',
'subnets': ['mysubnid2'],
'port_security_enabled': True}]
onets = objects.NetworkRequestList(objects=[
objects.NetworkRequest(network_id='net1'),
objects.NetworkRequest(network_id='net2')])
instance = objects.Instance(
project_id=1, availability_zone='nova', uuid=uuids.instance)
secgroups = ['default', 'secgrp1', 'secgrp2']
mock_validate_requested_port_ids.return_value = [None, onets]
mock_filter_macs.return_value = None
mock_get_available_networks.return_value = nets
mock_process_security_groups.return_value = ['default-uuid',
'secgrp-uuid1',
'secgrp-uuid2']
api = neutronapi.API()
mock_create_port.return_value = {'id': 'foo', 'mac_address': 'bar'}
api.allocate_for_instance(
'context', instance, False, requested_networks=onets,
security_groups=secgroups)
mock_create_port.assert_has_calls([
mock.call(mock.ANY, instance, u'net1', None,
['default-uuid', 'secgrp-uuid1', 'secgrp-uuid2']),
mock.call(mock.ANY, instance, u'net2', None,
['default-uuid', 'secgrp-uuid1', 'secgrp-uuid2'])],
any_order=True)
@mock.patch.object(neutronapi.API, 'get_instance_nw_info')
@mock.patch.object(neutronapi.API, '_update_port_dns_name')
@mock.patch.object(neutronapi.API, '_create_port_minimal')
@mock.patch.object(neutronapi.API, '_populate_neutron_extension_values')
@mock.patch.object(neutronapi.API, '_check_external_network_attach')
@mock.patch.object(neutronapi.API, '_process_security_groups')
@mock.patch.object(neutronapi.API, '_get_available_networks')
@mock.patch.object(neutronapi, '_filter_hypervisor_macs')
@mock.patch.object(neutronapi.API, '_validate_requested_port_ids')
@mock.patch.object(neutronapi.API, '_has_port_binding_extension')
@mock.patch.object(neutronapi, 'get_client')
@mock.patch('nova.objects.VirtualInterface')
def test_port_security_disabled_no_security_groups_requested(
self, mock_vif, mock_get_client, mock_has_port_binding_extension,
mock_validate_requested_port_ids, mock_filter_macs,
mock_get_available_networks, mock_process_security_groups,
mock_check_external_network_attach,
mock_populate_neutron_extension_values, mock_create_port,
mock_update_port_dns_name, mock_get_instance_nw_info):
nets = [
{'id': 'net1',
'name': 'net_name1',
'subnets': ['mysubnid1'],
'port_security_enabled': False},
{'id': 'net2',
'name': 'net_name2',
'subnets': ['mysubnid2'],
'port_security_enabled': False}]
onets = objects.NetworkRequestList(objects=[
objects.NetworkRequest(network_id='net1'),
objects.NetworkRequest(network_id='net2')])
instance = objects.Instance(
project_id=1, availability_zone='nova', uuid=uuids.instance)
secgroups = ['default'] # Nova API provides the 'default'
mock_validate_requested_port_ids.return_value = [None, onets]
mock_filter_macs.return_value = None
mock_get_available_networks.return_value = nets
mock_process_security_groups.return_value = []
api = neutronapi.API()
mock_create_port.return_value = {'id': 'foo', 'mac_address': 'bar'}
api.allocate_for_instance(
'context', instance, False, requested_networks=onets,
security_groups=secgroups)
mock_process_security_groups.assert_called_once_with(
instance, mock.ANY, [])
mock_create_port.assert_has_calls([
mock.call(mock.ANY, instance, u'net1', None, []),
mock.call(mock.ANY, instance, u'net2', None, [])],
any_order=True)
@mock.patch.object(neutronapi.API, 'get_instance_nw_info')
@mock.patch.object(neutronapi.API, '_update_port_dns_name')
@mock.patch.object(neutronapi.API, '_create_port_minimal')
@mock.patch.object(neutronapi.API, '_populate_neutron_extension_values')
@mock.patch.object(neutronapi.API, '_check_external_network_attach')
@mock.patch.object(neutronapi.API, '_process_security_groups')
@mock.patch.object(neutronapi.API, '_get_available_networks')
@mock.patch.object(neutronapi, '_filter_hypervisor_macs')
@mock.patch.object(neutronapi.API, '_validate_requested_port_ids')
@mock.patch.object(neutronapi.API, '_has_port_binding_extension')
@mock.patch.object(neutronapi, 'get_client')
@mock.patch('nova.objects.VirtualInterface')
def test_port_security_disabled_and_security_groups_requested(
self, mock_vif, mock_get_client, mock_has_port_binding_extension,
mock_validate_requested_port_ids, mock_filter_macs,
mock_get_available_networks, mock_process_security_groups,
mock_check_external_network_attach,
mock_populate_neutron_extension_values, mock_create_port,
mock_update_port_dns_name, mock_get_instance_nw_info):
nets = [
{'id': 'net1',
'name': 'net_name1',
'subnets': ['mysubnid1'],
'port_security_enabled': True},
{'id': 'net2',
'name': 'net_name2',
'subnets': ['mysubnid2'],
'port_security_enabled': False}]
onets = objects.NetworkRequestList(objects=[
objects.NetworkRequest(network_id='net1'),
objects.NetworkRequest(network_id='net2')])
instance = objects.Instance(
project_id=1, availability_zone='nova', uuid=uuids.instance)
secgroups = ['default', 'secgrp1', 'secgrp2']
mock_validate_requested_port_ids.return_value = [None, onets]
mock_filter_macs.return_value = None
mock_get_available_networks.return_value = nets
mock_process_security_groups.return_value = ['default-uuid',
'secgrp-uuid1',
'secgrp-uuid2']
api = neutronapi.API()
self.assertRaises(
exception.SecurityGroupCannotBeApplied,
api.allocate_for_instance,
'context', instance, False, requested_networks=onets,
security_groups=secgroups)
mock_process_security_groups.assert_called_once_with(
instance, mock.ANY, ['default', 'secgrp1', 'secgrp2'])
class TestNeutronv2AutoAllocateNetwork(test.NoDBTestCase):
"""Tests auto-allocation scenarios"""
def setUp(self):
super(TestNeutronv2AutoAllocateNetwork, self).setUp()
self.api = neutronapi.API()
self.context = context.RequestContext(uuids.user_id, uuids.project_id)
def test__has_auto_allocate_extension_empty(self):
# Tests that the extension is not available but we refresh the list.
self.api.extensions = {}
with mock.patch.object(self.api,
'_refresh_neutron_extensions_cache') as refresh:
self.assertFalse(
self.api._has_auto_allocate_extension(self.context))
# refresh is called because the extensions dict is empty
refresh.assert_called_once_with(self.context, neutron=None)
def test__has_auto_allocate_extension_false(self):
        # Tests that the extension is not available and no refresh happens.
self.api.extensions = {'foo': 'bar'}
with mock.patch.object(self.api,
'_refresh_neutron_extensions_cache') as refresh:
self.assertFalse(
self.api._has_auto_allocate_extension(self.context))
self.assertFalse(refresh.called)
def test__has_auto_allocate_extension_refresh_ok(self):
# Tests the happy path with refresh.
self.api.extensions = {constants.AUTO_ALLOCATE_TOPO_EXT: mock.Mock()}
with mock.patch.object(self.api,
'_refresh_neutron_extensions_cache') as refresh:
self.assertTrue(
self.api._has_auto_allocate_extension(
self.context, refresh_cache=True))
refresh.assert_called_once_with(self.context, neutron=None)
def test__can_auto_allocate_network_no_extension(self):
# Tests when the auto-allocated-topology extension is not available.
with mock.patch.object(self.api, '_has_auto_allocate_extension',
return_value=False):
self.assertFalse(self.api._can_auto_allocate_network(
self.context, mock.sentinel.neutron))
def test__can_auto_allocate_network_validation_conflict(self):
# Tests that the dry-run validation with neutron fails (not ready).
ntrn = mock.Mock()
ntrn.validate_auto_allocated_topology_requirements.side_effect = \
exceptions.Conflict
with mock.patch.object(self.api, '_has_auto_allocate_extension',
return_value=True):
self.assertFalse(self.api._can_auto_allocate_network(
self.context, ntrn))
validate = ntrn.validate_auto_allocated_topology_requirements
validate.assert_called_once_with(uuids.project_id)
def test__can_auto_allocate_network(self):
# Tests the happy path.
ntrn = mock.Mock()
with mock.patch.object(self.api, '_has_auto_allocate_extension',
return_value=True):
self.assertTrue(self.api._can_auto_allocate_network(
self.context, ntrn))
validate = ntrn.validate_auto_allocated_topology_requirements
validate.assert_called_once_with(uuids.project_id)
def test__ports_needed_per_instance_no_reqs_no_nets(self):
# Tests no requested_networks and no available networks.
with mock.patch.object(self.api, '_get_available_networks',
return_value=[]):
self.assertEqual(
1, self.api._ports_needed_per_instance(self.context,
mock.sentinel.neutron,
None))
def test__ports_needed_per_instance_empty_reqs_no_nets(self):
# Tests empty requested_networks and no available networks.
requested_networks = objects.NetworkRequestList()
with mock.patch.object(self.api, '_get_available_networks',
return_value=[]):
self.assertEqual(
1, self.api._ports_needed_per_instance(self.context,
mock.sentinel.neutron,
requested_networks))
def test__ports_needed_per_instance_auto_reqs_no_nets_not_ready(self):
# Test for when there are no available networks and we're requested
# to auto-allocate the network but auto-allocation is not available.
net_req = objects.NetworkRequest(
network_id=net_req_obj.NETWORK_ID_AUTO)
requested_networks = objects.NetworkRequestList(objects=[net_req])
with mock.patch.object(self.api, '_get_available_networks',
return_value=[]):
with mock.patch.object(self.api, '_can_auto_allocate_network',
spec=True, return_value=False) as can_alloc:
self.assertRaises(
exception.UnableToAutoAllocateNetwork,
self.api._ports_needed_per_instance,
self.context, mock.sentinel.neutron, requested_networks)
can_alloc.assert_called_once_with(
self.context, mock.sentinel.neutron)
def test__ports_needed_per_instance_auto_reqs_no_nets_ok(self):
# Test for when there are no available networks and we're requested
# to auto-allocate the network and auto-allocation is available.
net_req = objects.NetworkRequest(
network_id=net_req_obj.NETWORK_ID_AUTO)
requested_networks = objects.NetworkRequestList(objects=[net_req])
with mock.patch.object(self.api, '_get_available_networks',
return_value=[]):
with mock.patch.object(self.api, '_can_auto_allocate_network',
spec=True, return_value=True) as can_alloc:
self.assertEqual(
1, self.api._ports_needed_per_instance(
self.context,
mock.sentinel.neutron,
requested_networks))
can_alloc.assert_called_once_with(
self.context, mock.sentinel.neutron)
def test__validate_requested_port_ids_auto_allocate(self):
# Tests that _validate_requested_port_ids doesn't really do anything
# if there is an auto-allocate network request.
net_req = objects.NetworkRequest(
network_id=net_req_obj.NETWORK_ID_AUTO)
requested_networks = objects.NetworkRequestList(objects=[net_req])
self.assertEqual(({}, []),
self.api._validate_requested_port_ids(
self.context, mock.sentinel.instance,
mock.sentinel.neutron_client, requested_networks))
def test__auto_allocate_network_conflict(self):
# Tests that we handle a 409 from Neutron when auto-allocating topology
instance = mock.Mock(project_id=self.context.project_id)
ntrn = mock.Mock()
ntrn.get_auto_allocated_topology = mock.Mock(
side_effect=exceptions.Conflict)
self.assertRaises(exception.UnableToAutoAllocateNetwork,
self.api._auto_allocate_network, instance, ntrn)
ntrn.get_auto_allocated_topology.assert_called_once_with(
instance.project_id)
def test__auto_allocate_network_network_not_found(self):
# Tests that we handle a 404 from Neutron when auto-allocating topology
instance = mock.Mock(project_id=self.context.project_id)
ntrn = mock.Mock()
ntrn.get_auto_allocated_topology.return_value = {
'auto_allocated_topology': {
'id': uuids.network_id
}
}
ntrn.show_network = mock.Mock(
side_effect=exceptions.NetworkNotFoundClient)
self.assertRaises(exception.UnableToAutoAllocateNetwork,
self.api._auto_allocate_network, instance, ntrn)
ntrn.show_network.assert_called_once_with(uuids.network_id)
def test__auto_allocate_network(self):
# Tests the happy path.
instance = mock.Mock(project_id=self.context.project_id)
ntrn = mock.Mock()
ntrn.get_auto_allocated_topology.return_value = {
'auto_allocated_topology': {
'id': uuids.network_id
}
}
ntrn.show_network.return_value = {'network': mock.sentinel.network}
self.assertEqual(mock.sentinel.network,
self.api._auto_allocate_network(instance, ntrn))
def test_allocate_for_instance_auto_allocate(self):
# Tests the happy path.
ntrn = mock.Mock()
# mock neutron.list_networks which is called from
# _get_available_networks when net_ids is empty, which it will be
# because _validate_requested_port_ids will return an empty list since
# we requested 'auto' allocation.
ntrn.list_networks.return_value = {}
fake_network = {
'id': uuids.network_id,
'subnets': [
uuids.subnet_id,
]
}
def fake_get_instance_nw_info(context, instance, **kwargs):
# assert the network and port are what was used in the test
self.assertIn('networks', kwargs)
self.assertEqual(1, len(kwargs['networks']))
self.assertEqual(uuids.network_id,
kwargs['networks'][0]['id'])
self.assertIn('port_ids', kwargs)
self.assertEqual(1, len(kwargs['port_ids']))
self.assertEqual(uuids.port_id, kwargs['port_ids'][0])
# return a fake vif
return [model.VIF(id=uuids.port_id)]
@mock.patch('nova.network.neutronv2.api.get_client', return_value=ntrn)
@mock.patch.object(self.api, '_has_port_binding_extension',
return_value=True)
@mock.patch.object(self.api, '_auto_allocate_network',
return_value=fake_network)
@mock.patch.object(self.api, '_check_external_network_attach')
@mock.patch.object(self.api, '_populate_neutron_extension_values')
@mock.patch.object(self.api, '_populate_mac_address')
@mock.patch.object(self.api, '_create_port_minimal', spec=True,
return_value={'id': uuids.port_id,
'mac_address': 'foo'})
@mock.patch.object(self.api, '_update_port')
@mock.patch.object(self.api, '_update_port_dns_name')
@mock.patch.object(self.api, 'get_instance_nw_info',
fake_get_instance_nw_info)
@mock.patch('nova.objects.VirtualInterface')
def do_test(self,
mock_vif,
                    update_port_dns_name_mock,
update_port_mock,
create_port_mock,
populate_mac_addr_mock,
populate_ext_values_mock,
check_external_net_attach_mock,
auto_allocate_mock,
has_port_binding_mock,
get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
net_req = objects.NetworkRequest(
network_id=net_req_obj.NETWORK_ID_AUTO)
requested_networks = objects.NetworkRequestList(objects=[net_req])
nw_info = self.api.allocate_for_instance(
self.context, instance, False, requested_networks)
self.assertEqual(1, len(nw_info))
self.assertEqual(uuids.port_id, nw_info[0]['id'])
# assert that we filtered available networks on admin_state_up=True
ntrn.list_networks.assert_has_calls([
mock.call(tenant_id=instance.project_id, shared=False,
admin_state_up=True),
mock.call(shared=True)])
# assert the calls to create the port are using the network that
# was auto-allocated
port_req_body = mock.ANY
create_port_mock.assert_called_once_with(
ntrn, instance, uuids.network_id,
None, # request.address (fixed IP)
[], # security_group_ids - we didn't request any
)
update_port_mock.assert_called_once_with(
ntrn, instance, uuids.port_id, port_req_body)
do_test(self)
| {
"content_hash": "c40a83621cf765b2f357f495b5485d5e",
"timestamp": "",
"source": "github",
"line_count": 6015,
"max_line_length": 79,
"avg_line_length": 47.54596841230258,
"alnum_prop": 0.5595250166964464,
"repo_name": "hanlind/nova",
"id": "6275e1d593814ac714fee107c9493accb8c010a4",
"size": "286626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/unit/network/test_neutronv2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "3325"
},
{
"name": "Python",
"bytes": "18681206"
},
{
"name": "Shell",
"bytes": "32127"
},
{
"name": "Smarty",
"bytes": "306159"
}
],
"symlink_target": ""
} |
import os
import re
import cPickle as pickle
import copy
from PyQt4.QtCore import Qt, SIGNAL, QAbstractTableModel, QString, QVariant, QModelIndex
from PyQt4.QtGui import QColor
import WordClass
import WordContainerClass
import WordTesterExceptions as wtexception
WORD, MEANINGS, CONTEXT, DIFFICULTY = range(4)
MAGIC_NUMBER = 0x35a5
FILE_VERSION = 1.0
class WordTableModel(QAbstractTableModel):
"""
Subclass of the QAbstractTableModel responsible for handling wordsTable widget's
interaction with the user.
This is the main class that operates on WordContainer.
Responsible for:
saving, loading, importing, exporting, removing duplicates, sorting,
sending and retrieving data from the view, removing and inserting rows
"""
def __init__(self):
super(WordTableModel, self).__init__()
self.fname = QString()
self.dirty = False
self.words = WordContainerClass.WordContainer()
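        # sort direction flags; toggled on each sort so repeated sorts
        # alternate between ascending and descending order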
self.reversedDiffSort = self.reversedWordSort = False
# overloaded methods from QAbstractTableModel
def flags(self, index):
if not index.isValid():
return Qt.ItemIsEnabled
return Qt.ItemFlags(QAbstractTableModel.flags(self, index)|
Qt.ItemIsEditable)
def data(self, index, role=Qt.DisplayRole):
"""
        Provides data for the wordsTable widget; also handles editing, alignment,
        and cell background color (this one really should be in a delegate, but..)
"""
        if not index.isValid() or not (0 <= index.row() < len(self.words)):
return QVariant()
word = self.words[index.row()]
column = index.column()
if role == Qt.DisplayRole:
if column == WORD:
return QVariant(word.getWord())
if column == MEANINGS:
return QVariant(word.meaningsToText())
if column == CONTEXT:
return QVariant(word.getContext())
if column == DIFFICULTY:
return QVariant(word.getDifficulty())
elif role == Qt.EditRole:
if column == WORD:
return QVariant(word.getWord())
if column == MEANINGS:
return QVariant(word.meaningsToText())
if column == CONTEXT:
return QVariant(word.getContext())
if column == DIFFICULTY:
return QVariant(word.getDifficulty())
elif role == Qt.TextAlignmentRole:
if column == DIFFICULTY:
return QVariant(int(Qt.AlignCenter|Qt.AlignCenter))
return QVariant(int(Qt.AlignLeft|Qt.AlignCenter))
elif role == Qt.BackgroundColorRole:
if column == DIFFICULTY:
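                # map the word's weight (0-100) onto a red-to-green gradient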
if word.getWeight() < 10:
return QVariant(QColor(255,0,0))
if 10 <= word.getWeight() < 20:
return QVariant(QColor(255,51,0))
if 20 <= word.getWeight() < 30:
return QVariant(QColor(255,102,0))
if 30 <= word.getWeight() < 40:
return QVariant(QColor(255,153,0))
if 40 <= word.getWeight() < 50:
return QVariant(QColor(255,204,0))
if 50 <= word.getWeight() < 60:
return QVariant(QColor(255,255,0))
if 60 <= word.getWeight() < 70:
return QVariant(QColor(195,255,0))
if 70 <= word.getWeight() < 80:
return QVariant(QColor(135,255,0))
if 80 <= word.getWeight() < 90:
return QVariant(QColor(75,255,0))
if word.getWeight() >= 90:
return QVariant(QColor(0,255,0))
if column == WORD:
if word.getDuplicate():
return QVariant(QColor(255,0,0))
return QVariant(QColor(255,255,255))
return QVariant()
def headerData(self, section, orientation, role = Qt.DisplayRole):
"""
        Sets header labels.
"""
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return QVariant(int(Qt.AlignLeft|Qt.AlignVCenter))
return QVariant(int(Qt.AlignRight|Qt.AlignVCenter))
if role != Qt.DisplayRole:
return QVariant()
if orientation == Qt.Horizontal:
if section == WORD:
return QVariant("Word")
elif section == MEANINGS:
return QVariant("Meanings")
elif section == CONTEXT:
return QVariant("Used in context")
elif section == DIFFICULTY:
return QVariant("Difficulty")
return QVariant(int(section + 1))
def rowCount(self, index = QModelIndex()):
return len(self.words)
def columnCount(self, index = QModelIndex()):
return 4
def setData(self, index, value, role = Qt.EditRole):
"""
        Receives data from the user and saves it into the WordContainer object.
"""
        if index.isValid() and (0 <= index.row() < len(self.words)):
word = self.words[index.row()]
column = index.column()
if column == WORD:
if unicode((value.toString())) != word.getWord():
word.setWord(unicode(value.toString()))
self.words.findDuplicates()
self.dirty = True
elif column == MEANINGS:
if unicode((value.toString())) != word.meaningsToText():
word.setMeanings(unicode(value.toString()))
self.dirty = True
elif column == CONTEXT:
if unicode((value.toString())) != word.getContext():
word.setContext(unicode(value.toString()))
self.dirty = True
elif column == DIFFICULTY:
if str((value.toString())) != word.getDifficulty():
word.setDifficulty(str(value.toString()))
self.dirty = True
self.emit(SIGNAL("dataChanged(QModelIndex,QModelIndex)"),
index, index)
return True
return False
def insertRows(self, position, rows = 1, index = QModelIndex()):
self.beginInsertRows(QModelIndex(), position,
position + rows - 1)
for row in range(rows):
self.words.insert(position + row,WordClass.Word(" ", " "))
self.endInsertRows()
self.dirty = True
self.words.findDuplicates()
return True
def removeRows(self, position, rows = 1, index = QModelIndex()):
self.beginRemoveRows(QModelIndex(), position,
position + rows - 1)
words = copy.deepcopy(self.words)
self.words.clearWords()
self.words.addWords(words[:position] + \
words[position + rows:])
self.endRemoveRows()
self.dirty = True
self.words.findDuplicates()
return True
# sorting methods
def sortByDifficulty(self):
self.words.sortByDifficulty(self.reversedDiffSort)
self.reversedDiffSort = not self.reversedDiffSort
self.reset()
def sortByWord(self):
self.words.sortByWord(self.reversedWordSort)
self.reversedWordSort = not self.reversedWordSort
self.reset()
# saving and loading methods
def save(self):
"""
        Saves (dumps with pickle) the whole WordContainer into the .pkl file.
:throws:
IOError, pickle.PicklingError
"""
if self.dirty == False:
return
exception = None
fh = None
try:
# we don't check if the fname exists, main window does it before.
fh = open(unicode(self.fname),'wb')
pickle.dump(self.words, fh)
self.dirty = False
except (pickle.PicklingError, IOError) as e:
exception = e
finally:
if fh is not None:
fh.close()
if exception is not None:
raise exception
def load(self, fname = None, append = False):
"""
Loads from .pkl file into the WordContainer.
:param fname
Unicode, it is only explicitly given if the user wants to append to the
current WordContainer instead of overwriting it.
:param append
Bool, append or overwrite.
:throws:
pickle.UnpicklingError, IOError, AttributeError, wtexception.FileHandlingExceptions,
wtexception.SignatureError, wtexception.VersionError
"""
exception = None
fh = None
if fname is None and self.fname is None:
return None
try:
if fname is None:
fh = open(unicode(self.fname),'rb')
else:
fh = open(unicode(fname),'rb')
pack = pickle.load(fh)
            # check whether this is the current version of the Word Tester file
if not pack.MAGIC_NUMBER == MAGIC_NUMBER:
raise wtexception.SignatureError("Unable to load from %s. Unrecognized file signature" \
% os.path.basename(unicode(fname)))
if not pack.FILE_VERSION == FILE_VERSION:
raise wtexception.VersionError("Unable to load from %s. Unrecognized file type version" \
% os.path.basename(unicode(fname)))
if not append:
self.words = pack
self.dirty = False
else:
wordsAppended = False
# words that were already in WordContainer are not appended
for item in pack:
if item not in self.words:
self.words.addWords(item)
wordsAppended = True
if wordsAppended:
self.dirty = True
self.words.findDuplicates()
self.reset()
except (pickle.UnpicklingError, IOError, AttributeError, wtexception.FileHandlingExceptions) as e:
exception = e
finally:
if fh is not None:
fh.close()
if exception is not None:
raise exception
# importing and exporting methods
def encloseContextInBrackets(self, words = None):
"""
        Helper method that transforms the context component of the Word object into
        the form in which it is presented in the .txt file.
:param WordContainer words
If words are specified, changes only the ones given.
:rtype WordContainer
"""
        # the WordContainer has to be copied, because it is changed only for saving to .txt;
        # self.words can't be changed during that process
if words is None:
enclosedWords = copy.deepcopy(self.words)
else:
enclosedWords = copy.deepcopy(words)
context = ""
for item in enclosedWords:
context = item.getContext()
if context is not None and context != "":
contextList = context.split(";")
contextListBrackets = []
for sentence in contextList:
sentence = sentence.strip()
sentence = "[" + sentence + "]"
contextListBrackets.append(sentence)
context = ";".join(contextListBrackets)
item.setContext(context)
return enclosedWords
def importCheckAndFormat(self, text):
"""
Helper method that checks the format of the .txt file before import and
        transforms each line into a Word object.
:param str text
Line from the .txt file.
:rtype
Word object.
:throws:
wtexception.ImportSyntaxError
"""
line = text
        # if there is a blank line (usually there is one at the EOF)
if line is None or line.strip() == "":
return None
# regular expressions
squareRe = re.compile('\[.*?\]')
roundRe = re.compile('\(.*?\)')
# get everything that is enclosed in square and round brackets
context = squareRe.findall(text)
notes = roundRe.findall(text)
Ucontext = unicode()
for i in range(len(context)):
text = text.replace(context[i],'')
context[i] = context[i].strip(']')
context[i] = context[i].strip('[')
Ucontext += unicode(context[i] + ' ; ')
Ucontext = Ucontext.strip(' ; ')
# remove everything that is enclosed in round brackets
for i in range(len(notes)):
text = text.replace(notes[i],'')
# so far even if we couldn't find either []'s or ()'s it wasn't a problem
text = text.split('-')
if len(text) != 2:
raise wtexception.ImportSyntaxError("Selected file is not in required format. Error in line: %s" % line)
word = text[0].strip()
meanings = text[1].strip()
return WordClass.Word(word,meanings,Ucontext)
def exportCheckAndFormat(self, word, meanings, context):
"""
Helper method that checks the format of the Word object components before
exporting to .txt file, and transforms each Word object to one str.
"""
contextList = []
if context is not None:
context = context.strip()
if context != '':
contextList = context.split(';')
for i in range(len(contextList)):
contextList[i] = contextList[i].strip()
if not contextList[i].startswith('[') or not contextList[i].endswith(']'):
raise wtexception.ExportSyntaxError("Context has to be enclosed in []. Error occured in word: %s" % word)
# join meanings with their contexts as long as there are meanings available
        # if no context is present we catch the IndexError and append a comma only
meaningsNcontext = unicode()
for i in range(len(meanings)):
meaningsNcontext += unicode(meanings[i])
try:
meaningsNcontext += ' ' + contextList[i] + ', '
except IndexError:
meaningsNcontext += ', '
meaningsNcontext = meaningsNcontext.strip(', ')
return word + ' - ' + meaningsNcontext + '\r\n'
def importWords(self, fname, append = False):
"""
        Imports data from the .txt file and transforms it into the WordContainer object.
        :param str fname
            File name to import from. It will never be None because it's checked before.
:param bool append
Informs if to append to the current file or open a new one.
:throws:
IOError, wtexception.FileHandlingExceptions, wtexception.EncodingError
"""
pack = WordContainerClass.WordContainer()
words = []
fh = None
exception = None
try:
import codecs
fh = codecs.open(unicode(fname),'r','utf-8')
try:
string = fh.readline()
except UnicodeDecodeError as e:
raise wtexception.EncodingError("""Unable to import from %s.
Please choose a file with UTF-8 encoding!""" \
% os.path.basename(unicode(fname)))
            # strip a UTF-8 marker char (BOM) from the beginning of the file
# if string[0] == unicode(codecs.BOM_UTF8, 'utf-8'):
# string = string.lstrip(unicode(codecs.BOM_UTF8, 'utf-8'))
# else:
# raise wtexception.EncodingError("""Unable to import from %s.
#Please choose a file with UTF-8 encoding!""" \
# % os.path.basename(unicode(fname)))
wordsAppended = False
            # the first line has already been read in order to strip BOM_UTF8
word = self.importCheckAndFormat(string)
if append:
if word not in self.words:
pack.append(word)
wordsAppended = True
else:
pack.append(word)
# iterate over other lines in the file
for line in fh:
word = self.importCheckAndFormat(line)
                # if there was a blank line just skip it
if word is None:
continue
if append:
if word not in self.words:
pack.append(word)
wordsAppended = True
else:
pack.append(word)
            # if append == False, we overwrite self.words with the words retrieved
            # from the .txt file
if not append:
self.words = copy.deepcopy(pack)
else:
if wordsAppended:
pack.addWords(words)
self.words.addWords(pack)
if (append and wordsAppended) or not append:
self.dirty = True
self.words.findDuplicates()
self.reset()
else:
self.dirty = False
except (IOError, wtexception.FileHandlingExceptions) as e:
exception = e
finally:
if fh is not None:
fh.close()
if exception is not None:
raise exception
def exportWords(self, fname, words = None):
"""
Exports WordContainer into the .txt file.
:param str fname
Name of the file to export to. Just like in import it will never be None.
        :param WordContainer words
            If given, only these words are exported; otherwise the whole
            WordContainer held by the model is exported.
"""
exception = None
fh = None
try:
import codecs
fh = codecs.open(unicode(fname),'w')
fh.write(codecs.BOM_UTF8)
fh.close()
fh = codecs.open(unicode(fname),'a','utf-8')
if words is None:
enclosedWords = self.encloseContextInBrackets()
else:
enclosedWords = self.encloseContextInBrackets(words)
for item in enclosedWords:
fh.write(self.exportCheckAndFormat(item.getWord(), item.getMeanings(), item.getContext()))
        except IOError as e:
exception = e
finally:
if fh is not None:
fh.close()
if exception is not None:
raise exception
def removeDuplicates(self):
"""
        To every word that is repeated throughout the WordContainer this method adds
        an appropriate number of '*' characters at the end to make it unique. (The '*'
        characters should, and hopefully will, be replaced by superscripts.)
"""
while self.hasDuplicates():
i = len(self.words) - 1
while i >= 0 and self.hasDuplicates():
word = self.words[i].getWord()
if self.words.duplicates.has_key(word):
self.words[i].setWord(word + (self.words.duplicates[word]-1)*'*')
self.words.duplicates[word] -= 1
i -= 1
self.reset()
def hasDuplicates(self):
self.words.findDuplicates()
if len(self.words.duplicates):
return True
else:
return False
| {
"content_hash": "a924950fdc6f9bac2a4d3f36b0ce1aae",
"timestamp": "",
"source": "github",
"line_count": 530,
"max_line_length": 129,
"avg_line_length": 36.79056603773585,
"alnum_prop": 0.5461305707985025,
"repo_name": "michaupl/wordtester",
"id": "2057eafd79fb85f681b7f3b4b62c4c10062c2761",
"size": "19523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/top/WordTableModel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "133778"
}
],
"symlink_target": ""
} |
from marshmallow_jsonapi import Schema, fields
from marshmallow import validate, ValidationError
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.exc import SQLAlchemyError
import datetime
from config import APP_ROOT, DEBUG
import hashlib
import os
import subprocess
db = SQLAlchemy(session_options={"autoflush": False})
class CRUD():
def add(self, resource):
db.session.add(resource)
return db.session.commit()
def update(self):
return db.session.commit()
def delete(self, resource):
db.session.delete(resource)
return db.session.commit()
class HaulersImages(db.Model, CRUD):
__tablename__ = 'haulers_images'
id = db.Column(db.Integer, primary_key=True)
HAULER_ID = db.Column(db.Integer)
name = db.Column(db.String(64), nullable=False)
type = db.Column(db.String(6), nullable=False)
def __init__(self, **kwargs):
self.HAULER_ID = kwargs.get('HAULER_ID')
self.name = kwargs.get('name')
self.type = kwargs.get('type')
def get_folder(self, half=None):
half_path = "/data/extensions_data/haulers_images/"+str(self.HAULER_ID)+"/"
path = APP_ROOT + half_path
os.makedirs(path, mode=0o0775, exist_ok=True)
if half:
return (half_path)
return (path)
def save_file(self, file, filename):
if file:
folder = self.get_folder()
file.save(os.path.join(folder, filename))
#Convert to jpg
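            # The ImageMagick flags are assumed to mean: JPEG quality 95, force a
            # truecolor image in the RGB colorspace, and -append to stack any
            # multi-frame input vertically before writing the .jpg copy.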
if DEBUG:
cmd = "convert -quality 95 -type truecolor -colorspace RGB -append " + folder + filename + " " + folder + filename + ".jpg"
#cmd = "convert1 -quality 95 -type truecolor -colorspace RGB -append " + folder + filename + " " + folder + filename + ".jpg"
else:
cmd = "convert -quality 95 -type truecolor -colorspace RGB -append " + folder + filename + " " + folder + filename + ".jpg"
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE,
stderr=subprocess.STDOUT)
while True:
s = p.stdout.readline()
if not s: break
print (s,)
os.remove(folder + filename)
class HaulersImagesSchema(Schema):
not_blank = validate.Length(min=1, error='Field cannot be blank')
id = fields.Integer()
HAULER_ID = fields.Integer()
name = fields.String()
type = fields.String()
#self links
def get_top_level_links(self, data, many):
if many:
self_link = "/haulers_images/"
else:
self_link = "/haulers_images/{}".format(data['attributes']['id'])
return {'self': self_link}
class Meta:
type_ = 'haulers_images'
| {
"content_hash": "0591403cb926d0cf6024f8b17c01883e",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 141,
"avg_line_length": 36.24096385542169,
"alnum_prop": 0.5598404255319149,
"repo_name": "konstantinKim/vd-backend",
"id": "ee538402fec6769e8c1305d7e848dbe8356e028b",
"size": "3008",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/haulersImages/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "190141"
}
],
"symlink_target": ""
} |
import webapp2
import logging
import fix_path
from routes import route_list
from config import app_config
app = webapp2.WSGIApplication(route_list,
config = app_config,
debug = app_config.get('debug', True))
| {
"content_hash": "a98421984eb30a3470c43142d6d41216",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 70,
"avg_line_length": 25,
"alnum_prop": 0.5963636363636363,
"repo_name": "adrian/feed2me",
"id": "551a17e8e148a386d1865dcd5831d275ee0df0ff",
"size": "291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "207"
},
{
"name": "HTML",
"bytes": "2257"
},
{
"name": "JavaScript",
"bytes": "3830"
},
{
"name": "Python",
"bytes": "196098"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.urls import reverse
from portal.models import Student, Hobby
from portal.tests.constants import VALID_USERNAME
from portal.utils import get_invalid_id_popup, create_families_from_parents, assign_children_to_families
def create_student_and_return(username, child=False, magic_id=None, party=False, name='Name', gender='M', course='C'):
student = Student(username=username, child=child, magic_id=magic_id, party=party, name=name, gender=gender,
course=course)
student.save()
return student
def create_hobby(description):
Hobby(description=description).save()
class IndexViewTests(TestCase):
DEFAULT = reverse('portal:index')
PARENT = reverse('portal:index', kwargs={'position': 'parent'})
CHILD = reverse('portal:index', kwargs={'position': 'child'})
def test_index_returns_child_submission(self):
response = self.client.get(self.DEFAULT)
self.assertContains(response, 'child')
def test_child_page_correctly_returned(self):
response = self.client.get(self.CHILD)
self.assertContains(response, 'child')
def test_parent_page_correctly_returned(self):
response = self.client.get(self.PARENT)
self.assertContains(response, 'parent')
def test_child_registration_closed(self):
response = self.client.post(self.CHILD,
{'name': 'Name', 'username': VALID_USERNAME, 'gender': 'M', 'course': 'C'})
self.assertFalse(Student.objects.filter(username=VALID_USERNAME).exists())
self.assertContains(response, "closed")
def test_parent_created_correctly(self):
self.client.post(self.PARENT, {'name': 'Name', 'username': VALID_USERNAME, 'gender': 'M', 'course': 'C'})
self.assertTrue(Student.objects.filter(username=VALID_USERNAME).exists())
def test_cannot_create_same_account_twice(self):
response = self.client.post(self.PARENT,
{'name': 'Name', 'username': VALID_USERNAME, 'gender': 'M', 'course': 'C'})
self.assertContains(response, "activate")
response = self.client.post(self.PARENT,
{'name': 'Name', 'username': VALID_USERNAME, 'gender': 'M', 'course': 'C'})
self.assertContains(response, "exists")
self.assertTrue(Student.objects.filter(username=VALID_USERNAME).count() == 1)
def test_account_not_activated_when_created(self):
self.client.post(self.PARENT, {'name': 'Name', 'username': VALID_USERNAME, 'gender': 'M', 'course': 'C'})
student = Student.objects.get(username=VALID_USERNAME)
self.assertFalse(student.activated)
class PreferenceViewTests(TestCase):
def setUp(self):
self.student1 = create_student_and_return('A', name='A')
self.student2 = create_student_and_return('B', name='B')
def refreshSetUp(self):
self.student1 = Student.objects.get(id=self.student1.id)
self.student2 = Student.objects.get(id=self.student2.id)
@staticmethod
def get_preferences_url(magic_id):
return reverse('portal:preferences', kwargs={'id': magic_id})
def test_invalid_id_sends_to_index(self):
response = self.client.get(self.get_preferences_url('12345678'))
self.assertContains(response, "closed")
def test_correctly_returns_saved_data(self):
create_hobby('A')
create_hobby('B')
create_hobby('C')
response = self.client.get(self.get_preferences_url(self.student1.magic_id))
self.assertNotContains(response, 'checked')
self.student1.party = True
self.student1.save()
response = self.client.get(self.get_preferences_url(self.student1.magic_id))
self.assertContains(response, 'checked')
self.student1.party = False
self.student1.hobbies = Hobby.objects.filter(description='A')
self.student1.save()
response = self.client.get(self.get_preferences_url(self.student1.magic_id))
self.assertContains(response, 'checked')
def test_can_select_preferred_partner(self):
response = self.client.post(self.get_preferences_url(self.student1.magic_id), {'partner': self.student2.id})
self.assertContains(response, self.student2.username)
self.assertContains(response, 'success')
self.student1 = Student.objects.get(id=self.student1.id)
self.assertEqual(self.student1.partner, self.student2)
def test_children_cant_select_partner(self):
self.student1 = create_student_and_return('C', child=True)
response = self.client.get(self.get_preferences_url(self.student1.magic_id))
self.assertNotContains(response, 'form-partner')
def test_parent_can_select_partner(self):
response = self.client.get(self.get_preferences_url(self.student1.magic_id))
self.assertContains(response, 'form-partner')
def test_empty_post_is_handled(self):
try:
self.client.post(self.get_preferences_url(self.student1.magic_id))
except:
self.assertTrue(False)
def test_can_accept_proposal(self):
self.client.post(self.get_preferences_url(self.student1.magic_id), {'partner': self.student2.id})
response = self.client.get(self.get_preferences_url(self.student2.magic_id))
self.assertContains(response, self.student1.username)
self.assertContains(response, 'accept')
response = self.client.post(self.get_preferences_url(self.student2.magic_id),
{'username': self.student1.username, 'accept': ''})
self.assertContains(response, self.student1.username)
self.assertContains(response, 'married')
self.refreshSetUp()
self.assertTrue(self.student1.confirmed)
self.assertTrue(self.student2.confirmed)
def test_cannot_withdraw_after_reject(self):
self.client.post(self.get_preferences_url(self.student1.magic_id), {'partner': self.student2.id})
self.client.post(self.get_preferences_url(self.student2.magic_id),
{'username': self.student1.username, 'reject': ''})
self.refreshSetUp()
self.assertNotEqual(self.student1.partner, self.student2)
self.assertNotEqual(self.student2.partner, self.student1)
response = self.client.post(self.get_preferences_url(self.student1.magic_id),
{'username': self.student2.username, 'withdraw': ''})
self.assertContains(response, 'danger')
def test_cannot_propose_twice(self):
self.student3 = create_student_and_return('C')
self.client.post(self.get_preferences_url(self.student1.magic_id), {'partner': self.student2.id})
self.client.post(self.get_preferences_url(self.student1.magic_id), {'partner': self.student3.id})
self.refreshSetUp()
self.assertEqual(self.student1.partner, self.student2)
def test_cannot_reject_twice(self):
self.client.post(self.get_preferences_url(self.student1.magic_id), {'partner': self.student2.id})
response = self.client.post(self.get_preferences_url(self.student1.magic_id),
{'username': self.student2.username, 'withdraw': ''})
self.assertContains(response, 'success')
self.assertNotContains(response, 'danger')
response = self.client.post(self.get_preferences_url(self.student1.magic_id),
{'username': self.student2.username, 'withdraw': ''})
self.assertContains(response, 'danger')
self.assertNotContains(response, 'success')
def test_cannot_withdraw_twice(self):
self.client.post(self.get_preferences_url(self.student1.magic_id), {'partner': self.student2.id})
response = self.client.post(self.get_preferences_url(self.student2.magic_id),
{'username': self.student1.username, 'reject': ''})
self.assertContains(response, 'success')
self.assertNotContains(response, 'danger')
response = self.client.post(self.get_preferences_url(self.student2.magic_id),
{'username': self.student1.username, 'reject': ''})
self.assertContains(response, 'danger')
self.assertNotContains(response, 'success')
def test_cannot_accept_twice(self):
self.client.post(self.get_preferences_url(self.student1.magic_id), {'partner': self.student2.id})
response = self.client.post(self.get_preferences_url(self.student2.magic_id),
{'username': self.student1.username, 'accept': ''})
self.assertContains(response, 'success')
self.assertNotContains(response, 'danger')
response = self.client.post(self.get_preferences_url(self.student2.magic_id),
{'username': self.student1.username, 'accept': ''})
self.assertContains(response, 'danger')
self.assertNotContains(response, 'success')
def test_shows_parents_and_children_after_assignment(self):
child = create_student_and_return('child', child=True)
create_families_from_parents()
assign_children_to_families()
response = self.client.get(self.get_preferences_url(child.magic_id))
self.assertContains(response, self.student1)
self.assertContains(response, self.student2)
response = self.client.get(self.get_preferences_url(self.student1.magic_id))
self.assertContains(response, child)
def test_account_gets_activated_after_visit(self):
self.client.get(self.get_preferences_url(self.student1.magic_id))
student = Student.objects.get(id=self.student1.id)
self.assertTrue(student.activated)
| {
"content_hash": "4b8d78c5f8f26d509f6a0bf4d5cb5173",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 118,
"avg_line_length": 45.72558139534884,
"alnum_prop": 0.6562913233648663,
"repo_name": "martinzlocha/mad",
"id": "302ed926d5ede4b8926e312b9a32334a453e976b",
"size": "9831",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "portal/tests/test_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1010"
},
{
"name": "HTML",
"bytes": "6629"
},
{
"name": "Python",
"bytes": "61720"
},
{
"name": "Shell",
"bytes": "82"
}
],
"symlink_target": ""
} |
"""Drop ``user`` and ``chart`` table
Revision ID: cf5dc11e79ad
Revises: 03afc6b6f902
Create Date: 2019-01-24 15:30:35.834740
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
from sqlalchemy import inspect
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'cf5dc11e79ad'
down_revision = '03afc6b6f902'
branch_labels = None
depends_on = None
airflow_version = '2.0.0'
def upgrade():
    # We previously had a KnownEvent's table, but we deleted the table without
    # a down migration to remove it (so we didn't delete anyone's data if they
    # happened to be using the feature).
#
# But before we can delete the users table we need to drop the FK
conn = op.get_bind()
inspector = inspect(conn)
tables = inspector.get_table_names()
if 'known_event' in tables:
for fkey in inspector.get_foreign_keys(table_name="known_event", referred_table="users"):
if fkey['name']:
with op.batch_alter_table(table_name='known_event') as bop:
bop.drop_constraint(fkey['name'], type_="foreignkey")
if "chart" in tables:
op.drop_table(
"chart",
)
if "users" in tables:
op.drop_table("users")
def downgrade():
conn = op.get_bind()
op.create_table(
'users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=250), nullable=True),
sa.Column('email', sa.String(length=500), nullable=True),
sa.Column('password', sa.String(255)),
sa.Column('superuser', sa.Boolean(), default=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('username'),
)
op.create_table(
'chart',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('label', sa.String(length=200), nullable=True),
sa.Column('conn_id', sa.String(length=250), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('chart_type', sa.String(length=100), nullable=True),
sa.Column('sql_layout', sa.String(length=50), nullable=True),
sa.Column('sql', sa.Text(), nullable=True),
sa.Column('y_log_scale', sa.Boolean(), nullable=True),
sa.Column('show_datatable', sa.Boolean(), nullable=True),
sa.Column('show_sql', sa.Boolean(), nullable=True),
sa.Column('height', sa.Integer(), nullable=True),
sa.Column('default_params', sa.String(length=5000), nullable=True),
sa.Column('x_is_date', sa.Boolean(), nullable=True),
sa.Column('iteration_no', sa.Integer(), nullable=True),
sa.Column('last_modified', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
['user_id'],
['users.id'],
),
sa.PrimaryKeyConstraint('id'),
)
if conn.dialect.name == 'mysql':
conn.execute("SET time_zone = '+00:00'")
op.alter_column(table_name='chart', column_name='last_modified', type_=mysql.TIMESTAMP(fsp=6))
else:
if conn.dialect.name in ('sqlite', 'mssql'):
return
if conn.dialect.name == 'postgresql':
conn.execute("set timezone=UTC")
op.alter_column(table_name='chart', column_name='last_modified', type_=sa.TIMESTAMP(timezone=True))
| {
"content_hash": "6e5ee1c5554c937509a5ca4a27c97371",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 107,
"avg_line_length": 34.63917525773196,
"alnum_prop": 0.6199404761904762,
"repo_name": "nathanielvarona/airflow",
"id": "78b3ca28f0b8eaea148ee9de828df447909fa6e0",
"size": "4145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/migrations/versions/0059_2_0_0_drop_user_and_chart.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "70681"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173025"
},
{
"name": "JavaScript",
"bytes": "142848"
},
{
"name": "Jinja",
"bytes": "38895"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23169682"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211967"
},
{
"name": "TypeScript",
"bytes": "484556"
}
],
"symlink_target": ""
} |
__revision__ = "test/QT/reentrant.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Test creation from a copied environment that already has QT variables.
This makes sure the tool initialization is re-entrant.
"""
import TestSCons
test = TestSCons.TestSCons()
test.Qt_dummy_installation('qt')
test.write(['qt', 'include', 'foo5.h'], """\
#include <stdio.h>
void
foo5(void)
{
#ifdef FOO
printf("qt/include/foo5.h\\n");
#endif
}
""")
test.Qt_create_SConstruct('SConstruct')
test.write('SConscript', """\
Import("env")
env = env.Clone(tools=['qt'])
env.Program('main', 'main.cpp', CPPDEFINES=['FOO'], LIBS=[])
""")
test.write('main.cpp', r"""
#include "foo5.h"
int main() { foo5(); return 0; }
""")
test.run()
test.run(program = test.workpath('main' + TestSCons._exe),
stdout = 'qt/include/foo5.h\n')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "6b3deddf731f4afabec0d59de64ed9e3",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 94,
"avg_line_length": 19.6734693877551,
"alnum_prop": 0.6628630705394191,
"repo_name": "EmanueleCannizzaro/scons",
"id": "cbe2512cabdabad5f122b1b74d7a1e39610b3345",
"size": "2099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/QT/reentrant.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2491"
},
{
"name": "C",
"bytes": "659"
},
{
"name": "C++",
"bytes": "598"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1997"
},
{
"name": "HTML",
"bytes": "817651"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "29978"
},
{
"name": "Python",
"bytes": "7510453"
},
{
"name": "Roff",
"bytes": "556545"
},
{
"name": "Ruby",
"bytes": "11074"
},
{
"name": "Shell",
"bytes": "52682"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
} |
import os, chardet
import unittest
from transifex.txcommon.tests.base import BaseTestCase
from transifex.languages.models import Language
from transifex.resources.models import *
from transifex.resources.formats.mozillaproperties import MozillaPropertiesHandler
from transifex.addons.suggestions.models import Suggestion
class TestMozillaProperties(BaseTestCase):
"""Suite of tests for the propertiesfile lib."""
def setUp(self):
super(TestMozillaProperties, self).setUp()
self.resource.i18n_method = 'MOZILLA_PROPERTIES'
self.resource.save()
def test_escaped(self):
j = MozillaPropertiesHandler()
self.assertFalse(j._is_escaped(r"es blah", 2))
self.assertTrue(j._is_escaped(r"e\ blah", 2))
self.assertFalse(j._is_escaped(r"\\ blah", 2))
self.assertTrue(j._is_escaped(r"e\\\ blah", 4))
def test_accept(self):
parser = MozillaPropertiesHandler()
self.assertTrue(parser.accepts('MOZILLAPROPERTIES'))
def test_split(self):
j = MozillaPropertiesHandler()
res = j._split("asd sadsf")
self.assertEqual(res[0], "asd")
self.assertEqual(res[1], "sadsf")
res = j._split("asd=sadsf")
self.assertEqual(res[0], "asd")
self.assertEqual(res[1], "sadsf")
res = j._split("asd:sadsf")
self.assertEqual(res[0], "asd")
self.assertEqual(res[1], "sadsf")
res = j._split("asd\tsadsf")
self.assertEqual(res[0], "asd")
self.assertEqual(res[1], "sadsf")
res = j._split(r"asd\ =sadsf")
self.assertEqual(res[0], "asd\ ")
self.assertEqual(res[1], "sadsf")
res = j._split(r"asd = sadsf")
self.assertEqual(res[0], "asd")
self.assertEqual(res[1], "sadsf")
res = j._split(r"asd\\=sadsf")
self.assertEqual(res[0], r"asd\\")
self.assertEqual(res[1], "sadsf")
res = j._split(r"asd\\\=sadsf")
self.assertEqual(res[0], r"asd\\\=sadsf")
self.assertEqual(res[1], None)
res = j._split(r"asd\\\\=sadsf")
self.assertEqual(res[0], r"asd\\\\")
self.assertEqual(res[1], "sadsf")
res = j._split(r"Key21\:WithColon : Value21")
self.assertEqual(res[0], r"Key21\:WithColon")
self.assertEqual(res[1], "Value21")
def test_properties_parser(self):
"""PROPERTIES file tests."""
# Parsing PROPERTIES file
handler = MozillaPropertiesHandler(
os.path.join(os.path.dirname(__file__), 'complex.properties')
)
handler.set_language(self.resource.source_language)
handler.parse_file(is_source=True)
self.stringset = handler.stringset
entities = 0
translations = 0
for s in self.stringset.strings:
entities += 1
if s.translation.strip() != '':
translations += 1
# Asserting number of entities - PROPERTIES file has 25 entries.
# we ignore keys without a value
self.assertEqual(entities, 25)
self.assertEqual(translations, 25)
def test_properties_save2db(self, delete=True):
"""Test creating source strings from a PROPERTIES file works"""
handler = MozillaPropertiesHandler(
os.path.join(os.path.dirname(__file__), 'complex.properties')
)
handler.set_language(self.resource.source_language)
handler.parse_file(is_source=True)
r = self.resource
l = self.resource.source_language
handler.bind_resource(r)
handler.save2db(is_source=True)
# Check that all 25 entities are created in the db
self.assertEqual( SourceEntity.objects.filter(resource=r).count(), 25)
# Check that all source translations are there
self.assertEqual(
len(Translation.objects.filter(source_entity__resource=r, language=l)), 25
)
        # Import and save the Hindi translation
handler.bind_file(os.path.join(os.path.dirname(__file__),'complex_hi_IN.properties'))
l = Language.objects.get(code='hi_IN')
handler.set_language(l)
handler.parse_file()
entities = 0
translations = 0
for s in handler.stringset.strings:
entities += 1
if s.translation.strip() != '':
translations += 1
self.assertEqual(entities, 23)
self.assertEqual(translations, 23)
handler.save2db()
# Check if all Source strings are untouched
self.assertEqual(SourceEntity.objects.filter(resource=r).count(), 25)
# Check that all translations are there
self.assertEqual(len(Translation.objects.filter(source_entity__resource=r,
language=l)), 23)
if delete:
r.delete()
else:
return r
def test_properties_compile(self):
"""Test compiling translations for PROPERTIES files"""
self.test_properties_save2db(delete=False)
handler = MozillaPropertiesHandler()
handler.bind_resource(self.resource)
handler.set_language(self.resource.source_language)
old_template = handler.compiled_template
handler.compile()
self.assertNotEqual(old_template, handler.compiled_template)
handler.set_language(Language.objects.get(code='hi_IN'))
old_template = handler.compiled_template
handler.compile()
self.assertNotEqual(old_template, handler.compiled_template)
#Cleanup
self.resource.delete()
| {
"content_hash": "413a45019e67bc79a97b2c7188c7e9a6",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 93,
"avg_line_length": 35.76774193548387,
"alnum_prop": 0.6204906204906205,
"repo_name": "rvanlaar/easy-transifex",
"id": "7d571b937a13ecd6e4c3f8f508527ff40d332dd2",
"size": "5569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/transifex/transifex/resources/tests/lib/mozilla_properties/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "105585"
},
{
"name": "HTML",
"bytes": "365175"
},
{
"name": "JavaScript",
"bytes": "187021"
},
{
"name": "Python",
"bytes": "2303001"
},
{
"name": "Shell",
"bytes": "1358"
}
],
"symlink_target": ""
} |
import random
import numpy as np
from scipy.optimize import minimize
from sklearn.linear_model import LinearRegression
from copy import deepcopy
def random_id():
return ''.join([random.choice('abcdefghijklmnopqrstuvwxyz123456789') for i in range(10)])
### VARIABLES
class AbstractVariable():
def __init__(self, range, name, internal=False):
self.range = range
self.name = name
        self.internal = internal # whether the variable is user-supplied or generated automatically, e.g. as the output of some function
class RealVariable(AbstractVariable):
pass
### NODES
class AbstractNode():
def __init__(self):
self.x = [] # list of all variable inputs
self.y = None # variable of output
def __call__(self, variables):
# returns variable or set of variables with propagated ranges
if isinstance(variables, list):
self.x.extend(variables)
else:
self.x.append(variables)
return None
def propagate_constraint(self):
# propagates the boundaries of input variables to the output variables
raise NotImplementedError()
def concave_overestimator(self):
# returns lambda
raise NotImplementedError()
def convex_underestimator(self):
# returns lambda
raise NotImplementedError()
def itself(self):
# returns lambda
raise NotImplementedError()
class DotNode(AbstractNode):
def __init__(self, weights, bias):
AbstractNode.__init__(self)
self.w = weights
self.b = bias
def __call__(self, variables):
# returns variable or set of variables with propagated ranges
AbstractNode.__call__(self, variables)
        self.y = RealVariable(None, random_id(), True) # variable range will be defined during propagation
return self.y
def propagate_constraint(self):
weights = self.w
M, m = 0.0, 0.0 # max, min of values of result
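        # interval arithmetic for an affine expression: each term contributes its
        # smallest possible value of w*x to the minimum and its largest to the
        # maximum, both attained at an endpoint of x's range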
for v, w in zip(self.x, weights):
m += min([w * b for b in v.range])
M += max([w * b for b in v.range])
m += self.b
M += self.b
self.y.range = [m, M]
def concave_overestimator(self):
# returns lambda
return lambda x: np.dot(self.w, x) + self.b
def convex_underestimator(self):
# returns lambda
return lambda x: np.dot(self.w, x) + self.b
def itself(self):
# returns lambda
return lambda x: np.dot(self.w, x) + self.b
class SquareNode(AbstractNode):
def __init__(self):
AbstractNode.__init__(self)
def __call__(self, variables):
# returns variable or set of variables with propagated ranges
AbstractNode.__call__(self, variables)
self.y = RealVariable(None, random_id(), True) # variable range will be defined during propagation
return self.y
def propagate_constraint(self):
input = self.x[0]
a, b = input.range
if 0 >= a and 0 <= b:
m = 0.0
else:
m = min(a*a, b*b)
M = max(a*a, b*b)
self.y.range = [m, M]
def concave_overestimator(self):
# returns lambda
input = self.x[0]
a, b = input.range
return lambda x: a*a*((b-x[0]) / (b-a)) + b*b*(1 - (b-x[0]) / (b-a))
def convex_underestimator(self):
# returns lambda
return lambda x: x[0]**2
def itself(self):
# returns lambda
return lambda x: x[0]**2
class MultiplyNode(AbstractNode):
def __init__(self):
AbstractNode.__init__(self)
def __call__(self, variables):
# returns variable or set of variables with propagated ranges
AbstractNode.__call__(self, variables)
self.y = RealVariable(None, random_id(), True) # variable range will be defined during propagation
return self.y
def propagate_constraint(self):
a = self.x[0]
b = self.x[1]
values = []
for x in a.range:
for y in b.range:
values.append(x*y)
self.y.range = [min(values), max(values)]
def concave_overestimator(self):
# returns lambda
a, b = self.x
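        # The concave envelope of x*y over a box is piecewise linear and is
        # determined by the corner values (in the spirit of the McCormick
        # envelopes); the code below keeps the facets of the upper convex hull
        # of the four corner points.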
# get all function values in the corners as points
X = []
Y = []
for x in a.range:
for y in b.range:
X.append([x,y])
Y.append(x*y)
models = []
# construct linear functions that describe upper convex hull
for i in range(4):
# all points except for the ith
Xp = [x for j,x in enumerate(X) if not (j == i)]
Yp = [y for j,y in enumerate(Y) if not (j == i)]
# fit linear function there
model = LinearRegression()
model.fit(Xp, Yp)
            # the plane belongs to the upper (concave) hull only if the excluded corner lies below it
if Y[i] < model.predict([X[i]])[0]:
models.append(model)
return lambda x: min([m.predict([x])[0] for m in models])
def convex_underestimator(self):
# returns lambda
a,b = self.x
# get all function values in the corners as points
X = []
Y = []
for x in a.range:
for y in b.range:
X.append([x,y])
Y.append(x*y)
models = []
        # construct linear functions that describe the lower convex hull
for i in range(4):
# all points except for the ith
Xp = [x for j,x in enumerate(X) if not (j == i)]
Yp = [y for j,y in enumerate(Y) if not (j == i)]
# fit linear function there
model = LinearRegression()
model.fit(Xp, Yp)
            # the plane belongs to the lower hull (underestimator) only if the excluded corner lies above it
if Y[i] > model.predict([X[i]])[0]:
models.append(model)
return lambda x: max([m.predict([x])[0] for m in models])
def itself(self):
# returns lambda
return lambda x: x[0] * x[1]
### OPTIMIZATION SUBPROBLEM
class EqVarConstraint():
"""
    This class is used to define equality constraints
"""
def __init__(self, inputs, fnc, output):
self.ip = inputs
self.ot = output
self.fnc = fnc
def __call__(self, x):
x_selection = x[self.ip]
a = self.fnc(x_selection)
return a - x[self.ot]
class ConvexLeqVarConstraint():
"""
This class is used to define convex inequality constraints
"""
def __init__(self, inputs, fnc, output, fnc_convex=True):
self.ip = inputs
self.ot = output
self.fnc = fnc
self.fnc_convex = fnc_convex
def __call__(self, x):
result = self.fnc(x[self.ip]) - x[self.ot]
result = -result # current convex optimizer supports x >= 0 constraints only
return result if self.fnc_convex else -result
class IndexConstraint():
"""
    This is used to constrain a particular variable
"""
def __init__(self, var_idx):
self.idx = var_idx
def __call__(self, x):
return -(x[self.idx]) # current convex optimizer supports x >= 0 constraints only
class OptimizationProblem():
def __init__(self):
self.variables = {}
self.nodes = []
self.objective = None
# constraints go here
self.eq_zero = [] # variables to impose constraint of the form x = 0
self.leq_zero = [] # variables to impose constraint of the form x <= 0
self.children = None
self.parent = None
# this contain lower bound information on subproblem
self.lower_bound = None
self.lower_bound_x = None
self.upper_bound = None
self.upper_bound_x = None
def remember_variable(self, variable):
self.variables[variable.name] = variable
def set_node(self, node, x):
y = node(x)
self.nodes.append(node)
self.remember_variable(y)
return y
def propagate_constraints(self):
# this updates constraints of all variables
for n in self.nodes:
n.propagate_constraint()
def get_lower_bound(self):
# optimize the convex relaxation
self.propagate_constraints()
def get_var_bounds_indicies(self):
        # returns bounds and indices for the variables
bounds = []
var_idx = {} # variable name to index
for i, kv in enumerate(self.variables.iteritems()):
k, v = kv
var_idx[v.name] = i
bounds.append(v.range)
return bounds, var_idx
def calculate_bound(self, upper = True):
        # optimize the actual problem (upper=True) or its convex relaxation (upper=False)
self.propagate_constraints()
# get variable size and bounds
bounds, var_idxs = self.get_var_bounds_indicies()
        # initial guess: a random point within the variable bounds
x0 = np.array([np.random.uniform(low=b[0], high=b[1]) for b in bounds ])
obj_idx = var_idxs[self.objective.name]
constraints_list = []
# generate necessary constraints
for n in self.nodes:
            inp_idxs = [var_idxs[v.name] for v in n.x] # get indices of input variables
otp_idx = var_idxs[n.y.name]
if upper:
# use the actual function
constraints_list.append({
'type':'eq',
'fun':EqVarConstraint(inputs=inp_idxs, fnc=n.itself(), output=otp_idx)
})
else:
# constrain function values to be greater than convex underestimator ...
constraints_list.append({
'type':'ineq',
'fun':ConvexLeqVarConstraint(
inputs=inp_idxs,
fnc=n.convex_underestimator(),
output=otp_idx,
fnc_convex=True)
})
# ... and less than concave overestimator
constraints_list.append({
'type':'ineq',
'fun':ConvexLeqVarConstraint(
inputs=inp_idxs,
fnc=n.concave_overestimator(),
output=otp_idx,
fnc_convex=False)
})
for v in self.eq_zero:
constraint = IndexConstraint(var_idxs[v.name])
constraints_list.append({
'type': 'eq',
'fun': constraint
})
for v in self.leq_zero:
constraint = IndexConstraint(var_idxs[v.name])
constraints_list.append({
'type': 'ineq',
'fun': constraint
})
def f(x):
return x[obj_idx]
sol = minimize(f, x0, bounds=bounds, constraints=constraints_list)
x = sol.x # recover solution
x = { v.name : x[var_idxs[v.name]] for k,v in self.variables.iteritems() if not v.internal }
# cache the upper and lower bounds
if upper:
self.upper_bound = sol.fun
self.upper_bound_x = x
else:
self.lower_bound = sol.fun
self.lower_bound_x = x
return sol.fun, x
def split_into_subproblems(self):
"""
        Splits the problem into subproblems according to a random rule.
        It is assumed that the lower bound was already computed on this
        instance of the optimization problem.
:return: array of subproblems
"""
# split randomly
V = {k:v for k,v in self.variables.iteritems() if not v.internal}
S = random.choice(V.keys()) # split variable name
Sr = V[S].range # range of values to be split
A, B = deepcopy(self), deepcopy(self)
A.variables[S].range = [np.mean(Sr), Sr[1]]
B.variables[S].range = [Sr[0], np.mean(Sr)]
self.children = [A, B]
A.parent = self
B.parent = self
return [A, B]
def propagate_lower_bound(self):
parent = self
while not parent is None:
parent.lower_bound = min([ch.lower_bound for ch in parent.children])
parent = parent.parent
def real_variable(self, range, id):
v = RealVariable(range, id, False)
self.remember_variable(v)
return v
def weighted_sum(self, constant_weights, x, constant_bias=0.0):
# propagate the constraint
node = DotNode(constant_weights, constant_bias)
        return self.set_node(node, x) # this automatically remembers the output
def square(self, x):
# propagate the constraint
node = SquareNode()
if isinstance(x, list):
            return [self.set_node(node, xv) for xv in x] # this automatically remembers the output
else:
return self.set_node(node, x)
def mul(self, x, y):
node = MultiplyNode()
return self.set_node(node, [x, y])
def leq_0(self, variable):
"""
Adds variable <= 0 constraint
:param variable: variable to constrain
:return: nothing
"""
self.leq_zero.append(variable)
def eq_0(self, variable):
"""
Adds variable == 0 constraint
:param variable: variable to constrain
:return: nothing
"""
self.eq_zero.append(variable)
def min_objective(self, variable):
# sets the objective variable to be minimized
if not self.objective is None:
raise BaseException("Objective already defined")
self.objective = variable
self.remember_variable(variable)
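# A minimal usage sketch (added for illustration; the variable names and bounds
# below are made up and not part of the original module): it builds
# min (x - 1)^2 + (y + 2)^2 over a box and solves it with the branch-and-bound
# GlobalOptimizer defined further down.
def _example_usage():
    p = OptimizationProblem()
    x = p.real_variable([-5.0, 5.0], 'x')
    y = p.real_variable([-5.0, 5.0], 'y')
    x_shift = p.weighted_sum([1.0], [x], constant_bias=-1.0)   # x - 1
    y_shift = p.weighted_sum([1.0], [y], constant_bias=2.0)    # y + 2
    sq_x = p.square(x_shift)                                   # (x - 1)^2
    sq_y = p.square(y_shift)                                   # (y + 2)^2
    objective = p.weighted_sum([1.0, 1.0], [sq_x, sq_y])       # sum of the squares
    p.min_objective(objective)
    solver = GlobalOptimizer(p, epsilon=1e-3)
    solver.solve()
    return solver.min_objective, solver.min_x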
# INTERFACE OPTIMIZATION CLASS
class GlobalOptimizer():
def __init__(self, problem, epsilon=1e-3, optimality_gap = 0.0):
"""
:param problem: OptimizationProblem instance
        :param epsilon: Imprecision that is allowed in the solver
"""
self.root_problem = problem
self.frontier = [None] # array of items to explore
# this contains data about the best solution found so far
self.min_x = None
self.min_objective = None # this is used to cut down the search space
self.best_lower_bound = None # bounds on solution
self.epsilon = epsilon
self.optimality_gap = optimality_gap
def finished(self):
return len(self.frontier) == 0
def initialize(self):
# this calculates initial upper bound
p = self.root_problem
# calculate both bounds
p.calculate_bound(upper=True)
p.calculate_bound(upper=False)
self.min_objective = p.upper_bound
self.min_x = p.upper_bound_x
self.best_lower_bound = p.lower_bound
self.frontier = [p] # initialize the frontier
def iterate(self):
if len(self.frontier) == 0:
return
# check for termination criterion
""""""
if self.min_objective - self.root_problem.lower_bound <= self.optimality_gap + self.epsilon:
self.frontier = [] # finished - no need to explore any further frontier
return
# for now just take the top item
instance = self.frontier.pop()
        # cut the search space if possible; adding epsilon is necessary to avoid an
        # infinite loop due to numeric errors coming from the convex optimizer
if instance.lower_bound + self.epsilon >= self.min_objective:
return
# split instance
subs = instance.split_into_subproblems()
# calculate lower and upper bounds
for p in subs:
p.calculate_bound(upper=True)
            # if there is an improvement in the upper bound - remember that
if p.upper_bound < self.min_objective:
self.min_objective = p.upper_bound
self.min_x = p.upper_bound_x
p.calculate_bound(upper=False)
# propagate global lower bound
instance.propagate_lower_bound()
        # sort the list so that the item added at the top of the frontier has the best lower bound
subs.sort(key= lambda p: p.lower_bound, reverse=True)
# extend the frontier
self.frontier.extend(subs)
def solve(self):
self.initialize()
idx = 0
while not self.finished():
print "Iteration", idx, "lower bound:", self.root_problem.lower_bound, "upper bound:", self.min_objective
idx += 1
self.iterate() | {
"content_hash": "ad855874a5800473589a5e864d4e9510",
"timestamp": "",
"source": "github",
"line_count": 572,
"max_line_length": 127,
"avg_line_length": 28.613636363636363,
"alnum_prop": 0.5646117187022668,
"repo_name": "iaroslav-ai/global-optimization",
"id": "78e8322fca9325ec7f4ac850e3cb9c70e033be2d",
"size": "16367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "global_optimizer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18544"
}
],
"symlink_target": ""
} |
class HundredPush(object):
def __init__(self):
self.count = 0
def push(self, line):
if (self.count % 100 == 9):
print("Lチカ!!: {0}".format(line.strip()))
self.count = self.count + 1
return True
print("{0}".format(self.count))
self.count = self.count + 1
return False
def mode(self):
return "100回のうち1回"
| {
"content_hash": "5814353217d1016263f7062f6fbedc61",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 52,
"avg_line_length": 23.58823529411765,
"alnum_prop": 0.5062344139650873,
"repo_name": "youkidearitai/Arduino_LED_on_Python",
"id": "edfbbda293903e9cd0e67762015e714e16e5899a",
"size": "462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "section2/hundred_push.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "3372"
},
{
"name": "HTML",
"bytes": "8158"
},
{
"name": "Python",
"bytes": "14565"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Proyectos'
db.create_table('registros_proyectos', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('proyecto', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['proyectos_academicos.Proyecto'])),
('voluntario', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['proyectos_academicos.Voluntario'])),
('horas', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('fecha', self.gf('django.db.models.fields.DateField')(default=datetime.date.today)),
('actividad', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal('registros', ['Proyectos'])
# Adding model 'Servicios'
db.create_table('registros_servicios', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('servicio', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['proyectos_academicos.Servicio'])),
('voluntario', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['proyectos_academicos.Voluntario'])),
('horas', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('fecha', self.gf('django.db.models.fields.DateField')(default=datetime.date.today)),
('actividad', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal('registros', ['Servicios'])
def backwards(self, orm):
# Deleting model 'Proyectos'
db.delete_table('registros_proyectos')
# Deleting model 'Servicios'
db.delete_table('registros_servicios')
models = {
'proyectos_academicos.categoriasproyecto': {
'Meta': {'object_name': 'CategoriasProyecto'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'proyectos_academicos.institucion': {
'Meta': {'object_name': 'Institucion'},
'coordinador': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'correo_electronico': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'direccion': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'nucleo': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'telefonos': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
},
'proyectos_academicos.proyecto': {
'Meta': {'object_name': 'Proyecto'},
'categorias': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['proyectos_academicos.CategoriasProyecto']", 'null': 'True', 'blank': 'True'}),
'dependencia': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'descripcion': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'especialidad': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'estatus': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'tutor': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'})
},
'proyectos_academicos.servicio': {
'Meta': {'object_name': 'Servicio'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'servicio': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'turno': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'proyectos_academicos.voluntario': {
'CI': ('django.db.models.fields.CharField', [], {'max_length': '10', 'primary_key': 'True'}),
'Meta': {'object_name': 'Voluntario'},
'apellido': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'correo_electronico': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'direccion': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'estado_civil': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'fecha_nacimiento': ('django.db.models.fields.DateField', [], {}),
'genero': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'grado_instruccion': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'imagen': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'default': "'imagenes/ninguna.png'"}),
'institucion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['proyectos_academicos.Institucion']"}),
'lugar_nacimiento': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'ocupacion': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'primer_nombre': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'telefono_casa': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'telefono_celular': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'registros.proyectos': {
'Meta': {'object_name': 'Proyectos'},
'actividad': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'fecha': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
'horas': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'proyecto': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['proyectos_academicos.Proyecto']"}),
'voluntario': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['proyectos_academicos.Voluntario']"})
},
'registros.servicios': {
'Meta': {'object_name': 'Servicios'},
'actividad': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'fecha': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
'horas': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'servicio': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['proyectos_academicos.Servicio']"}),
'voluntario': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['proyectos_academicos.Voluntario']"})
}
}
complete_apps = ['registros'] | {
"content_hash": "bb3a91d5a2cf38383e164d3989c9a85e",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 168,
"avg_line_length": 66.63392857142857,
"alnum_prop": 0.5763097949886105,
"repo_name": "pattyreinoso/voluntariadoHOI",
"id": "c9498f83c6849772daa9d15f3196df2d01ed28b6",
"size": "7487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hoi_project/apps/registros/migrations/0002_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "471933"
},
{
"name": "HTML",
"bytes": "533093"
},
{
"name": "JavaScript",
"bytes": "269362"
},
{
"name": "Python",
"bytes": "196336"
},
{
"name": "Ruby",
"bytes": "879"
}
],
"symlink_target": ""
} |
"""Convenience module providing common shader entry points
The point of this module is to allow client code to use
OpenGL Core names to reference shader-related operations
even if the local hardware only supports ARB extension-based
shader rendering.
There are also two utility methods compileProgram and compileShader
which make it easy to create demos which are shader-using.
"""
import logging
logging.basicConfig()
log = logging.getLogger( __name__ )
from OpenGL.GLES2 import *
from OpenGL._bytes import bytes,unicode,as_8_bit
__all__ = [
'compileProgram',
'compileShader',
]
class ShaderProgram( int ):
"""Integer sub-class with context-manager operation"""
def __enter__( self ):
"""Start use of the program"""
glUseProgram( self )
def __exit__( self, typ, val, tb ):
"""Stop use of the program"""
glUseProgram( 0 )
def check_validate( self ):
"""Check that the program validates
Validation has to occur *after* linking/loading
raises RuntimeError on failures
"""
glValidateProgram( self )
validation = glGetProgramiv( self, GL_VALIDATE_STATUS )
if validation == GL_FALSE:
raise RuntimeError(
"""Validation failure (%s): %s"""%(
validation,
glGetProgramInfoLog( self ),
))
return self
def check_linked( self ):
"""Check link status for this program
raises RuntimeError on failures
"""
link_status = glGetProgramiv( self, GL_LINK_STATUS )
if link_status == GL_FALSE:
raise RuntimeError(
"""Link failure (%s): %s"""%(
link_status,
glGetProgramInfoLog( self ),
))
return self
def retrieve( self ):
"""Attempt to retrieve binary for this compiled shader
Note that binaries for a program are *not* generally portable,
they should be used solely for caching compiled programs for
local use; i.e. to reduce compilation overhead.
returns (format,binaryData) for the shader program
"""
from OpenGL.raw.GL._types import GLint,GLenum
from OpenGL.arrays import GLbyteArray
size = GLint()
glGetProgramiv( self, get_program_binary.GL_PROGRAM_BINARY_LENGTH, size )
result = GLbyteArray.zeros( (size.value,))
size2 = GLint()
format = GLenum()
get_program_binary.glGetProgramBinary( self, size.value, size2, format, result )
return format.value, result
def load( self, format, binary ):
"""Attempt to load binary-format for a pre-compiled shader
See notes in retrieve
"""
get_program_binary.glProgramBinary( self, format, binary, len(binary))
self.check_validate()
self.check_linked()
return self
def compileProgram(*shaders, **named):
"""Create a new program, attach shaders and validate
shaders -- arbitrary number of shaders to attach to the
generated program.
separable (keyword only) -- set the separable flag to allow
for partial installation of shader into the pipeline (see
glUseProgramStages)
retrievable (keyword only) -- set the retrievable flag to
allow retrieval of the program binary representation, (see
glProgramBinary, glGetProgramBinary)
This convenience function is *not* standard OpenGL,
but it does wind up being fairly useful for demos
and the like. You may wish to copy it to your code
base to guard against PyOpenGL changes.
Usage:
shader = compileProgram(
compileShader( source, GL_VERTEX_SHADER ),
compileShader( source2, GL_FRAGMENT_SHADER ),
)
glUseProgram( shader )
Note:
If (and only if) validation of the linked program
*passes* then the passed-in shader objects will be
deleted from the GL.
returns ShaderProgram() (GLuint) program reference
raises RuntimeError when a link/validation failure occurs
"""
program = glCreateProgram()
if named.get('separable'):
glProgramParameteri( program, separate_shader_objects.GL_PROGRAM_SEPARABLE, GL_TRUE )
if named.get('retrievable'):
glProgramParameteri( program, get_program_binary.GL_PROGRAM_BINARY_RETRIEVABLE_HINT, GL_TRUE )
for shader in shaders:
glAttachShader(program, shader)
program = ShaderProgram( program )
glLinkProgram(program)
program.check_validate()
program.check_linked()
for shader in shaders:
glDeleteShader(shader)
return program
def compileShader( source, shaderType ):
"""Compile shader source of given type
source -- GLSL source-code for the shader
shaderType -- GLenum GL_VERTEX_SHADER, GL_FRAGMENT_SHADER, etc,
returns GLuint compiled shader reference
raises RuntimeError when a compilation failure occurs
"""
if isinstance( source, (bytes,unicode)):
source = [ source ]
source = [ as_8_bit(s) for s in source ]
shader = glCreateShader(shaderType)
glShaderSource( shader, source )
glCompileShader( shader )
result = glGetShaderiv( shader, GL_COMPILE_STATUS )
if result == GL_FALSE:
# TODO: this will be wrong if the user has
# disabled traditional unpacking array support.
raise RuntimeError(
"""Shader compile failure (%s): %s"""%(
result,
glGetShaderInfoLog( shader ),
),
source,
shaderType,
)
return shader
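# --- Illustrative sketch (not part of the module above) ----------------------
# Demonstrates the binary-caching workflow described in ShaderProgram.retrieve
# and ShaderProgram.load.  Assumes a current GLES2 context with program-binary
# support; vertex_src and fragment_src are placeholder GLSL source strings
# supplied by the caller.
def _example_cache_and_restore(vertex_src, fragment_src):
    # Link with the retrievable hint so the binary may be queried afterwards.
    program = compileProgram(
        compileShader(vertex_src, GL_VERTEX_SHADER),
        compileShader(fragment_src, GL_FRAGMENT_SHADER),
        retrievable=True,
    )
    binary_format, binary = program.retrieve()
    # Later (e.g. a subsequent run): rebuild a program from the cached blob.
    restored = ShaderProgram(glCreateProgram())
    restored.load(binary_format, binary)  # re-runs validate/link checks
    with restored:  # context manager wraps glUseProgram(self)/glUseProgram(0)
        pass        # issue draw calls here
    return restored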
| {
"content_hash": "23a68f3607e446c1674ee7be87e9890a",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 102,
"avg_line_length": 34.701219512195124,
"alnum_prop": 0.6355649270778422,
"repo_name": "alexus37/AugmentedRealityChess",
"id": "d8d88688e6bcf41604d8662f85de4ee370a22455",
"size": "5691",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/GLES2/shaders.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158062"
},
{
"name": "C++",
"bytes": "267993"
},
{
"name": "CMake",
"bytes": "11319"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "Makefile",
"bytes": "14618"
},
{
"name": "Python",
"bytes": "12813086"
},
{
"name": "Roff",
"bytes": "3310"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HM2_then1_ConnectedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HM2_then1_ConnectedLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HM2_then1_ConnectedLHS, self).__init__(name='HM2_then1_ConnectedLHS', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'M2_then1')
# Set the node attributes
# Nodes that represent the edges of the property.
# Add the edges
self.add_edges([
])
# Add the attribute equations
self["equations"] = []
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
| {
"content_hash": "ee5889afc8885cbf1d9a961c68f26608",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 125,
"avg_line_length": 43.36065573770492,
"alnum_prop": 0.47334593572778827,
"repo_name": "levilucio/SyVOLT",
"id": "2da4c5460e4eb62ec1e03794eb70e830023527df",
"size": "2645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GM2AUTOSAR_MM/Properties/from_eclipse/HM2_then1_ConnectedLHS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
"""Tests application-provided metadata, status code, and details."""
import threading
import unittest
import grpc
import grpc_gcp
from grpc_gcp_test.unit import test_common
from grpc_gcp_test.unit.framework.common import test_constants
from grpc_gcp_test.unit.framework.common import test_control
_SERIALIZED_REQUEST = b'\x46\x47\x48'
_SERIALIZED_RESPONSE = b'\x49\x50\x51'
_REQUEST_SERIALIZER = lambda unused_request: _SERIALIZED_REQUEST
_REQUEST_DESERIALIZER = lambda unused_serialized_request: object()
_RESPONSE_SERIALIZER = lambda unused_response: _SERIALIZED_RESPONSE
_RESPONSE_DESERIALIZER = lambda unused_serialized_response: object()
_SERVICE = 'test.TestService'
_UNARY_UNARY = 'UnaryUnary'
_UNARY_STREAM = 'UnaryStream'
_STREAM_UNARY = 'StreamUnary'
_STREAM_STREAM = 'StreamStream'
_CLIENT_METADATA = (('client-md-key', 'client-md-key'), ('client-md-key-bin',
b'\x00\x01'))
_SERVER_INITIAL_METADATA = (('server-initial-md-key',
'server-initial-md-value'),
('server-initial-md-key-bin', b'\x00\x02'))
_SERVER_TRAILING_METADATA = (('server-trailing-md-key',
'server-trailing-md-value'),
('server-trailing-md-key-bin', b'\x00\x03'))
_NON_OK_CODE = grpc.StatusCode.NOT_FOUND
_DETAILS = 'Test details!'
# calling abort should always fail an RPC, even for "invalid" codes
_ABORT_CODES = (_NON_OK_CODE, 3, grpc.StatusCode.OK)
_EXPECTED_CLIENT_CODES = (_NON_OK_CODE, grpc.StatusCode.UNKNOWN,
grpc.StatusCode.UNKNOWN)
_EXPECTED_DETAILS = (_DETAILS, _DETAILS, '')
class _Servicer(object):
def __init__(self):
self._lock = threading.Lock()
self._abort_call = False
self._code = None
self._details = None
self._exception = False
self._return_none = False
self._received_client_metadata = None
def unary_unary(self, request, context):
with self._lock:
self._received_client_metadata = context.invocation_metadata()
context.send_initial_metadata(_SERVER_INITIAL_METADATA)
context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
if self._abort_call:
context.abort(self._code, self._details)
else:
if self._code is not None:
context.set_code(self._code)
if self._details is not None:
context.set_details(self._details)
if self._exception:
raise test_control.Defect()
else:
return None if self._return_none else object()
def unary_stream(self, request, context):
with self._lock:
self._received_client_metadata = context.invocation_metadata()
context.send_initial_metadata(_SERVER_INITIAL_METADATA)
context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
if self._abort_call:
context.abort(self._code, self._details)
else:
if self._code is not None:
context.set_code(self._code)
if self._details is not None:
context.set_details(self._details)
for _ in range(test_constants.STREAM_LENGTH // 2):
yield _SERIALIZED_RESPONSE
if self._exception:
raise test_control.Defect()
def stream_unary(self, request_iterator, context):
with self._lock:
self._received_client_metadata = context.invocation_metadata()
context.send_initial_metadata(_SERVER_INITIAL_METADATA)
context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
# TODO(https://github.com/grpc/grpc/issues/6891): just ignore the
# request iterator.
list(request_iterator)
if self._abort_call:
context.abort(self._code, self._details)
else:
if self._code is not None:
context.set_code(self._code)
if self._details is not None:
context.set_details(self._details)
if self._exception:
raise test_control.Defect()
else:
return None if self._return_none else _SERIALIZED_RESPONSE
def stream_stream(self, request_iterator, context):
with self._lock:
self._received_client_metadata = context.invocation_metadata()
context.send_initial_metadata(_SERVER_INITIAL_METADATA)
context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
# TODO(https://github.com/grpc/grpc/issues/6891): just ignore the
# request iterator.
list(request_iterator)
if self._abort_call:
context.abort(self._code, self._details)
else:
if self._code is not None:
context.set_code(self._code)
if self._details is not None:
context.set_details(self._details)
for _ in range(test_constants.STREAM_LENGTH // 3):
yield object()
if self._exception:
raise test_control.Defect()
def set_abort_call(self):
with self._lock:
self._abort_call = True
def set_code(self, code):
with self._lock:
self._code = code
def set_details(self, details):
with self._lock:
self._details = details
def set_exception(self):
with self._lock:
self._exception = True
def set_return_none(self):
with self._lock:
self._return_none = True
def received_client_metadata(self):
with self._lock:
return self._received_client_metadata
def _generic_handler(servicer):
method_handlers = {
_UNARY_UNARY:
grpc.unary_unary_rpc_method_handler(
servicer.unary_unary,
request_deserializer=_REQUEST_DESERIALIZER,
response_serializer=_RESPONSE_SERIALIZER),
_UNARY_STREAM:
grpc.unary_stream_rpc_method_handler(servicer.unary_stream),
_STREAM_UNARY:
grpc.stream_unary_rpc_method_handler(servicer.stream_unary),
_STREAM_STREAM:
grpc.stream_stream_rpc_method_handler(
servicer.stream_stream,
request_deserializer=_REQUEST_DESERIALIZER,
response_serializer=_RESPONSE_SERIALIZER),
}
return grpc.method_handlers_generic_handler(_SERVICE, method_handlers)
class MetadataCodeDetailsTest(unittest.TestCase):
def setUp(self):
self._servicer = _Servicer()
self._server = test_common.test_server()
self._server.add_generic_rpc_handlers(
(_generic_handler(self._servicer),))
port = self._server.add_insecure_port('[::]:0')
self._server.start()
channel_config = grpc_gcp.api_config_from_text_pb('')
channel = grpc_gcp.insecure_channel(
'localhost:{}'.format(port),
options=((grpc_gcp.API_CONFIG_CHANNEL_ARG, channel_config),)
)
self._unary_unary = channel.unary_unary(
'/'.join((
'',
_SERVICE,
_UNARY_UNARY,
)),
request_serializer=_REQUEST_SERIALIZER,
response_deserializer=_RESPONSE_DESERIALIZER,
)
self._unary_stream = channel.unary_stream('/'.join((
'',
_SERVICE,
_UNARY_STREAM,
)),)
self._stream_unary = channel.stream_unary('/'.join((
'',
_SERVICE,
_STREAM_UNARY,
)),)
self._stream_stream = channel.stream_stream(
'/'.join((
'',
_SERVICE,
_STREAM_STREAM,
)),
request_serializer=_REQUEST_SERIALIZER,
response_deserializer=_RESPONSE_DESERIALIZER,
)
def testSuccessfulUnaryUnary(self):
self._servicer.set_details(_DETAILS)
unused_response, call = self._unary_unary.with_call(
object(), metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
call.initial_metadata()))
self.assertTrue(
test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
call.trailing_metadata()))
self.assertIs(grpc.StatusCode.OK, call.code())
self.assertEqual(_DETAILS, call.details())
def testSuccessfulUnaryStream(self):
self._servicer.set_details(_DETAILS)
response_iterator_call = self._unary_stream(
_SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
received_initial_metadata = response_iterator_call.initial_metadata()
list(response_iterator_call)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
received_initial_metadata))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
response_iterator_call.trailing_metadata()))
self.assertIs(grpc.StatusCode.OK, response_iterator_call.code())
self.assertEqual(_DETAILS, response_iterator_call.details())
def testSuccessfulStreamUnary(self):
self._servicer.set_details(_DETAILS)
unused_response, call = self._stream_unary.with_call(
iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
call.initial_metadata()))
self.assertTrue(
test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
call.trailing_metadata()))
self.assertIs(grpc.StatusCode.OK, call.code())
self.assertEqual(_DETAILS, call.details())
def testSuccessfulStreamStream(self):
self._servicer.set_details(_DETAILS)
response_iterator_call = self._stream_stream(
iter([object()] * test_constants.STREAM_LENGTH),
metadata=_CLIENT_METADATA)
received_initial_metadata = response_iterator_call.initial_metadata()
list(response_iterator_call)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
received_initial_metadata))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
response_iterator_call.trailing_metadata()))
self.assertIs(grpc.StatusCode.OK, response_iterator_call.code())
self.assertEqual(_DETAILS, response_iterator_call.details())
def testAbortedUnaryUnary(self):
test_cases = zip(_ABORT_CODES, _EXPECTED_CLIENT_CODES,
_EXPECTED_DETAILS)
for abort_code, expected_code, expected_details in test_cases:
self._servicer.set_code(abort_code)
self._servicer.set_details(_DETAILS)
self._servicer.set_abort_call()
with self.assertRaises(grpc.RpcError) as exception_context:
self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA,
self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA,
exception_context.exception.initial_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
exception_context.exception.trailing_metadata()))
self.assertIs(expected_code, exception_context.exception.code())
self.assertEqual(expected_details,
exception_context.exception.details())
def testAbortedUnaryStream(self):
test_cases = zip(_ABORT_CODES, _EXPECTED_CLIENT_CODES,
_EXPECTED_DETAILS)
for abort_code, expected_code, expected_details in test_cases:
self._servicer.set_code(abort_code)
self._servicer.set_details(_DETAILS)
self._servicer.set_abort_call()
response_iterator_call = self._unary_stream(
_SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
received_initial_metadata = \
response_iterator_call.initial_metadata()
with self.assertRaises(grpc.RpcError):
self.assertEqual(len(list(response_iterator_call)), 0)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA,
self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
received_initial_metadata))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
response_iterator_call.trailing_metadata()))
self.assertIs(expected_code, response_iterator_call.code())
self.assertEqual(expected_details, response_iterator_call.details())
def testAbortedStreamUnary(self):
test_cases = zip(_ABORT_CODES, _EXPECTED_CLIENT_CODES,
_EXPECTED_DETAILS)
for abort_code, expected_code, expected_details in test_cases:
self._servicer.set_code(abort_code)
self._servicer.set_details(_DETAILS)
self._servicer.set_abort_call()
with self.assertRaises(grpc.RpcError) as exception_context:
self._stream_unary.with_call(
iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA,
self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA,
exception_context.exception.initial_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
exception_context.exception.trailing_metadata()))
self.assertIs(expected_code, exception_context.exception.code())
self.assertEqual(expected_details,
exception_context.exception.details())
def testAbortedStreamStream(self):
test_cases = zip(_ABORT_CODES, _EXPECTED_CLIENT_CODES,
_EXPECTED_DETAILS)
for abort_code, expected_code, expected_details in test_cases:
self._servicer.set_code(abort_code)
self._servicer.set_details(_DETAILS)
self._servicer.set_abort_call()
response_iterator_call = self._stream_stream(
iter([object()] * test_constants.STREAM_LENGTH),
metadata=_CLIENT_METADATA)
received_initial_metadata = \
response_iterator_call.initial_metadata()
with self.assertRaises(grpc.RpcError):
self.assertEqual(len(list(response_iterator_call)), 0)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA,
self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
received_initial_metadata))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
response_iterator_call.trailing_metadata()))
self.assertIs(expected_code, response_iterator_call.code())
self.assertEqual(expected_details, response_iterator_call.details())
def testCustomCodeUnaryUnary(self):
self._servicer.set_code(_NON_OK_CODE)
self._servicer.set_details(_DETAILS)
with self.assertRaises(grpc.RpcError) as exception_context:
self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA,
exception_context.exception.initial_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
exception_context.exception.trailing_metadata()))
self.assertIs(_NON_OK_CODE, exception_context.exception.code())
self.assertEqual(_DETAILS, exception_context.exception.details())
def testCustomCodeUnaryStream(self):
self._servicer.set_code(_NON_OK_CODE)
self._servicer.set_details(_DETAILS)
response_iterator_call = self._unary_stream(
_SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
received_initial_metadata = response_iterator_call.initial_metadata()
with self.assertRaises(grpc.RpcError):
list(response_iterator_call)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
received_initial_metadata))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
response_iterator_call.trailing_metadata()))
self.assertIs(_NON_OK_CODE, response_iterator_call.code())
self.assertEqual(_DETAILS, response_iterator_call.details())
def testCustomCodeStreamUnary(self):
self._servicer.set_code(_NON_OK_CODE)
self._servicer.set_details(_DETAILS)
with self.assertRaises(grpc.RpcError) as exception_context:
self._stream_unary.with_call(
iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA,
exception_context.exception.initial_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
exception_context.exception.trailing_metadata()))
self.assertIs(_NON_OK_CODE, exception_context.exception.code())
self.assertEqual(_DETAILS, exception_context.exception.details())
def testCustomCodeStreamStream(self):
self._servicer.set_code(_NON_OK_CODE)
self._servicer.set_details(_DETAILS)
response_iterator_call = self._stream_stream(
iter([object()] * test_constants.STREAM_LENGTH),
metadata=_CLIENT_METADATA)
received_initial_metadata = response_iterator_call.initial_metadata()
with self.assertRaises(grpc.RpcError) as exception_context:
list(response_iterator_call)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
received_initial_metadata))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
exception_context.exception.trailing_metadata()))
self.assertIs(_NON_OK_CODE, exception_context.exception.code())
self.assertEqual(_DETAILS, exception_context.exception.details())
def testCustomCodeExceptionUnaryUnary(self):
self._servicer.set_code(_NON_OK_CODE)
self._servicer.set_details(_DETAILS)
self._servicer.set_exception()
with self.assertRaises(grpc.RpcError) as exception_context:
self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA,
exception_context.exception.initial_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
exception_context.exception.trailing_metadata()))
self.assertIs(_NON_OK_CODE, exception_context.exception.code())
self.assertEqual(_DETAILS, exception_context.exception.details())
def testCustomCodeExceptionUnaryStream(self):
self._servicer.set_code(_NON_OK_CODE)
self._servicer.set_details(_DETAILS)
self._servicer.set_exception()
response_iterator_call = self._unary_stream(
_SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
received_initial_metadata = response_iterator_call.initial_metadata()
with self.assertRaises(grpc.RpcError):
list(response_iterator_call)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
received_initial_metadata))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
response_iterator_call.trailing_metadata()))
self.assertIs(_NON_OK_CODE, response_iterator_call.code())
self.assertEqual(_DETAILS, response_iterator_call.details())
def testCustomCodeExceptionStreamUnary(self):
self._servicer.set_code(_NON_OK_CODE)
self._servicer.set_details(_DETAILS)
self._servicer.set_exception()
with self.assertRaises(grpc.RpcError) as exception_context:
self._stream_unary.with_call(
iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA,
exception_context.exception.initial_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
exception_context.exception.trailing_metadata()))
self.assertIs(_NON_OK_CODE, exception_context.exception.code())
self.assertEqual(_DETAILS, exception_context.exception.details())
def testCustomCodeExceptionStreamStream(self):
self._servicer.set_code(_NON_OK_CODE)
self._servicer.set_details(_DETAILS)
self._servicer.set_exception()
response_iterator_call = self._stream_stream(
iter([object()] * test_constants.STREAM_LENGTH),
metadata=_CLIENT_METADATA)
received_initial_metadata = response_iterator_call.initial_metadata()
with self.assertRaises(grpc.RpcError):
list(response_iterator_call)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
received_initial_metadata))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
response_iterator_call.trailing_metadata()))
self.assertIs(_NON_OK_CODE, response_iterator_call.code())
self.assertEqual(_DETAILS, response_iterator_call.details())
def testCustomCodeReturnNoneUnaryUnary(self):
self._servicer.set_code(_NON_OK_CODE)
self._servicer.set_details(_DETAILS)
self._servicer.set_return_none()
with self.assertRaises(grpc.RpcError) as exception_context:
self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA,
exception_context.exception.initial_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
exception_context.exception.trailing_metadata()))
self.assertIs(_NON_OK_CODE, exception_context.exception.code())
self.assertEqual(_DETAILS, exception_context.exception.details())
def testCustomCodeReturnNoneStreamUnary(self):
self._servicer.set_code(_NON_OK_CODE)
self._servicer.set_details(_DETAILS)
self._servicer.set_return_none()
with self.assertRaises(grpc.RpcError) as exception_context:
self._stream_unary.with_call(
iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA,
exception_context.exception.initial_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
exception_context.exception.trailing_metadata()))
self.assertIs(_NON_OK_CODE, exception_context.exception.code())
self.assertEqual(_DETAILS, exception_context.exception.details())
if __name__ == '__main__':
unittest.main(verbosity=2)
| {
"content_hash": "9756e576593db6b60845a1f9f2134e01",
"timestamp": "",
"source": "github",
"line_count": 652,
"max_line_length": 80,
"avg_line_length": 42.38343558282209,
"alnum_prop": 0.5979590359701816,
"repo_name": "GoogleCloudPlatform/grpc-gcp-python",
"id": "d3fb74196c5cbee4133df6852ca12c70b7a3d519",
"size": "28211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/grpc_gcp_test/unit/_metadata_code_details_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "284661"
},
{
"name": "Shell",
"bytes": "2082"
}
],
"symlink_target": ""
} |
import yaml
import sys
import random
import string
class Config():
def __init__(self, fileName):
global cfg
try:
with open(fileName, 'r') as ymlfile:
cfg = yaml.load(ymlfile)
except Exception as e:
print(e)
input("Press any key to exit the program")
sys.exit()
        # Both credentials are required; abort if either one is missing.
        if not cfg['config']['connection']['Google api key'] or not cfg['config']['connection']['Discord bot token']:
            input('Problem loading Google api key/Discord bot token. Make sure both fields are filled in')
sys.exit()
if self.getYouTubersNr() == 0:
input('No YouTubers in list')
sys.exit()
def getConnectionData(self):
return [cfg['config']['connection']['Google api key'], cfg['config']['connection']['Discord bot token']]
def getPingTime(self):
return cfg['config']['main']['Ping Every x Minutes']
def getYouTubersList(self):
return cfg['config']['YouTubers']
def getYouTubersNr(self):
if not cfg['config']['YouTubers']:
return 0
return len(cfg['config']['YouTubers'])
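# Illustrative sketch (not part of the original file): the YAML layout that the
# accessors above expect, followed by typical usage.  Key names come from this
# class; every value and the 'config.yml' path are placeholders.
#
#   config:
#     connection:
#       Google api key: "YOUR-GOOGLE-API-KEY"
#       Discord bot token: "YOUR-DISCORD-BOT-TOKEN"
#     main:
#       Ping Every x Minutes: 5
#     YouTubers:
#       - SomeChannelName
def _example_usage():
    conf = Config('config.yml')
    api_key, bot_token = conf.getConnectionData()
    return api_key, bot_token, conf.getPingTime(), conf.getYouTubersList()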
| {
"content_hash": "fb702c961b1387bdd53e32a30201c2be",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 118,
"avg_line_length": 29.925,
"alnum_prop": 0.5580618212197159,
"repo_name": "anti0/Discord-YouTube-Feed-BOT",
"id": "269e4448184e1eec7d87130766f474327988985d",
"size": "1197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7868"
}
],
"symlink_target": ""
} |
import os
import time
import socket
from resource import getrusage, RUSAGE_SELF
from twisted.application.service import Service
from twisted.internet.task import LoopingCall
from carbon.conf import settings
stats = {}
prior_stats = {}
HOSTNAME = socket.gethostname().replace('.', '_')
PAGESIZE = os.sysconf('SC_PAGESIZE')
rusage = getrusage(RUSAGE_SELF)
lastUsage = rusage.ru_utime + rusage.ru_stime
lastUsageTime = time.time()
# NOTE: Referencing settings in this *top level scope* will
# give you *defaults* only. Probably not what you wanted.
# TODO(chrismd) refactor the graphite metrics hierarchy to be cleaner,
# more consistent, and make room for frontend metrics.
#metric_prefix = "Graphite.backend.%(program)s.%(instance)s." % settings
def increment(stat, increase=1):
try:
stats[stat] += increase
except KeyError:
stats[stat] = increase
def max(stat, newval):
try:
if stats[stat] < newval:
stats[stat] = newval
except KeyError:
stats[stat] = newval
def append(stat, value):
try:
stats[stat].append(value)
except KeyError:
stats[stat] = [value]
def getCpuUsage():
global lastUsage, lastUsageTime
rusage = getrusage(RUSAGE_SELF)
currentUsage = rusage.ru_utime + rusage.ru_stime
currentTime = time.time()
usageDiff = currentUsage - lastUsage
timeDiff = currentTime - lastUsageTime
if timeDiff == 0: # shouldn't be possible, but I've actually seen a ZeroDivisionError from this
timeDiff = 0.000001
cpuUsagePercent = (usageDiff / timeDiff) * 100.0
lastUsage = currentUsage
lastUsageTime = currentTime
return cpuUsagePercent
def getMemUsage():
rss_pages = int(open('/proc/self/statm').read().split()[1])
return rss_pages * PAGESIZE
def recordMetrics():
global lastUsage
global prior_stats
myStats = stats.copy()
myPriorStats = {}
stats.clear()
# cache metrics
if settings.program == 'carbon-cache':
record = cache_record
updateTimes = myStats.get('updateTimes', [])
committedPoints = myStats.get('committedPoints', 0)
creates = myStats.get('creates', 0)
droppedCreates = myStats.get('droppedCreates', 0)
errors = myStats.get('errors', 0)
cacheQueries = myStats.get('cacheQueries', 0)
cacheBulkQueries = myStats.get('cacheBulkQueries', 0)
cacheOverflow = myStats.get('cache.overflow', 0)
cacheBulkQuerySizes = myStats.get('cacheBulkQuerySize', [])
# Calculate cache-data-structure-derived metrics prior to storing anything
# in the cache itself -- which would otherwise affect said metrics.
cache_size = cache.MetricCache.size
cache_queues = len(cache.MetricCache)
record('cache.size', cache_size)
record('cache.queues', cache_queues)
if updateTimes:
avgUpdateTime = sum(updateTimes) / len(updateTimes)
record('avgUpdateTime', avgUpdateTime)
if committedPoints:
pointsPerUpdate = float(committedPoints) / len(updateTimes)
record('pointsPerUpdate', pointsPerUpdate)
if cacheBulkQuerySizes:
avgBulkSize = sum(cacheBulkQuerySizes) / len(cacheBulkQuerySizes)
record('cache.bulk_queries_average_size', avgBulkSize)
record('updateOperations', len(updateTimes))
record('committedPoints', committedPoints)
record('creates', creates)
record('droppedCreates', droppedCreates)
record('errors', errors)
record('cache.queries', cacheQueries)
record('cache.bulk_queries', cacheBulkQueries)
record('cache.overflow', cacheOverflow)
# aggregator metrics
elif settings.program == 'carbon-aggregator':
record = aggregator_record
record('allocatedBuffers', len(BufferManager))
record('bufferedDatapoints',
sum([b.size for b in BufferManager.buffers.values()]))
record('aggregateDatapointsSent', myStats.get('aggregateDatapointsSent', 0))
# relay metrics
else:
record = relay_record
# shared relay stats for relays & aggregators
if settings.program in ['carbon-aggregator', 'carbon-relay']:
prefix = 'destinations.'
relay_stats = [(k,v) for (k,v) in myStats.items() if k.startswith(prefix)]
for stat_name, stat_value in relay_stats:
record(stat_name, stat_value)
# Preserve the count of sent metrics so that the ratio of
# received : sent can be checked per-relay to determine the
# health of the destination.
if stat_name.endswith('.sent'):
myPriorStats[stat_name] = stat_value
# common metrics
record('activeConnections', len(state.connectedMetricReceiverProtocols))
record('metricsReceived', myStats.get('metricsReceived', 0))
record('blacklistMatches', myStats.get('blacklistMatches', 0))
record('whitelistRejects', myStats.get('whitelistRejects', 0))
record('cpuUsage', getCpuUsage())
  # And here preserve count of messages received in the prior period
myPriorStats['metricsReceived'] = myStats.get('metricsReceived', 0)
prior_stats.clear()
prior_stats.update(myPriorStats)
try: # This only works on Linux
record('memUsage', getMemUsage())
except Exception:
pass
def cache_record(metric, value):
prefix = settings.CARBON_METRIC_PREFIX
if settings.instance is None:
fullMetric = '%s.agents.%s.%s' % (prefix, HOSTNAME, metric)
else:
fullMetric = '%s.agents.%s-%s.%s' % (prefix, HOSTNAME, settings.instance, metric)
datapoint = (time.time(), value)
cache.MetricCache.store(fullMetric, datapoint)
def relay_record(metric, value):
prefix = settings.CARBON_METRIC_PREFIX
if settings.instance is None:
fullMetric = '%s.relays.%s.%s' % (prefix, HOSTNAME, metric)
else:
fullMetric = '%s.relays.%s-%s.%s' % (prefix, HOSTNAME, settings.instance, metric)
datapoint = (time.time(), value)
events.metricGenerated(fullMetric, datapoint)
def aggregator_record(metric, value):
prefix = settings.CARBON_METRIC_PREFIX
if settings.instance is None:
fullMetric = '%s.aggregator.%s.%s' % (prefix, HOSTNAME, metric)
else:
fullMetric = '%s.aggregator.%s-%s.%s' % (prefix, HOSTNAME, settings.instance, metric)
datapoint = (time.time(), value)
events.metricGenerated(fullMetric, datapoint)
class InstrumentationService(Service):
def __init__(self):
self.record_task = LoopingCall(recordMetrics)
def startService(self):
if settings.CARBON_METRIC_INTERVAL > 0:
self.record_task.start(settings.CARBON_METRIC_INTERVAL, False)
Service.startService(self)
def stopService(self):
if settings.CARBON_METRIC_INTERVAL > 0:
self.record_task.stop()
Service.stopService(self)
# Avoid import circularities
from carbon import state, events, cache
from carbon.aggregator.buffers import BufferManager
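# Illustrative sketch (not part of the original module): how other carbon
# components typically drive the helpers defined above.  The stat names come
# from recordMetrics(); the numeric values and 'some.high.water.mark' are
# placeholders.
def _example_instrumentation_usage():
  increment('metricsReceived')        # counter read back in recordMetrics()
  append('updateTimes', 0.004)        # per-update timings, averaged later
  append('cacheBulkQuerySize', 42)    # bulk-query sizes, averaged later
  max('some.high.water.mark', 7)      # placeholder stat; keeps the largest value seen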
| {
"content_hash": "7eed79d6428239af877b83affde906f2",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 98,
"avg_line_length": 31.976190476190474,
"alnum_prop": 0.7048399106478034,
"repo_name": "johnseekins/carbon",
"id": "c4d50e1ac9484424c5586f1d94cf58b6fc235375",
"size": "6715",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/carbon/instrumentation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "230964"
},
{
"name": "Shell",
"bytes": "14106"
}
],
"symlink_target": ""
} |
'common functions to classify c++ files'
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
from unilint.unilint_plugin import UnilintPlugin
CPP_SOURCE = 'cpp_source'
CPP_EXTENSIONS = ['c', 'cc', r'c\+\+', 'cxx', 'cpp',
                  'h', 'hh', r'h\+\+', 'hxx', 'hpp']
CPP_FILE_PATTERN = re.compile(r'.*\.(%s)$' % '|'.join(CPP_EXTENSIONS))
__pychecker__ = '--unusednames=cls,options,subdirs'
class CppSourcePlugin(UnilintPlugin):
"""Identifies files and folders with cpp code (heuristically)"""
def __init__(self, shell_function):
super(CppSourcePlugin, self).__init__(shell_function)
@classmethod
def get_id(cls):
return CPP_SOURCE
def categorize_type(self, options, path, subdirs, files):
result = {}
if not files:
files = [path]
for filepath in files:
filename = os.path.basename(filepath)
if CPP_FILE_PATTERN.match(filename) is not None:
if filepath != path:
filepath = os.path.join(path, filepath)
result[filepath] = ['cpp-file']
return result
#pylint: disable=R0921
class AbstractCppPlugin(UnilintPlugin):
"""Defines a plugin that depends on the categories of CppSourcePlugin"""
def __init__(self, shell_function):
super(AbstractCppPlugin, self).__init__(shell_function)
@classmethod
def get_depends(cls):
return [CPP_SOURCE]
@classmethod
def get_id(cls):
"""
:returns: short lowercase string
"""
raise NotImplementedError('get_id not implemented by Plugin class')
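# --- Illustrative sketch (not part of the module) -----------------------------
# Minimal concrete plugin built on AbstractCppPlugin.  The id string below is a
# placeholder; real unilint plugins also override further UnilintPlugin hooks.
class ExampleCppPlugin(AbstractCppPlugin):
    """Example plugin that inherits the cpp_source dependency declaration."""

    @classmethod
    def get_id(cls):
        return 'example_cpp'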
| {
"content_hash": "af125e801cb58f71de9551cd3e425a7b",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 76,
"avg_line_length": 28.586206896551722,
"alnum_prop": 0.6133896260554885,
"repo_name": "tkruse/unilint",
"id": "3c45c23cbb66622401481f5377d6eaa40ade77ab",
"size": "3056",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/unilint/cpp_source_plugin.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "125362"
}
],
"symlink_target": ""
} |
"""Base configurations to standardize experiments."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import copy
from typing import Any, List, Mapping, Optional
import dataclasses
import tensorflow as tf
import yaml
from official.modeling.hyperparams import params_dict
@dataclasses.dataclass
class Config(params_dict.ParamsDict):
"""The base configuration class that supports YAML/JSON based overrides."""
default_params: dataclasses.InitVar[Mapping[str, Any]] = None
restrictions: dataclasses.InitVar[List[str]] = None
def __post_init__(self, default_params, restrictions, *args, **kwargs):
super().__init__(default_params=default_params,
restrictions=restrictions,
*args,
**kwargs)
def _set(self, k, v):
if isinstance(v, dict):
if k not in self.__dict__:
self.__dict__[k] = params_dict.ParamsDict(v, [])
else:
self.__dict__[k].override(v)
else:
self.__dict__[k] = copy.deepcopy(v)
def __setattr__(self, k, v):
if k in params_dict.ParamsDict.RESERVED_ATTR:
# Set the essential private ParamsDict attributes.
self.__dict__[k] = copy.deepcopy(v)
else:
self._set(k, v)
def replace(self, **kwargs):
"""Like `override`, but returns a copy with the current config unchanged."""
params = self.__class__(self)
params.override(kwargs, is_strict=True)
return params
@classmethod
def from_yaml(cls, file_path: str):
# Note: This only works if the Config has all default values.
with tf.io.gfile.GFile(file_path, 'r') as f:
loaded = yaml.load(f)
config = cls()
config.override(loaded)
return config
@classmethod
def from_json(cls, file_path: str):
"""Wrapper for `from_yaml`."""
return cls.from_yaml(file_path)
@classmethod
def from_args(cls, *args, **kwargs):
"""Builds a config from the given list of arguments."""
attributes = list(cls.__annotations__.keys())
default_params = {a: p for a, p in zip(attributes, args)}
default_params.update(kwargs)
return cls(default_params)
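# --- Illustrative sketch (not part of the module above) ----------------------
# Deriving a typed config from Config.  The field names (learning_rate,
# batch_size) are placeholders, not part of the library.
@dataclasses.dataclass
class _ExampleTrainConfig(Config):
  learning_rate: float = 0.1
  batch_size: int = 32


def _example_usage():
  # Build from positional/keyword args, then take an overridden copy without
  # mutating the original instance (see `replace` above).
  cfg = _ExampleTrainConfig.from_args(0.05, batch_size=64)
  return cfg.replace(batch_size=128)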
| {
"content_hash": "a062471c3a8c37c59f023e7e6765d5c5",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 80,
"avg_line_length": 30.958333333333332,
"alnum_prop": 0.6527590847913862,
"repo_name": "alexgorban/models",
"id": "65fab425843892c244e0e524d41c4173af8d8a29",
"size": "2937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "official/modeling/hyperparams/base_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1619012"
},
{
"name": "Dockerfile",
"bytes": "9821"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33316"
},
{
"name": "Jupyter Notebook",
"bytes": "454746"
},
{
"name": "Makefile",
"bytes": "4933"
},
{
"name": "Python",
"bytes": "16363107"
},
{
"name": "Shell",
"bytes": "144095"
},
{
"name": "Starlark",
"bytes": "148029"
}
],
"symlink_target": ""
} |
from webassets import Bundle, register
webclientjs = Bundle('static/js/01_Item.js', 'static/js/02.Control.js', 'static/js/03.Util.js', 'static/js/04.Time.js',
'static/js/Event.js', 'static/js/Tag.js',
filters='jsmin', output='webclient.js')
register('webclientjs', webclientjs) | {
"content_hash": "3b7ea59586268afdd6f5b65bdd8745d6",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 119,
"avg_line_length": 48.833333333333336,
"alnum_prop": 0.6655290102389079,
"repo_name": "Lapeth/timeline",
"id": "9de1f7ae394844bf31897228c1a4ae648e2b2e4e",
"size": "293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Timeline/webclient/assets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4532"
},
{
"name": "HTML",
"bytes": "53017"
},
{
"name": "JavaScript",
"bytes": "67729"
},
{
"name": "Python",
"bytes": "68922"
}
],
"symlink_target": ""
} |
"""
Tests functionality of commerce API for version 2
"""
import inspect
import functools
import json
import os
import pathlib
import pytest
import requests_mock
from gw2api import GuildWars2Client
def test_coins_to_gems(gw2_client, mock_adapter):
"""Tests conversion of coins to gems
Args:
gw2_client: The pytest "gw2_client" fixture.
"""
register_urls_to_files(
mock_adapter,
{
"commerce/exchange/coins?quantity=100000":
"coinstogems_quantity100000"
})
# get conversion rate of 100000 coins to gems
result = gw2_client.commerceexchangecoins.get(quantity = 100000)
# Result should look similar to:
# {
# "coins_per_gem": 2941,
# "quantity": 34
# }
assert result["coins_per_gem"] == 2941
assert result["quantity"] == 34
def test_gems_to_coins(gw2_client, mock_adapter):
"""Tests conversion of gems to coins
Args:
gw2_client: The pytest "gw2_client" fixture.
"""
register_urls_to_files(
mock_adapter,
{
"commerce/exchange/gems?quantity=100":
"gemstocoins_quantity100"
})
# get conversion rate of 100 gems to coins
result = gw2_client.commerceexchangegems.get(quantity = 100)
# Result should look similar to:
# {
# "coins_per_gem": 1841,
# "quantity": 184134
# }
assert result["coins_per_gem"] == 1841
assert result["quantity"] == 184134
def test_transactions(gw2_client, mock_adapter):
"""Tests transactions API for account, both past and current
Args:
        gw2_client: The pytest "gw2_client" fixture.
"""
register_urls_to_files(
mock_adapter,
{
"commerce/transactions": "commerce_transactions",
"commerce/transactions/history": "commerce_transactions_secondlevel",
"commerce/transactions/current": "commerce_transactions_secondlevel",
"commerce/transactions/history/buys": "commerce_historybuys",
"commerce/transactions/history/sells": "commerce_historysells",
"commerce/transactions/current/buys": "commerce_currentbuys",
"commerce/transactions/current/sells": "commerce_currentsells"
})
# get list of second-level endpoints
result = gw2_client.commercetransactions.get()
assert all(["current" in result, "history" in result])
# get list of third-level endpoints
result = gw2_client.commercetransactions.history.get()
assert all(["buys" in result, "sells" in result])
result = gw2_client.commercetransactions.current.get()
assert all(["buys" in result, "sells" in result])
# get transaction buy history
expected = load_mock_json("commerce_historybuys")
result = gw2_client.commercetransactions.history.buys.get()
assert result == expected
# get transaction sell history
expected = load_mock_json("commerce_historysells")
result = gw2_client.commercetransactions.history.sells.get()
assert result == expected
# get transaction current buys
expected = load_mock_json("commerce_currentbuys")
result = gw2_client.commercetransactions.current.buys.get()
assert result == expected
# get transaction current sells
expected = load_mock_json("commerce_currentsells")
result = gw2_client.commercetransactions.current.sells.get()
assert result == expected
@functools.lru_cache()
def mocks_path():
"""Returns the path to the stored mock JSON files.
Returns:
The path to the mock files to be loaded and sent to the API requests.
"""
this_file = inspect.getframeinfo(inspect.currentframe()).filename
return pathlib.Path(os.path.dirname(os.path.abspath(this_file))) / 'mocks'
def load_mock_text(filename_stem):
"""Loads the mocks/{filename_stem}.json.
Args:
filename_stem: The stem of the filename to load, e.g. 'continents' would
load mocks/continents.json.
Returns:
The file content as text.
"""
with (mocks_path() / '{}.json'.format(filename_stem)).open() as f:
return f.read()
def load_mock_json(filename_stem):
"""Loads the mocks/{filename_stem}.json and parses it as JSON. Returns
the resulting dictionary.
Args:
filename_stem: The stem of the filename to load, e.g. 'continents' would
load mocks/continents.json.
Returns:
The dictionary from the parsed JSON file.
"""
return json.loads(load_mock_text(filename_stem))
def register_urls_to_files(adapter, url_to_file):
"""Registers a dictionary of urls to filename_stems with the mock adapter:
{
'continents/2': 'continents2'
}
would register the URL https://api.guildwars2.com/v2/continents/2 to
return the contents of mocks/continents2.json.
Args:
adapter: The mock adapter to register the urls against.
url_to_file: A dictionary mapping url parts to filenames, see above.
"""
for url, filename_stem in url_to_file.items():
url = '{}/v2/{}'.format(GuildWars2Client.BASE_URL, url)
response = load_mock_text(filename_stem)
adapter.register_uri('GET', url, text=response)
@pytest.fixture
def mock_adapter():
"""Creates a mock adapter instance.
As this is a pytest fixture, tests only need to provide an argument
    with the name `mock_adapter` to gain access to the mock adapter.
Returns:
A mock adapter. This exposes the register_uri function which can be used
to mock requests.
"""
return requests_mock.Adapter()
@pytest.fixture
def gw2_client(mock_adapter):
"""Creates a GuildWars2Client instance and mounts the mock adapter onto its
session.
As this is a pytest fixture, tests only need to provide an argument
with the name `gw2_client` to gain access to the mocked instance.
Returns:
A GuildWars2Client instance with a mock session.
"""
gw2_client = GuildWars2Client(api_key = "empty-api-key")
gw2_client.session.mount('https://', mock_adapter)
return gw2_client
| {
"content_hash": "872a8a6ea25ff83ae3c81fc3eb82b3ca",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 81,
"avg_line_length": 30.597014925373134,
"alnum_prop": 0.6588617886178861,
"repo_name": "JuxhinDB/gw2-api-interface",
"id": "660f2a3167c096488fb16778e3ac849b1346a2ce",
"size": "6150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_commerce.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49181"
}
],
"symlink_target": ""
} |
from panda3d.core import *
from panda3d.direct import *
from direct.interval.IntervalGlobal import *
from otp.avatar import Avatar
from otp.nametag.NametagConstants import *
from toontown.char import CharDNA
from toontown.char import DistributedChar
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.controls.ControlManager import CollisionHandlerRayStart
from toontown.toonbase import ToontownGlobals
from toontown.toonbase.TTLocalizer import Donald, DonaldDock, WesternPluto, Pluto
from toontown.effects import DustCloud
import CCharChatter
import CCharPaths
import string
import copy
class DistributedCCharBase(DistributedChar.DistributedChar):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedCCharBase')
def __init__(self, cr, name, dnaName):
try:
self.DistributedCCharBase_initialized
return
except:
self.DistributedCCharBase_initialized = 1
DistributedChar.DistributedChar.__init__(self, cr)
dna = CharDNA.CharDNA()
dna.newChar(dnaName)
self.setDNA(dna)
self.setName(name)
self.setTransparency(TransparencyAttrib.MDual, 1)
fadeIn = self.colorScaleInterval(0.5, Vec4(1, 1, 1, 1), startColorScale=Vec4(1, 1, 1, 0), blendType='easeInOut')
fadeIn.start()
self.diffPath = None
self.transitionToCostume = 0
self.__initCollisions()
return
def __initCollisions(self):
self.cSphere = CollisionSphere(0.0, 0.0, 0.0, 8.0)
self.cSphere.setTangible(0)
self.cSphereNode = CollisionNode(self.getName() + 'BlatherSphere')
self.cSphereNode.addSolid(self.cSphere)
self.cSphereNodePath = self.attachNewNode(self.cSphereNode)
self.cSphereNodePath.hide()
self.cSphereNode.setCollideMask(ToontownGlobals.WallBitmask)
self.acceptOnce('enter' + self.cSphereNode.getName(), self.__handleCollisionSphereEnter)
self.cRay = CollisionRay(0.0, 0.0, CollisionHandlerRayStart, 0.0, 0.0, -1.0)
self.cRayNode = CollisionNode(self.getName() + 'cRay')
self.cRayNode.addSolid(self.cRay)
self.cRayNodePath = self.attachNewNode(self.cRayNode)
self.cRayNodePath.hide()
self.cRayBitMask = ToontownGlobals.FloorBitmask
self.cRayNode.setFromCollideMask(self.cRayBitMask)
self.cRayNode.setIntoCollideMask(BitMask32.allOff())
self.lifter = CollisionHandlerFloor()
self.lifter.setOffset(ToontownGlobals.FloorOffset)
self.lifter.setReach(10.0)
self.lifter.setMaxVelocity(0.0)
self.lifter.addCollider(self.cRayNodePath, self)
self.cTrav = base.localAvatar.cTrav
def __deleteCollisions(self):
del self.cSphere
del self.cSphereNode
self.cSphereNodePath.removeNode()
del self.cSphereNodePath
self.cRay = None
self.cRayNode = None
self.cRayNodePath = None
self.lifter = None
self.cTrav = None
return
def disable(self):
self.stopBlink()
self.ignoreAll()
self.chatTrack.finish()
del self.chatTrack
if self.chatterDialogue:
self.chatterDialogue.stop()
del self.chatterDialogue
DistributedChar.DistributedChar.disable(self)
self.stopEarTask()
def delete(self):
try:
self.DistributedCCharBase_deleted
except:
self.setParent(NodePath('Temp'))
self.DistributedCCharBase_deleted = 1
self.__deleteCollisions()
DistributedChar.DistributedChar.delete(self)
def generate(self, diffPath = None):
DistributedChar.DistributedChar.generate(self)
if diffPath == None:
self.setPos(CCharPaths.getNodePos(CCharPaths.startNode, CCharPaths.getPaths(self.getName(), self.getCCLocation())))
else:
self.setPos(CCharPaths.getNodePos(CCharPaths.startNode, CCharPaths.getPaths(diffPath, self.getCCLocation())))
self.setHpr(0, 0, 0)
self.setParent(ToontownGlobals.SPRender)
self.startBlink()
self.startEarTask()
self.chatTrack = Sequence()
self.chatterDialogue = None
self.acceptOnce('enter' + self.cSphereNode.getName(), self.__handleCollisionSphereEnter)
self.accept('exitSafeZone', self.__handleExitSafeZone)
return
def __handleExitSafeZone(self):
self.__handleCollisionSphereExit(None)
return
def __handleCollisionSphereEnter(self, collEntry):
self.notify.debug('Entering collision sphere...')
self.sendUpdate('avatarEnter', [])
self.accept('chatUpdate', self.__handleChatUpdate)
self.accept('chatUpdateSC', self.__handleChatUpdateSC)
self.accept('chatUpdateSCCustom', self.__handleChatUpdateSCCustom)
self.accept('chatUpdateSCToontask', self.__handleChatUpdateSCToontask)
self.nametag3d.setBin('transparent', 100)
self.acceptOnce('exit' + self.cSphereNode.getName(), self.__handleCollisionSphereExit)
def __handleCollisionSphereExit(self, collEntry):
self.notify.debug('Exiting collision sphere...')
self.sendUpdate('avatarExit', [])
self.ignore('chatUpdate')
self.ignore('chatUpdateSC')
self.ignore('chatUpdateSCCustom')
self.ignore('chatUpdateSCToontask')
self.acceptOnce('enter' + self.cSphereNode.getName(), self.__handleCollisionSphereEnter)
def __handleChatUpdate(self, msg, chatFlags):
self.sendUpdate('setNearbyAvatarChat', [msg])
def __handleChatUpdateSC(self, msgIndex):
self.sendUpdate('setNearbyAvatarSC', [msgIndex])
def __handleChatUpdateSCCustom(self, msgIndex):
self.sendUpdate('setNearbyAvatarSCCustom', [msgIndex])
def __handleChatUpdateSCToontask(self, taskId, toNpcId, toonProgress, msgIndex):
self.sendUpdate('setNearbyAvatarSCToontask', [taskId,
toNpcId,
toonProgress,
msgIndex])
def makeTurnToHeadingTrack(self, heading):
curHpr = self.getHpr()
destHpr = self.getHpr()
destHpr.setX(heading)
if destHpr[0] - curHpr[0] > 180.0:
destHpr.setX(destHpr[0] - 360)
elif destHpr[0] - curHpr[0] < -180.0:
destHpr.setX(destHpr[0] + 360)
turnSpeed = 180.0
time = abs(destHpr[0] - curHpr[0]) / turnSpeed
turnTracks = Parallel()
if time > 0.2:
turnTracks.append(Sequence(Func(self.loop, 'walk'), Wait(time), Func(self.loop, 'neutral')))
turnTracks.append(LerpHprInterval(self, time, destHpr, name='lerp' + self.getName() + 'Hpr'))
return turnTracks
def setChat(self, category, msg, avId):
if self.cr.doId2do.has_key(avId):
avatar = self.cr.doId2do[avId]
chatter = CCharChatter.getChatter(self.getName(), self.getCCChatter())
if category >= len(chatter):
self.notify.debug("Chatter's changed")
return
elif len(chatter[category]) <= msg:
self.notify.debug("Chatter's changed")
return
str = chatter[category][msg]
if '%' in str:
str = copy.deepcopy(str)
avName = avatar.getName()
str = str.replace('%', avName)
track = Sequence()
if category != CCharChatter.GOODBYE:
curHpr = self.getHpr()
self.headsUp(avatar)
destHpr = self.getHpr()
self.setHpr(curHpr)
track.append(self.makeTurnToHeadingTrack(destHpr[0]))
if self.getName() == Donald or self.getName() == WesternPluto or self.getName() == Pluto:
chatFlags = CFThought | CFTimeout
if hasattr(base.cr, 'newsManager') and base.cr.newsManager:
holidayIds = base.cr.newsManager.getHolidayIdList()
if ToontownGlobals.APRIL_FOOLS_COSTUMES in holidayIds:
if self.getName() == Pluto:
chatFlags = CFTimeout | CFSpeech
elif self.getName() == DonaldDock:
chatFlags = CFTimeout | CFSpeech
self.nametag3d.hide()
else:
chatFlags = CFTimeout | CFSpeech
self.chatterDialogue = self.getChatterDialogue(category, msg)
track.append(Func(self.setChatAbsolute, str, chatFlags, self.chatterDialogue))
self.chatTrack.finish()
self.chatTrack = track
self.chatTrack.start()
def setWalk(self, srcNode, destNode, timestamp):
pass
def walkSpeed(self):
return 0.1
def enableRaycast(self, enable = 1):
if not self.cTrav or not hasattr(self, 'cRayNode') or not self.cRayNode:
self.notify.debug('raycast info not found for ' + self.getName())
return
self.cTrav.removeCollider(self.cRayNodePath)
if enable:
if self.notify.getDebug():
self.notify.debug('enabling raycast for ' + self.getName())
self.cTrav.addCollider(self.cRayNodePath, self.lifter)
elif self.notify.getDebug():
self.notify.debug('disabling raycast for ' + self.getName())
def getCCLocation(self):
return 0
def getCCChatter(self):
self.handleHolidays()
return self.CCChatter
def handleHolidays(self):
self.CCChatter = 0
if hasattr(base.cr, 'newsManager') and base.cr.newsManager:
holidayIds = base.cr.newsManager.getHolidayIdList()
if ToontownGlobals.CRASHED_LEADERBOARD in holidayIds:
self.CCChatter = ToontownGlobals.CRASHED_LEADERBOARD
elif ToontownGlobals.CIRCUIT_RACING_EVENT in holidayIds:
self.CCChatter = ToontownGlobals.CIRCUIT_RACING_EVENT
elif ToontownGlobals.WINTER_CAROLING in holidayIds:
self.CCChatter = ToontownGlobals.WINTER_CAROLING
elif ToontownGlobals.WINTER_DECORATIONS in holidayIds:
self.CCChatter = ToontownGlobals.WINTER_DECORATIONS
elif ToontownGlobals.WACKY_WINTER_CAROLING in holidayIds:
self.CCChatter = ToontownGlobals.WACKY_WINTER_CAROLING
elif ToontownGlobals.WACKY_WINTER_DECORATIONS in holidayIds:
self.CCChatter = ToontownGlobals.WACKY_WINTER_DECORATIONS
elif ToontownGlobals.VALENTINES_DAY in holidayIds:
self.CCChatter = ToontownGlobals.VALENTINES_DAY
elif ToontownGlobals.APRIL_FOOLS_COSTUMES in holidayIds:
self.CCChatter = ToontownGlobals.APRIL_FOOLS_COSTUMES
elif ToontownGlobals.SILLY_CHATTER_ONE in holidayIds:
self.CCChatter = ToontownGlobals.SILLY_CHATTER_ONE
elif ToontownGlobals.SILLY_CHATTER_TWO in holidayIds:
self.CCChatter = ToontownGlobals.SILLY_CHATTER_TWO
elif ToontownGlobals.SILLY_CHATTER_THREE in holidayIds:
self.CCChatter = ToontownGlobals.SILLY_CHATTER_THREE
elif ToontownGlobals.SILLY_CHATTER_FOUR in holidayIds:
self.CCChatter = ToontownGlobals.SILLY_CHATTER_FOUR
elif ToontownGlobals.SILLY_CHATTER_FIVE in holidayIds:
                self.CCChatter = ToontownGlobals.SILLY_CHATTER_FIVE
elif ToontownGlobals.HALLOWEEN_COSTUMES in holidayIds:
self.CCChatter = ToontownGlobals.HALLOWEEN_COSTUMES
elif ToontownGlobals.SPOOKY_COSTUMES in holidayIds:
self.CCChatter = ToontownGlobals.SPOOKY_COSTUMES
elif ToontownGlobals.SELLBOT_FIELD_OFFICE in holidayIds:
self.CCChatter = ToontownGlobals.SELLBOT_FIELD_OFFICE
def fadeAway(self):
fadeOut = self.colorScaleInterval(0.5, Vec4(1, 1, 1, 0.5), startColorScale=Vec4(1, 1, 1, 1), blendType='easeInOut')
fadeOut.start()
self.loop('neutral')
if self.fsm:
self.fsm.addState(State.State('TransitionToCostume', self.enterTransitionToCostume, self.exitTransitionToCostume, ['Off']))
self.fsm.request('TransitionToCostume', force=1)
self.ignoreAll()
def enterTransitionToCostume(self):
def getDustCloudIval():
dustCloud = DustCloud.DustCloud(fBillboard=0, wantSound=1)
dustCloud.setBillboardAxis(2.0)
dustCloud.setZ(4)
dustCloud.setScale(0.6)
dustCloud.createTrack()
            return Sequence(Func(dustCloud.reparentTo, self), dustCloud.track, Func(dustCloud.destroy), name='dustCloudIval')
dust = getDustCloudIval()
dust.start()
def exitTransitionToCostume(self):
pass
| {
"content_hash": "418f409a5ce6f3ec56458752bbfa7222",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 135,
"avg_line_length": 43.642857142857146,
"alnum_prop": 0.6475722858701582,
"repo_name": "silly-wacky-3-town-toon/SOURCE-COD",
"id": "00e3802e083778084e4100608274eb4d56250b24",
"size": "12831",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/classicchars/DistributedCCharBase.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10249"
},
{
"name": "C",
"bytes": "1752256"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "5485400"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "NSIS",
"bytes": "1009050"
},
{
"name": "Objective-C",
"bytes": "21821"
},
{
"name": "PLSQL",
"bytes": "10200"
},
{
"name": "Pascal",
"bytes": "4986"
},
{
"name": "Perl6",
"bytes": "30612"
},
{
"name": "Puppet",
"bytes": "259"
},
{
"name": "Python",
"bytes": "33566014"
},
{
"name": "Shell",
"bytes": "14642"
},
{
"name": "Tcl",
"bytes": "2084458"
}
],
"symlink_target": ""
} |
from olliebennett.tweets import tweet_cpu_temp
__author__ = 'Ollie Bennett | http://olliebennett.co.uk/'
if __name__ == '__main__':
# If calling as a script, update the status
tweet_cpu_temp()
| {
"content_hash": "f2112018567bd529881d2555903e6411",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 57,
"avg_line_length": 29,
"alnum_prop": 0.6551724137931034,
"repo_name": "olliebennett/raspberry-pi-scripts",
"id": "a1cd0f18031e3f84f3bda1bfbae8cbba150c0cd7",
"size": "245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tweet_temperature.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13646"
}
],
"symlink_target": ""
} |
"""InternetSemLimites URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
    1. Add an import:  from my_app import views
    2. Add a URL to urlpatterns:  path('', views.home, name='home')
Class-based views
    1. Add an import:  from other_app.views import Home
    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.urls import include, path
    2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))
"""
from django.urls import include, path
from django.contrib import admin
from InternetSemLimites.core.views import (
provider_details,
provider_new,
redirect_to_api
)
urlpatterns = [
path('', redirect_to_api, name='home'),
path('api/', include('InternetSemLimites.api.urls')),
path('markdown/', include('InternetSemLimites.markdown.urls')),
path('new/', provider_new, name='new'),
path('provider/<int:pk>/', provider_details, name='provider'),
path('admin/', admin.site.urls)
]
| {
"content_hash": "29392986f2e0b234d4e69bb24f7543bd",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 79,
"avg_line_length": 35.333333333333336,
"alnum_prop": 0.6921097770154374,
"repo_name": "InternetSemLimites/PublicAPI",
"id": "144d56533bd555895ef88b2726763e0434d4ea6b",
"size": "1166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "InternetSemLimites/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5347"
},
{
"name": "Python",
"bytes": "68314"
},
{
"name": "Shell",
"bytes": "1346"
}
],
"symlink_target": ""
} |
from .workspace_structure import WorkspaceStructure
class Replacement(WorkspaceStructure):
def __init__(self, object_from_initial, object_from_modified, relation):
WorkspaceStructure.__init__(self)
self.object_from_initial = object_from_initial
self.object_from_modified = object_from_modified
self.relation = relation
| {
"content_hash": "b6448e2143561b1f174b3dd54116a005",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 76,
"avg_line_length": 39.666666666666664,
"alnum_prop": 0.7226890756302521,
"repo_name": "jalanb/co.py.cat",
"id": "4f4942fe401ad76ece784f9dfb81369e0b919af5",
"size": "357",
"binary": false,
"copies": "1",
"ref": "refs/heads/__main__",
"path": "copycat/replacement.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "157144"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('walletone', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='walletonesuccesspayment',
name='WMI_MERCHANT_ID',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='walletonesuccesspayment',
name='WMI_TO_USER_ID',
field=models.CharField(blank=True, max_length=255),
),
]
| {
"content_hash": "39b495f4560a7c18658c1bc006153253",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 63,
"avg_line_length": 25.52173913043478,
"alnum_prop": 0.5945485519591142,
"repo_name": "otov4its/django-walletone",
"id": "cc5267c6b3547628520a6377fcb1f39de5cf3922",
"size": "659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "walletone/migrations/0002_auto_20160421_1941.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22987"
}
],
"symlink_target": ""
} |
import httplib
import re
import os
import requests
import json
from datetime import datetime
from distutils.version import LooseVersion
from cumulusci.core.exceptions import GithubApiNotFoundError
from cumulusci.core.utils import import_class
from cumulusci.tasks.release_notes.exceptions import CumulusCIException
from cumulusci.tasks.release_notes.parser import ChangeNotesLinesParser
from cumulusci.tasks.release_notes.parser import IssuesParser
from cumulusci.tasks.release_notes.provider import StaticChangeNotesProvider
from cumulusci.tasks.release_notes.provider import DirectoryChangeNotesProvider
from cumulusci.tasks.release_notes.provider import GithubChangeNotesProvider
class BaseReleaseNotesGenerator(object):
def __init__(self):
self.change_notes = []
self.init_parsers()
self.init_change_notes()
def __call__(self):
self._parse_change_notes()
return self.render()
def init_change_notes(self):
self.change_notes = self._init_change_notes()
def _init_change_notes(self):
""" Subclasses should override this method to return an initialized
subclass of BaseChangeNotesProvider """
return []
def init_parsers(self):
""" Initializes the parser instances as the list self.parsers """
self.parsers = []
self._init_parsers()
def _init_parsers(self):
""" Subclasses should override this method to initialize their
parsers """
pass
def _parse_change_notes(self):
""" Parses all change_notes in self.change_notes() through all parsers
in self.parsers """
for change_note in self.change_notes():
self._parse_change_note(change_note)
def _parse_change_note(self, change_note):
""" Parses an individual change note through all parsers in
self.parsers """
for parser in self.parsers:
parser.parse(change_note)
def render(self):
""" Returns the rendered release notes from all parsers as a string """
release_notes = []
for parser in self.parsers:
parser_content = parser.render()
if parser_content is not None:
release_notes.append(parser_content)
return u'\r\n\r\n'.join(release_notes)
class StaticReleaseNotesGenerator(BaseReleaseNotesGenerator):
def __init__(self, change_notes):
self._change_notes = change_notes
super(StaticReleaseNotesGenerator, self).__init__()
def _init_parsers(self):
self.parsers.append(ChangeNotesLinesParser(
self, 'Critical Changes'))
self.parsers.append(ChangeNotesLinesParser(self, 'Changes'))
self.parsers.append(IssuesParser(
self, 'Issues Closed'))
def _init_change_notes(self):
return StaticChangeNotesProvider(self, self._change_notes)
class DirectoryReleaseNotesGenerator(BaseReleaseNotesGenerator):
def __init__(self, directory):
self.directory = directory
super(DirectoryReleaseNotesGenerator, self).__init__()
def _init_parsers(self):
self.parsers.append(ChangeNotesLinesParser(
self, 'Critical Changes'))
self.parsers.append(ChangeNotesLinesParser(self, 'Changes'))
self.parsers.append(IssuesParser(
self, 'Issues Closed'))
def _init_change_notes(self):
return DirectoryChangeNotesProvider(self, self.directory)
class GithubReleaseNotesGenerator(BaseReleaseNotesGenerator):
def __init__(
self,
github,
github_info,
parser_config,
current_tag,
last_tag=None,
link_pr=False,
publish=False,
has_issues=True,
):
self.github = github
self.github_info = github_info
self.parser_config = parser_config
self.current_tag = current_tag
self.last_tag = last_tag
self.link_pr = link_pr
self.do_publish = publish
self.has_issues = has_issues
self.lines_parser_class = None
self.issues_parser_class = None
super(GithubReleaseNotesGenerator, self).__init__()
def __call__(self):
release = self._get_release()
if not release:
raise CumulusCIException(
'Release not found for tag: {}'.format(self.current_tag)
)
content = super(GithubReleaseNotesGenerator, self).__call__()
content = self._update_release_content(release, content)
if self.do_publish:
release.edit(body=content)
return content
def _init_parsers(self):
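        # Illustrative sketch (not from the original source): parser_config is expected to be a
        # list of dicts carrying 'class_path' and 'title' keys, e.g.
        #
        #     parser_config = [
        #         {'class_path': 'cumulusci.tasks.release_notes.parser.ChangeNotesLinesParser',
        #          'title': 'Critical Changes'},
        #         {'class_path': 'cumulusci.tasks.release_notes.parser.IssuesParser',
        #          'title': 'Issues Closed'},
        #     ]
        #
        # The particular classes and titles above are assumptions based on the static and
        # directory generators defined earlier in this module.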
for cfg in self.parser_config:
parser_class = import_class(cfg['class_path'])
self.parsers.append(parser_class(self, cfg['title']))
def _init_change_notes(self):
return GithubChangeNotesProvider(
self,
self.current_tag,
self.last_tag
)
def _get_release(self):
repo = self.get_repo()
for release in repo.iter_releases():
if release.tag_name == self.current_tag:
return release
def _update_release_content(self, release, content):
"""Merge existing and new release content."""
if release.body:
new_body = []
current_parser = None
is_start_line = False
for parser in self.parsers:
parser.replaced = False
# update existing sections
for line in release.body.splitlines():
if current_parser:
if current_parser._is_end_line(current_parser._process_line(line)):
parser_content = current_parser.render()
if parser_content:
# replace existing section with new content
new_body.append(parser_content + '\r\n')
current_parser = None
for parser in self.parsers:
if parser._render_header().strip() == parser._process_line(line).strip():
parser.replaced = True
current_parser = parser
is_start_line = True
break
else:
is_start_line = False
if is_start_line:
continue
if current_parser:
continue
else:
# preserve existing sections
new_body.append(line.strip())
# catch section without end line
if current_parser:
new_body.append(current_parser.render())
# add new sections at bottom
for parser in self.parsers:
parser_content = parser.render()
if parser_content and not parser.replaced:
new_body.append(parser_content + '\r\n')
content = u'\r\n'.join(new_body)
return content
def get_repo(self):
return self.github.repository(
self.github_info['github_owner'],
self.github_info['github_repo'],
)
| {
"content_hash": "f2300644e4e44c7400dec5ecb7f4c1a6",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 93,
"avg_line_length": 33.523148148148145,
"alnum_prop": 0.5945311421074437,
"repo_name": "e02d96ec16/CumulusCI",
"id": "4b81bd0380cd6abc0aeff917061ec826c9ff485b",
"size": "7241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cumulusci/tasks/release_notes/generator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2303"
},
{
"name": "Python",
"bytes": "641697"
},
{
"name": "RobotFramework",
"bytes": "9270"
},
{
"name": "Shell",
"bytes": "5555"
}
],
"symlink_target": ""
} |
""" This file contains email sending functions for Flask-User.
It uses Jinja2 to render email subject and email message. It uses Flask-Mail to send email.
:copyright: (c) 2013 by Ling Thio
:author: Ling Thio ([email protected])
:license: Simplified BSD License, see LICENSE.txt for more details."""
import smtplib
import socket
from flask import current_app, render_template
def _render_email(filename, **kwargs):
# Render subject
subject = render_template(filename+'_subject.txt', **kwargs)
# Make sure that subject lines do not contain newlines
subject = subject.replace('\n', ' ')
subject = subject.replace('\r', ' ')
# Render HTML message
html_message = render_template(filename+'_message.html', **kwargs)
# Render text message
text_message = render_template(filename+'_message.txt', **kwargs)
return (subject, html_message, text_message)
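# Illustrative example (not part of the original module): for a hypothetical template prefix
# such as 'flask_user/emails/confirm_email', _render_email() renders
#     flask_user/emails/confirm_email_subject.txt
#     flask_user/emails/confirm_email_message.html
#     flask_user/emails/confirm_email_message.txt
# and returns the rendered (subject, html_message, text_message) tuple.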
def send_email(recipient, subject, html_message, text_message):
""" Send email from default sender to 'recipient' """
class SendEmailError(Exception):
pass
# Make sure that Flask-Mail has been installed
try:
from flask_mail import Message
except:
raise SendEmailError("Flask-Mail has not been installed. Use 'pip install Flask-Mail' to install Flask-Mail.")
# Make sure that Flask-Mail has been initialized
mail_engine = current_app.extensions.get('mail', None)
if not mail_engine:
raise SendEmailError('Flask-Mail has not been initialized. Initialize Flask-Mail or disable USER_SEND_PASSWORD_CHANGED_EMAIL, USER_SEND_REGISTERED_EMAIL and USER_SEND_USERNAME_CHANGED_EMAIL')
try:
# Construct Flash-Mail message
message = Message(subject,
recipients=[recipient],
html = html_message,
body = text_message)
mail_engine.send(message)
# Print helpful error messages on exceptions
except (socket.gaierror, socket.error) as e:
raise SendEmailError('SMTP Connection error: Check your MAIL_HOSTNAME or MAIL_PORT settings.')
except smtplib.SMTPAuthenticationError:
raise SendEmailError('SMTP Authentication error: Check your MAIL_USERNAME and MAIL_PASSWORD settings.')
def _get_primary_email(user):
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
if db_adapter.UserEmailClass:
user_email = db_adapter.find_first_object(db_adapter.UserEmailClass,
user_id=int(user.get_id()),
is_primary=True)
return user_email.email if user_email else None
else:
return user.email
def send_confirm_email_email(user, user_email, confirm_email_link):
# Verify certain conditions
user_manager = current_app.user_manager
if not user_manager.enable_email: return
if not user_manager.send_registered_email and not user_manager.enable_confirm_email: return
# Retrieve email address from User or UserEmail object
email = user_email.email if user_email else user.email
assert(email)
# Render subject, html message and text message
subject, html_message, text_message = _render_email(
user_manager.confirm_email_email_template,
user=user,
app_name=user_manager.app_name,
confirm_email_link=confirm_email_link)
# Send email message using Flask-Mail
user_manager.send_email_function(email, subject, html_message, text_message)
def send_forgot_password_email(user, user_email, reset_password_link):
# Verify certain conditions
user_manager = current_app.user_manager
if not user_manager.enable_email: return
assert user_manager.enable_forgot_password
# Retrieve email address from User or UserEmail object
email = user_email.email if user_email else user.email
assert(email)
# Render subject, html message and text message
subject, html_message, text_message = _render_email(
user_manager.forgot_password_email_template,
user=user,
app_name=user_manager.app_name,
reset_password_link=reset_password_link)
# Send email message using Flask-Mail
user_manager.send_email_function(email, subject, html_message, text_message)
def send_password_changed_email(user):
# Verify certain conditions
user_manager = current_app.user_manager
if not user_manager.enable_email: return
if not user_manager.send_password_changed_email: return
# Retrieve email address from User or UserEmail object
email = _get_primary_email(user)
assert(email)
# Render subject, html message and text message
subject, html_message, text_message = _render_email(
user_manager.password_changed_email_template,
user=user,
app_name=user_manager.app_name)
# Send email message using Flask-Mail
user_manager.send_email_function(email, subject, html_message, text_message)
def send_registered_email(user, user_email, confirm_email_link): # pragma: no cover
# Verify certain conditions
user_manager = current_app.user_manager
if not user_manager.enable_email: return
if not user_manager.send_registered_email: return
# Retrieve email address from User or UserEmail object
email = user_email.email if user_email else user.email
assert(email)
# Render subject, html message and text message
subject, html_message, text_message = _render_email(
user_manager.registered_email_template,
user=user,
app_name=user_manager.app_name,
confirm_email_link=confirm_email_link)
# Send email message using Flask-Mail
user_manager.send_email_function(email, subject, html_message, text_message)
def send_username_changed_email(user): # pragma: no cover
# Verify certain conditions
user_manager = current_app.user_manager
if not user_manager.enable_email: return
if not user_manager.send_username_changed_email: return
# Retrieve email address from User or UserEmail object
email = _get_primary_email(user)
assert(email)
# Render subject, html message and text message
subject, html_message, text_message = _render_email(
user_manager.username_changed_email_template,
user=user,
app_name=user_manager.app_name)
# Send email message using Flask-Mail
user_manager.send_email_function(email, subject, html_message, text_message)
def send_invite_email(user, accept_invite_link):
user_manager = current_app.user_manager
if not user_manager.enable_email: return
# Render subject, html message and text message
subject, html_message, text_message = _render_email(
user_manager.invite_email_template,
user=user,
app_name=user_manager.app_name,
accept_invite_link=accept_invite_link)
# Send email message using Flask-Mail
user_manager.send_email_function(user.email, subject, html_message, text_message)
| {
"content_hash": "ae100b5ba9f856bd48f482638b672dbc",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 199,
"avg_line_length": 39.201117318435756,
"alnum_prop": 0.6943138093202224,
"repo_name": "ashwini0529/Oreo",
"id": "1bf066e6a513b838227dd99ff225f09abfa7f9ff",
"size": "7017",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "flask_user/emails.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Groff",
"bytes": "28"
},
{
"name": "HTML",
"bytes": "29193"
},
{
"name": "Python",
"bytes": "193418"
},
{
"name": "Shell",
"bytes": "3737"
}
],
"symlink_target": ""
} |
"""Exceptions used with SQLAlchemy.
The base exception class is :exc:`.SQLAlchemyError`. Exceptions which are
raised as a result of DBAPI exceptions are all subclasses of
:exc:`.DBAPIError`.
"""
from .util import compat
class SQLAlchemyError(Exception):
"""Generic error class."""
code = None
def __init__(self, *arg, **kw):
code = kw.pop("code", None)
if code is not None:
self.code = code
super(SQLAlchemyError, self).__init__(*arg, **kw)
def _code_str(self):
if not self.code:
return ""
else:
return (
"(Background on this error at: "
"http://sqlalche.me/e/%s)" % (self.code,)
)
def _message(self, as_unicode=compat.py3k):
# rules:
#
# 1. under py2k, for __str__ return single string arg as it was
# given without converting to unicode. for __unicode__
# do a conversion but check that it's not unicode already just in
# case
#
# 2. under py3k, single arg string will usually be a unicode
# object, but since __str__() must return unicode, check for
# bytestring just in case
#
# 3. for multiple self.args, this is not a case in current
# SQLAlchemy though this is happening in at least one known external
# library, call str() which does a repr().
#
if len(self.args) == 1:
text = self.args[0]
if as_unicode and isinstance(text, compat.binary_types):
return compat.decode_backslashreplace(text, "utf-8")
else:
return self.args[0]
else:
# this is not a normal case within SQLAlchemy but is here for
# compatibility with Exception.args - the str() comes out as
# a repr() of the tuple
return str(self.args)
def _sql_message(self, as_unicode):
message = self._message(as_unicode)
if self.code:
message = "%s %s" % (message, self._code_str())
return message
def __str__(self):
return self._sql_message(compat.py3k)
def __unicode__(self):
return self._sql_message(as_unicode=True)
class ArgumentError(SQLAlchemyError):
"""Raised when an invalid or conflicting function argument is supplied.
This error generally corresponds to construction time state errors.
"""
class ObjectNotExecutableError(ArgumentError):
"""Raised when an object is passed to .execute() that can't be
executed as SQL.
.. versionadded:: 1.1
"""
def __init__(self, target):
super(ObjectNotExecutableError, self).__init__(
"Not an executable object: %r" % target
)
class NoSuchModuleError(ArgumentError):
"""Raised when a dynamically-loaded module (usually a database dialect)
of a particular name cannot be located."""
class NoForeignKeysError(ArgumentError):
"""Raised when no foreign keys can be located between two selectables
during a join."""
class AmbiguousForeignKeysError(ArgumentError):
"""Raised when more than one foreign key matching can be located
between two selectables during a join."""
class CircularDependencyError(SQLAlchemyError):
"""Raised by topological sorts when a circular dependency is detected.
There are two scenarios where this error occurs:
* In a Session flush operation, if two objects are mutually dependent
on each other, they can not be inserted or deleted via INSERT or
DELETE statements alone; an UPDATE will be needed to post-associate
or pre-deassociate one of the foreign key constrained values.
The ``post_update`` flag described at :ref:`post_update` can resolve
this cycle.
* In a :attr:`.MetaData.sorted_tables` operation, two :class:`.ForeignKey`
or :class:`.ForeignKeyConstraint` objects mutually refer to each
other. Apply the ``use_alter=True`` flag to one or both,
see :ref:`use_alter`.
"""
def __init__(self, message, cycles, edges, msg=None, code=None):
if msg is None:
message += " (%s)" % ", ".join(repr(s) for s in cycles)
else:
message = msg
SQLAlchemyError.__init__(self, message, code=code)
self.cycles = cycles
self.edges = edges
def __reduce__(self):
return self.__class__, (None, self.cycles, self.edges, self.args[0])
class CompileError(SQLAlchemyError):
"""Raised when an error occurs during SQL compilation"""
class UnsupportedCompilationError(CompileError):
"""Raised when an operation is not supported by the given compiler.
.. seealso::
:ref:`faq_sql_expression_string`
:ref:`error_l7de`
"""
code = "l7de"
def __init__(self, compiler, element_type):
super(UnsupportedCompilationError, self).__init__(
"Compiler %r can't render element of type %s"
% (compiler, element_type)
)
class IdentifierError(SQLAlchemyError):
"""Raised when a schema name is beyond the max character limit"""
class DisconnectionError(SQLAlchemyError):
"""A disconnect is detected on a raw DB-API connection.
This error is raised and consumed internally by a connection pool. It can
be raised by the :meth:`.PoolEvents.checkout` event so that the host pool
forces a retry; the exception will be caught three times in a row before
the pool gives up and raises :class:`~sqlalchemy.exc.InvalidRequestError`
regarding the connection attempt.
"""
invalidate_pool = False
class InvalidatePoolError(DisconnectionError):
"""Raised when the connection pool should invalidate all stale connections.
A subclass of :class:`.DisconnectionError` that indicates that the
disconnect situation encountered on the connection probably means the
entire pool should be invalidated, as the database has been restarted.
This exception will be handled otherwise the same way as
:class:`.DisconnectionError`, allowing three attempts to reconnect
before giving up.
.. versionadded:: 1.2
"""
invalidate_pool = True
class TimeoutError(SQLAlchemyError): # noqa
"""Raised when a connection pool times out on getting a connection."""
class InvalidRequestError(SQLAlchemyError):
"""SQLAlchemy was asked to do something it can't do.
This error generally corresponds to runtime state errors.
"""
class NoInspectionAvailable(InvalidRequestError):
"""A subject passed to :func:`sqlalchemy.inspection.inspect` produced
no context for inspection."""
class ResourceClosedError(InvalidRequestError):
"""An operation was requested from a connection, cursor, or other
object that's in a closed state."""
class NoSuchColumnError(KeyError, InvalidRequestError):
"""A nonexistent column is requested from a ``RowProxy``."""
class NoReferenceError(InvalidRequestError):
"""Raised by ``ForeignKey`` to indicate a reference cannot be resolved."""
class NoReferencedTableError(NoReferenceError):
"""Raised by ``ForeignKey`` when the referred ``Table`` cannot be
located.
"""
def __init__(self, message, tname):
NoReferenceError.__init__(self, message)
self.table_name = tname
def __reduce__(self):
return self.__class__, (self.args[0], self.table_name)
class NoReferencedColumnError(NoReferenceError):
"""Raised by ``ForeignKey`` when the referred ``Column`` cannot be
located.
"""
def __init__(self, message, tname, cname):
NoReferenceError.__init__(self, message)
self.table_name = tname
self.column_name = cname
def __reduce__(self):
return (
self.__class__,
(self.args[0], self.table_name, self.column_name),
)
class NoSuchTableError(InvalidRequestError):
"""Table does not exist or is not visible to a connection."""
class UnreflectableTableError(InvalidRequestError):
"""Table exists but can't be reflected for some reason.
.. versionadded:: 1.2
"""
class UnboundExecutionError(InvalidRequestError):
"""SQL was attempted without a database connection to execute it on."""
class DontWrapMixin(object):
"""A mixin class which, when applied to a user-defined Exception class,
will not be wrapped inside of :exc:`.StatementError` if the error is
emitted within the process of executing a statement.
E.g.::
from sqlalchemy.exc import DontWrapMixin
class MyCustomException(Exception, DontWrapMixin):
pass
class MySpecialType(TypeDecorator):
impl = String
def process_bind_param(self, value, dialect):
if value == 'invalid':
raise MyCustomException("invalid!")
"""
# Moved to orm.exc; compatibility definition installed by orm import until 0.6
UnmappedColumnError = None
class StatementError(SQLAlchemyError):
"""An error occurred during execution of a SQL statement.
:class:`StatementError` wraps the exception raised
during execution, and features :attr:`.statement`
and :attr:`.params` attributes which supply context regarding
the specifics of the statement which had an issue.
The wrapped exception object is available in
the :attr:`.orig` attribute.
"""
statement = None
"""The string SQL statement being invoked when this exception occurred."""
params = None
"""The parameter list being used when this exception occurred."""
orig = None
"""The DBAPI exception object."""
def __init__(self, message, statement, params, orig, code=None):
SQLAlchemyError.__init__(self, message, code=code)
self.statement = statement
self.params = params
self.orig = orig
self.detail = []
def add_detail(self, msg):
self.detail.append(msg)
def __reduce__(self):
return (
self.__class__,
(self.args[0], self.statement, self.params, self.orig),
)
def _sql_message(self, as_unicode):
from sqlalchemy.sql import util
details = [self._message(as_unicode=as_unicode)]
if self.statement:
if not as_unicode and not compat.py3k:
stmt_detail = "[SQL: %s]" % compat.safe_bytestring(
self.statement
)
else:
stmt_detail = "[SQL: %s]" % self.statement
details.append(stmt_detail)
if self.params:
params_repr = util._repr_params(self.params, 10)
details.append("[parameters: %r]" % params_repr)
code_str = self._code_str()
if code_str:
details.append(code_str)
return "\n".join(["(%s)" % det for det in self.detail] + details)
class DBAPIError(StatementError):
"""Raised when the execution of a database operation fails.
Wraps exceptions raised by the DB-API underlying the
database operation. Driver-specific implementations of the standard
DB-API exception types are wrapped by matching sub-types of SQLAlchemy's
:class:`DBAPIError` when possible. DB-API's ``Error`` type maps to
:class:`DBAPIError` in SQLAlchemy, otherwise the names are identical. Note
that there is no guarantee that different DB-API implementations will
raise the same exception type for any given error condition.
:class:`DBAPIError` features :attr:`~.StatementError.statement`
and :attr:`~.StatementError.params` attributes which supply context
regarding the specifics of the statement which had an issue, for the
typical case when the error was raised within the context of
emitting a SQL statement.
The wrapped exception object is available in the
:attr:`~.StatementError.orig` attribute. Its type and properties are
DB-API implementation specific.
"""
code = "dbapi"
@classmethod
def instance(
cls,
statement,
params,
orig,
dbapi_base_err,
connection_invalidated=False,
dialect=None,
):
# Don't ever wrap these, just return them directly as if
# DBAPIError didn't exist.
if (
isinstance(orig, BaseException) and not isinstance(orig, Exception)
) or isinstance(orig, DontWrapMixin):
return orig
if orig is not None:
# not a DBAPI error, statement is present.
# raise a StatementError
if isinstance(orig, SQLAlchemyError) and statement:
return StatementError(
"(%s.%s) %s"
% (
orig.__class__.__module__,
orig.__class__.__name__,
orig.args[0],
),
statement,
params,
orig,
code=orig.code,
)
elif not isinstance(orig, dbapi_base_err) and statement:
return StatementError(
"(%s.%s) %s"
% (
orig.__class__.__module__,
orig.__class__.__name__,
orig,
),
statement,
params,
orig,
)
glob = globals()
for super_ in orig.__class__.__mro__:
name = super_.__name__
if dialect:
name = dialect.dbapi_exception_translation_map.get(
name, name
)
if name in glob and issubclass(glob[name], DBAPIError):
cls = glob[name]
break
return cls(
statement, params, orig, connection_invalidated, code=cls.code
)
def __reduce__(self):
return (
self.__class__,
(
self.statement,
self.params,
self.orig,
self.connection_invalidated,
),
)
def __init__(
self, statement, params, orig, connection_invalidated=False, code=None
):
try:
text = str(orig)
except Exception as e:
text = "Error in str() of DB-API-generated exception: " + str(e)
StatementError.__init__(
self,
"(%s.%s) %s"
% (orig.__class__.__module__, orig.__class__.__name__, text),
statement,
params,
orig,
code=code,
)
self.connection_invalidated = connection_invalidated
class InterfaceError(DBAPIError):
"""Wraps a DB-API InterfaceError."""
code = "rvf5"
class DatabaseError(DBAPIError):
"""Wraps a DB-API DatabaseError."""
code = "4xp6"
class DataError(DatabaseError):
"""Wraps a DB-API DataError."""
code = "9h9h"
class OperationalError(DatabaseError):
"""Wraps a DB-API OperationalError."""
code = "e3q8"
class IntegrityError(DatabaseError):
"""Wraps a DB-API IntegrityError."""
code = "gkpj"
class InternalError(DatabaseError):
"""Wraps a DB-API InternalError."""
code = "2j85"
class ProgrammingError(DatabaseError):
"""Wraps a DB-API ProgrammingError."""
code = "f405"
class NotSupportedError(DatabaseError):
"""Wraps a DB-API NotSupportedError."""
code = "tw8g"
# Warnings
class SADeprecationWarning(DeprecationWarning):
"""Issued once per usage of a deprecated API."""
class SAPendingDeprecationWarning(PendingDeprecationWarning):
"""Issued once per usage of a deprecated API."""
class SAWarning(RuntimeWarning):
"""Issued at runtime."""
| {
"content_hash": "4639ea6a2e32e311ebc753b9cd0d6ade",
"timestamp": "",
"source": "github",
"line_count": 541,
"max_line_length": 79,
"avg_line_length": 29.297597042513864,
"alnum_prop": 0.6111671924290221,
"repo_name": "wujuguang/sqlalchemy",
"id": "1e575626a5061b9e41ec285beef581242dd05c0c",
"size": "16088",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/sqlalchemy/exc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "45930"
},
{
"name": "Python",
"bytes": "11287383"
}
],
"symlink_target": ""
} |
"""
Module for importing remote and local files into CARTO
.. module:: carto.field_import
:platform: Unix, Windows
:synopsis: Module for importing remote and local files into CARTO
.. moduleauthor:: Daniel Carrion <[email protected]>
.. moduleauthor:: Alberto Romeu <[email protected]>
"""
from pyrestcli.fields import IntegerField, CharField, BooleanField
from .exceptions import CartoException
from .resources import AsyncResource, Manager
from .paginators import CartoPaginator
API_VERSION = "v1"
API_ENDPOINT = 'api/{api_version}/imports/'
class FileImportJob(AsyncResource):
"""
This class provides support for one-time uploading and importing of
remote and local files into CARTO
"""
item_queue_id = CharField()
id = CharField()
user_id = CharField()
table_id = CharField()
data_type = CharField()
table_name = CharField()
state = CharField()
error_code = IntegerField()
queue_id = CharField()
tables_created_count = IntegerField()
synchronization_id = CharField()
type_guessing = BooleanField()
quoted_fields_guessing = BooleanField()
content_guessing = BooleanField()
create_visualization = BooleanField()
visualization_id = CharField()
user_defined_limits = CharField()
get_error_text = None
display_name = CharField()
success = BooleanField()
warnings = None
is_raster = BooleanField()
class Meta:
collection_endpoint = API_ENDPOINT.format(api_version=API_VERSION)
id_field = "item_queue_id"
def __init__(self, archive, auth_client):
"""
:param auth_client: Client to make authorized requests
(currently only APIKeyAuthClient is supported)
:param archive: archive can be a pointer to a remote location, a path
to a local file or a StringIO object
:type auth_client: :class:`carto.auth.APIKeyAuthClient`
:type archive: str
:return:
"""
self.file = None
self.files = None
if archive is not None:
if hasattr(archive, "startswith") and archive.startswith("http"):
self.file = archive
else:
self.files = {'file': self.__open(archive, 'rb')}
super(FileImportJob, self).__init__(auth_client)
def run(self, **import_params):
"""
Actually creates the import job on the CARTO server
:param import_params: To be send to the Import API, see CARTO's docs
on Import API for an updated list of accepted
params
:type import_params: kwargs
:return:
.. note:: The import job is asynchronous, so you should take care of the progression, by calling the :func:`carto.resources.AsyncResource.refresh` method and check the import job :py:attr:`~state` attribute. See :func:`carto.datasets.DatasetManager.create` for a unified method to import files into CARTO
"""
if self.file:
import_params["url"] = self.file
self.id_field = "id"
if "connection" in import_params:
self.fields.append("connector")
self.update_from_dict(import_params["connection"])
self.save(force_create=True)
else:
super(FileImportJob, self).run(params=import_params,
files=self.files)
def __open(self, name, mode):
if hasattr(name, "read"):
return name
else:
return open(name, mode)
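# Minimal usage sketch (illustrative only, not part of the original module). It assumes an
# already-authenticated APIKeyAuthClient instance named `auth_client`:
#
#     job = FileImportJob('https://example.com/data.csv', auth_client)
#     job.run()
#     job.refresh()  # poll until job.state reports that the asynchronous import has finished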
class FileImportJobManager(Manager):
resource_class = FileImportJob
json_collection_attribute = "imports"
paginator_class = CartoPaginator
def filter(self):
"""
Get a filtered list of file imports
:return: A list of file imports, with only the id set (you need to
refresh them if you want all the attributes to be filled in)
:rtype: list of :class:`carto.file_import.FileImportJob`
:raise: CartoException
"""
try:
response = self.send(self.get_collection_endpoint(), "get")
if self.json_collection_attribute is not None:
resource_ids = self.client.get_response_data(
response,
self.Meta.parse_json)[self.json_collection_attribute]
else:
resource_ids = self.client.get_response_data(
response, self.Meta.parse_json)
except Exception as e:
raise CartoException(e)
resources = []
for resource_id in resource_ids:
try:
resource = self.resource_class(self.client)
except (ValueError, TypeError):
continue
else:
setattr(resource, resource.Meta.id_field, resource_id)
resources.append(resource)
return resources
def create(self, archive, **kwargs):
"""
Creates a file import on the server
:param archive: archive can be a pointer to a remote location, a path
to a local file or a StringIO object
:param kwargs: Attributes (field names and values) of the new resource
:type archive: str
:type kwargs: kwargs
:return: The :class:`carto.file_import.FileImportJob`
"""
resource = self.resource_class(archive, self.client)
resource.update_from_dict(kwargs)
resource.save(force_create=True)
return resource
| {
"content_hash": "21772aaccbd4cfbb0d9d4cf019b8b037",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 312,
"avg_line_length": 32.94082840236686,
"alnum_prop": 0.6103826118196515,
"repo_name": "CartoDB/carto-python",
"id": "76e7217280c730822374460125ba6629292011be",
"size": "5567",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "carto/file_import.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "148350"
}
],
"symlink_target": ""
} |
import unittest
"""
Given an array of length n, where each value lies between 1 to n inclusive. Some values can be
repeated multiple times and some other values can be absent. Count frequency of all elements that are
present and print the missing elements. Expected time complexity is O(n) and expected space complexity is O(1).
Input: 2 3 3 2 5
Output: 1 => 0
2 => 2
3 => 2
4 => 0
5 => 1
"""
"""
Approach:
1. Idea is to store the frequency of an index i at arr[i].
2. This is achieved by doing arr[i] = arr[arr[i]%n] + n.
3. We need to do some index manipulations as the range of values is from 1 to n and indices go from 0 to n-1.
"""
def count_frequencies(list_of_numbers):
end = len(list_of_numbers)
divisor = end + 1
for i in range(end):
index = (list_of_numbers[i] % divisor) - 1
list_of_numbers[index] += divisor
    counts = [x // divisor for x in list_of_numbers]  # floor division recovers the accumulated count
return counts
class TestCountFrequencies(unittest.TestCase):
def test_count_frequencies(self):
list_of_numbers = [2, 3, 3, 2, 5]
self.assertEqual(count_frequencies(list_of_numbers), [0, 2, 2, 0, 1])
list_of_numbers = [4, 4, 4, 4]
self.assertEqual(count_frequencies(list_of_numbers), [0, 0, 0, 4])
| {
"content_hash": "414ec17711554cb2646e252ed9e73c2d",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 111,
"avg_line_length": 31.170731707317074,
"alnum_prop": 0.6471048513302035,
"repo_name": "prathamtandon/g4gproblems",
"id": "bd8abcc1707d2a1c4de0d04d6b3d8f43f3de2c82",
"size": "1278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Arrays/count_frequencies.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "328776"
}
],
"symlink_target": ""
} |
"""
@package mi.dataset.driver
@file mi/dataset/driver/flort_dj/cspp/flort_dj_cspp_telemetered_driver.py
@author Jeff Roy
@brief Driver for the flort_dj_cspp instrument
Release notes:
Initial Release
"""
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.parser.flort_dj_cspp import \
FlortDjCsppParser, \
FlortDjCsppMetadataTelemeteredDataParticle, \
FlortDjCsppInstrumentTelemeteredDataParticle
from mi.dataset.parser.cspp_base import \
METADATA_PARTICLE_CLASS_KEY, \
DATA_PARTICLE_CLASS_KEY
from mi.core.versioning import version
@version("0.0.2")
def parse(basePythonCodePath, sourceFilePath, particleDataHdlrObj):
"""
    This is the method called by Uframe.
    :param basePythonCodePath: This is the file system location of mi-dataset
    :param sourceFilePath: This is the full path and filename of the file to be parsed
    :param particleDataHdlrObj: Java object to consume the output of the parser
    :return: particleDataHdlrObj
"""
with open(sourceFilePath, 'rU') as stream_handle:
# create and instance of the concrete driver class defined below
driver = FlortDjCsppTelemeteredDriver(basePythonCodePath, stream_handle, particleDataHdlrObj)
driver.processFileStream()
return particleDataHdlrObj
class FlortDjCsppTelemeteredDriver(SimpleDatasetDriver):
"""
Derived flort_dj_cspp driver class
All this needs to do is create a concrete _build_parser method
"""
def _build_parser(self, stream_handle):
parser_config = {
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
METADATA_PARTICLE_CLASS_KEY: FlortDjCsppMetadataTelemeteredDataParticle,
DATA_PARTICLE_CLASS_KEY: FlortDjCsppInstrumentTelemeteredDataParticle,
}
}
parser = FlortDjCsppParser(parser_config, stream_handle,
self._exception_callback)
return parser
| {
"content_hash": "9acba8c927c2787bf9e9b53406a5ebbb",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 101,
"avg_line_length": 31.560606060606062,
"alnum_prop": 0.7268362938070091,
"repo_name": "JeffRoy/mi-dataset",
"id": "22635bd0b7b18008984ac3cefd3558a7d826668c",
"size": "2106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mi/dataset/driver/flort_dj/cspp/flort_dj_cspp_telemetered_driver.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "3610231"
}
],
"symlink_target": ""
} |
"""Factories to help in tests."""
from factory import PostGenerationMethodCall, Sequence
from factory.alchemy import SQLAlchemyModelFactory
from hamcwebc.database import db
from hamcwebc.user.models import User
class BaseFactory(SQLAlchemyModelFactory):
"""Base factory."""
class Meta:
"""Factory configuration."""
abstract = True
sqlalchemy_session = db.session
class UserFactory(BaseFactory):
"""User factory."""
username = Sequence(lambda n: 'user{0}'.format(n))
email = Sequence(lambda n: 'user{0}@example.com'.format(n))
password = PostGenerationMethodCall('set_password', 'example')
active = True
class Meta:
"""Factory configuration."""
model = User
| {
"content_hash": "a7c1f6de0a631a23aa658478d61ebe0d",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 66,
"avg_line_length": 24.633333333333333,
"alnum_prop": 0.6847090663058186,
"repo_name": "gurkslask/hamcwebc",
"id": "37c49a03eb605cf9d65152b6153c17d899e03d63",
"size": "763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/factories.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1170"
},
{
"name": "HTML",
"bytes": "10224"
},
{
"name": "JavaScript",
"bytes": "182312"
},
{
"name": "Python",
"bytes": "50304"
}
],
"symlink_target": ""
} |
"""
Client load generator
2016 samules (c)
"""
import os
import random
import zmq
import sys
import socket
import redis
from datetime import datetime
from config.redis_config import redis_config
from locking import FLock
sys.path.append(os.path.join(os.path.expanduser('~'), 'qa', 'dynamo'))
from logger import pubsub_logger
from config import CTRL_MSG_PORT
from response_actions import response_action, DynamoException
from config import error_codes
def timestamp():
return datetime.utcnow().strftime('%Y/%m/%d %H:%M:%S.%f')
def build_message(result, action, data, time_stamp, error_code=None, error_message=None, path=None, line=None):
"""
    Builds the result message dict.
    Success format:  {'result', 'action', 'target', 'timestamp', 'data': {...}}
    Failure format:  {'result', 'action', 'error_code', 'error_message', 'target', 'linenum',
                      'timestamp', 'data': {...}}
"""
if result == 'success':
message = {'result': result, 'action': action, 'target': path,
'timestamp': time_stamp, 'data': data}
else:
message = {'result': result, 'action': action, 'error_code': error_code, 'error_message': error_message,
'target': path, 'linenum': line,
'timestamp': time_stamp, 'data': data}
return message
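# Example of the resulting dict (illustrative values only; the 'mkdir' action, the target path
# and the timestamp below are hypothetical):
#   build_message('success', 'mkdir', {'dirsize': 0}, timestamp(), path='mnt0/dir_0001')
#   -> {'result': 'success', 'action': 'mkdir', 'target': 'mnt0/dir_0001',
#       'timestamp': '2016/01/01 12:00:00.000000', 'data': {'dirsize': 0}}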
class Dynamo(object):
def __init__(self, mount_points, controller, server, nodes, domains, **kwargs):
try:
self.logger = pubsub_logger.PUBLogger(controller).logger
self.logger.info(f"PUB Logger {self.logger} is started")
self.mount_points = mount_points
self._server = server # Server Cluster hostname
self.nodes = nodes
self.domains = domains
self._context = zmq.Context()
self._controller_ip = socket.gethostbyname(controller)
# Socket to send messages on by client
self._socket = self._context.socket(zmq.DEALER)
# We don't need to store the id anymore, the socket will handle it
# all for us.
# We'll use client host name + process ID to identify the socket
self._socket.identity = "{0}:0x{1:x}".format(socket.gethostname(), os.getpid()).encode()
self.logger.info("Setting up connection to Controller Server...")
self._socket.connect("tcp://{0}:{1}".format(self._controller_ip, CTRL_MSG_PORT))
# Initialising connection to Redis (our byte-range locking DB)
self.logger.info("Setting up Redis connection...")
self.locking_db = redis.StrictRedis(**redis_config)
self.flock = FLock(self.locking_db, kwargs.get('locking_type'))
self.logger.info(f"Dynamo {self._socket.identity} init done")
except Exception as e:
self.logger.error(f"Connection error: {e}")
def run(self):
self.logger.info(f"Dynamo {self._socket.identity} started")
try:
msg = None
job_id = None
# Send a connect message
self._socket.send_json({'message': 'connect'})
self.logger.debug(f"Client {self._socket.identity} sent back 'connect' message.")
# Poll the socket for incoming messages. This will wait up to
# 0.1 seconds before returning False. The other way to do this
# is is to use zmq.NOBLOCK when reading from the socket,
# catching zmq.AGAIN and sleeping for 0.1.
while True:
try:
# Note that we can still use send_json()/recv_json() here,
# the DEALER socket ensures we don't have to deal with
# client ids at all.
# self.logger.debug(f"Blocking waiting for response form socket {self._socket.identity}")
job_id, work = self._socket.recv_json()
# self.logger.debug(f"Job: {job_id} received from socket {self._socket.identity}")
msg = self._do_work(work)
self.logger.debug(f"Going to send {job_id}: {msg}")
self._socket.send_json(
{'message': 'job_done',
'result': msg,
'job_id': job_id})
self.logger.debug(f"{job_id} sent")
except zmq.ZMQError as zmq_error:
self.logger.warn(f"Failed to send message due to: {zmq_error}. Message {job_id} lost!")
except TypeError:
self.logger.error(f"JSON Serialisation error: msg: {msg}")
except KeyboardInterrupt:
pass
except Exception as e:
self.logger.exception(e)
finally:
self._disconnect()
def _disconnect(self):
"""
Send the Controller a disconnect message and end the run loop
"""
self._socket.send_json({'message': 'disconnect'})
def _do_work(self, work):
"""
Args:
work: dict
Returns: str
"""
action = work['action']
data = {}
mount_point = random.choice(self.mount_points)
self.logger.debug(f"Incoming job: '{work['action']}' on '{work['data']['target']}' data: {work['data']}")
try:
if 'None' in work['data']['target']:
raise DynamoException(error_codes.NO_TARGET,
"{0}".format("Target not specified", work['data']['target']))
response = response_action(action, mount_point, work['data'],
dst_mount_point=mount_point, flock=self.flock)
if response:
data = response
except OSError as os_error:
return build_message('failed', action, data, timestamp(), error_code=os_error.errno,
error_message=os_error.strerror,
path='/'.join([mount_point, work['data']['target']]),
line=sys.exc_info()[-1].tb_lineno)
except Exception as unhandled_error:
self.logger.exception(unhandled_error)
return build_message('failed', action, data, timestamp(), error_message=unhandled_error.args[0],
path=''.join([mount_point, work['data']['target']]),
line=sys.exc_info()[-1].tb_lineno)
return build_message('success', action, data, timestamp(), path=work['data']['target'])
| {
"content_hash": "f9501282abbb849f778d2e9eae573786",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 115,
"avg_line_length": 45.110344827586204,
"alnum_prop": 0.5638281608316771,
"repo_name": "samuelsh/pyFstress",
"id": "7b8827d03f5f045c84b63bd6a6f6f82d6a698e64",
"size": "6541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/dynamo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "234629"
},
{
"name": "Shell",
"bytes": "1909"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('forum', '0009_topic_post_it'),
]
operations = [
migrations.AddField(
model_name='profile',
name='banned',
field=models.BooleanField(default=False, verbose_name='Is banned'),
),
]
| {
"content_hash": "70b34c2b9cc81594d761adab9e6c34c5",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 79,
"avg_line_length": 22.166666666666668,
"alnum_prop": 0.5964912280701754,
"repo_name": "rdujardin/icforum",
"id": "b22ee2ac96f448fc4c0d7724d6335c7a398b5039",
"size": "1055",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "icforum/forum/migrations/0010_profile_banned.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8915"
},
{
"name": "HTML",
"bytes": "34809"
},
{
"name": "JavaScript",
"bytes": "3171"
},
{
"name": "Python",
"bytes": "100413"
}
],
"symlink_target": ""
} |
import json
import os
import codecs
import sys
ESC_CHAR = 27  # ASCII escape character, used to build ANSI terminal control sequences
ERASE_LINE_CTRL_CODE = '[2K'    # ANSI CSI code: erase the current line
CURSOR_UP_CTRL_CODE = '[%dA'    # ANSI CSI code: move the cursor up %d rows
CURSOR_DOWN_CTRL_CODE = '[%dB'  # ANSI CSI code: move the cursor down %d rows
class StreamOutputError(Exception):
pass
def stream_output(output, stream):
is_terminal = hasattr(stream, 'fileno') and os.isatty(stream.fileno())
if sys.version_info[0] < 3:
stream = codecs.getwriter('utf-8')(stream)
all_events = []
lines = {}
diff = 0
print(output)
for chunk in output:
print(chunk)
event = json.loads(chunk)
all_events.append(event)
if 'progress' in event or 'progressDetail' in event:
image_id = event.get('id')
if not image_id:
continue
if image_id in lines:
diff = len(lines) - lines[image_id]
else:
lines[image_id] = len(lines)
stream.write("\n")
diff = 0
if is_terminal:
# move cursor up `diff` rows
stream.write("%c%s" % (ESC_CHAR, (CURSOR_UP_CTRL_CODE % diff)))
print_output_event(event, stream, is_terminal)
if 'id' in event and is_terminal:
# move cursor back down
stream.write("%c%s" % (ESC_CHAR, (CURSOR_DOWN_CTRL_CODE % diff)))
stream.flush()
return all_events
def print_output_event(event, stream, is_terminal):
if 'errorDetail' in event:
raise StreamOutputError(event['errorDetail']['message'])
terminator = ''
if is_terminal and 'stream' not in event:
# erase current line
stream.write("%c%s\r" % (ESC_CHAR, ERASE_LINE_CTRL_CODE))
terminator = "\r"
elif 'progressDetail' in event:
return
if 'time' in event:
stream.write("[%s] " % event['time'])
if 'id' in event:
stream.write("%s: " % event['id'])
if 'from' in event:
stream.write("(from %s) " % event['from'])
status = event.get('status', '')
if 'progress' in event:
stream.write("%s %s%s" % (status, event['progress'], terminator))
elif 'progressDetail' in event:
detail = event['progressDetail']
if 'current' in detail:
percentage = float(detail['current']) / float(detail['total']) * 100
stream.write('%s (%.1f%%)%s' % (status, percentage, terminator))
else:
stream.write('%s%s' % (status, terminator))
elif 'stream' in event:
stream.write("%s%s" % (event['stream'], terminator))
else:
stream.write("%s%s\n" % (status, terminator))
| {
"content_hash": "8197c85c41dc2e0d3a1f38e6219bf82d",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 80,
"avg_line_length": 27.93548387096774,
"alnum_prop": 0.554657428791378,
"repo_name": "pralexa/awsebcli",
"id": "31419f9c8c74ebb4aca17cc5055fef83015aa60e",
"size": "2598",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ebcli/bundled/_compose/progress_stream.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "877630"
},
{
"name": "Roff",
"bytes": "104"
},
{
"name": "Shell",
"bytes": "280"
}
],
"symlink_target": ""
} |
def length_of_code(code):
"""
Centralize the character counting to one place
"""
return len(code.replace('\r\n', '\n'))
| {
"content_hash": "bf7080796744da734cf9d8f7e778988a",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 50,
"avg_line_length": 27.2,
"alnum_prop": 0.6029411764705882,
"repo_name": "lionleaf/dwitter",
"id": "7872b56ecfbcb35427ec39122d4829234f84bf47",
"size": "137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dwitter/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "88799"
},
{
"name": "HTML",
"bytes": "47341"
},
{
"name": "JavaScript",
"bytes": "222169"
},
{
"name": "Makefile",
"bytes": "1239"
},
{
"name": "Python",
"bytes": "153398"
}
],
"symlink_target": ""
} |
import unittest
from handledata import remove_hashtag, get_hashtag
from handledata import split_record
class DataTestCase(unittest.TestCase):
"""
    Unit tests for handledata.py
"""
##################################################
# Unittest for preprocessing data
def test_get_hashtag(self):
"""
Unittest for function get_hashtag hashtag.
"""
self.assertEqual("#ohi", get_hashtag("abcd #ohi"))
self.assertEqual("", get_hashtag("abcd ohi ojk"))
def test_remove_hashtag(self):
"""
Unittest for function remove hashtag.
"""
tweets = ["abcd #ohi",
"abcd #ohi ojk"]
result = ["abcd ",
"abcd ojk"]
self.assertEqual(result, remove_hashtag(tweets))
def test_split_record(self):
"""
Unittest for function split_record.
"""
# test data
sid = '123456'
stamp = '151413'
degree = '1'
content = "this's the content.#hashtag"
url = "http://jkfslkdf.ckd"
tweet = sid + ' | ' + stamp + ' | ' + \
degree + ' | ' + content + \
' |*|' + url
tweet_tuple = (sid, stamp, degree, content, url)
# test result
ret = split_record(tweet)
# data and result should be equal
self.assertEqual(tweet_tuple, ret, msg="Split result should be \
{}, rather than{}".format(tweet_tuple, ret))
| {
"content_hash": "9fbef165cb9fa25dc8508393b0f69503",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 72,
"avg_line_length": 29.76,
"alnum_prop": 0.5114247311827957,
"repo_name": "Shitaibin/tweets-spam-clustering",
"id": "0d4dcbce546ac6bdcac1d15aa6305d42e06225b0",
"size": "1488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiment/tools/test_handledata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44781"
}
],
"symlink_target": ""
} |
import os
import shutil
import string
import subprocess
import sys
from os.path import sep
outBase = '..' + sep + '..' + sep + 'output' + sep + 'calcPhifBenchmark'
def findAveragePotentTime(dataPath):
avgTime = 0
f = open(dataPath + sep + 'performance.csv', 'r')
potentTimeSum = 0
numValues = 0
for line in f:
fields = string.split(line, sep=',')
potentTimeSum = potentTimeSum + float(fields[-3])
numValues = numValues + 1
avgTime = potentTimeSum / numValues
return avgTime
def runPic(width, height):
exeName = '../../bin/pic'
width = str(width)
height = str(height)
maxTime = str(4)
logInterval = str(maxTime)
b0 = str(3)
sigmaHe = str(1)
sigmaHi = str(0.5)
sigmaCe = str(100)
sigmaCi = str(100)
sigmaHePerp = str(0.25)
sigmaHiPerp = str(0.125)
sigmaCeSecondary = str(2)
percentageSecondary = str(0.05)
outputDir = outBase + sep + 'width_' + str(width) + '_height_' + str(height)
argList = [ exeName \
, '-x', width \
, '-y', height \
, '-t', maxTime \
, '-l', logInterval \
, '--inject-width', width \
, '--b0', b0 \
, 'sigma-he', sigmaHe \
, 'sigma-hi', sigmaHi \
, 'sigma-ce', sigmaCe \
, 'sigma-ci', sigmaCi \
, 'sigma-he-perp', sigmaHePerp \
, 'sigma-hi-perp', sigmaHiPerp \
, 'sigma-ce-secondary', sigmaCeSecondary \
, 'percentage-secondary', percentageSecondary \
, '-o', outputDir \
]
if sys.platform == 'linux2':
f = open('/dev/null', 'r')
subprocess.call(argList, stdout=f)
else:
subprocess.call(argList)
return findAveragePotentTime(outputDir)
def main():
startWidthPower = 5 # 32
endWidthPower = 10 # 1024
heightIncrement = 2500
endHeight = 30000
if os.path.exists(outBase):
shutil.rmtree(outBase)
os.makedirs(outBase)
f = open('potentPerformance.csv', 'w')
first = True
for widthPower in range(startWidthPower, endWidthPower+1):
runtimeData = []
width = 2**widthPower
for height in range(heightIncrement, endHeight+1, heightIncrement):
if first:
f.write('w=' + str(width) + ' h=' + str(height))
first = False
else:
f.write(',w=' + str(width) + ' h=' + str(height))
for widthPower in range(startWidthPower, endWidthPower+1):
runtimeData = []
width = 2**widthPower
for height in range(heightIncrement, endHeight+1, heightIncrement):
print 'Benchmarking width=' + str(width) + ' height=' + str(height)
runtimeData.append(runPic(width, height))
first = True
f.write('\n')
for i in runtimeData:
if first:
f.write(str(i))
first = False
else:
f.write(',' + str(i))
f.close()
shutil.rmtree(outBase)
return
main()
| {
"content_hash": "e16dd28642e1c2be14a2e7312a121f57",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 79,
"avg_line_length": 26.522935779816514,
"alnum_prop": 0.5804219993081978,
"repo_name": "CacheMiss/pic",
"id": "ff0019735ce23d8ce46fceef3a65f37e60f844e2",
"size": "2910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "performance_tests/potent_test/benchmarkCalcPhif.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "67417"
},
{
"name": "C++",
"bytes": "163660"
},
{
"name": "Cuda",
"bytes": "135870"
},
{
"name": "Matlab",
"bytes": "203427"
},
{
"name": "Python",
"bytes": "2910"
},
{
"name": "Shell",
"bytes": "2334"
}
],
"symlink_target": ""
} |
import unittest
from SceneViewTest import SceneViewTest
from ShaderAssignmentUITest import ShaderAssignmentUITest
from StandardGraphLayoutTest import StandardGraphLayoutTest
from SceneGadgetTest import SceneGadgetTest
from SceneInspectorTest import SceneInspectorTest
from SceneHierarchyTest import SceneHierarchyTest
from DocumentationTest import DocumentationTest
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "289d0b389becd0bad4210acb00cff697",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 59,
"avg_line_length": 39.5,
"alnum_prop": 0.8708860759493671,
"repo_name": "goddardl/gaffer",
"id": "0c7aceac4bc4b18dd8175cf24d4d2a75123c4682",
"size": "2198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/GafferSceneUITest/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2228"
},
{
"name": "C++",
"bytes": "4178625"
},
{
"name": "GLSL",
"bytes": "6250"
},
{
"name": "Python",
"bytes": "4152621"
},
{
"name": "Shell",
"bytes": "8787"
},
{
"name": "Slash",
"bytes": "36371"
}
],
"symlink_target": ""
} |
"""
-------------------------------------------------------------------------
AIOpening - __init__.py
Defines the Spaces (action/state space) base class
created: 2017/09/01 in PyCharm
(c) 2017 Sven - ducandu GmbH
-------------------------------------------------------------------------
"""
from .tuple import Tuple
from .discrete import Discrete
from .box import Box
from .base import Space
__all__ = ["Space", "Discrete", "Box", "Tuple"]
| {
"content_hash": "57b512d917976265cdb52182fc387ea2",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 74,
"avg_line_length": 25.055555555555557,
"alnum_prop": 0.45454545454545453,
"repo_name": "ducandu/aiopening",
"id": "d67a73842802e7b7f30f12d6fb76e3f95c488967",
"size": "451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiopening/spaces/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "203260"
}
],
"symlink_target": ""
} |
import pandas as pd
cluster = './c319p70+0p56'
photoz = pd.read_csv(cluster + '_photoz.csv')
specz = pd.read_csv(cluster + '_specz.csv')
photo = pd.read_csv(cluster + '_photo.csv')
c = pd.merge(photo, photoz, left_on='objID', right_on='objID', how='outer')
try:
sc = pd.merge(c, specz, left_on='objID', right_on='objID', how='outer')
sc.rename(columns={'ra_x': 'ra', 'dec_x': 'dec'}, inplace=True)
del sc['ra']
del sc['dec']
del sc['dec_y']
del sc['ra_y']
sc.to_csv(cluster + '_combined.csv')
matched = pd.read_csv(cluster + '_matched.csv')
scm = pd.merge(matched, sc, left_on='objID', right_on='objID', how='outer')
except KeyError:
print('No Speczs')
c.to_csv(cluster + '_combined.csv')
matched = pd.read_csv(cluster + '_matched.csv')
scm = pd.merge(matched, c, left_on='objID', right_on='objID', how='outer')
scm.to_csv(cluster + '_complete.csv')
| {
"content_hash": "442008f6136e672d86fe2e914ab0b583",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 79,
"avg_line_length": 39.26086956521739,
"alnum_prop": 0.6190476190476191,
"repo_name": "boada/vpCluster",
"id": "5a7593a97dce0913cfe8195f7b10c5a97f0cdb83",
"size": "903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/boada/august_2012/catalogs/merge_cats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "1096"
},
{
"name": "C",
"bytes": "11445"
},
{
"name": "IDL",
"bytes": "47873"
},
{
"name": "PostScript",
"bytes": "60669635"
},
{
"name": "Python",
"bytes": "359734"
},
{
"name": "TeX",
"bytes": "35070"
}
],
"symlink_target": ""
} |
import pickle
import secfs.store
import secfs.fs
from secfs.types import I, Principal, User, Group
# current_itables represents the current view of the file system's itables
current_itables = {}
# a server connection handle is passed to us at mount time by secfs-fuse
server = None
def register(_server):
global server
server = _server
def pre(refresh, user):
"""
Called before all user file system operations, right after we have obtained
an exclusive server lock.
"""
if refresh != None:
# refresh usermap and groupmap
refresh()
def post(push_vs):
if not push_vs:
# when creating a root, we should not push a VS (yet)
# you will probably want to leave this here and
# put your post() code instead of "pass" below.
return
pass
class Itable:
"""
An itable holds a particular principal's mappings from inumber (the second
element in an i tuple) to an inode hash for users, and to a user's i for
groups.
"""
def __init__(self):
self.mapping = {}
def load(ihandle):
b = secfs.store.block.load(ihandle)
if b == None:
return None
t = Itable()
t.mapping = pickle.loads(b)
return t
def bytes(self):
return pickle.dumps(self.mapping)
def resolve(i, resolve_groups = True):
"""
Resolve the given i into an inode hash. If resolve_groups is not set, group
    i's will only be resolved to their user i, but not further.
In particular, for some i = (principal, inumber), we first find the itable
for the principal, and then find the inumber-th element of that table. If
the principal was a user, we return the value of that element. If not, we
have a group i, which we resolve again to get the ihash set by the last
user to write the group i.
"""
if not isinstance(i, I):
raise TypeError("{} is not an I, is a {}".format(i, type(i)))
principal = i.p
if not isinstance(principal, Principal):
raise TypeError("{} is not a Principal, is a {}".format(principal, type(principal)))
if not i.allocated():
# someone is trying to look up an i that has not yet been allocated
return None
global current_itables
if principal not in current_itables:
# User does not yet have an itable
return None
t = current_itables[principal]
if i.n not in t.mapping:
raise LookupError("principal {} does not have i {}".format(principal, i))
    # sanity checks
if principal.is_group() and not isinstance(t.mapping[i.n], I):
raise TypeError("looking up group i, but did not get indirection ihash")
if principal.is_user() and isinstance(t.mapping[i.n], I):
raise TypeError("looking up user i, but got indirection ihash")
if isinstance(t.mapping[i.n], I) and resolve_groups:
# we're looking up a group i
# follow the indirection
return resolve(t.mapping[i.n])
return t.mapping[i.n]
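# To make the resolution rules above concrete: a user's itable maps an
# inumber directly to a block hash (a string), while a group's itable maps an
# inumber to the I of the user who last wrote that entry, so resolving a
# group i costs one extra lookup. The sketch below only illustrates those two
# shapes; it is not called by SecFS, and the sample hash is made up.
def _resolution_sketch(user_i):
    user_table = Itable()
    user_table.mapping = {0: "deadbeef"}   # user itable: inumber -> block hash
    group_table = Itable()
    group_table.mapping = {0: user_i}      # group itable: inumber -> user I
    # a group lookup hops through the user i before reaching the block hash
    return group_table.mapping[0], user_table.mapping[0]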
def modmap(mod_as, i, ihash):
"""
Changes or allocates i so it points to ihash.
If i.allocated() is false (i.e. the I was created without an i-number), a
new i-number will be allocated for the principal i.p. This function is
complicated by the fact that i might be a group i, in which case we need
to:
1. Allocate an i as mod_as
2. Allocate/change the group i to point to the new i above
    modmap returns the mapped i, with i.n filled in if the passed i was not
allocated.
"""
if not isinstance(i, I):
raise TypeError("{} is not an I, is a {}".format(i, type(i)))
if not isinstance(mod_as, User):
raise TypeError("{} is not a User, is a {}".format(mod_as, type(mod_as)))
assert mod_as.is_user() # only real users can mod
if mod_as != i.p:
print("trying to mod object for {} through {}".format(i.p, mod_as))
assert i.p.is_group() # if not for self, then must be for group
real_i = resolve(i, False)
if isinstance(real_i, I) and real_i.p == mod_as:
# We updated the file most recently, so we can just update our i.
# No need to change the group i at all.
# This is an optimization.
i = real_i
elif isinstance(real_i, I) or real_i == None:
if isinstance(ihash, I):
# Caller has done the work for us, so we just need to link up
# the group entry.
print("mapping", i, "to", ihash, "which again points to", resolve(ihash))
else:
# Allocate a new entry for mod_as, and continue as though ihash
# was that new i.
# XXX: kind of unnecessary to send two VS for this
_ihash = ihash
ihash = modmap(mod_as, I(mod_as), ihash)
print("mapping", i, "to", ihash, "which again points to", _ihash)
else:
# This is not a group i!
# User is trying to overwrite something they don't own!
raise PermissionError("illegal modmap; tried to mod i {0} as {1}".format(i, mod_as))
# find (or create) the principal's itable
t = None
global current_itables
if i.p not in current_itables:
if i.allocated():
# this was unexpected;
# user did not have an itable, but an inumber was given
raise ReferenceError("itable not available")
t = Itable()
print("no current list for principal", i.p, "; creating empty table", t.mapping)
else:
t = current_itables[i.p]
# look up (or allocate) the inumber for the i we want to modify
if not i.allocated():
inumber = 0
while inumber in t.mapping:
inumber += 1
i.allocate(inumber)
else:
if i.n not in t.mapping:
raise IndexError("invalid inumber")
# modify the entry, and store back the updated itable
if i.p.is_group():
print("mapping", i.n, "for group", i.p, "into", t.mapping)
t.mapping[i.n] = ihash # for groups, ihash is an i
current_itables[i.p] = t
return i
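# Write-path sketch mirroring the two steps the modmap() docstring describes
# for a group i: first the writing user's own itable gets an entry for the new
# block hash, then the group itable entry is pointed at that user i. This is
# illustration only; it skips the permission and allocation checks that
# modmap() itself performs and is never called by SecFS.
def _group_write_sketch(group_i, user_i, ihash):
    current_itables.setdefault(user_i.p, Itable()).mapping[user_i.n] = ihash
    current_itables.setdefault(group_i.p, Itable()).mapping[group_i.n] = user_i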
| {
"content_hash": "cadba330643209fed6675bd7d813a2a9",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 96,
"avg_line_length": 34.7909604519774,
"alnum_prop": 0.611075024358558,
"repo_name": "mit-pdos/secfs-skeleton",
"id": "ee087bb79838e82853cfd04bd279e55eb22720f4",
"size": "6491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "secfs/tables.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49646"
},
{
"name": "Shell",
"bytes": "22719"
}
],
"symlink_target": ""
} |
"""Handles database requests from other nova services."""
import copy
import itertools
from oslo import messaging
from oslo.serialization import jsonutils
from oslo.utils import excutils
from oslo.utils import timeutils
import six
from nova.api.ec2 import ec2utils
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute_api
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor.tasks import live_migrate
from nova.db import base
from nova import exception
from nova.i18n import _, _LE, _LW
from nova import image
from nova import manager
from nova import network
from nova.network.security_group import openstack_driver
from nova import notifications
from nova import objects
from nova.objects import base as nova_object
from nova.openstack.common import log as logging
from nova import quota
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
LOG = logging.getLogger(__name__)
# Instead of having a huge list of arguments to instance_update(), we just
# accept a dict of fields to update and use this whitelist to validate it.
allowed_updates = ['task_state', 'vm_state', 'expected_task_state',
'power_state', 'access_ip_v4', 'access_ip_v6',
'launched_at', 'terminated_at', 'host', 'node',
'memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb',
'instance_type_id', 'root_device_name', 'launched_on',
'progress', 'vm_mode', 'default_ephemeral_device',
'default_swap_device', 'root_device_name',
'system_metadata', 'updated_at'
]
# Fields that we want to convert back into a datetime object.
datetime_fields = ['launched_at', 'terminated_at', 'updated_at']
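# Condensed illustration (not used by nova itself) of how the two lists above
# are meant to be applied: reject any key outside the whitelist and coerce the
# datetime-valued fields back from their string form. instance_update() below
# performs the same checks inline.
def _example_validate_updates(updates):
    for key, value in updates.items():
        if key not in allowed_updates:
            raise KeyError("unexpected update keyword '%s'" % key)
        if key in datetime_fields and isinstance(value, six.string_types):
            updates[key] = timeutils.parse_strtime(value)
    return updates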
class ConductorManager(manager.Manager):
"""Mission: Conduct things.
The methods in the base API for nova-conductor are various proxy operations
performed on behalf of the nova-compute service running on compute nodes.
Compute nodes are not allowed to directly access the database, so this set
of methods allows them to get specific work done without locally accessing
the database.
The nova-conductor service also exposes an API in the 'compute_task'
namespace. See the ComputeTaskManager class for details.
"""
target = messaging.Target(version='2.1')
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
*args, **kwargs)
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self._network_api = None
self._compute_api = None
self.compute_task_mgr = ComputeTaskManager()
self.cells_rpcapi = cells_rpcapi.CellsAPI()
self.additional_endpoints.append(self.compute_task_mgr)
@property
def network_api(self):
# NOTE(danms): We need to instantiate our network_api on first use
# to avoid the circular dependency that exists between our init
# and network_api's
if self._network_api is None:
self._network_api = network.API()
return self._network_api
@property
def compute_api(self):
if self._compute_api is None:
self._compute_api = compute_api.API()
return self._compute_api
def ping(self, context, arg):
# NOTE(russellb) This method can be removed in 2.0 of this API. It is
# now a part of the base rpc API.
return jsonutils.to_primitive({'service': 'conductor', 'arg': arg})
@messaging.expected_exceptions(KeyError, ValueError,
exception.InvalidUUID,
exception.InstanceNotFound,
exception.UnexpectedTaskStateError)
def instance_update(self, context, instance_uuid,
updates, service):
for key, value in updates.iteritems():
if key not in allowed_updates:
LOG.error(_LE("Instance update attempted for "
"'%(key)s' on %(instance_uuid)s"),
{'key': key, 'instance_uuid': instance_uuid})
raise KeyError("unexpected update keyword '%s'" % key)
if key in datetime_fields and isinstance(value, six.string_types):
updates[key] = timeutils.parse_strtime(value)
# NOTE(danms): the send_update() call below is going to want to know
# about the flavor, so we need to join the appropriate things here,
# and objectify the result.
old_ref, instance_ref = self.db.instance_update_and_get_original(
context, instance_uuid, updates,
columns_to_join=['system_metadata'])
inst_obj = objects.Instance._from_db_object(
context, objects.Instance(),
instance_ref, expected_attrs=['system_metadata'])
notifications.send_update(context, old_ref, inst_obj, service)
return jsonutils.to_primitive(instance_ref)
@messaging.expected_exceptions(exception.InstanceNotFound)
def instance_get_by_uuid(self, context, instance_uuid,
columns_to_join):
return jsonutils.to_primitive(
self.db.instance_get_by_uuid(context, instance_uuid,
columns_to_join))
def instance_get_all_by_host(self, context, host, node,
columns_to_join):
if node is not None:
result = self.db.instance_get_all_by_host_and_node(
context.elevated(), host, node)
else:
result = self.db.instance_get_all_by_host(context.elevated(), host,
columns_to_join)
return jsonutils.to_primitive(result)
def migration_get_in_progress_by_host_and_node(self, context,
host, node):
migrations = self.db.migration_get_in_progress_by_host_and_node(
context, host, node)
return jsonutils.to_primitive(migrations)
@messaging.expected_exceptions(exception.AggregateHostExists)
def aggregate_host_add(self, context, aggregate, host):
host_ref = self.db.aggregate_host_add(context.elevated(),
aggregate['id'], host)
return jsonutils.to_primitive(host_ref)
@messaging.expected_exceptions(exception.AggregateHostNotFound)
def aggregate_host_delete(self, context, aggregate, host):
self.db.aggregate_host_delete(context.elevated(),
aggregate['id'], host)
def aggregate_metadata_get_by_host(self, context, host,
key='availability_zone'):
result = self.db.aggregate_metadata_get_by_host(context, host, key)
return jsonutils.to_primitive(result)
def bw_usage_update(self, context, uuid, mac, start_period,
bw_in, bw_out, last_ctr_in, last_ctr_out,
last_refreshed, update_cells):
if [bw_in, bw_out, last_ctr_in, last_ctr_out].count(None) != 4:
self.db.bw_usage_update(context, uuid, mac, start_period,
bw_in, bw_out, last_ctr_in, last_ctr_out,
last_refreshed,
update_cells=update_cells)
usage = self.db.bw_usage_get(context, uuid, start_period, mac)
return jsonutils.to_primitive(usage)
def provider_fw_rule_get_all(self, context):
rules = self.db.provider_fw_rule_get_all(context)
return jsonutils.to_primitive(rules)
# NOTE(danms): This can be removed in version 3.0 of the RPC API
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
info = self.db.agent_build_get_by_triple(context, hypervisor, os,
architecture)
return jsonutils.to_primitive(info)
def block_device_mapping_update_or_create(self, context, values, create):
if create is None:
bdm = self.db.block_device_mapping_update_or_create(context,
values)
elif create is True:
bdm = self.db.block_device_mapping_create(context, values)
else:
bdm = self.db.block_device_mapping_update(context,
values['id'],
values)
bdm_obj = objects.BlockDeviceMapping._from_db_object(
context, objects.BlockDeviceMapping(), bdm)
self.cells_rpcapi.bdm_update_or_create_at_top(context, bdm_obj,
create=create)
def block_device_mapping_get_all_by_instance(self, context, instance,
legacy):
bdms = self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid'])
if legacy:
bdms = block_device.legacy_mapping(bdms)
return jsonutils.to_primitive(bdms)
def instance_get_all_by_filters(self, context, filters, sort_key,
sort_dir, columns_to_join,
use_slave):
result = self.db.instance_get_all_by_filters(
context, filters, sort_key, sort_dir,
columns_to_join=columns_to_join, use_slave=use_slave)
return jsonutils.to_primitive(result)
def instance_get_active_by_window(self, context, begin, end,
project_id, host):
# Unused, but cannot remove until major RPC version bump
result = self.db.instance_get_active_by_window(context, begin, end,
project_id, host)
return jsonutils.to_primitive(result)
def instance_get_active_by_window_joined(self, context, begin, end,
project_id, host):
result = self.db.instance_get_active_by_window_joined(
context, begin, end, project_id, host)
return jsonutils.to_primitive(result)
def instance_destroy(self, context, instance):
result = self.db.instance_destroy(context, instance['uuid'])
return jsonutils.to_primitive(result)
def instance_fault_create(self, context, values):
result = self.db.instance_fault_create(context, values)
return jsonutils.to_primitive(result)
# NOTE(kerrin): The last_refreshed argument is unused by this method
# and can be removed in v3.0 of the RPC API.
def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
wr_bytes, instance, last_refreshed, update_totals):
vol_usage = self.db.vol_usage_update(context, vol_id,
rd_req, rd_bytes,
wr_req, wr_bytes,
instance['uuid'],
instance['project_id'],
instance['user_id'],
instance['availability_zone'],
update_totals)
# We have just updated the database, so send the notification now
self.notifier.info(context, 'volume.usage',
compute_utils.usage_volume_info(vol_usage))
@messaging.expected_exceptions(exception.ComputeHostNotFound,
exception.HostBinaryNotFound)
def service_get_all_by(self, context, topic, host, binary):
if not any((topic, host, binary)):
result = self.db.service_get_all(context)
elif all((topic, host)):
if topic == 'compute':
result = self.db.service_get_by_compute_host(context, host)
# FIXME(comstud) Potentially remove this on bump to v3.0
result = [result]
else:
result = self.db.service_get_by_host_and_topic(context,
host, topic)
elif all((host, binary)):
result = self.db.service_get_by_args(context, host, binary)
elif topic:
result = self.db.service_get_all_by_topic(context, topic)
elif host:
result = self.db.service_get_all_by_host(context, host)
return jsonutils.to_primitive(result)
@messaging.expected_exceptions(exception.InstanceActionNotFound)
def action_event_start(self, context, values):
evt = self.db.action_event_start(context, values)
return jsonutils.to_primitive(evt)
@messaging.expected_exceptions(exception.InstanceActionNotFound,
exception.InstanceActionEventNotFound)
def action_event_finish(self, context, values):
evt = self.db.action_event_finish(context, values)
return jsonutils.to_primitive(evt)
def service_create(self, context, values):
svc = self.db.service_create(context, values)
return jsonutils.to_primitive(svc)
@messaging.expected_exceptions(exception.ServiceNotFound)
def service_destroy(self, context, service_id):
self.db.service_destroy(context, service_id)
def compute_node_create(self, context, values):
result = self.db.compute_node_create(context, values)
return jsonutils.to_primitive(result)
def compute_node_update(self, context, node, values):
result = self.db.compute_node_update(context, node['id'], values)
return jsonutils.to_primitive(result)
def compute_node_delete(self, context, node):
result = self.db.compute_node_delete(context, node['id'])
return jsonutils.to_primitive(result)
@messaging.expected_exceptions(exception.ServiceNotFound)
def service_update(self, context, service, values):
svc = self.db.service_update(context, service['id'], values)
return jsonutils.to_primitive(svc)
def task_log_get(self, context, task_name, begin, end, host, state):
result = self.db.task_log_get(context, task_name, begin, end, host,
state)
return jsonutils.to_primitive(result)
def task_log_begin_task(self, context, task_name, begin, end, host,
task_items, message):
result = self.db.task_log_begin_task(context.elevated(), task_name,
begin, end, host, task_items,
message)
return jsonutils.to_primitive(result)
def task_log_end_task(self, context, task_name, begin, end, host,
errors, message):
result = self.db.task_log_end_task(context.elevated(), task_name,
begin, end, host, errors, message)
return jsonutils.to_primitive(result)
def notify_usage_exists(self, context, instance, current_period,
ignore_missing_network_data,
system_metadata, extra_usage_info):
if not isinstance(instance, objects.Instance):
attrs = ['metadata', 'system_metadata']
instance = objects.Instance._from_db_object(context,
objects.Instance(),
instance,
expected_attrs=attrs)
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period,
ignore_missing_network_data,
system_metadata, extra_usage_info)
def security_groups_trigger_handler(self, context, event, args):
self.security_group_api.trigger_handler(event, context, *args)
def security_groups_trigger_members_refresh(self, context, group_ids):
self.security_group_api.trigger_members_refresh(context, group_ids)
def network_migrate_instance_start(self, context, instance, migration):
self.network_api.migrate_instance_start(context, instance, migration)
def network_migrate_instance_finish(self, context, instance, migration):
self.network_api.migrate_instance_finish(context, instance, migration)
def quota_commit(self, context, reservations, project_id=None,
user_id=None):
quota.QUOTAS.commit(context, reservations, project_id=project_id,
user_id=user_id)
def quota_rollback(self, context, reservations, project_id=None,
user_id=None):
quota.QUOTAS.rollback(context, reservations, project_id=project_id,
user_id=user_id)
def get_ec2_ids(self, context, instance):
ec2_ids = {}
ec2_ids['instance-id'] = ec2utils.id_to_ec2_inst_id(instance['uuid'])
ec2_ids['ami-id'] = ec2utils.glance_id_to_ec2_id(context,
instance['image_ref'])
for image_type in ['kernel', 'ramdisk']:
image_id = instance.get('%s_id' % image_type)
if image_id is not None:
ec2_image_type = ec2utils.image_type(image_type)
ec2_id = ec2utils.glance_id_to_ec2_id(context, image_id,
ec2_image_type)
ec2_ids['%s-id' % image_type] = ec2_id
return ec2_ids
def compute_unrescue(self, context, instance):
self.compute_api.unrescue(context, instance)
def _object_dispatch(self, target, method, context, args, kwargs):
"""Dispatch a call to an object method.
This ensures that object methods get called and any exception
that is raised gets wrapped in an ExpectedException for forwarding
back to the caller (without spamming the conductor logs).
"""
try:
# NOTE(danms): Keep the getattr inside the try block since
# a missing method is really a client problem
return getattr(target, method)(context, *args, **kwargs)
except Exception:
raise messaging.ExpectedException()
def object_class_action(self, context, objname, objmethod,
objver, args, kwargs):
"""Perform a classmethod action on an object."""
objclass = nova_object.NovaObject.obj_class_from_name(objname,
objver)
result = self._object_dispatch(objclass, objmethod, context,
args, kwargs)
# NOTE(danms): The RPC layer will convert to primitives for us,
# but in this case, we need to honor the version the client is
# asking for, so we do it before returning here.
return (result.obj_to_primitive(target_version=objver)
if isinstance(result, nova_object.NovaObject) else result)
def object_action(self, context, objinst, objmethod, args, kwargs):
"""Perform an action on an object."""
oldobj = objinst.obj_clone()
result = self._object_dispatch(objinst, objmethod, context,
args, kwargs)
updates = dict()
# NOTE(danms): Diff the object with the one passed to us and
# generate a list of changes to forward back
for name, field in objinst.fields.items():
if not objinst.obj_attr_is_set(name):
# Avoid demand-loading anything
continue
if (not oldobj.obj_attr_is_set(name) or
getattr(oldobj, name) != getattr(objinst, name)):
updates[name] = field.to_primitive(objinst, name,
getattr(objinst, name))
# This is safe since a field named this would conflict with the
# method anyway
updates['obj_what_changed'] = objinst.obj_what_changed()
return updates, result
def object_backport(self, context, objinst, target_version):
return objinst.obj_to_primitive(target_version=target_version)
class ComputeTaskManager(base.Base):
"""Namespace for compute methods.
This class presents an rpc API for nova-conductor under the 'compute_task'
namespace. The methods here are compute operations that are invoked
by the API service. These methods see the operation to completion, which
may involve coordinating activities on multiple compute nodes.
"""
target = messaging.Target(namespace='compute_task', version='1.11')
def __init__(self):
super(ComputeTaskManager, self).__init__()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.image_api = image.API()
self.scheduler_client = scheduler_client.SchedulerClient()
@messaging.expected_exceptions(exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.InstanceNotRunning,
exception.MigrationPreCheckError,
exception.LiveMigrationWithOldNovaNotSafe,
exception.UnsupportedPolicyException)
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit, reservations=None,
clean_shutdown=True):
if instance and not isinstance(instance, nova_object.NovaObject):
# NOTE(danms): Until v2 of the RPC API, we need to tolerate
# old-world instance objects here
attrs = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance,
expected_attrs=attrs)
# NOTE(melwitt): Remove this in version 2.0 of the RPC API
if flavor and not isinstance(flavor, objects.Flavor):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
flavor = objects.Flavor.get_by_id(context, flavor['id'])
if live and not rebuild and not flavor:
self._live_migrate(context, instance, scheduler_hint,
block_migration, disk_over_commit)
elif not live and not rebuild and flavor:
instance_uuid = instance['uuid']
with compute_utils.EventReporter(context, 'cold_migrate',
instance_uuid):
self._cold_migrate(context, instance, flavor,
scheduler_hint['filter_properties'],
reservations, clean_shutdown)
else:
raise NotImplementedError()
def _cold_migrate(self, context, instance, flavor, filter_properties,
reservations, clean_shutdown):
image_ref = instance.image_ref
image = compute_utils.get_image_metadata(
context, self.image_api, image_ref, instance)
request_spec = scheduler_utils.build_request_spec(
context, image, [instance], instance_type=flavor)
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
try:
scheduler_utils.setup_instance_group(context, request_spec,
filter_properties)
scheduler_utils.populate_retry(filter_properties, instance['uuid'])
hosts = self.scheduler_client.select_destinations(
context, request_spec, filter_properties)
host_state = hosts[0]
except exception.NoValidHost as ex:
vm_state = instance.vm_state
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, request_spec)
quotas.rollback()
# if the flavor IDs match, it's migrate; otherwise resize
if flavor['id'] == instance['instance_type_id']:
msg = _("No valid host found for cold migrate")
else:
msg = _("No valid host found for resize")
raise exception.NoValidHost(reason=msg)
except exception.UnsupportedPolicyException as ex:
with excutils.save_and_reraise_exception():
vm_state = instance.vm_state
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, request_spec)
quotas.rollback()
try:
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
# context is not serializable
filter_properties.pop('context', None)
(host, node) = (host_state['host'], host_state['nodename'])
self.compute_rpcapi.prep_resize(
context, image, instance,
flavor, host,
reservations, request_spec=request_spec,
filter_properties=filter_properties, node=node,
clean_shutdown=clean_shutdown)
except Exception as ex:
with excutils.save_and_reraise_exception():
updates = {'vm_state': instance.vm_state,
'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, request_spec)
quotas.rollback()
def _set_vm_state_and_notify(self, context, instance_uuid, method, updates,
ex, request_spec):
scheduler_utils.set_vm_state_and_notify(
context, instance_uuid, 'compute_task', method, updates,
ex, request_spec, self.db)
def _live_migrate(self, context, instance, scheduler_hint,
block_migration, disk_over_commit):
destination = scheduler_hint.get("host")
def _set_vm_state(context, instance, ex, vm_state=None,
task_state=None):
request_spec = {'instance_properties': {
'uuid': instance['uuid'], },
}
scheduler_utils.set_vm_state_and_notify(context,
instance.uuid,
'compute_task', 'migrate_server',
dict(vm_state=vm_state,
task_state=task_state,
expected_task_state=task_states.MIGRATING,),
ex, request_spec, self.db)
try:
live_migrate.execute(context, instance, destination,
block_migration, disk_over_commit)
except (exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.InstanceNotRunning,
exception.MigrationPreCheckError,
exception.LiveMigrationWithOldNovaNotSafe) as ex:
with excutils.save_and_reraise_exception():
# TODO(johngarbutt) - eventually need instance actions here
_set_vm_state(context, instance, ex, instance.vm_state)
except Exception as ex:
LOG.error(_LE('Migration of instance %(instance_id)s to host'
' %(dest)s unexpectedly failed.'),
{'instance_id': instance['uuid'], 'dest': destination},
exc_info=True)
_set_vm_state(context, instance, ex, vm_states.ERROR,
instance['task_state'])
raise exception.MigrationError(reason=six.text_type(ex))
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping=None, legacy_bdm=True):
# TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
# 2.0 of the RPC API.
request_spec = scheduler_utils.build_request_spec(context, image,
instances)
# TODO(danms): Remove this in version 2.0 of the RPC API
if (requested_networks and
not isinstance(requested_networks,
objects.NetworkRequestList)):
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest.from_tuple(t)
for t in requested_networks])
# TODO(melwitt): Remove this in version 2.0 of the RPC API
flavor = filter_properties.get('instance_type')
if flavor and not isinstance(flavor, objects.Flavor):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
flavor = objects.Flavor.get_by_id(context, flavor['id'])
filter_properties = dict(filter_properties, instance_type=flavor)
try:
scheduler_utils.setup_instance_group(context, request_spec,
filter_properties)
# check retry policy. Rather ugly use of instances[0]...
# but if we've exceeded max retries... then we really only
# have a single instance.
scheduler_utils.populate_retry(filter_properties,
instances[0].uuid)
hosts = self.scheduler_client.select_destinations(context,
request_spec, filter_properties)
except Exception as exc:
updates = {'vm_state': vm_states.ERROR, 'task_state': None}
for instance in instances:
self._set_vm_state_and_notify(
context, instance.uuid, 'build_instances', updates,
exc, request_spec)
return
for (instance, host) in itertools.izip(instances, hosts):
try:
instance.refresh()
except (exception.InstanceNotFound,
exception.InstanceInfoCacheNotFound):
LOG.debug('Instance deleted during build', instance=instance)
continue
local_filter_props = copy.deepcopy(filter_properties)
scheduler_utils.populate_filter_properties(local_filter_props,
host)
# The block_device_mapping passed from the api doesn't contain
# instance specific information
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
self.compute_rpcapi.build_and_run_instance(context,
instance=instance, host=host['host'], image=image,
request_spec=request_spec,
filter_properties=local_filter_props,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=bdms, node=host['nodename'],
limits=host['limits'])
def _delete_image(self, context, image_id):
return self.image_api.delete(context, image_id)
def _schedule_instances(self, context, image, filter_properties,
*instances):
request_spec = scheduler_utils.build_request_spec(context, image,
instances)
scheduler_utils.setup_instance_group(context, request_spec,
filter_properties)
hosts = self.scheduler_client.select_destinations(context,
request_spec, filter_properties)
return hosts
def unshelve_instance(self, context, instance):
sys_meta = instance.system_metadata
def safe_image_show(ctx, image_id):
if image_id:
return self.image_api.get(ctx, image_id, show_deleted=False)
else:
raise exception.ImageNotFound(image_id='')
if instance.vm_state == vm_states.SHELVED:
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=task_states.UNSHELVING)
self.compute_rpcapi.start_instance(context, instance)
snapshot_id = sys_meta.get('shelved_image_id')
if snapshot_id:
self._delete_image(context, snapshot_id)
elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
image = None
image_id = sys_meta.get('shelved_image_id')
# No need to check for image if image_id is None as
# "shelved_image_id" key is not set for volume backed
# instance during the shelve process
if image_id:
with compute_utils.EventReporter(
context, 'get_image_info', instance.uuid):
try:
image = safe_image_show(context, image_id)
except exception.ImageNotFound:
instance.vm_state = vm_states.ERROR
instance.save()
reason = _('Unshelve attempted but the image %s '
'cannot be found.') % image_id
LOG.error(reason, instance=instance)
raise exception.UnshelveException(
instance_id=instance.uuid, reason=reason)
try:
with compute_utils.EventReporter(context, 'schedule_instances',
instance.uuid):
filter_properties = {}
hosts = self._schedule_instances(context, image,
filter_properties,
instance)
host_state = hosts[0]
scheduler_utils.populate_filter_properties(
filter_properties, host_state)
(host, node) = (host_state['host'], host_state['nodename'])
self.compute_rpcapi.unshelve_instance(
context, instance, host, image=image,
filter_properties=filter_properties, node=node)
except (exception.NoValidHost,
exception.UnsupportedPolicyException):
instance.task_state = None
instance.save()
LOG.warning(_LW("No valid host found for unshelve instance"),
instance=instance)
return
else:
LOG.error(_LE('Unshelve attempted but vm_state not SHELVED or '
'SHELVED_OFFLOADED'), instance=instance)
instance.vm_state = vm_states.ERROR
instance.save()
return
for key in ['shelved_at', 'shelved_image_id', 'shelved_host']:
if key in sys_meta:
del(sys_meta[key])
instance.system_metadata = sys_meta
instance.save()
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False, host=None):
with compute_utils.EventReporter(context, 'rebuild_server',
instance.uuid):
if not host:
# NOTE(lcostantino): Retrieve scheduler filters for the
# instance when the feature is available
filter_properties = {'ignore_hosts': [instance.host]}
request_spec = scheduler_utils.build_request_spec(context,
image_ref,
[instance])
try:
scheduler_utils.setup_instance_group(context, request_spec,
filter_properties)
hosts = self.scheduler_client.select_destinations(context,
request_spec,
filter_properties)
host = hosts.pop(0)['host']
except exception.NoValidHost as ex:
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify(context, instance.uuid,
'rebuild_server',
{'vm_state': instance.vm_state,
'task_state': None}, ex, request_spec)
LOG.warning(_LW("No valid host found for rebuild"),
instance=instance)
except exception.UnsupportedPolicyException as ex:
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify(context, instance.uuid,
'rebuild_server',
{'vm_state': instance.vm_state,
'task_state': None}, ex, request_spec)
LOG.warning(_LW("Server with unsupported policy "
"cannot be rebuilt"),
instance=instance)
self.compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=new_pass,
injected_files=injected_files,
image_ref=image_ref,
orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata,
bdms=bdms,
recreate=recreate,
on_shared_storage=on_shared_storage,
preserve_ephemeral=preserve_ephemeral,
host=host)
| {
"content_hash": "cadc10c7ec10c9f30e936c0a49c14cf8",
"timestamp": "",
"source": "github",
"line_count": 818,
"max_line_length": 79,
"avg_line_length": 49.06845965770171,
"alnum_prop": 0.5579749862972744,
"repo_name": "mgagne/nova",
"id": "88ee72c154f887bcb4780e9dd6b5ffa128ce6fe7",
"size": "40743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/conductor/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15421976"
},
{
"name": "Shell",
"bytes": "21612"
}
],
"symlink_target": ""
} |
class Solution(object):
def numberToWords(self, num):
"""
:type num: int
:rtype: str
"""
if num==0:
return 'Zero'
mapping=["","Thousand","Million", "Billion"]
resstr=''
for i in range(len(mapping)):
if num%1000 != 0:
resstr = self.helperths(num%1000) + mapping[i] + ' ' + resstr
num /= 1000
return resstr.strip()
def helperths(self,num):
"""
        Deal with a number less than one thousand (0-999).
"""
lesstw = ["","One","Two","Three","Four","Five","Six","Seven","Eight","Nine","Ten","Eleven","Twelve","Thirteen","Fourteen","Fifteen","Sixteen","Seventeen","Eighteen","Nineteen"]
tens = ["","Ten","Twenty","Thirty","Forty","Fifty","Sixty","Seventy","Eighty","Ninety"]
if num==0:
return ""
if 0<num<20:
return lesstw[num]+' '
if 20<=num<100:
return tens[num/10]+' '+self.helperths(num%10)
if num>=100:
return lesstw[num/100]+' Hundred '+self.helperths(num%100)
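# Quick sanity check of the chunk-by-thousands approach used above; it assumes
# the Python 2 integer-division semantics the solution already relies on and
# is not part of the LeetCode submission itself.
if __name__ == '__main__':
    s = Solution()
    assert s.numberToWords(0) == 'Zero'
    assert s.numberToWords(123) == 'One Hundred Twenty Three'
    assert s.numberToWords(12345) == 'Twelve Thousand Three Hundred Forty Five'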
| {
"content_hash": "b4959f709f7dbf8bf606c6c903874f41",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 184,
"avg_line_length": 33.26470588235294,
"alnum_prop": 0.4916003536693192,
"repo_name": "Tanych/CodeTracking",
"id": "8688a919696ed9c344a5ef1cdcfa0e4425c6bf93",
"size": "1131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "273-Integer-to-English-Words/solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1723"
},
{
"name": "C++",
"bytes": "1024"
},
{
"name": "Java",
"bytes": "1261"
},
{
"name": "Python",
"bytes": "409211"
}
],
"symlink_target": ""
} |
usage = '''
Write extra flags to outfile for DM based on the bot name:
$ python dm_flags.py outfile Test-Mac10.9-MacMini6.2-HD4000-x86_64-Release
Or run self-tests:
$ python dm_flags.py test
'''
import inspect
import json
import os
import sys
def lineno():
caller = inspect.stack()[1] # Up one level to our caller.
return inspect.getframeinfo(caller[0]).lineno
cov_start = lineno()+1 # We care about coverage starting just past this def.
def get_args(bot):
args = []
configs = ['565', '8888', 'gpu']
# The S4 crashes and the NP produces a long error stream when we run with
# MSAA.
if ('GalaxyS4' not in bot and
'NexusPlayer' not in bot):
if 'Android' in bot:
configs.extend(['msaa4', 'nvprmsaa4'])
else:
configs.extend(['msaa16', 'nvprmsaa16'])
# Xoom and NP are running out of RAM when we run all these modes. skia:3255
if ('Xoom' not in bot and
'NexusPlayer' not in bot):
configs.extend(mode + '-8888' for mode in
['serialize', 'tiles_rt', 'pipe'])
configs.append('tiles_rt-gpu')
if 'ANGLE' in bot:
configs.append('angle')
args.append('--config')
args.extend(configs)
blacklist = []
# This image is too large to be a texture for many GPUs.
blacklist.extend('gpu _ PANO_20121023_214540.jpg'.split(' '))
blacklist.extend('msaa _ PANO_20121023_214540.jpg'.split(' '))
# Drawing SKPs or images into GPU canvases is a New Thing.
# It seems like we're running out of RAM on some Android bots, so start off
# with a very wide blacklist disabling all these tests on all Android bots.
if 'Android' in bot: # skia:3255
blacklist.extend('gpu skp _ gpu image _ gpu subset _'.split(' '))
blacklist.extend('msaa skp _ msaa image _ gpu subset _'.split(' '))
if blacklist:
args.append('--blacklist')
args.extend(blacklist)
match = []
if 'Alex' in bot: # skia:2793
# This machine looks to be running out of heap.
# Running with fewer threads may help.
args.extend(['--threads', '1'])
if 'Valgrind' in bot: # skia:3021
match.append('~Threaded')
if 'Xoom' in bot: # skia:1699
match.append('~WritePixels')
# skia:3249: these images flakily don't decode on Android.
if 'Android' in bot:
match.append('~tabl_mozilla_0')
match.append('~desk_yahoonews_0')
if match:
args.append('--match')
args.extend(match)
# Though their GPUs are interesting, these don't test anything on
# the CPU that other ARMv7+NEON bots don't test faster (N5).
if ('Nexus10' in bot or
'Nexus7' in bot or
'GalaxyS3' in bot or
'GalaxyS4' in bot):
args.append('--nocpu')
return args
cov_end = lineno() # Don't care about code coverage past here.
def self_test():
import coverage # This way the bots don't need coverage.py to be installed.
args = {}
cases = [
'Test-Android-Nexus7-Tegra3-Arm7-Release',
'Test-Android-Xoom-Tegra2-Arm7-Release',
'Test-ChromeOS-Alex-GMA3150-x86-Debug',
'Test-Ubuntu12-ShuttleA-GTX550Ti-x86_64-Release-Valgrind',
'Test-Win7-ShuttleA-HD2000-x86-Debug-ANGLE',
]
cov = coverage.coverage()
cov.start()
for case in cases:
args[case] = get_args(case)
cov.stop()
this_file = os.path.basename(__file__)
_, _, not_run, _ = cov.analysis(this_file)
filtered = [line for line in not_run if line > cov_start and line < cov_end]
if filtered:
print 'Lines not covered by test cases: ', filtered
sys.exit(1)
golden = this_file.replace('.py', '.json')
with open(os.path.join(os.path.dirname(__file__), golden), 'w') as f:
json.dump(args, f, indent=2, sort_keys=True)
if __name__ == '__main__':
if len(sys.argv) == 2 and sys.argv[1] == 'test':
self_test()
sys.exit(0)
if len(sys.argv) != 3:
print usage
sys.exit(1)
with open(sys.argv[1], 'w') as out:
json.dump(get_args(sys.argv[2]), out)
| {
"content_hash": "323b49c1967c4946b943a3d8e0486f7a",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 78,
"avg_line_length": 30.162790697674417,
"alnum_prop": 0.6448213826779748,
"repo_name": "Igalia/skia",
"id": "a7ed8ca7afaf804f93bd63633fea4c74f68c631c",
"size": "3914",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/dm_flags.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "10339"
},
{
"name": "C",
"bytes": "580861"
},
{
"name": "C++",
"bytes": "25954052"
},
{
"name": "CSS",
"bytes": "2042"
},
{
"name": "Go",
"bytes": "677"
},
{
"name": "HTML",
"bytes": "24562"
},
{
"name": "Java",
"bytes": "24340"
},
{
"name": "JavaScript",
"bytes": "7593"
},
{
"name": "Lua",
"bytes": "25531"
},
{
"name": "Makefile",
"bytes": "9003"
},
{
"name": "Objective-C",
"bytes": "22720"
},
{
"name": "Objective-C++",
"bytes": "107142"
},
{
"name": "Python",
"bytes": "326166"
},
{
"name": "Shell",
"bytes": "41399"
}
],
"symlink_target": ""
} |
r"""
models
~~~~~~~
Data model of blog.
:copyright: (c) 2013 by Harvey Wang.
"""
import re
from datetime import datetime
from werkzeug import security
from ._base import db, SessionMixin
__all__ = [
'Category', 'Tag',
'Article', 'Comment',
'User', 'Link',
'BlackList', 'Subscriber',
'article_tags']
article_tags = db.Table(
'tags',
db.Column('tag_id', db.Integer, db.ForeignKey('tag.tid')),
db.Column('article_id', db.Integer, db.ForeignKey('article.aid')))
class Category(db.Model, SessionMixin):
caid = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), unique=True, index=True, nullable=False)
title = db.Column(db.String(50), unique=True, index=True, nullable=False)
def __unicode__(self):
return self.name
def __repr__(self):
return '<Category: %r>' % self.name
class Tag(db.Model, SessionMixin):
tid = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), unique=True, index=True, nullable=False)
title = db.Column(db.String(50), unique=True, index=True, nullable=False)
def __unicode__(self):
return self.name
def __repr__(self):
return '<Tag: %r>' % self.name
class Article(db.Model, SessionMixin):
__searchable__ = ['title', 'content']
aid = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100), unique=True, index=True, nullable=False)
content = db.Column(db.Text)
    status = db.Column(db.Integer, default=1) # 0: draft, 1: published, -1: invalid
created_time = db.Column(db.DateTime, default=datetime.now)
modified_time = db.Column(db.DateTime, default=datetime.now)
    is_always_above = db.Column(db.Integer, default=0) # pinned to top: 0 or 1
    share = db.Column(db.Integer, default=0) # share to social networks
click_count = db.Column(db.Integer, default=0)
category_id = db.Column(db.Integer, db.ForeignKey('category.caid'))
category = db.relationship('Category', backref=db.backref('articles', lazy='dynamic'), lazy='select')
author_id = db.Column(db.Integer, db.ForeignKey('user.id'), default=1)
author = db.relationship('User', backref='articles', lazy='select')
tags = db.relationship('Tag', secondary=article_tags, backref=db.backref('articles', lazy='dynamic'))
def __unicode__(self):
return self.title
def __repr__(self):
return '<Article: %r, posted on %r>' % (self.title, self.modified_time)
def article_abstract(self):
return re.split(r'<!--more-->', self.content)[0]
def inc_click(self):
self.click_count += 1
db.session.commit()
class Comment(db.Model, SessionMixin):
coid = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(50))
email_address = db.Column(db.String(80))
site = db.Column(db.String(100))
    avatar = db.Column(db.String(100)) # avatar image
content = db.Column(db.Text)
post_date = db.Column(db.DateTime, default=datetime.now)
    visible = db.Column(db.Integer, default=1) # whether the comment is shown
ip = db.Column(db.String(15))
reply_to_comment_id = db.Column(db.Integer, db.ForeignKey('comment.coid'))
reply_to_comment = db.relationship('Comment', backref='comments', remote_side=[coid])
article_id = db.Column(db.Integer, db.ForeignKey('article.aid'))
article = db.relationship('Article', backref=db.backref('comments', lazy='dynamic'))
def __unicode__(self):
return self.content
def __repr__(self):
return '<Comment: #%r, posted on %r>' % (self.coid, self.post_date)
class User(db.Model, SessionMixin):
id = db.Column(db.Integer, primary_key=True)
uid = db.Column(db.BigInteger)
name = db.Column(db.String(50), unique=True, nullable=False, index=True)
password = db.Column(db.String(100), nullable=False)
email = db.Column(db.String(200), unique=True, nullable=False)
role = db.Column(db.String(10), default='admin')
active = db.Column(db.DateTime, default=datetime.utcnow, index=True)
created = db.Column(db.DateTime, default=datetime.utcnow)
avatar = db.Column(db.String(100))
token = db.Column(db.String(20))
login_type = db.Column(db.Integer) # 1:weibo; 2: uid;
def __init__(self, **kwargs):
self.token = self.create_token(16)
if 'username' in kwargs:
username = kwargs.pop('username')
self.name = username.lower()
if 'password' in kwargs:
rawpass = kwargs.pop('password')
self.password = self.create_password(rawpass)
if 'email' in kwargs:
email = kwargs.pop('email')
self.email = email.lower()
for k, v in kwargs.items():
setattr(self, k, v)
def __unicode__(self):
return self.name
def __repr__(self):
return '<User: %r at %r>' % (self.name, self.email)
@staticmethod
def create_password(rawpass):
passwd = '%s%s' % (rawpass, db.app.config['PASSWORD_SECRET'])
return security.generate_password_hash(passwd)
@staticmethod
def create_token(length=16):
return security.gen_salt(length)
@property
def is_admin(self):
if self.role == 'admin':
return True
def check_password(self, rawpass):
passwd = '%s%s' % (rawpass, db.app.config['PASSWORD_SECRET'])
return security.check_password_hash(self.password, passwd)
def change_password(self, rawpass):
self.password = self.create_password(rawpass)
self.token = self.create_token()
return self
class Link(db.Model, SessionMixin):
lid = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
site = db.Column(db.String(100)) # url
def __unicode__(self):
return self.name
class BlackList(db.Model, SessionMixin):
blid = db.Column(db.Integer, primary_key=True)
ip_address = db.Column(db.String(15))
def __unicode__(self):
return self.ip_address
class Subscriber(db.Model, SessionMixin):
sid = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(50))
email_address = db.Column(db.String(80))
subscrible_time = db.Column(db.DateTime, default=datetime.now)
enabled = db.Column(db.Integer, default=True)
def __unicode__(self):
return self.username
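# Minimal usage sketch of the models above, assuming `db` is already bound to
# a Flask application context; the tag, category and article values are made
# up. It only illustrates the Article <-> Tag many-to-many wiring declared by
# `article_tags` and is not called anywhere in Qingblog.
def _example_create_article():
    python_tag = Tag(name='python', title='Python')
    notes = Category(name='notes', title='Notes')
    article = Article(title='Hello', content='First post.<!--more-->Body',
                      category=notes, tags=[python_tag])
    db.session.add(article)
    db.session.commit()
    return article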
| {
"content_hash": "fac1a73f2f6b2ff79a69063ff8a1eb83",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 105,
"avg_line_length": 32.31188118811881,
"alnum_prop": 0.6151371227210051,
"repo_name": "harveyqing/Qingblog",
"id": "99b759ceea78cc300cd3b239211543715d3dc492",
"size": "6598",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Qingblog/models/models.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "123541"
},
{
"name": "JavaScript",
"bytes": "291506"
},
{
"name": "Python",
"bytes": "42557"
},
{
"name": "Shell",
"bytes": "0"
}
],
"symlink_target": ""
} |
import argparse
import subprocess
import textwrap
import portslicer
import fieldslicer
import combiner
import bro
#Create the command-line capability
parser = argparse.ArgumentParser(prog="Bro Log Utility Script",
description=textwrap.dedent('''\
This program will slice conn.log's based off of a given port or field. It will also
combine conn.log's together in order to make slicing and analysis easier.'''),
formatter_class=argparse.RawTextHelpFormatter)
mandatory = parser.add_argument_group("Mandatory",description="These are mandatory.")
optional = parser.add_argument_group("Optional", description="These are optional switches.")
mandatory.add_argument("-i", "--input", help="The path to the conn.log", metavar="")
mandatory.add_argument("-p", "--ports", help="List of ports seperated by a comma", metavar="")
mandatory.add_argument("-f", "--fields", help="List of fields seperated by a comma", metavar="")
optional.add_argument("-b", "--bro", help="Takes in the file path of PCAPs and runs thems against Bro IDS", metavar="")
optional.add_argument("-v", "--verbose", help="Outputs status to screen", action="store_true")
optional.add_argument("-c", "--combine", help=textwrap.dedent('''\
Combine all files of a specified type into one. Specify the path to where the
files are located followed by the type enclosed in quotes. This will find all
files with the specified type in them. You just have to specify the base directory.
Example: If you wanted your conn.log's combined and they are in your home
directory in a folder titled bro, you would type:
- c "/home/user/bro/ conn.log"
This will find all conn.log's within /home/user/bro/ no matter how nested.'''),
nargs=2, metavar="")
optional.add_argument("-o", "--output", help="Specify the output name when combining conn.log's", metavar="")
args = parser.parse_args()
def main():
if args.ports > 0:
portslicer.portslicer(args.input, args.verbose, args.ports)
elif args.fields > 0:
fieldslicer.fieldslicer(args.input, args.verbose, args.fields)
elif args.combine > 0:
#runs the linux find command to find the files the user wants to combine
temp_files = subprocess.check_output(["find",args.combine[0],"-name",args.combine[-1]])
combiner.combiner(args.verbose, args.output, args.combine[-1].upper(),temp_files)
elif args.bro > 0:
#uses the linux find command to find the pcaps to run.
temp_files = subprocess.check_output(["find",args.bro,"-name snort.log"])
bro.bro(args.verbose, args.bro)
if __name__ == "__main__":
main()
| {
"content_hash": "7149f7535816ed2005da4973723760a1",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 119,
"avg_line_length": 48.92727272727273,
"alnum_prop": 0.6882199925678186,
"repo_name": "jacobdshimer/Bro-Log-Utility-Script",
"id": "1e9e9bff6d96081a380c7d198607f044578f3e50",
"size": "2766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bro_utility.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10390"
}
],
"symlink_target": ""
} |
from __future__ import division
import math
class SakaCalendar(object):
JULIAN_EPOCH = 1721425.5
SAKA_MONTH_NAMES= [u"Chaitra", u"Vaisakha", u"Jyaishta", u"Asadha", u"Sravana", u"Bhadra",u"Aswina", u"Kartiak", u"Agrahayana",u"Pausa",u"Magha",u"Phalguna"]
SAKA_WEEK_NAMES= [u"Ravivar", u"Somavar", u"Mangalvar", u"Budhvar", u"Sukhravar", u"Guruvar",u"Sanivar"]
IE = 0
# The only practical difference from a Gregorian calendar is that years
    # are numbered since the Saka Era. A couple of overrides will
    # take care of that....
# Starts in 78 AD,
INDIAN_ERA_START = 78
# The Indian year starts 80 days later than the Gregorian year.
INDIAN_YEAR_START = 80
def get_month_length(self, extendedYear, month):
if month < 0 or month > 11:
            extendedYear += month // 12 # floorDivide(month, 12, remainder)
month = month%12
if self.is_gregorian_leap(extendedYear + self.INDIAN_ERA_START) and month == 0:
return 31
if month >= 1 and month <=5 :
return 31
return 30
u"""
This routine converts an Indian date to the corresponding Julian date
@param year The year in Saka Era according to Indian calendar.
@param month The month according to Indian calendar (between 1 to 12)
@param date The date in month
"""
def saka_to_julian_date(self,year,month, date):
gyear = year + self.INDIAN_ERA_START
if self.is_gregorian_leap(gyear) :
leapMonth = 31
start = self.gregorian_to_julian_date(gyear, 3, 21)
else :
leapMonth = 30
start = self.gregorian_to_julian_date(gyear, 3, 22)
if month == 1 :
jd = start + (date - 1)
else:
jd = start + leapMonth
m = month - 2
m = m if m <= 5 else 5
jd += m * 31
if month >= 8 :
m = month - 7
jd += m * 30
jd += date - 1
return jd
u"""
The following function is not needed for basic calendar functioning.
This routine converts a gregorian date to the corresponding Julian date"
@param year The year in standard Gregorian calendar (AD/BC) .
@param month The month according to Gregorian calendar (between 0 to 11)
@param date The date in month
"""
def gregorian_to_julian_date(self, year, month, date) :
jd = ((self.JULIAN_EPOCH - 1) +
(365 * (year - 1)) +
math.floor((year - 1) / 4) +
(-math.floor((year - 1) / 100)) +
math.floor((year - 1) / 400) +
math.floor((((367 * month) - 362) / 12) +
( 0 if (month <= 2) else -1 if self.is_gregorian_leap(year) else -2) ) +
date)
return jd
u"""
The following function is not needed for basic calendar functioning.
This routine converts a julian day (jd) to the corresponding date in Gregorian calendar"
@param jd The Julian date in Julian Calendar which is to be converted to Indian date"
"""
def julian_date_to_gregorian(self, jd) :
julianDate=[None, None,None]
wjd = math.floor(jd - 0.5) + 0.5
depoch = wjd - self.JULIAN_EPOCH
quadricent = math.floor(depoch / 146097)
dqc = depoch % 146097
cent = math.floor(dqc / 36524)
dcent = dqc % 36524
quad = math.floor(dcent / 1461)
dquad = dcent % 1461
yindex = math.floor(dquad / 365)
year = int((quadricent * 400) + (cent * 100) + (quad * 4) + yindex)
if not ((cent == 4) or (yindex == 4)) :
year+=1
yearday = wjd - self.gregorian_to_julian_date(year, 1, 1)
leapadj = ( 0 if (wjd < self.gregorian_to_julian_date(year, 3, 1)) else 1 if self.is_gregorian_leap(year) else 2)
month = int(math.floor((((yearday + leapadj) * 12) + 373) / 367))
day = int((wjd - self.gregorian_to_julian_date(year, month, 1)) + 1)
julianDate[0] = year
julianDate[1] = month
julianDate[2] = day
return julianDate
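        # e.g. (illustrative): julian_date_to_gregorian(2460025.5) == [2023, 3, 22]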
u"""
The following function is not needed for basic calendar functioning.
This routine checks if the Gregorian year is a leap year"
@param year The year in Gregorian Calendar
"""
def is_gregorian_leap(self,year):
return ((year % 4) == 0) and (not(((year % 100) == 0) and ((year % 400) != 0)))
def gregorian_to_saka_date(self, gregorianDay):
indDate=[None,None,None]
IndianYear = gregorianDay[0] - self.INDIAN_ERA_START # Year in Saka era
jdAtStartOfGregYear = self. gregorian_to_julian_date(gregorianDay[0], 1, 1) # JD at start of Gregorian year
julianDay = self.gregorian_to_julian_date( gregorianDay[0], gregorianDay[1], gregorianDay[2])
yday = int(julianDay - jdAtStartOfGregYear) # Day number in Gregorian year (starting from 0)
if yday < self.INDIAN_YEAR_START :
# Day is at the end of the preceding Saka year
IndianYear -= 1
leapMonth = 31 if self.is_gregorian_leap(gregorianDay[0] - 1) else 30 # Days in leapMonth this year, previous Gregorian year
yday += leapMonth + (31 * 5) + (30 * 3) + 10
else:
leapMonth = 31 if self.is_gregorian_leap(gregorianDay[0]) else 30 # Days in leapMonth this year
yday -= self.INDIAN_YEAR_START
if yday < leapMonth :
IndianMonth = 0
IndianDayOfMonth = yday + 1
else :
mday = yday - leapMonth
if mday < (31 * 5) :
IndianMonth = int(math.floor(mday / 31) + 1)
IndianDayOfMonth = (mday % 31) + 1
else :
mday -= 31 * 5
IndianMonth = int(math.floor(mday / 30)) + 6
IndianDayOfMonth = (mday % 30) + 1
#Month is 0 based.converting it to 1 based
if IndianMonth == 12 :
IndianMonth = 1
else :
IndianMonth = IndianMonth +1
indDate[0]=IndianYear
indDate[1]=IndianMonth
indDate[2]=IndianDayOfMonth
return indDate
def get_month_name(self, month_index):
return self.SAKA_MONTH_NAMES[month_index-1]
def get_week_name(self, week_index) :
return self.SAKA_WEEK_NAMES[week_index-1]
# Arelle modifications follow this comment. They do not modify the behavior of the original module.
def gregorianToSaka(gregorianDateArray):
if (not isinstance(gregorianDateArray, (tuple,list)) or
len(gregorianDateArray) != 3):
raise ValueError(u"sakaCalendar:Invalid argument, must be tuple or list of yr,mo,day: {}".format(gregorianDateArray))
return SakaCalendar().gregorian_to_saka_date(gregorianDateArray)
def sakaToGregorian(sakaDateArray):
if (not isinstance(sakaDateArray, (tuple,list)) or
len(sakaDateArray) != 3):
raise ValueError(u"sakaCalendar:Invalid argument, must be tuple or list of yr,mo,day: {}".format(sakaDateArray))
sakaCal = SakaCalendar()
sakaYr = sakaDateArray[0]
sakaMo = sakaDateArray[1]
sakaDay = sakaDateArray[2]
# validate date
if (not isinstance(sakaDateArray, (tuple,list)) or
len(sakaDateArray) != 3 or
not 1 <= sakaYr <= 9999 or
not 1 <= sakaMo <= 12 or
not 1 <= sakaDay <= sakaCal.get_month_length(sakaYr, sakaMo)):
raise ValueError(u"sakaCalendar:InvalidDate: {} {} {}".format(sakaYr, sakaMo, sakaDay))
# convert to gregorian calendar
return sakaCal.julian_date_to_gregorian(sakaCal.saka_to_julian_date(sakaYr, sakaMo, sakaDay))
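# Illustrative round trip through the two plugin entry points:
#   gregorianToSaka([2023, 3, 22])  ->  [1945, 1, 1]   (Chaitra 1, Saka 1945)
#   sakaToGregorian([1945, 1, 1])   ->  [2023, 3, 22]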
__pluginInfo__ = {
u'name': u'Saka Calendar',
u'version': u'1.0',
u'description': u"India National Calendar date validation and conversion. ",
u'license': u'LGPL-v3',
u'author': u'Santhosh Thottingal',
u'copyright': u'(c) Copyright 2008 Santhosh Thottingal <[email protected]>.',
# classes of mount points (required)
u'SakaCalendar.FromGregorian': gregorianToSaka,
u'SakaCalendar.ToGregorian': sakaToGregorian,
}
| {
"content_hash": "456075bcd279c4e830986deb92a8b84b",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 161,
"avg_line_length": 44.42857142857143,
"alnum_prop": 0.5828272001905442,
"repo_name": "sternshus/arelle2.7",
"id": "70a39aefdec6708c7a4f82e8ff2a778072bb2948",
"size": "9630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "svr-2.7/arelle/examples/plugin/sakaCalendar.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "850"
},
{
"name": "Java",
"bytes": "4663"
},
{
"name": "PLSQL",
"bytes": "1056369"
},
{
"name": "Python",
"bytes": "4877037"
},
{
"name": "Shell",
"bytes": "42"
}
],
"symlink_target": ""
} |
from __future__ import division, absolute_import, print_function
"""Provides the basic, interface-agnostic workflow for importing and
autotagging music files.
"""
import os
import re
import pickle
import itertools
from collections import defaultdict
from tempfile import mkdtemp
from bisect import insort, bisect_left
from contextlib import contextmanager
import shutil
import time
from beets import logging
from beets import autotag
from beets import library
from beets import dbcore
from beets import plugins
from beets import util
from beets import config
from beets.util import pipeline, sorted_walk, ancestry, MoveOperation
from beets.util import syspath, normpath, displayable_path
from enum import Enum
import mediafile
action = Enum('action',
['SKIP', 'ASIS', 'TRACKS', 'APPLY', 'ALBUMS', 'RETAG'])
# The RETAG action represents "don't apply any match, but do record
# new metadata". It's not reachable via the standard command prompt but
# can be used by plugins.
QUEUE_SIZE = 128
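# Fraction of items that must share the most common album artist for an
# as-is import to be treated as a single-artist album rather than a
# compilation (see ImportTask.align_album_level_fields).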
SINGLE_ARTIST_THRESH = 0.25
PROGRESS_KEY = 'tagprogress'
HISTORY_KEY = 'taghistory'
# Global logger.
log = logging.getLogger('beets')
class ImportAbort(Exception):
"""Raised when the user aborts the tagging operation.
"""
pass
# Utilities.
def _open_state():
"""Reads the state file, returning a dictionary."""
try:
with open(config['statefile'].as_filename(), 'rb') as f:
return pickle.load(f)
except Exception as exc:
# The `pickle` module can emit all sorts of exceptions during
# unpickling, including ImportError. We use a catch-all
# exception to avoid enumerating them all (the docs don't even have a
# full list!).
log.debug(u'state file could not be read: {0}', exc)
return {}
def _save_state(state):
"""Writes the state dictionary out to disk."""
try:
with open(config['statefile'].as_filename(), 'wb') as f:
pickle.dump(state, f)
except IOError as exc:
log.error(u'state file could not be written: {0}', exc)
# Utilities for reading and writing the beets progress file, which
# allows long tagging tasks to be resumed when they pause (or crash).
def progress_read():
state = _open_state()
return state.setdefault(PROGRESS_KEY, {})
@contextmanager
def progress_write():
state = _open_state()
progress = state.setdefault(PROGRESS_KEY, {})
yield progress
_save_state(state)
def progress_add(toppath, *paths):
"""Record that the files under all of the `paths` have been imported
under `toppath`.
"""
with progress_write() as state:
imported = state.setdefault(toppath, [])
for path in paths:
# Normally `progress_add` will be called with the path
# argument increasing. This is because of the ordering in
# `albums_in_dir`. We take advantage of that to make the
# code faster
if imported and imported[len(imported) - 1] <= path:
imported.append(path)
else:
insort(imported, path)
def progress_element(toppath, path):
"""Return whether `path` has been imported in `toppath`.
"""
state = progress_read()
if toppath not in state:
return False
imported = state[toppath]
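    # Binary search in the sorted progress list: `path` was imported iff the
    # element at the insertion point found by bisect_left equals `path`.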
i = bisect_left(imported, path)
return i != len(imported) and imported[i] == path
def has_progress(toppath):
"""Return `True` if there exist paths that have already been
imported under `toppath`.
"""
state = progress_read()
return toppath in state
def progress_reset(toppath):
with progress_write() as state:
if toppath in state:
del state[toppath]
# Similarly, utilities for manipulating the "incremental" import log.
# This keeps track of all directories that were ever imported, which
# allows the importer to only import new stuff.
def history_add(paths):
"""Indicate that the import of the album in `paths` is completed and
should not be repeated in incremental imports.
"""
state = _open_state()
if HISTORY_KEY not in state:
state[HISTORY_KEY] = set()
state[HISTORY_KEY].add(tuple(paths))
_save_state(state)
def history_get():
"""Get the set of completed path tuples in incremental imports.
"""
state = _open_state()
if HISTORY_KEY not in state:
return set()
return state[HISTORY_KEY]
# Abstract session class.
class ImportSession(object):
"""Controls an import action. Subclasses should implement methods to
communicate with the user or otherwise make decisions.
"""
def __init__(self, lib, loghandler, paths, query):
"""Create a session. `lib` is a Library object. `loghandler` is a
logging.Handler. Either `paths` or `query` is non-null and indicates
the source of files to be imported.
"""
self.lib = lib
self.logger = self._setup_logging(loghandler)
self.paths = paths
self.query = query
self._is_resuming = dict()
self._merged_items = set()
self._merged_dirs = set()
# Normalize the paths.
if self.paths:
self.paths = list(map(normpath, self.paths))
def _setup_logging(self, loghandler):
logger = logging.getLogger(__name__)
logger.propagate = False
if not loghandler:
loghandler = logging.NullHandler()
logger.handlers = [loghandler]
return logger
def set_config(self, config):
"""Set `config` property from global import config and make
implied changes.
"""
# FIXME: Maybe this function should not exist and should instead
# provide "decision wrappers" like "should_resume()", etc.
iconfig = dict(config)
self.config = iconfig
# Incremental and progress are mutually exclusive.
if iconfig['incremental']:
iconfig['resume'] = False
# When based on a query instead of directories, never
# save progress or try to resume.
if self.query is not None:
iconfig['resume'] = False
iconfig['incremental'] = False
# Copy, move, link, and hardlink are mutually exclusive.
if iconfig['move']:
iconfig['copy'] = False
iconfig['link'] = False
iconfig['hardlink'] = False
elif iconfig['link']:
iconfig['copy'] = False
iconfig['move'] = False
iconfig['hardlink'] = False
elif iconfig['hardlink']:
iconfig['copy'] = False
iconfig['move'] = False
iconfig['link'] = False
# Only delete when copying.
if not iconfig['copy']:
iconfig['delete'] = False
self.want_resume = config['resume'].as_choice([True, False, 'ask'])
def tag_log(self, status, paths):
"""Log a message about a given album to the importer log. The status
should reflect the reason the album couldn't be tagged.
"""
self.logger.info(u'{0} {1}', status, displayable_path(paths))
def log_choice(self, task, duplicate=False):
"""Logs the task's current choice if it should be logged. If
``duplicate``, then this is a secondary choice after a duplicate was
detected and a decision was made.
"""
paths = task.paths
if duplicate:
# Duplicate: log all three choices (skip, keep both, and trump).
if task.should_remove_duplicates:
self.tag_log(u'duplicate-replace', paths)
elif task.choice_flag in (action.ASIS, action.APPLY):
self.tag_log(u'duplicate-keep', paths)
            elif task.choice_flag is action.SKIP:
self.tag_log(u'duplicate-skip', paths)
else:
# Non-duplicate: log "skip" and "asis" choices.
if task.choice_flag is action.ASIS:
self.tag_log(u'asis', paths)
elif task.choice_flag is action.SKIP:
self.tag_log(u'skip', paths)
def should_resume(self, path):
raise NotImplementedError
def choose_match(self, task):
raise NotImplementedError
def resolve_duplicate(self, task, found_duplicates):
raise NotImplementedError
def choose_item(self, task):
raise NotImplementedError
def run(self):
"""Run the import task.
"""
self.logger.info(u'import started {0}', time.asctime())
self.set_config(config['import'])
# Set up the pipeline.
if self.query is None:
stages = [read_tasks(self)]
else:
stages = [query_tasks(self)]
# In pretend mode, just log what would otherwise be imported.
if self.config['pretend']:
stages += [log_files(self)]
else:
if self.config['group_albums'] and \
not self.config['singletons']:
# Split directory tasks into one task for each album.
stages += [group_albums(self)]
# These stages either talk to the user to get a decision or,
# in the case of a non-autotagged import, just choose to
# import everything as-is. In *both* cases, these stages
# also add the music to the library database, so later
# stages need to read and write data from there.
if self.config['autotag']:
stages += [lookup_candidates(self), user_query(self)]
else:
stages += [import_asis(self)]
# Plugin stages.
for stage_func in plugins.early_import_stages():
stages.append(plugin_stage(self, stage_func))
for stage_func in plugins.import_stages():
stages.append(plugin_stage(self, stage_func))
stages += [manipulate_files(self)]
pl = pipeline.Pipeline(stages)
# Run the pipeline.
plugins.send('import_begin', session=self)
try:
if config['threaded']:
pl.run_parallel(QUEUE_SIZE)
else:
pl.run_sequential()
except ImportAbort:
# User aborted operation. Silently stop.
pass
# Incremental and resumed imports
def already_imported(self, toppath, paths):
"""Returns true if the files belonging to this task have already
been imported in a previous session.
"""
if self.is_resuming(toppath) \
and all([progress_element(toppath, p) for p in paths]):
return True
if self.config['incremental'] \
and tuple(paths) in self.history_dirs:
return True
return False
@property
def history_dirs(self):
if not hasattr(self, '_history_dirs'):
self._history_dirs = history_get()
return self._history_dirs
def already_merged(self, paths):
"""Returns true if all the paths being imported were part of a merge
during previous tasks.
"""
for path in paths:
if path not in self._merged_items \
and path not in self._merged_dirs:
return False
return True
def mark_merged(self, paths):
"""Mark paths and directories as merged for future reimport tasks.
"""
self._merged_items.update(paths)
dirs = set([os.path.dirname(path) if os.path.isfile(path) else path
for path in paths])
self._merged_dirs.update(dirs)
def is_resuming(self, toppath):
"""Return `True` if user wants to resume import of this path.
You have to call `ask_resume` first to determine the return value.
"""
return self._is_resuming.get(toppath, False)
def ask_resume(self, toppath):
"""If import of `toppath` was aborted in an earlier session, ask
user if she wants to resume the import.
Determines the return value of `is_resuming(toppath)`.
"""
if self.want_resume and has_progress(toppath):
# Either accept immediately or prompt for input to decide.
if self.want_resume is True or \
self.should_resume(toppath):
log.warning(u'Resuming interrupted import of {0}',
util.displayable_path(toppath))
self._is_resuming[toppath] = True
else:
# Clear progress; we're starting from the top.
progress_reset(toppath)
# The importer task class.
class BaseImportTask(object):
"""An abstract base class for importer tasks.
Tasks flow through the importer pipeline. Each stage can update
them. """
def __init__(self, toppath, paths, items):
"""Create a task. The primary fields that define a task are:
* `toppath`: The user-specified base directory that contains the
music for this task. If the task has *no* user-specified base
(for example, when importing based on an -L query), this can
be None. This is used for tracking progress and history.
* `paths`: A list of *specific* paths where the music for this task
came from. These paths can be directories, when their entire
contents are being imported, or files, when the task comprises
individual tracks. This is used for progress/history tracking and
for displaying the task to the user.
* `items`: A list of `Item` objects representing the music being
imported.
These fields should not change after initialization.
"""
self.toppath = toppath
self.paths = paths
self.items = items
class ImportTask(BaseImportTask):
"""Represents a single set of items to be imported along with its
intermediate state. May represent an album or a single item.
The import session and stages call the following methods in the
given order.
    * `lookup_candidates()` Sets the `cur_artist`, `cur_album`,
`candidates`, and `rec` attributes. `candidates` is a list of
`AlbumMatch` objects.
* `choose_match()` Uses the session to set the `match` attribute
from the `candidates` list.
* `find_duplicates()` Returns a list of albums from `lib` with the
same artist and album name as the task.
* `apply_metadata()` Sets the attributes of the items from the
task's `match` attribute.
* `add()` Add the imported items and album to the database.
* `manipulate_files()` Copy, move, and write files depending on the
session configuration.
* `set_fields()` Sets the fields given at CLI or configuration to
the specified values.
* `finalize()` Update the import progress and cleanup the file
system.
"""
def __init__(self, toppath, paths, items):
super(ImportTask, self).__init__(toppath, paths, items)
self.choice_flag = None
self.cur_album = None
self.cur_artist = None
self.candidates = []
self.rec = None
self.should_remove_duplicates = False
self.should_merge_duplicates = False
self.is_album = True
self.search_ids = [] # user-supplied candidate IDs.
def set_choice(self, choice):
"""Given an AlbumMatch or TrackMatch object or an action constant,
indicates that an action has been selected for this task.
"""
# Not part of the task structure:
assert choice != action.APPLY # Only used internally.
if choice in (action.SKIP, action.ASIS, action.TRACKS, action.ALBUMS,
action.RETAG):
self.choice_flag = choice
self.match = None
else:
self.choice_flag = action.APPLY # Implicit choice.
self.match = choice
def save_progress(self):
"""Updates the progress state to indicate that this album has
finished.
"""
if self.toppath:
progress_add(self.toppath, *self.paths)
def save_history(self):
"""Save the directory in the history for incremental imports.
"""
if self.paths:
history_add(self.paths)
# Logical decisions.
@property
def apply(self):
return self.choice_flag == action.APPLY
@property
def skip(self):
return self.choice_flag == action.SKIP
# Convenient data.
def chosen_ident(self):
"""Returns identifying metadata about the current choice. For
albums, this is an (artist, album) pair. For items, this is
(artist, title). May only be called when the choice flag is ASIS
or RETAG (in which case the data comes from the files' current
metadata) or APPLY (data comes from the choice).
"""
if self.choice_flag in (action.ASIS, action.RETAG):
return (self.cur_artist, self.cur_album)
elif self.choice_flag is action.APPLY:
return (self.match.info.artist, self.match.info.album)
def imported_items(self):
"""Return a list of Items that should be added to the library.
        If the task applies an album match, the method only returns the
matched items.
"""
if self.choice_flag in (action.ASIS, action.RETAG):
return list(self.items)
elif self.choice_flag == action.APPLY:
return list(self.match.mapping.keys())
else:
assert False
def apply_metadata(self):
"""Copy metadata from match info to the items.
"""
if config['import']['from_scratch']:
for item in self.match.mapping:
item.clear()
autotag.apply_metadata(self.match.info, self.match.mapping)
def duplicate_items(self, lib):
duplicate_items = []
for album in self.find_duplicates(lib):
duplicate_items += album.items()
return duplicate_items
def remove_duplicates(self, lib):
duplicate_items = self.duplicate_items(lib)
log.debug(u'removing {0} old duplicated items', len(duplicate_items))
for item in duplicate_items:
item.remove()
if lib.directory in util.ancestry(item.path):
log.debug(u'deleting duplicate {0}',
util.displayable_path(item.path))
util.remove(item.path)
util.prune_dirs(os.path.dirname(item.path),
lib.directory)
def set_fields(self):
"""Sets the fields given at CLI or configuration to the specified
values.
"""
for field, view in config['import']['set_fields'].items():
value = view.get()
log.debug(u'Set field {1}={2} for {0}',
displayable_path(self.paths),
field,
value)
self.album[field] = value
self.album.store()
def finalize(self, session):
"""Save progress, clean up files, and emit plugin event.
"""
# Update progress.
if session.want_resume:
self.save_progress()
if session.config['incremental'] and not (
# Should we skip recording to incremental list?
self.skip and session.config['incremental_skip_later']
):
self.save_history()
self.cleanup(copy=session.config['copy'],
delete=session.config['delete'],
move=session.config['move'])
if not self.skip:
self._emit_imported(session.lib)
def cleanup(self, copy=False, delete=False, move=False):
"""Remove and prune imported paths.
"""
# Do not delete any files or prune directories when skipping.
if self.skip:
return
items = self.imported_items()
# When copying and deleting originals, delete old files.
if copy and delete:
new_paths = [os.path.realpath(item.path) for item in items]
for old_path in self.old_paths:
# Only delete files that were actually copied.
if old_path not in new_paths:
util.remove(syspath(old_path), False)
self.prune(old_path)
# When moving, prune empty directories containing the original files.
elif move:
for old_path in self.old_paths:
self.prune(old_path)
def _emit_imported(self, lib):
plugins.send('album_imported', lib=lib, album=self.album)
def handle_created(self, session):
"""Send the `import_task_created` event for this task. Return a list of
tasks that should continue through the pipeline. By default, this is a
list containing only the task itself, but plugins can replace the task
with new ones.
"""
tasks = plugins.send('import_task_created', session=session, task=self)
if not tasks:
tasks = [self]
else:
# The plugins gave us a list of lists of tasks. Flatten it.
tasks = [t for inner in tasks for t in inner]
return tasks
def lookup_candidates(self):
"""Retrieve and store candidates for this album. User-specified
candidate IDs are stored in self.search_ids: if present, the
initial lookup is restricted to only those IDs.
"""
artist, album, prop = \
autotag.tag_album(self.items, search_ids=self.search_ids)
self.cur_artist = artist
self.cur_album = album
self.candidates = prop.candidates
self.rec = prop.recommendation
def find_duplicates(self, lib):
"""Return a list of albums from `lib` with the same artist and
album name as the task.
"""
artist, album = self.chosen_ident()
if artist is None:
# As-is import with no artist. Skip check.
return []
duplicates = []
task_paths = set(i.path for i in self.items if i)
duplicate_query = dbcore.AndQuery((
dbcore.MatchQuery('albumartist', artist),
dbcore.MatchQuery('album', album),
))
for album in lib.albums(duplicate_query):
# Check whether the album paths are all present in the task
# i.e. album is being completely re-imported by the task,
# in which case it is not a duplicate (will be replaced).
album_paths = set(i.path for i in album.items())
if not (album_paths <= task_paths):
duplicates.append(album)
return duplicates
def align_album_level_fields(self):
"""Make some album fields equal across `self.items`. For the
        RETAG action, we assume that the plugin responsible for returning it
        always ensures that the first item contains valid data on the
        relevant fields.
"""
changes = {}
if self.choice_flag == action.ASIS:
# Taking metadata "as-is". Guess whether this album is VA.
plur_albumartist, freq = util.plurality(
[i.albumartist or i.artist for i in self.items]
)
if freq == len(self.items) or \
(freq > 1 and
float(freq) / len(self.items) >= SINGLE_ARTIST_THRESH):
# Single-artist album.
changes['albumartist'] = plur_albumartist
changes['comp'] = False
else:
# VA.
changes['albumartist'] = config['va_name'].as_str()
changes['comp'] = True
elif self.choice_flag in (action.APPLY, action.RETAG):
# Applying autotagged metadata. Just get AA from the first
# item.
if not self.items[0].albumartist:
changes['albumartist'] = self.items[0].artist
if not self.items[0].mb_albumartistid:
changes['mb_albumartistid'] = self.items[0].mb_artistid
# Apply new metadata.
for item in self.items:
item.update(changes)
def manipulate_files(self, operation=None, write=False, session=None):
""" Copy, move, link or hardlink (depending on `operation`) the files
as well as write metadata.
`operation` should be an instance of `util.MoveOperation`.
If `write` is `True` metadata is written to the files.
"""
items = self.imported_items()
# Save the original paths of all items for deletion and pruning
# in the next step (finalization).
self.old_paths = [item.path for item in items]
for item in items:
if operation is not None:
# In copy and link modes, treat re-imports specially:
# move in-library files. (Out-of-library files are
# copied/moved as usual).
old_path = item.path
if (operation != MoveOperation.MOVE
and self.replaced_items[item]
and session.lib.directory in util.ancestry(old_path)):
item.move()
# We moved the item, so remove the
# now-nonexistent file from old_paths.
self.old_paths.remove(old_path)
else:
# A normal import. Just copy files and keep track of
# old paths.
item.move(operation)
if write and (self.apply or self.choice_flag == action.RETAG):
item.try_write()
with session.lib.transaction():
for item in self.imported_items():
item.store()
plugins.send('import_task_files', session=session, task=self)
def add(self, lib):
"""Add the items as an album to the library and remove replaced items.
"""
self.align_album_level_fields()
with lib.transaction():
self.record_replaced(lib)
self.remove_replaced(lib)
self.album = lib.add_album(self.imported_items())
if 'data_source' in self.imported_items()[0]:
self.album.data_source = self.imported_items()[0].data_source
self.reimport_metadata(lib)
def record_replaced(self, lib):
"""Records the replaced items and albums in the `replaced_items`
and `replaced_albums` dictionaries.
"""
self.replaced_items = defaultdict(list)
self.replaced_albums = defaultdict(list)
replaced_album_ids = set()
for item in self.imported_items():
dup_items = list(lib.items(
dbcore.query.BytesQuery('path', item.path)
))
self.replaced_items[item] = dup_items
for dup_item in dup_items:
if (not dup_item.album_id or
dup_item.album_id in replaced_album_ids):
continue
replaced_album = dup_item.get_album()
if replaced_album:
replaced_album_ids.add(dup_item.album_id)
self.replaced_albums[replaced_album.path] = replaced_album
def reimport_metadata(self, lib):
"""For reimports, preserves metadata for reimported items and
albums.
"""
if self.is_album:
replaced_album = self.replaced_albums.get(self.album.path)
if replaced_album:
self.album.added = replaced_album.added
self.album.update(replaced_album._values_flex)
self.album.artpath = replaced_album.artpath
self.album.store()
log.debug(
u'Reimported album: added {0}, flexible '
u'attributes {1} from album {2} for {3}',
self.album.added,
replaced_album._values_flex.keys(),
replaced_album.id,
displayable_path(self.album.path)
)
for item in self.imported_items():
dup_items = self.replaced_items[item]
for dup_item in dup_items:
if dup_item.added and dup_item.added != item.added:
item.added = dup_item.added
log.debug(
u'Reimported item added {0} '
u'from item {1} for {2}',
item.added,
dup_item.id,
displayable_path(item.path)
)
item.update(dup_item._values_flex)
log.debug(
u'Reimported item flexible attributes {0} '
u'from item {1} for {2}',
dup_item._values_flex.keys(),
dup_item.id,
displayable_path(item.path)
)
item.store()
def remove_replaced(self, lib):
"""Removes all the items from the library that have the same
path as an item from this task.
"""
for item in self.imported_items():
for dup_item in self.replaced_items[item]:
log.debug(u'Replacing item {0}: {1}',
dup_item.id, displayable_path(item.path))
dup_item.remove()
log.debug(u'{0} of {1} items replaced',
sum(bool(l) for l in self.replaced_items.values()),
len(self.imported_items()))
def choose_match(self, session):
"""Ask the session which match should apply and apply it.
"""
choice = session.choose_match(self)
self.set_choice(choice)
session.log_choice(self)
def reload(self):
"""Reload albums and items from the database.
"""
for item in self.imported_items():
item.load()
self.album.load()
# Utilities.
def prune(self, filename):
"""Prune any empty directories above the given file. If this
task has no `toppath` or the file path provided is not within
the `toppath`, then this function has no effect. Similarly, if
the file still exists, no pruning is performed, so it's safe to
call when the file in question may not have been removed.
"""
if self.toppath and not os.path.exists(filename):
util.prune_dirs(os.path.dirname(filename),
self.toppath,
clutter=config['clutter'].as_str_seq())
class SingletonImportTask(ImportTask):
"""ImportTask for a single track that is not associated to an album.
"""
def __init__(self, toppath, item):
super(SingletonImportTask, self).__init__(toppath, [item.path], [item])
self.item = item
self.is_album = False
self.paths = [item.path]
def chosen_ident(self):
assert self.choice_flag in (action.ASIS, action.APPLY, action.RETAG)
if self.choice_flag in (action.ASIS, action.RETAG):
return (self.item.artist, self.item.title)
elif self.choice_flag is action.APPLY:
return (self.match.info.artist, self.match.info.title)
def imported_items(self):
return [self.item]
def apply_metadata(self):
autotag.apply_item_metadata(self.item, self.match.info)
def _emit_imported(self, lib):
for item in self.imported_items():
plugins.send('item_imported', lib=lib, item=item)
def lookup_candidates(self):
prop = autotag.tag_item(self.item, search_ids=self.search_ids)
self.candidates = prop.candidates
self.rec = prop.recommendation
def find_duplicates(self, lib):
"""Return a list of items from `lib` that have the same artist
and title as the task.
"""
artist, title = self.chosen_ident()
found_items = []
query = dbcore.AndQuery((
dbcore.MatchQuery('artist', artist),
dbcore.MatchQuery('title', title),
))
for other_item in lib.items(query):
# Existing items not considered duplicates.
if other_item.path != self.item.path:
found_items.append(other_item)
return found_items
duplicate_items = find_duplicates
def add(self, lib):
with lib.transaction():
self.record_replaced(lib)
self.remove_replaced(lib)
lib.add(self.item)
self.reimport_metadata(lib)
def infer_album_fields(self):
raise NotImplementedError
def choose_match(self, session):
"""Ask the session which match should apply and apply it.
"""
choice = session.choose_item(self)
self.set_choice(choice)
session.log_choice(self)
def reload(self):
self.item.load()
def set_fields(self):
"""Sets the fields given at CLI or configuration to the specified
values.
"""
for field, view in config['import']['set_fields'].items():
value = view.get()
log.debug(u'Set field {1}={2} for {0}',
displayable_path(self.paths),
field,
value)
self.item[field] = value
self.item.store()
# FIXME The inheritance relationships are inverted. This is why there
# are so many methods which pass. More responsibility should be delegated to
# the BaseImportTask class.
class SentinelImportTask(ImportTask):
"""A sentinel task marks the progress of an import and does not
import any items itself.
If only `toppath` is set the task indicates the end of a top-level
directory import. If the `paths` argument is also given, the task
indicates the progress in the `toppath` import.
"""
def __init__(self, toppath, paths):
super(SentinelImportTask, self).__init__(toppath, paths, ())
# TODO Remove the remaining attributes eventually
self.should_remove_duplicates = False
self.is_album = True
self.choice_flag = None
def save_history(self):
pass
def save_progress(self):
if self.paths is None:
# "Done" sentinel.
progress_reset(self.toppath)
else:
# "Directory progress" sentinel for singletons
progress_add(self.toppath, *self.paths)
def skip(self):
return True
def set_choice(self, choice):
raise NotImplementedError
def cleanup(self, **kwargs):
pass
def _emit_imported(self, session):
pass
class ArchiveImportTask(SentinelImportTask):
"""An import task that represents the processing of an archive.
`toppath` must be a `zip`, `tar`, or `rar` archive. Archive tasks
serve two purposes:
- First, it will unarchive the files to a temporary directory and
return it. The client should read tasks from the resulting
directory and send them through the pipeline.
- Second, it will clean up the temporary directory when it proceeds
through the pipeline. The client should send the archive task
after sending the rest of the music tasks to make this work.
"""
def __init__(self, toppath):
super(ArchiveImportTask, self).__init__(toppath, ())
self.extracted = False
@classmethod
def is_archive(cls, path):
"""Returns true if the given path points to an archive that can
be handled.
"""
if not os.path.isfile(path):
return False
for path_test, _ in cls.handlers():
if path_test(util.py3_path(path)):
return True
return False
@classmethod
def handlers(cls):
"""Returns a list of archive handlers.
Each handler is a `(path_test, ArchiveClass)` tuple. `path_test`
is a function that returns `True` if the given path can be
handled by `ArchiveClass`. `ArchiveClass` is a class that
implements the same interface as `tarfile.TarFile`.
"""
if not hasattr(cls, '_handlers'):
cls._handlers = []
from zipfile import is_zipfile, ZipFile
cls._handlers.append((is_zipfile, ZipFile))
import tarfile
cls._handlers.append((tarfile.is_tarfile, tarfile.open))
try:
from rarfile import is_rarfile, RarFile
except ImportError:
pass
else:
cls._handlers.append((is_rarfile, RarFile))
return cls._handlers
def cleanup(self, **kwargs):
"""Removes the temporary directory the archive was extracted to.
"""
if self.extracted:
log.debug(u'Removing extracted directory: {0}',
displayable_path(self.toppath))
shutil.rmtree(self.toppath)
def extract(self):
"""Extracts the archive to a temporary directory and sets
`toppath` to that directory.
"""
for path_test, handler_class in self.handlers():
if path_test(util.py3_path(self.toppath)):
break
extract_to = mkdtemp()
archive = handler_class(util.py3_path(self.toppath), mode='r')
try:
archive.extractall(extract_to)
finally:
archive.close()
self.extracted = True
self.toppath = extract_to
class ImportTaskFactory(object):
"""Generate album and singleton import tasks for all media files
indicated by a path.
"""
def __init__(self, toppath, session):
"""Create a new task factory.
`toppath` is the user-specified path to search for music to
import. `session` is the `ImportSession`, which controls how
tasks are read from the directory.
"""
self.toppath = toppath
self.session = session
self.skipped = 0 # Skipped due to incremental/resume.
self.imported = 0 # "Real" tasks created.
self.is_archive = ArchiveImportTask.is_archive(syspath(toppath))
def tasks(self):
"""Yield all import tasks for music found in the user-specified
path `self.toppath`. Any necessary sentinel tasks are also
produced.
During generation, update `self.skipped` and `self.imported`
with the number of tasks that were not produced (due to
incremental mode or resumed imports) and the number of concrete
tasks actually produced, respectively.
If `self.toppath` is an archive, it is adjusted to point to the
extracted data.
"""
# Check whether this is an archive.
if self.is_archive:
archive_task = self.unarchive()
if not archive_task:
return
# Search for music in the directory.
for dirs, paths in self.paths():
if self.session.config['singletons']:
for path in paths:
tasks = self._create(self.singleton(path))
for task in tasks:
yield task
yield self.sentinel(dirs)
else:
tasks = self._create(self.album(paths, dirs))
for task in tasks:
yield task
# Produce the final sentinel for this toppath to indicate that
# it is finished. This is usually just a SentinelImportTask, but
# for archive imports, send the archive task instead (to remove
# the extracted directory).
if self.is_archive:
yield archive_task
else:
yield self.sentinel()
def _create(self, task):
"""Handle a new task to be emitted by the factory.
Emit the `import_task_created` event and increment the
`imported` count if the task is not skipped. Return the same
task. If `task` is None, do nothing.
"""
if task:
tasks = task.handle_created(self.session)
self.imported += len(tasks)
return tasks
return []
def paths(self):
"""Walk `self.toppath` and yield `(dirs, files)` pairs where
`files` are individual music files and `dirs` the set of
containing directories where the music was found.
This can either be a recursive search in the ordinary case, a
        single track when `toppath` is a file, or a single directory in
        `flat` mode.
"""
if not os.path.isdir(syspath(self.toppath)):
yield [self.toppath], [self.toppath]
elif self.session.config['flat']:
paths = []
for dirs, paths_in_dir in albums_in_dir(self.toppath):
paths += paths_in_dir
yield [self.toppath], paths
else:
for dirs, paths in albums_in_dir(self.toppath):
yield dirs, paths
def singleton(self, path):
"""Return a `SingletonImportTask` for the music file.
"""
if self.session.already_imported(self.toppath, [path]):
log.debug(u'Skipping previously-imported path: {0}',
displayable_path(path))
self.skipped += 1
return None
item = self.read_item(path)
if item:
return SingletonImportTask(self.toppath, item)
else:
return None
def album(self, paths, dirs=None):
"""Return a `ImportTask` with all media files from paths.
`dirs` is a list of parent directories used to record already
imported albums.
"""
if not paths:
return None
if dirs is None:
dirs = list(set(os.path.dirname(p) for p in paths))
if self.session.already_imported(self.toppath, dirs):
log.debug(u'Skipping previously-imported path: {0}',
displayable_path(dirs))
self.skipped += 1
return None
items = map(self.read_item, paths)
items = [item for item in items if item]
if items:
return ImportTask(self.toppath, dirs, items)
else:
return None
def sentinel(self, paths=None):
"""Return a `SentinelImportTask` indicating the end of a
top-level directory import.
"""
return SentinelImportTask(self.toppath, paths)
def unarchive(self):
"""Extract the archive for this `toppath`.
Extract the archive to a new directory, adjust `toppath` to
point to the extracted directory, and return an
`ArchiveImportTask`. If extraction fails, return None.
"""
assert self.is_archive
if not (self.session.config['move'] or
self.session.config['copy']):
log.warning(u"Archive importing requires either "
u"'copy' or 'move' to be enabled.")
return
log.debug(u'Extracting archive: {0}',
displayable_path(self.toppath))
archive_task = ArchiveImportTask(self.toppath)
try:
archive_task.extract()
except Exception as exc:
log.error(u'extraction failed: {0}', exc)
return
# Now read albums from the extracted directory.
self.toppath = archive_task.toppath
log.debug(u'Archive extracted to: {0}', self.toppath)
return archive_task
def read_item(self, path):
"""Return an `Item` read from the path.
If an item cannot be read, return `None` instead and log an
error.
"""
try:
return library.Item.from_path(path)
except library.ReadError as exc:
if isinstance(exc.reason, mediafile.FileTypeError):
# Silently ignore non-music files.
pass
elif isinstance(exc.reason, mediafile.UnreadableFileError):
log.warning(u'unreadable file: {0}', displayable_path(path))
else:
log.error(u'error reading {0}: {1}',
displayable_path(path), exc)
# Pipeline utilities
def _freshen_items(items):
# Clear IDs from re-tagged items so they appear "fresh" when
# we add them back to the library.
for item in items:
item.id = None
item.album_id = None
def _extend_pipeline(tasks, *stages):
# Return pipeline extension for stages with list of tasks
if type(tasks) == list:
task_iter = iter(tasks)
else:
task_iter = tasks
ipl = pipeline.Pipeline([task_iter] + list(stages))
return pipeline.multiple(ipl.pull())
# Full-album pipeline stages.
def read_tasks(session):
"""A generator yielding all the albums (as ImportTask objects) found
in the user-specified list of paths. In the case of a singleton
import, yields single-item tasks instead.
"""
skipped = 0
for toppath in session.paths:
# Check whether we need to resume the import.
session.ask_resume(toppath)
# Generate tasks.
task_factory = ImportTaskFactory(toppath, session)
for t in task_factory.tasks():
yield t
skipped += task_factory.skipped
if not task_factory.imported:
log.warning(u'No files imported from {0}',
displayable_path(toppath))
# Show skipped directories (due to incremental/resume).
if skipped:
log.info(u'Skipped {0} paths.', skipped)
def query_tasks(session):
"""A generator that works as a drop-in-replacement for read_tasks.
Instead of finding files from the filesystem, a query is used to
match items from the library.
"""
if session.config['singletons']:
# Search for items.
for item in session.lib.items(session.query):
task = SingletonImportTask(None, item)
for task in task.handle_created(session):
yield task
else:
# Search for albums.
for album in session.lib.albums(session.query):
log.debug(u'yielding album {0}: {1} - {2}',
album.id, album.albumartist, album.album)
items = list(album.items())
_freshen_items(items)
task = ImportTask(None, [album.item_dir()], items)
for task in task.handle_created(session):
yield task
@pipeline.mutator_stage
def lookup_candidates(session, task):
"""A coroutine for performing the initial MusicBrainz lookup for an
album. It accepts lists of Items and yields
(items, cur_artist, cur_album, candidates, rec) tuples. If no match
is found, all of the yielded parameters (except items) are None.
"""
if task.skip:
# FIXME This gets duplicated a lot. We need a better
# abstraction.
return
plugins.send('import_task_start', session=session, task=task)
log.debug(u'Looking up: {0}', displayable_path(task.paths))
# Restrict the initial lookup to IDs specified by the user via the -m
# option. Currently all the IDs are passed onto the tasks directly.
task.search_ids = session.config['search_ids'].as_str_seq()
task.lookup_candidates()
@pipeline.stage
def user_query(session, task):
"""A coroutine for interfacing with the user about the tagging
process.
The coroutine accepts an ImportTask objects. It uses the
session's `choose_match` method to determine the `action` for
this task. Depending on the action additional stages are executed
and the processed task is yielded.
It emits the ``import_task_choice`` event for plugins. Plugins have
    access to the choice via the ``task.choice_flag`` property and may
choose to change it.
"""
if task.skip:
return task
if session.already_merged(task.paths):
return pipeline.BUBBLE
# Ask the user for a choice.
task.choose_match(session)
plugins.send('import_task_choice', session=session, task=task)
# As-tracks: transition to singleton workflow.
if task.choice_flag is action.TRACKS:
# Set up a little pipeline for dealing with the singletons.
def emitter(task):
for item in task.items:
task = SingletonImportTask(task.toppath, item)
for new_task in task.handle_created(session):
yield new_task
yield SentinelImportTask(task.toppath, task.paths)
return _extend_pipeline(emitter(task),
lookup_candidates(session),
user_query(session))
# As albums: group items by albums and create task for each album
if task.choice_flag is action.ALBUMS:
return _extend_pipeline([task],
group_albums(session),
lookup_candidates(session),
user_query(session))
resolve_duplicates(session, task)
if task.should_merge_duplicates:
# Create a new task for tagging the current items
# and duplicates together
duplicate_items = task.duplicate_items(session.lib)
# Duplicates would be reimported so make them look "fresh"
_freshen_items(duplicate_items)
duplicate_paths = [item.path for item in duplicate_items]
# Record merged paths in the session so they are not reimported
session.mark_merged(duplicate_paths)
merged_task = ImportTask(None, task.paths + duplicate_paths,
task.items + duplicate_items)
return _extend_pipeline([merged_task],
lookup_candidates(session),
user_query(session))
apply_choice(session, task)
return task
def resolve_duplicates(session, task):
"""Check if a task conflicts with items or albums already imported
and ask the session to resolve this.
"""
if task.choice_flag in (action.ASIS, action.APPLY, action.RETAG):
found_duplicates = task.find_duplicates(session.lib)
if found_duplicates:
log.debug(u'found duplicates: {}'.format(
[o.id for o in found_duplicates]
))
# Get the default action to follow from config.
duplicate_action = config['import']['duplicate_action'].as_choice({
u'skip': u's',
u'keep': u'k',
u'remove': u'r',
u'merge': u'm',
u'ask': u'a',
})
log.debug(u'default action for duplicates: {0}', duplicate_action)
if duplicate_action == u's':
# Skip new.
task.set_choice(action.SKIP)
elif duplicate_action == u'k':
# Keep both. Do nothing; leave the choice intact.
pass
elif duplicate_action == u'r':
# Remove old.
task.should_remove_duplicates = True
elif duplicate_action == u'm':
# Merge duplicates together
task.should_merge_duplicates = True
else:
# No default action set; ask the session.
session.resolve_duplicate(task, found_duplicates)
session.log_choice(task, True)
@pipeline.mutator_stage
def import_asis(session, task):
"""Select the `action.ASIS` choice for all tasks.
This stage replaces the initial_lookup and user_query stages
when the importer is run without autotagging.
"""
if task.skip:
return
log.info(u'{}', displayable_path(task.paths))
task.set_choice(action.ASIS)
apply_choice(session, task)
def apply_choice(session, task):
"""Apply the task's choice to the Album or Item it contains and add
it to the library.
"""
if task.skip:
return
# Change metadata.
if task.apply:
task.apply_metadata()
plugins.send('import_task_apply', session=session, task=task)
task.add(session.lib)
# If ``set_fields`` is set, set those fields to the
# configured values.
# NOTE: This cannot be done before the ``task.add()`` call above,
# because then the ``ImportTask`` won't have an `album` for which
# it can set the fields.
if config['import']['set_fields']:
task.set_fields()
@pipeline.mutator_stage
def plugin_stage(session, func, task):
"""A coroutine (pipeline stage) that calls the given function with
each non-skipped import task. These stages occur between applying
metadata changes and moving/copying/writing files.
"""
if task.skip:
return
func(session, task)
# Stage may modify DB, so re-load cached item data.
# FIXME Importer plugins should not modify the database but instead
# the albums and items attached to tasks.
task.reload()
@pipeline.stage
def manipulate_files(session, task):
"""A coroutine (pipeline stage) that performs necessary file
manipulations *after* items have been added to the library and
finalizes each task.
"""
if not task.skip:
if task.should_remove_duplicates:
task.remove_duplicates(session.lib)
if session.config['move']:
operation = MoveOperation.MOVE
elif session.config['copy']:
operation = MoveOperation.COPY
elif session.config['link']:
operation = MoveOperation.LINK
elif session.config['hardlink']:
operation = MoveOperation.HARDLINK
else:
operation = None
task.manipulate_files(
operation,
write=session.config['write'],
session=session,
)
# Progress, cleanup, and event.
task.finalize(session)
@pipeline.stage
def log_files(session, task):
"""A coroutine (pipeline stage) to log each file to be imported.
"""
if isinstance(task, SingletonImportTask):
log.info(u'Singleton: {0}', displayable_path(task.item['path']))
elif task.items:
log.info(u'Album: {0}', displayable_path(task.paths[0]))
for item in task.items:
log.info(u' {0}', displayable_path(item['path']))
def group_albums(session):
"""A pipeline stage that groups the items of each task into albums
using their metadata.
Groups are identified using their artist and album fields. The
pipeline stage emits new album tasks for each discovered group.
"""
def group(item):
return (item.albumartist or item.artist, item.album)
task = None
while True:
task = yield task
if task.skip:
continue
tasks = []
sorted_items = sorted(task.items, key=group)
for _, items in itertools.groupby(sorted_items, group):
items = list(items)
task = ImportTask(task.toppath, [i.path for i in items],
items)
tasks += task.handle_created(session)
tasks.append(SentinelImportTask(task.toppath, task.paths))
task = pipeline.multiple(tasks)
MULTIDISC_MARKERS = (br'dis[ck]', br'cd')
MULTIDISC_PAT_FMT = br'^(.*%s[\W_]*)\d'
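# Illustrative examples of the resulting patterns: with the b'dis[ck]' marker
# the format expands to br'^(.*dis[ck][\W_]*)\d', which (with re.I, as
# compiled below) matches directory names such as b"Album Disc 1" or
# b"album-disk_2".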
def is_subdir_of_any_in_list(path, dirs):
"""Returns True if path os a subdirectory of any directory in dirs
(a list). In other case, returns False.
"""
ancestors = ancestry(path)
return any(d in ancestors for d in dirs)
def albums_in_dir(path):
"""Recursively searches the given directory and returns an iterable
    of (paths, items) where paths is a list of directories and items is
    a list of media file paths that together probably comprise an album.
    Specifically, any folder containing any media files is an album.
"""
collapse_pat = collapse_paths = collapse_items = None
ignore = config['ignore'].as_str_seq()
ignore_hidden = config['ignore_hidden'].get(bool)
for root, dirs, files in sorted_walk(path, ignore=ignore,
ignore_hidden=ignore_hidden,
logger=log):
items = [os.path.join(root, f) for f in files]
# If we're currently collapsing the constituent directories in a
# multi-disc album, check whether we should continue collapsing
# and add the current directory. If so, just add the directory
# and move on to the next directory. If not, stop collapsing.
if collapse_paths:
if (is_subdir_of_any_in_list(root, collapse_paths)) or \
(collapse_pat and
collapse_pat.match(os.path.basename(root))):
# Still collapsing.
collapse_paths.append(root)
collapse_items += items
continue
else:
# Collapse finished. Yield the collapsed directory and
# proceed to process the current one.
if collapse_items:
yield collapse_paths, collapse_items
collapse_pat = collapse_paths = collapse_items = None
# Check whether this directory looks like the *first* directory
        # in a multi-disc sequence. There are two indicators: the directory
        # is named like part of a multi-disc sequence (e.g., "Title Disc
# 1") or it contains no items but only directories that are
# named in this way.
start_collapsing = False
for marker in MULTIDISC_MARKERS:
# We're using replace on %s due to lack of .format() on bytestrings
p = MULTIDISC_PAT_FMT.replace(b'%s', marker)
marker_pat = re.compile(p, re.I)
match = marker_pat.match(os.path.basename(root))
# Is this directory the root of a nested multi-disc album?
if dirs and not items:
# Check whether all subdirectories have the same prefix.
start_collapsing = True
subdir_pat = None
for subdir in dirs:
subdir = util.bytestring_path(subdir)
# The first directory dictates the pattern for
# the remaining directories.
if not subdir_pat:
match = marker_pat.match(subdir)
if match:
match_group = re.escape(match.group(1))
subdir_pat = re.compile(
b''.join([b'^', match_group, br'\d']),
re.I
)
else:
start_collapsing = False
break
# Subsequent directories must match the pattern.
elif not subdir_pat.match(subdir):
start_collapsing = False
break
# If all subdirectories match, don't check other
# markers.
if start_collapsing:
break
# Is this directory the first in a flattened multi-disc album?
elif match:
start_collapsing = True
# Set the current pattern to match directories with the same
# prefix as this one, followed by a digit.
collapse_pat = re.compile(
b''.join([b'^', re.escape(match.group(1)), br'\d']),
re.I
)
break
# If either of the above heuristics indicated that this is the
# beginning of a multi-disc album, initialize the collapsed
# directory and item lists and check the next directory.
if start_collapsing:
# Start collapsing; continue to the next iteration.
collapse_paths = [root]
collapse_items = items
continue
# If it's nonempty, yield it.
if items:
yield [root], items
# Clear out any unfinished collapse.
if collapse_paths and collapse_items:
yield collapse_paths, collapse_items
| {
"content_hash": "494269e947350696c5f392e0dd68e028",
"timestamp": "",
"source": "github",
"line_count": 1690,
"max_line_length": 79,
"avg_line_length": 35.598224852071006,
"alnum_prop": 0.5880720067818022,
"repo_name": "jackwilsdon/beets",
"id": "68d5f3d5dd05f21bc4b62365bd424b99be6cbf87",
"size": "60832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beets/importer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2951"
},
{
"name": "HTML",
"bytes": "3306"
},
{
"name": "JavaScript",
"bytes": "85947"
},
{
"name": "Python",
"bytes": "2050533"
},
{
"name": "Shell",
"bytes": "7448"
}
],
"symlink_target": ""
} |
import abc
import logging
from itertools import izip
from datetime import datetime
import tornado.gen
from tormon.api import utils
from tormon.models import Response, RequestError
DEFAULT_TIMESTAMP = 0.0
DEFAULT_STATUS_CODE = None
DEFAULT_HEADER_SET = {}
class IBaseWriter(object):
__metaclass__ = abc.ABCMeta
def __init__(self, *args, **kwargs):
logging.info(u'Initializing writer: {0}'.format(
self.__class__.__name__
))
@abc.abstractmethod
def iter_data(self):
pass
@abc.abstractmethod
def write_response(self, resource, response):
pass
@abc.abstractmethod
def write_error(self, resource, error):
pass
@abc.abstractmethod
def get_info(self, url):
pass
@abc.abstractmethod
def get_stats(self):
pass
@property
def updated_at(self):
return datetime.now()
def get_response_data(self, response):
logging.debug(u"[{0}] Code: {1} Time: {2}s".format(
response.request.url, response.code,
u"%.2f" % response.request_time
))
return Response(
code=response.code, request_time=response.request_time,
headers=dict(response.headers), updated_at=self.updated_at
)
def get_error_data(self, resource, error):
error_data = error.__dict__
logging.error(u"[{}] {}".format(resource, str(error)))
return RequestError(
code=error_data.get(u'code', DEFAULT_STATUS_CODE),
headers=dict(error_data.get(u'headers', DEFAULT_HEADER_SET)),
request_time=error_data.get(u'request_time', DEFAULT_TIMESTAMP),
updated_at=self.updated_at,
message=str(error)
)
class MemoryWriter(IBaseWriter):
def __init__(self, *args, **kwargs):
super(MemoryWriter, self).__init__(*args, **kwargs)
self.url_status = {}
def write_response(self, resource, response):
response_data = self.get_response_data(response=response)
self.url_status[resource.url] = response_data.as_dict()
def write_error(self, resource, error):
error_data = self.get_error_data(
resource=resource,
error=error
)
self.url_status[resource.url] = error_data.as_dict()
@tornado.gen.coroutine
def iter_data(self):
data = self.url_status
raise tornado.gen.Return(data.iteritems())
@tornado.gen.coroutine
def get_info(self, url):
data = self.url_status[url]
raise tornado.gen.Return(data)
@tornado.gen.coroutine
def get_stats(self):
data = yield tornado.gen.Task(self.iter_data)
urls = [
url for url, _ in data
]
stats = {
u'total_monitored': len(urls),
u'urls': urls
}
raise tornado.gen.Return(stats)
class RedisWriter(IBaseWriter):
KEY_PREFIX = u'tormon-'
def __init__(self, *args, **kwargs):
super(RedisWriter, self).__init__(*args, **kwargs)
import redis
self.r = redis.StrictRedis()
def key(self, base):
return u"{}{}".format(self.KEY_PREFIX, base)
@tornado.gen.coroutine
def iter_data(self):
match = u"{}*".format(self.KEY_PREFIX)
count = None
results = {}
keys = [
key for
key in self.r.scan_iter(match=match, count=count)
]
values = self.r.mget(keys)
keys = map(lambda x: x.replace(self.KEY_PREFIX, u''), keys)
values = map(lambda x: utils.json_loads(x), values)
results.update(dict(izip(keys, values)))
raise tornado.gen.Return(results.iteritems())
@tornado.gen.coroutine
def write_response(self, resource, response):
response_data = self.get_response_data(response=response)
json_data = utils.json_dumps(response_data.as_dict())
self.r.set(self.key(resource.url), json_data)
@tornado.gen.coroutine
def write_error(self, resource, error):
error_data = self.get_error_data(
resource=resource,
error=error
)
json_data = utils.json_dumps(error_data.as_dict())
self.r.set(self.key(resource.url), json_data)
@tornado.gen.coroutine
def get_info(self, url):
key = self.key(url)
data = self.r.get(key)
if not data:
raise KeyError(u"Key: {} does not exist in db".format(key))
raise tornado.gen.Return(utils.json_loads(data))
@tornado.gen.coroutine
def get_stats(self):
data = yield tornado.gen.Task(self.iter_data)
urls = [
url for url, _ in data
]
stats = {
u'total_monitored': len(urls),
u'urls': urls
}
raise tornado.gen.Return(stats) | {
"content_hash": "ffae8ee8655509e71b14da30308ad4bc",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 76,
"avg_line_length": 27.15730337078652,
"alnum_prop": 0.5887463798096815,
"repo_name": "jnosal/tormon",
"id": "8593fb653b8375fd6326c2285d035771b0b24de8",
"size": "4834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tormon/monitor/writers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22078"
}
],
"symlink_target": ""
} |
from django import template
register = template.Library()
def paginator(context, adjacent_pages=2):
"""
To be used in conjunction with the object_list generic view.
Adds pagination context variables for use in displaying first, adjacent and
last page links in addition to those created by the object_list generic
view.
"""
startPage = max(context['page'] - adjacent_pages, 1)
if startPage <= 3: startPage = 1
endPage = context['page'] + adjacent_pages + 1
if endPage >= context['pages'] - 1: endPage = context['pages'] + 1
page_numbers = [n for n in range(startPage, endPage) \
if n > 0 and n <= context['pages']]
page_obj = context['page_obj']
paginator = context['paginator']
return {
'page_obj': page_obj,
'paginator': paginator,
'hits': context['hits'],
'results_per_page': context['results_per_page'],
'page': context['page'],
'pages': context['pages'],
'page_numbers': page_numbers,
'next': context['next'],
'previous': context['previous'],
'has_next': context['has_next'],
'has_previous': context['has_previous'],
'show_first': 1 not in page_numbers,
'show_last': context['pages'] not in page_numbers,
}
register.inclusion_tag('listings/paginator.html', takes_context=True)(paginator)
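# Illustrative walk-through (not part of the original file): the window logic
# above, exercised with dummy context values. Only ``page`` and ``pages`` feed
# the page_numbers computation; the remaining keys are passed through as-is.
def _paginator_window_example():
    ctx = {'page': 10, 'pages': 20, 'page_obj': None, 'paginator': None,
           'hits': 200, 'results_per_page': 10, 'next': 11, 'previous': 9,
           'has_next': True, 'has_previous': True}
    result = paginator(ctx, adjacent_pages=2)
    # startPage = 8, endPage = 13, so the visible window is pages 8..12 and
    # both the "first" and "last" shortcuts are shown.
    assert result['page_numbers'] == [8, 9, 10, 11, 12]
    assert result['show_first'] and result['show_last']
    return result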
| {
"content_hash": "7813a972ae00cb4562198c9c2ebde86e",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 80,
"avg_line_length": 35.23076923076923,
"alnum_prop": 0.6200873362445415,
"repo_name": "wtrevino/django-listings",
"id": "4980a6cd14d3e2149162056ce8ea8a02e5647e4d",
"size": "1677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "listings/templatetags/paginator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "5455"
},
{
"name": "Python",
"bytes": "113175"
}
],
"symlink_target": ""
} |
__author__ = 'Yves Bonjour'
from Tokenizer import create_tokenizer
import redis
import uuid
def create_indexer(redis_host, redis_port):
tokenizer = create_tokenizer()
redis_db = redis.Redis(redis_host, redis_port)
store = RedisIndexStore(redis_db)
return Indexer(store, tokenizer)
class Indexer:
def __init__(self, store, tokenizer):
self.store = store
self.tokenizer = tokenizer
def index(self, text, document_id):
tokens = self.tokenizer.tokenize(text)
for token in tokens:
self.store.add(document_id, token)
def document_frequency_normalized(self, term):
return float(self.store.document_frequency(term)) / float(self.store.num_documents())
def term_document_frequency(self, document, term):
return self.store.term_document_frequency(document, term)
def get_posting_list(self, term):
return self.store.posting_list(term)
def get_terms(self, document):
return self.store.get_terms(document)
class MemoryIndexStore(object):
def __init__(self):
self.posting_lists = {}
self.documents = {}
def posting_list(self, term):
if term not in self.posting_lists:
return {}
return self.posting_lists[term]
def get_terms(self, document):
if document not in self.documents:
return []
return self.documents[document]
def document_frequency(self, term):
if term not in self.posting_lists:
return 0
return len(self.posting_lists[term])
def num_documents(self):
return len(self.documents)
def term_document_frequency(self, document, term):
if term not in self.posting_lists or document not in self.posting_lists[term]:
return 0
return self.posting_lists[term][document]
def add(self, document, term):
if term not in self.posting_lists:
self.posting_lists[term] = {}
if document not in self.posting_lists[term]:
self.posting_lists[term][document] = 0
self.posting_lists[term][document] += 1
if document not in self.documents:
self.documents[document] = set()
self.documents[document].add(term)
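# Illustrative sketch (not part of the original module): exercising Indexer
# against the in-memory store. A trivial whitespace tokenizer stands in for
# create_tokenizer() here, because the real Tokenizer's behaviour
# (lowercasing, stemming, ...) is defined elsewhere.
class _WhitespaceTokenizer(object):
    def tokenize(self, text):
        return text.split()

def _memory_index_example():
    indexer = Indexer(MemoryIndexStore(), _WhitespaceTokenizer())
    indexer.index(u"redis stores postings", document_id=1)
    indexer.index(u"redis redis postings", document_id=2)
    # "redis" appears in 2 of 2 documents, hence a normalized frequency of 1.0.
    assert indexer.document_frequency_normalized(u"redis") == 1.0
    # Term count of "redis" within document 2.
    assert indexer.term_document_frequency(2, u"redis") == 2
    # Posting list maps document id -> in-document term count.
    assert indexer.get_posting_list(u"postings") == {1: 1, 2: 1}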
class RedisIndexStore(object):
def __init__(self, redis):
self.redis = redis
def posting_list(self, term):
return {uuid.UUID(document): int(self.redis.get(self._posting_key(term, document)))
for document in self.redis.smembers(self._term_key(term))}
def document_frequency(self, term):
return len(self.redis.smembers(self._term_key(term)))
def get_terms(self, document):
return self.redis.smembers(self._document_key(document))
def num_documents(self):
return len(self.redis.smembers(self._documents_key()))
def term_document_frequency(self, document, term):
tdf = self.redis.get(self._posting_key(term, document))
return int(tdf) if tdf else 0
def add(self, document, term):
self.redis.sadd(self._documents_key(), document)
self.redis.sadd(self._term_key(term), document)
self.redis.sadd(self._document_key(document), term)
self.redis.setnx(self._posting_key(term, document), 0)
self.redis.incr(self._posting_key(term, document))
def _documents_key(self):
return "documents"
def _document_key(self, document):
return "document:{document}".format(document=document)
def _term_key(self, term):
return "term:{term}".format(term=term)
def _posting_key(self, term, document):
return "posting:{term}:{document}".format(term=term, document=document) | {
"content_hash": "9126c89cc179ca64b8b232452d459d67",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 93,
"avg_line_length": 30.097560975609756,
"alnum_prop": 0.6428957320367369,
"repo_name": "ybonjour/nuus",
"id": "ac4d53bbfd933384fc1fa8fad08d02ca8184b0e8",
"size": "3702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "services/indexing/Indexer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "229"
},
{
"name": "JavaScript",
"bytes": "210240"
},
{
"name": "Python",
"bytes": "133059"
},
{
"name": "Ruby",
"bytes": "21666"
},
{
"name": "Shell",
"bytes": "2943"
}
],
"symlink_target": ""
} |
'''
Copyright (c) 2017 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from django.conf.urls import url
import docfish.apps.main.views as views
urlpatterns = [
url(r'^collections/(?P<cid>\d+)/entity/(?P<eid>.+?)/details$',views.view_entity,name='entity_details'),
url(r'^collections/(?P<cid>\d+)/entity/(?P<eid>.+?)/remove$',views.remove_entity,name='remove_entity'),
#url(r'^collections/(?P<cid>.+?)/entities/upload$',views.upload_entities,name='upload_entities'),
]
| {
"content_hash": "a7336a43a08773e0083cb9bb8fcbcb9e",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 107,
"avg_line_length": 44.84848484848485,
"alnum_prop": 0.7648648648648648,
"repo_name": "vsoch/docfish",
"id": "1f6be3263f4f9887a1a35f4fc667ea9962e4945d",
"size": "1480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docfish/apps/main/urls/entity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1203987"
},
{
"name": "HTML",
"bytes": "679372"
},
{
"name": "JavaScript",
"bytes": "3447989"
},
{
"name": "Nginx",
"bytes": "1783"
},
{
"name": "Python",
"bytes": "322003"
},
{
"name": "Shell",
"bytes": "365"
}
],
"symlink_target": ""
} |
"""Classes supporting creation and editing of questions."""
__author__ = 'Mike Gainer ([email protected])'
from common import schema_fields
from models import models
from models import roles
from modules.dashboard import dto_editor
from google.appengine.api import users
class AdminPreferencesEditor(dto_editor.BaseDatastoreAssetEditor):
"""An editor for editing and managing course admin preferences.
Note that this editor operates on StudentPreferencesDAO instances.
This is intentional; that type stores per-human, per-course prefs.
This editor exposes only the admin-specific settings, and is
available only in contexts where the user is a course admin.
    (I.e., the dashboard.)
"""
def post_edit_admin_preferences(self):
if not roles.Roles.is_course_admin(self.app_context):
self.error(401)
return
template_values = {
'page_title': self.format_title('Edit Preferences'),
'main_content': self.get_form(
AdminPreferencesRESTHandler,
users.get_current_user().user_id(),
'/dashboard?action=settings',
deletable=False)
}
self.render_page(template_values, 'settings')
class AdminPreferencesRESTHandler(dto_editor.BaseDatastoreRestHandler):
URI = '/rest/admin_prefs'
REQUIRED_MODULES = ['inputex-hidden', 'inputex-checkbox']
EXTRA_JS_FILES = []
XSRF_TOKEN = 'admin-prefs-edit'
SCHEMA_VERSIONS = [models.StudentPreferencesDAO.CURRENT_VERSION]
DAO = models.StudentPreferencesDAO
@classmethod
def get_schema(cls):
ret = schema_fields.FieldRegistry(
'Admin Prefs', description='Administrator preferences')
ret.add_property(schema_fields.SchemaField(
'version', '', 'string', optional=True, hidden=True))
ret.add_property(schema_fields.SchemaField(
'id', '', 'string', optional=True, hidden=True))
ret.add_property(schema_fields.SchemaField(
'show_hooks', 'Show Hook Edit Buttons', 'boolean',
            description='Whether to show controls on course pages to permit '
'editing of HTML inclusions (hook points) at that location on '
'the page. Turn this setting off to see the course as the '
'student would see it, and on to enable the edit controls.',
optional=True, hidden=False))
ret.add_property(schema_fields.SchemaField(
'show_jinja_context', 'Show Jinja Context', 'boolean',
description='Whether to show a dump of Jinja context contents '
'at the bottom of course pages (Only for admins, and only '
'available on development server.)',
optional=True, hidden=False))
return ret
def get_default_content(self):
return {
'version': self.SCHEMA_VERSIONS[0],
'show_hooks': False
}
def validate(self, prefs_dict, key, schema_version, errors):
pass
| {
"content_hash": "1298433889e12066caced4cb5b7822ab",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 76,
"avg_line_length": 39.25974025974026,
"alnum_prop": 0.6493549454184585,
"repo_name": "wavemind/mlgcb",
"id": "45647c8fe3ea2f2d20137637ad180cea824c3450",
"size": "3621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/dashboard/admin_preferences_editor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "31927"
},
{
"name": "JavaScript",
"bytes": "329372"
},
{
"name": "Python",
"bytes": "2274187"
},
{
"name": "Shell",
"bytes": "15633"
}
],
"symlink_target": ""
} |
import datetime
import sys
from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union
from ... import _serialization
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from .. import models as _models
if sys.version_info >= (3, 9):
from collections.abc import MutableMapping
else:
from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports
JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object
class Resource(_serialization.Model):
"""An azure resource object.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Azure resource Id.
:vartype id: str
:ivar name: Azure resource name.
:vartype name: str
:ivar type: Azure resource type.
:vartype type: str
:ivar location: Resource location. Required.
:vartype location: str
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"location": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
}
def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, **kwargs):
"""
:keyword location: Resource location. Required.
:paramtype location: str
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
"""
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
class AlertRuleResource(Resource): # pylint: disable=too-many-instance-attributes
"""The alert rule resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Azure resource Id.
:vartype id: str
:ivar name: Azure resource name.
:vartype name: str
:ivar type: Azure resource type.
:vartype type: str
:ivar location: Resource location. Required.
:vartype location: str
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
:ivar name_properties_name: the name of the alert rule. Required.
:vartype name_properties_name: str
:ivar description: the description of the alert rule that will be included in the alert email.
:vartype description: str
:ivar provisioning_state: the provisioning state.
:vartype provisioning_state: str
:ivar is_enabled: the flag that indicates whether the alert rule is enabled. Required.
:vartype is_enabled: bool
:ivar condition: the condition that results in the alert rule being activated. Required.
:vartype condition: ~$(python-base-namespace).v2015_07_01.models.RuleCondition
:ivar action: action that is performed when the alert rule becomes active, and when an alert
condition is resolved.
:vartype action: ~$(python-base-namespace).v2015_07_01.models.RuleAction
:ivar actions: the array of actions that are performed when the alert rule becomes active, and
when an alert condition is resolved.
:vartype actions: list[~$(python-base-namespace).v2015_07_01.models.RuleAction]
:ivar last_updated_time: Last time the rule was updated in ISO8601 format.
:vartype last_updated_time: ~datetime.datetime
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"location": {"required": True},
"name_properties_name": {"required": True},
"is_enabled": {"required": True},
"condition": {"required": True},
"last_updated_time": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
"name_properties_name": {"key": "properties.name", "type": "str"},
"description": {"key": "properties.description", "type": "str"},
"provisioning_state": {"key": "properties.provisioningState", "type": "str"},
"is_enabled": {"key": "properties.isEnabled", "type": "bool"},
"condition": {"key": "properties.condition", "type": "RuleCondition"},
"action": {"key": "properties.action", "type": "RuleAction"},
"actions": {"key": "properties.actions", "type": "[RuleAction]"},
"last_updated_time": {"key": "properties.lastUpdatedTime", "type": "iso-8601"},
}
def __init__(
self,
*,
location: str,
name_properties_name: str,
is_enabled: bool,
condition: "_models.RuleCondition",
tags: Optional[Dict[str, str]] = None,
description: Optional[str] = None,
provisioning_state: Optional[str] = None,
action: Optional["_models.RuleAction"] = None,
actions: Optional[List["_models.RuleAction"]] = None,
**kwargs
):
"""
:keyword location: Resource location. Required.
:paramtype location: str
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
:keyword name_properties_name: the name of the alert rule. Required.
:paramtype name_properties_name: str
:keyword description: the description of the alert rule that will be included in the alert
email.
:paramtype description: str
:keyword provisioning_state: the provisioning state.
:paramtype provisioning_state: str
:keyword is_enabled: the flag that indicates whether the alert rule is enabled. Required.
:paramtype is_enabled: bool
:keyword condition: the condition that results in the alert rule being activated. Required.
:paramtype condition: ~$(python-base-namespace).v2015_07_01.models.RuleCondition
:keyword action: action that is performed when the alert rule becomes active, and when an alert
condition is resolved.
:paramtype action: ~$(python-base-namespace).v2015_07_01.models.RuleAction
:keyword actions: the array of actions that are performed when the alert rule becomes active,
and when an alert condition is resolved.
:paramtype actions: list[~$(python-base-namespace).v2015_07_01.models.RuleAction]
"""
super().__init__(location=location, tags=tags, **kwargs)
self.name_properties_name = name_properties_name
self.description = description
self.provisioning_state = provisioning_state
self.is_enabled = is_enabled
self.condition = condition
self.action = action
self.actions = actions
self.last_updated_time = None
class AlertRuleResourceCollection(_serialization.Model):
"""Represents a collection of alert rule resources.
:ivar value: the values for the alert rule resources.
:vartype value: list[~$(python-base-namespace).v2015_07_01.models.AlertRuleResource]
"""
_attribute_map = {
"value": {"key": "value", "type": "[AlertRuleResource]"},
}
def __init__(self, *, value: Optional[List["_models.AlertRuleResource"]] = None, **kwargs):
"""
:keyword value: the values for the alert rule resources.
:paramtype value: list[~$(python-base-namespace).v2015_07_01.models.AlertRuleResource]
"""
super().__init__(**kwargs)
self.value = value
class AlertRuleResourcePatch(_serialization.Model):
"""The alert rule object for patch operations.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
:ivar name: the name of the alert rule.
:vartype name: str
:ivar description: the description of the alert rule that will be included in the alert email.
:vartype description: str
:ivar provisioning_state: the provisioning state.
:vartype provisioning_state: str
:ivar is_enabled: the flag that indicates whether the alert rule is enabled.
:vartype is_enabled: bool
:ivar condition: the condition that results in the alert rule being activated.
:vartype condition: ~$(python-base-namespace).v2015_07_01.models.RuleCondition
:ivar action: action that is performed when the alert rule becomes active, and when an alert
condition is resolved.
:vartype action: ~$(python-base-namespace).v2015_07_01.models.RuleAction
:ivar actions: the array of actions that are performed when the alert rule becomes active, and
when an alert condition is resolved.
:vartype actions: list[~$(python-base-namespace).v2015_07_01.models.RuleAction]
:ivar last_updated_time: Last time the rule was updated in ISO8601 format.
:vartype last_updated_time: ~datetime.datetime
"""
_validation = {
"last_updated_time": {"readonly": True},
}
_attribute_map = {
"tags": {"key": "tags", "type": "{str}"},
"name": {"key": "properties.name", "type": "str"},
"description": {"key": "properties.description", "type": "str"},
"provisioning_state": {"key": "properties.provisioningState", "type": "str"},
"is_enabled": {"key": "properties.isEnabled", "type": "bool"},
"condition": {"key": "properties.condition", "type": "RuleCondition"},
"action": {"key": "properties.action", "type": "RuleAction"},
"actions": {"key": "properties.actions", "type": "[RuleAction]"},
"last_updated_time": {"key": "properties.lastUpdatedTime", "type": "iso-8601"},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
name: Optional[str] = None,
description: Optional[str] = None,
provisioning_state: Optional[str] = None,
is_enabled: Optional[bool] = None,
condition: Optional["_models.RuleCondition"] = None,
action: Optional["_models.RuleAction"] = None,
actions: Optional[List["_models.RuleAction"]] = None,
**kwargs
):
"""
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
:keyword name: the name of the alert rule.
:paramtype name: str
:keyword description: the description of the alert rule that will be included in the alert
email.
:paramtype description: str
:keyword provisioning_state: the provisioning state.
:paramtype provisioning_state: str
:keyword is_enabled: the flag that indicates whether the alert rule is enabled.
:paramtype is_enabled: bool
:keyword condition: the condition that results in the alert rule being activated.
:paramtype condition: ~$(python-base-namespace).v2015_07_01.models.RuleCondition
:keyword action: action that is performed when the alert rule becomes active, and when an alert
condition is resolved.
:paramtype action: ~$(python-base-namespace).v2015_07_01.models.RuleAction
:keyword actions: the array of actions that are performed when the alert rule becomes active,
and when an alert condition is resolved.
:paramtype actions: list[~$(python-base-namespace).v2015_07_01.models.RuleAction]
"""
super().__init__(**kwargs)
self.tags = tags
self.name = name
self.description = description
self.provisioning_state = provisioning_state
self.is_enabled = is_enabled
self.condition = condition
self.action = action
self.actions = actions
self.last_updated_time = None
class DimensionProperties(_serialization.Model):
"""Type of operation: get, read, delete, etc.
:ivar name: Name of dimension.
:vartype name: str
:ivar display_name: Display name of dimension.
:vartype display_name: str
:ivar to_be_exported_for_shoebox: Legacy usage, should not set.
:vartype to_be_exported_for_shoebox: bool
:ivar is_hidden: When set, the dimension is hidden from the customer, used in conjunction with
the defaultDimensionValues field below.
:vartype is_hidden: bool
:ivar default_dimension_values: Default dimension value to be sent down for the hidden
dimension during query.
:vartype default_dimension_values: JSON
"""
_attribute_map = {
"name": {"key": "name", "type": "str"},
"display_name": {"key": "displayName", "type": "str"},
"to_be_exported_for_shoebox": {"key": "toBeExportedForShoebox", "type": "bool"},
"is_hidden": {"key": "isHidden", "type": "bool"},
"default_dimension_values": {"key": "defaultDimensionValues", "type": "object"},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
to_be_exported_for_shoebox: Optional[bool] = None,
is_hidden: Optional[bool] = None,
default_dimension_values: Optional[JSON] = None,
**kwargs
):
"""
:keyword name: Name of dimension.
:paramtype name: str
:keyword display_name: Display name of dimension.
:paramtype display_name: str
:keyword to_be_exported_for_shoebox: Legacy usage, should not set.
:paramtype to_be_exported_for_shoebox: bool
:keyword is_hidden: When set, the dimension is hidden from the customer, used in conjunction
with the defaultDimensionValues field below.
:paramtype is_hidden: bool
:keyword default_dimension_values: Default dimension value to be sent down for the hidden
dimension during query.
:paramtype default_dimension_values: JSON
"""
super().__init__(**kwargs)
self.name = name
self.display_name = display_name
self.to_be_exported_for_shoebox = to_be_exported_for_shoebox
self.is_hidden = is_hidden
self.default_dimension_values = default_dimension_values
class ErrorContract(_serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
:ivar error: The error object.
:vartype error: ~$(python-base-namespace).v2015_07_01.models.ErrorResponse
"""
_attribute_map = {
"error": {"key": "error", "type": "ErrorResponse"},
}
def __init__(self, *, error: Optional["_models.ErrorResponse"] = None, **kwargs):
"""
:keyword error: The error object.
:paramtype error: ~$(python-base-namespace).v2015_07_01.models.ErrorResponse
"""
super().__init__(**kwargs)
self.error = error
class ErrorResponse(_serialization.Model):
"""Describes the format of Error response.
:ivar code: Error code.
:vartype code: str
:ivar message: Error message indicating why the operation failed.
:vartype message: str
"""
_attribute_map = {
"code": {"key": "code", "type": "str"},
"message": {"key": "message", "type": "str"},
}
def __init__(self, *, code: Optional[str] = None, message: Optional[str] = None, **kwargs):
"""
:keyword code: Error code.
:paramtype code: str
:keyword message: Error message indicating why the operation failed.
:paramtype message: str
"""
super().__init__(**kwargs)
self.code = code
self.message = message
class LocalizableString(_serialization.Model):
"""The localizable string class.
All required parameters must be populated in order to send to Azure.
:ivar value: the invariant value. Required.
:vartype value: str
:ivar localized_value: the locale specific value.
:vartype localized_value: str
"""
_validation = {
"value": {"required": True},
}
_attribute_map = {
"value": {"key": "value", "type": "str"},
"localized_value": {"key": "localizedValue", "type": "str"},
}
def __init__(self, *, value: str, localized_value: Optional[str] = None, **kwargs):
"""
:keyword value: the invariant value. Required.
:paramtype value: str
:keyword localized_value: the locale specific value.
:paramtype localized_value: str
"""
super().__init__(**kwargs)
self.value = value
self.localized_value = localized_value
class RuleCondition(_serialization.Model):
"""The condition that results in the alert rule being activated.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
LocationThresholdRuleCondition, ManagementEventRuleCondition, ThresholdRuleCondition
All required parameters must be populated in order to send to Azure.
:ivar odata_type: specifies the type of condition. This can be one of three types:
ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition
(based on the number of failures of a web test), and ThresholdRuleCondition (based on the
threshold of a metric). Required.
:vartype odata_type: str
:ivar data_source: the resource from which the rule collects its data. For this type dataSource
will always be of type RuleMetricDataSource.
:vartype data_source: ~$(python-base-namespace).v2015_07_01.models.RuleDataSource
"""
_validation = {
"odata_type": {"required": True},
}
_attribute_map = {
"odata_type": {"key": "odata\\.type", "type": "str"},
"data_source": {"key": "dataSource", "type": "RuleDataSource"},
}
_subtype_map = {
"odata_type": {
"Microsoft.Azure.Management.Insights.Models.LocationThresholdRuleCondition": "LocationThresholdRuleCondition",
"Microsoft.Azure.Management.Insights.Models.ManagementEventRuleCondition": "ManagementEventRuleCondition",
"Microsoft.Azure.Management.Insights.Models.ThresholdRuleCondition": "ThresholdRuleCondition",
}
}
def __init__(self, *, data_source: Optional["_models.RuleDataSource"] = None, **kwargs):
"""
:keyword data_source: the resource from which the rule collects its data. For this type
dataSource will always be of type RuleMetricDataSource.
:paramtype data_source: ~$(python-base-namespace).v2015_07_01.models.RuleDataSource
"""
super().__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.data_source = data_source
class LocationThresholdRuleCondition(RuleCondition):
"""A rule condition based on a certain number of locations failing.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: specifies the type of condition. This can be one of three types:
ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition
(based on the number of failures of a web test), and ThresholdRuleCondition (based on the
threshold of a metric). Required.
:vartype odata_type: str
:ivar data_source: the resource from which the rule collects its data. For this type dataSource
will always be of type RuleMetricDataSource.
:vartype data_source: ~$(python-base-namespace).v2015_07_01.models.RuleDataSource
:ivar window_size: the period of time (in ISO 8601 duration format) that is used to monitor
alert activity based on the threshold. If specified then it must be between 5 minutes and 1
day.
:vartype window_size: ~datetime.timedelta
:ivar failed_location_count: the number of locations that must fail to activate the alert.
Required.
:vartype failed_location_count: int
"""
_validation = {
"odata_type": {"required": True},
"failed_location_count": {"required": True, "minimum": 0},
}
_attribute_map = {
"odata_type": {"key": "odata\\.type", "type": "str"},
"data_source": {"key": "dataSource", "type": "RuleDataSource"},
"window_size": {"key": "windowSize", "type": "duration"},
"failed_location_count": {"key": "failedLocationCount", "type": "int"},
}
def __init__(
self,
*,
failed_location_count: int,
data_source: Optional["_models.RuleDataSource"] = None,
window_size: Optional[datetime.timedelta] = None,
**kwargs
):
"""
:keyword data_source: the resource from which the rule collects its data. For this type
dataSource will always be of type RuleMetricDataSource.
:paramtype data_source: ~$(python-base-namespace).v2015_07_01.models.RuleDataSource
:keyword window_size: the period of time (in ISO 8601 duration format) that is used to monitor
alert activity based on the threshold. If specified then it must be between 5 minutes and 1
day.
:paramtype window_size: ~datetime.timedelta
:keyword failed_location_count: the number of locations that must fail to activate the alert.
Required.
:paramtype failed_location_count: int
"""
super().__init__(data_source=data_source, **kwargs)
self.odata_type = "Microsoft.Azure.Management.Insights.Models.LocationThresholdRuleCondition" # type: str
self.window_size = window_size
self.failed_location_count = failed_location_count
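# Hedged usage sketch (not part of the generated models): assembling an alert
# rule payload from the classes defined above. The resource location, rule
# name, tag values and the 15-minute window are placeholder values chosen for
# the example only.
def _example_location_threshold_rule():
    condition = LocationThresholdRuleCondition(
        failed_location_count=3,
        window_size=datetime.timedelta(minutes=15),
    )
    return AlertRuleResource(
        location="westus",
        name_properties_name="webtest-failures",
        is_enabled=True,
        condition=condition,
        tags={"env": "example"},
    )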
class LogSettings(_serialization.Model):
"""Part of MultiTenantDiagnosticSettings. Specifies the settings for a particular log.
All required parameters must be populated in order to send to Azure.
:ivar category: Name of a Diagnostic Log category for a resource type this setting is applied
to. To obtain the list of Diagnostic Log categories for a resource, first perform a GET
diagnostic settings operation.
:vartype category: str
:ivar enabled: a value indicating whether this log is enabled. Required.
:vartype enabled: bool
:ivar retention_policy: the retention policy for this log.
:vartype retention_policy: ~$(python-base-namespace).v2015_07_01.models.RetentionPolicy
"""
_validation = {
"enabled": {"required": True},
}
_attribute_map = {
"category": {"key": "category", "type": "str"},
"enabled": {"key": "enabled", "type": "bool"},
"retention_policy": {"key": "retentionPolicy", "type": "RetentionPolicy"},
}
def __init__(
self,
*,
enabled: bool,
category: Optional[str] = None,
retention_policy: Optional["_models.RetentionPolicy"] = None,
**kwargs
):
"""
:keyword category: Name of a Diagnostic Log category for a resource type this setting is
applied to. To obtain the list of Diagnostic Log categories for a resource, first perform a GET
diagnostic settings operation.
:paramtype category: str
:keyword enabled: a value indicating whether this log is enabled. Required.
:paramtype enabled: bool
:keyword retention_policy: the retention policy for this log.
:paramtype retention_policy: ~$(python-base-namespace).v2015_07_01.models.RetentionPolicy
"""
super().__init__(**kwargs)
self.category = category
self.enabled = enabled
self.retention_policy = retention_policy
class LogSpecification(_serialization.Model):
"""Log specification of operation.
:ivar name: Name of log specification.
:vartype name: str
:ivar display_name: Display name of log specification.
:vartype display_name: str
:ivar blob_duration: Blob duration of specification.
:vartype blob_duration: str
"""
_attribute_map = {
"name": {"key": "name", "type": "str"},
"display_name": {"key": "displayName", "type": "str"},
"blob_duration": {"key": "blobDuration", "type": "str"},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
blob_duration: Optional[str] = None,
**kwargs
):
"""
:keyword name: Name of log specification.
:paramtype name: str
:keyword display_name: Display name of log specification.
:paramtype display_name: str
:keyword blob_duration: Blob duration of specification.
:paramtype blob_duration: str
"""
super().__init__(**kwargs)
self.name = name
self.display_name = display_name
self.blob_duration = blob_duration
class ManagementEventAggregationCondition(_serialization.Model):
"""How the data that is collected should be combined over time.
:ivar operator: the condition operator. Known values are: "GreaterThan", "GreaterThanOrEqual",
"LessThan", and "LessThanOrEqual".
:vartype operator: str or ~$(python-base-namespace).v2015_07_01.models.ConditionOperator
:ivar threshold: The threshold value that activates the alert.
:vartype threshold: float
:ivar window_size: the period of time (in ISO 8601 duration format) that is used to monitor
alert activity based on the threshold. If specified then it must be between 5 minutes and 1
day.
:vartype window_size: ~datetime.timedelta
"""
_attribute_map = {
"operator": {"key": "operator", "type": "str"},
"threshold": {"key": "threshold", "type": "float"},
"window_size": {"key": "windowSize", "type": "duration"},
}
def __init__(
self,
*,
operator: Optional[Union[str, "_models.ConditionOperator"]] = None,
threshold: Optional[float] = None,
window_size: Optional[datetime.timedelta] = None,
**kwargs
):
"""
:keyword operator: the condition operator. Known values are: "GreaterThan",
"GreaterThanOrEqual", "LessThan", and "LessThanOrEqual".
:paramtype operator: str or ~$(python-base-namespace).v2015_07_01.models.ConditionOperator
:keyword threshold: The threshold value that activates the alert.
:paramtype threshold: float
:keyword window_size: the period of time (in ISO 8601 duration format) that is used to monitor
alert activity based on the threshold. If specified then it must be between 5 minutes and 1
day.
:paramtype window_size: ~datetime.timedelta
"""
super().__init__(**kwargs)
self.operator = operator
self.threshold = threshold
self.window_size = window_size
class ManagementEventRuleCondition(RuleCondition):
"""A management event rule condition.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: specifies the type of condition. This can be one of three types:
ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition
(based on the number of failures of a web test), and ThresholdRuleCondition (based on the
threshold of a metric). Required.
:vartype odata_type: str
:ivar data_source: the resource from which the rule collects its data. For this type dataSource
will always be of type RuleMetricDataSource.
:vartype data_source: ~$(python-base-namespace).v2015_07_01.models.RuleDataSource
:ivar aggregation: How the data that is collected should be combined over time and when the
alert is activated. Note that for management event alerts aggregation is optional – if it is
not provided then any event will cause the alert to activate.
:vartype aggregation:
~$(python-base-namespace).v2015_07_01.models.ManagementEventAggregationCondition
"""
_validation = {
"odata_type": {"required": True},
}
_attribute_map = {
"odata_type": {"key": "odata\\.type", "type": "str"},
"data_source": {"key": "dataSource", "type": "RuleDataSource"},
"aggregation": {"key": "aggregation", "type": "ManagementEventAggregationCondition"},
}
def __init__(
self,
*,
data_source: Optional["_models.RuleDataSource"] = None,
aggregation: Optional["_models.ManagementEventAggregationCondition"] = None,
**kwargs
):
"""
:keyword data_source: the resource from which the rule collects its data. For this type
dataSource will always be of type RuleMetricDataSource.
:paramtype data_source: ~$(python-base-namespace).v2015_07_01.models.RuleDataSource
:keyword aggregation: How the data that is collected should be combined over time and when the
alert is activated. Note that for management event alerts aggregation is optional – if it is
not provided then any event will cause the alert to activate.
:paramtype aggregation:
~$(python-base-namespace).v2015_07_01.models.ManagementEventAggregationCondition
"""
super().__init__(data_source=data_source, **kwargs)
self.odata_type = "Microsoft.Azure.Management.Insights.Models.ManagementEventRuleCondition" # type: str
self.aggregation = aggregation
class MetricAvailability(_serialization.Model):
"""Metric availability specifies the time grain (aggregation interval or frequency) and the retention period for that time grain.
:ivar time_grain: the time grain specifies the aggregation interval for the metric. Expressed
as a duration 'PT1M', 'P1D', etc.
:vartype time_grain: ~datetime.timedelta
:ivar retention: the retention period for the metric at the specified timegrain. Expressed as
a duration 'PT1M', 'P1D', etc.
:vartype retention: ~datetime.timedelta
:ivar location: Info about where the metric data is stored.
:vartype location: ~$(python-base-namespace).v2015_07_01.models.MetricAvailabilityLocation
"""
_attribute_map = {
"time_grain": {"key": "timeGrain", "type": "duration"},
"retention": {"key": "retention", "type": "duration"},
"location": {"key": "location", "type": "MetricAvailabilityLocation"},
}
def __init__(
self,
*,
time_grain: Optional[datetime.timedelta] = None,
retention: Optional[datetime.timedelta] = None,
location: Optional["_models.MetricAvailabilityLocation"] = None,
**kwargs
):
"""
:keyword time_grain: the time grain specifies the aggregation interval for the metric.
Expressed as a duration 'PT1M', 'P1D', etc.
:paramtype time_grain: ~datetime.timedelta
:keyword retention: the retention period for the metric at the specified timegrain. Expressed
as a duration 'PT1M', 'P1D', etc.
:paramtype retention: ~datetime.timedelta
:keyword location: Info about where the metric data is stored.
:paramtype location: ~$(python-base-namespace).v2015_07_01.models.MetricAvailabilityLocation
"""
super().__init__(**kwargs)
self.time_grain = time_grain
self.retention = retention
self.location = location
class MetricAvailabilityLocation(_serialization.Model):
"""Info about where the metric data is stored.
:ivar table_endpoint: The url for the storage account.
:vartype table_endpoint: str
:ivar table_info: Info about the storage tables storing this resources metrics.
:vartype table_info: list[~$(python-base-namespace).v2015_07_01.models.TableInfoEntry]
:ivar partition_key: partition key in the table where the metrics for this resource are stored.
:vartype partition_key: str
"""
_attribute_map = {
"table_endpoint": {"key": "tableEndpoint", "type": "str"},
"table_info": {"key": "tableInfo", "type": "[TableInfoEntry]"},
"partition_key": {"key": "partitionKey", "type": "str"},
}
def __init__(
self,
*,
table_endpoint: Optional[str] = None,
table_info: Optional[List["_models.TableInfoEntry"]] = None,
partition_key: Optional[str] = None,
**kwargs
):
"""
:keyword table_endpoint: The url for the storage account.
:paramtype table_endpoint: str
:keyword table_info: Info about the storage tables storing this resources metrics.
:paramtype table_info: list[~$(python-base-namespace).v2015_07_01.models.TableInfoEntry]
:keyword partition_key: partition key in the table where the metrics for this resource are
stored.
:paramtype partition_key: str
"""
super().__init__(**kwargs)
self.table_endpoint = table_endpoint
self.table_info = table_info
self.partition_key = partition_key
class MetricDefinition(_serialization.Model): # pylint: disable=too-many-instance-attributes
"""Metric definition class specifies the metadata for a metric.
:ivar resource_id: The resource identifier of the resource that emitted the metric.
:vartype resource_id: str
:ivar resource_uri: The resource identifier of the resource that emitted the metric.
:vartype resource_uri: str
:ivar name: the name and the display name of the metric, i.e. it is a localizable string.
:vartype name: ~$(python-base-namespace).v2015_07_01.models.LocalizableString
:ivar category: The category of this metric.
:vartype category: str
:ivar unit: the unit of the metric. Known values are: "Count", "Bytes", "Seconds",
"CountPerSecond", "BytesPerSecond", "Percent", and "MilliSeconds".
:vartype unit: str or ~$(python-base-namespace).v2015_07_01.models.Unit
:ivar start_time: Start time of the metadata request timespan.
:vartype start_time: ~datetime.datetime
:ivar end_time: End time of the metadata request timespan.
:vartype end_time: ~datetime.datetime
:ivar primary_aggregation_type: the primary aggregation type value defining how to use the
values for display. Known values are: "None", "Average", "Count", "Minimum", "Maximum", and
"Total".
:vartype primary_aggregation_type: str or
~$(python-base-namespace).v2015_07_01.models.AggregationType
:ivar supported_aggregation_types: List of all aggregations that are applicable for this
metric.
:vartype supported_aggregation_types: list[str or
~$(python-base-namespace).v2015_07_01.models.AggregationType]
:ivar metric_availabilities: the collection of what aggregation intervals are available to be
queried.
:vartype metric_availabilities:
list[~$(python-base-namespace).v2015_07_01.models.MetricAvailability]
:ivar id: the resource identifier of the metric definition.
:vartype id: str
"""
_attribute_map = {
"resource_id": {"key": "resourceId", "type": "str"},
"resource_uri": {"key": "resourceUri", "type": "str"},
"name": {"key": "name", "type": "LocalizableString"},
"category": {"key": "category", "type": "str"},
"unit": {"key": "unit", "type": "str"},
"start_time": {"key": "startTime", "type": "iso-8601"},
"end_time": {"key": "endTime", "type": "iso-8601"},
"primary_aggregation_type": {"key": "primaryAggregationType", "type": "str"},
"supported_aggregation_types": {"key": "supportedAggregationTypes", "type": "[str]"},
"metric_availabilities": {"key": "metricAvailabilities", "type": "[MetricAvailability]"},
"id": {"key": "id", "type": "str"},
}
def __init__(
self,
*,
resource_id: Optional[str] = None,
resource_uri: Optional[str] = None,
name: Optional["_models.LocalizableString"] = None,
category: Optional[str] = None,
unit: Optional[Union[str, "_models.Unit"]] = None,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
primary_aggregation_type: Optional[Union[str, "_models.AggregationType"]] = None,
supported_aggregation_types: Optional[List[Union[str, "_models.AggregationType"]]] = None,
metric_availabilities: Optional[List["_models.MetricAvailability"]] = None,
id: Optional[str] = None, # pylint: disable=redefined-builtin
**kwargs
):
"""
:keyword resource_id: The resource identifier of the resource that emitted the metric.
:paramtype resource_id: str
:keyword resource_uri: The resource identifier of the resource that emitted the metric.
:paramtype resource_uri: str
:keyword name: the name and the display name of the metric, i.e. it is a localizable string.
:paramtype name: ~$(python-base-namespace).v2015_07_01.models.LocalizableString
:keyword category: The category of this metric.
:paramtype category: str
:keyword unit: the unit of the metric. Known values are: "Count", "Bytes", "Seconds",
"CountPerSecond", "BytesPerSecond", "Percent", and "MilliSeconds".
:paramtype unit: str or ~$(python-base-namespace).v2015_07_01.models.Unit
:keyword start_time: Start time of the metadata request timespan.
:paramtype start_time: ~datetime.datetime
:keyword end_time: End time of the metadata request timespan.
:paramtype end_time: ~datetime.datetime
:keyword primary_aggregation_type: the primary aggregation type value defining how to use the
values for display. Known values are: "None", "Average", "Count", "Minimum", "Maximum", and
"Total".
:paramtype primary_aggregation_type: str or
~$(python-base-namespace).v2015_07_01.models.AggregationType
:keyword supported_aggregation_types: List of all aggregations that are applicable for this
metric.
:paramtype supported_aggregation_types: list[str or
~$(python-base-namespace).v2015_07_01.models.AggregationType]
:keyword metric_availabilities: the collection of what aggregation intervals are available to
be queried.
:paramtype metric_availabilities:
list[~$(python-base-namespace).v2015_07_01.models.MetricAvailability]
:keyword id: the resource identifier of the metric definition.
:paramtype id: str
"""
super().__init__(**kwargs)
self.resource_id = resource_id
self.resource_uri = resource_uri
self.name = name
self.category = category
self.unit = unit
self.start_time = start_time
self.end_time = end_time
self.primary_aggregation_type = primary_aggregation_type
self.supported_aggregation_types = supported_aggregation_types
self.metric_availabilities = metric_availabilities
self.id = id
class MetricDefinitionCollection(_serialization.Model):
"""Represents collection of metric definitions.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id for these metric definitions.
:vartype id: str
:ivar value: the values for the metric definitions. Required.
:vartype value: list[~$(python-base-namespace).v2015_07_01.models.MetricDefinition]
"""
_validation = {
"value": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"value": {"key": "value", "type": "[MetricDefinition]"},
}
def __init__(
self,
*,
value: List["_models.MetricDefinition"],
id: Optional[str] = None, # pylint: disable=redefined-builtin
**kwargs
):
"""
:keyword id: Resource Id for these metric definitions.
:paramtype id: str
:keyword value: the values for the metric definitions. Required.
:paramtype value: list[~$(python-base-namespace).v2015_07_01.models.MetricDefinition]
"""
super().__init__(**kwargs)
self.id = id
self.value = value
class MetricSettings(_serialization.Model):
"""Part of MultiTenantDiagnosticSettings. Specifies the settings for a particular metric.
All required parameters must be populated in order to send to Azure.
:ivar time_grain: the timegrain of the metric in ISO8601 format. Required.
:vartype time_grain: ~datetime.timedelta
:ivar enabled: a value indicating whether this timegrain is enabled. Required.
:vartype enabled: bool
:ivar retention_policy: the retention policy for this timegrain.
:vartype retention_policy: ~$(python-base-namespace).v2015_07_01.models.RetentionPolicy
"""
_validation = {
"time_grain": {"required": True},
"enabled": {"required": True},
}
_attribute_map = {
"time_grain": {"key": "timeGrain", "type": "duration"},
"enabled": {"key": "enabled", "type": "bool"},
"retention_policy": {"key": "retentionPolicy", "type": "RetentionPolicy"},
}
def __init__(
self,
*,
time_grain: datetime.timedelta,
enabled: bool,
retention_policy: Optional["_models.RetentionPolicy"] = None,
**kwargs
):
"""
:keyword time_grain: the timegrain of the metric in ISO8601 format. Required.
:paramtype time_grain: ~datetime.timedelta
:keyword enabled: a value indicating whether this timegrain is enabled. Required.
:paramtype enabled: bool
:keyword retention_policy: the retention policy for this timegrain.
:paramtype retention_policy: ~$(python-base-namespace).v2015_07_01.models.RetentionPolicy
"""
super().__init__(**kwargs)
self.time_grain = time_grain
self.enabled = enabled
self.retention_policy = retention_policy
class MetricSpecification(_serialization.Model): # pylint: disable=too-many-instance-attributes
"""Metric specification of operation.
:ivar name: The name of the metric.
:vartype name: str
:ivar display_name: Display name of the metric.
:vartype display_name: str
:ivar display_description: Display description of the metric.
:vartype display_description: str
:ivar unit: The metric unit. Possible values include:
Count,Bytes,Seconds,Percent,CountPerSecond,BytesPerSecond,MilliSeconds,ByteSeconds,Unspecified,BitsPerSecond,Cores,MilliCores,NanoCores.
:vartype unit: str
:ivar aggregation_type: The default metric aggregation type. Possible values include:
Total,Average,Maximum,Minimum,Count.
:vartype aggregation_type: str
:ivar supported_aggregation_types: The supported aggregation types for the metrics.
:vartype supported_aggregation_types: list[str]
:ivar supported_time_grain_types: The supported time grain types for the metrics.
:vartype supported_time_grain_types: list[str]
:ivar availabilities: The supported time grain types for the metrics.
:vartype availabilities: list[str]
:ivar lock_aggregation_type: The metric lock aggregation type.
:vartype lock_aggregation_type: str
:ivar category: Category or type of metric.
:vartype category: str
:ivar dimensions: The dimensions of metric.
:vartype dimensions: list[~$(python-base-namespace).v2015_07_01.models.DimensionProperties]
:ivar fill_gap_with_zero: Property to specify whether to fill empty gaps with zero.
:vartype fill_gap_with_zero: bool
:ivar internal_metric_name: The internal metric name.
:vartype internal_metric_name: str
"""
_attribute_map = {
"name": {"key": "name", "type": "str"},
"display_name": {"key": "displayName", "type": "str"},
"display_description": {"key": "displayDescription", "type": "str"},
"unit": {"key": "unit", "type": "str"},
"aggregation_type": {"key": "aggregationType", "type": "str"},
"supported_aggregation_types": {"key": "supportedAggregationTypes", "type": "[str]"},
"supported_time_grain_types": {"key": "supportedTimeGrainTypes", "type": "[str]"},
"availabilities": {"key": "availabilities", "type": "[str]"},
"lock_aggregation_type": {"key": "lockAggregationType", "type": "str"},
"category": {"key": "category", "type": "str"},
"dimensions": {"key": "dimensions", "type": "[DimensionProperties]"},
"fill_gap_with_zero": {"key": "fillGapWithZero", "type": "bool"},
"internal_metric_name": {"key": "internalMetricName", "type": "str"},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
display_description: Optional[str] = None,
unit: Optional[str] = None,
aggregation_type: Optional[str] = None,
supported_aggregation_types: Optional[List[str]] = None,
supported_time_grain_types: Optional[List[str]] = None,
availabilities: Optional[List[str]] = None,
lock_aggregation_type: Optional[str] = None,
category: Optional[str] = None,
dimensions: Optional[List["_models.DimensionProperties"]] = None,
fill_gap_with_zero: Optional[bool] = None,
internal_metric_name: Optional[str] = None,
**kwargs
):
"""
:keyword name: The name of the metric.
:paramtype name: str
:keyword display_name: Display name of the metric.
:paramtype display_name: str
:keyword display_description: Display description of the metric.
:paramtype display_description: str
:keyword unit: The metric unit. Possible values include:
Count,Bytes,Seconds,Percent,CountPerSecond,BytesPerSecond,MilliSeconds,ByteSeconds,Unspecified,BitsPerSecond,Cores,MilliCores,NanoCores.
:paramtype unit: str
:keyword aggregation_type: The default metric aggregation type. Possible values include:
Total,Average,Maximum,Minimum,Count.
:paramtype aggregation_type: str
:keyword supported_aggregation_types: The supported aggregation types for the metrics.
:paramtype supported_aggregation_types: list[str]
:keyword supported_time_grain_types: The supported time grain types for the metrics.
:paramtype supported_time_grain_types: list[str]
:keyword availabilities: The supported time grain types for the metrics.
:paramtype availabilities: list[str]
:keyword lock_aggregation_type: The metric lock aggregation type.
:paramtype lock_aggregation_type: str
:keyword category: Category or type of metric.
:paramtype category: str
:keyword dimensions: The dimensions of metric.
:paramtype dimensions: list[~$(python-base-namespace).v2015_07_01.models.DimensionProperties]
:keyword fill_gap_with_zero: Property to specify whether to fill empty gaps with zero.
:paramtype fill_gap_with_zero: bool
:keyword internal_metric_name: The internal metric name.
:paramtype internal_metric_name: str
"""
super().__init__(**kwargs)
self.name = name
self.display_name = display_name
self.display_description = display_description
self.unit = unit
self.aggregation_type = aggregation_type
self.supported_aggregation_types = supported_aggregation_types
self.supported_time_grain_types = supported_time_grain_types
self.availabilities = availabilities
self.lock_aggregation_type = lock_aggregation_type
self.category = category
self.dimensions = dimensions
self.fill_gap_with_zero = fill_gap_with_zero
self.internal_metric_name = internal_metric_name
class Operation(_serialization.Model):
"""Microsoft Insights API operation definition.
:ivar name: Operation name: {provider}/{resource}/{operation}.
:vartype name: str
:ivar is_data_action: Property to specify whether the action is a data action.
:vartype is_data_action: bool
:ivar display: Display metadata associated with the operation.
:vartype display: ~$(python-base-namespace).v2015_07_01.models.OperationDisplay
:ivar service_specification: One property of operation, include metric specifications.
:vartype service_specification:
~$(python-base-namespace).v2015_07_01.models.ServiceSpecification
"""
_attribute_map = {
"name": {"key": "name", "type": "str"},
"is_data_action": {"key": "isDataAction", "type": "bool"},
"display": {"key": "display", "type": "OperationDisplay"},
"service_specification": {"key": "properties.serviceSpecification", "type": "ServiceSpecification"},
}
def __init__(
self,
*,
name: Optional[str] = None,
is_data_action: Optional[bool] = None,
display: Optional["_models.OperationDisplay"] = None,
service_specification: Optional["_models.ServiceSpecification"] = None,
**kwargs
):
"""
:keyword name: Operation name: {provider}/{resource}/{operation}.
:paramtype name: str
:keyword is_data_action: Property to specify whether the action is a data action.
:paramtype is_data_action: bool
:keyword display: Display metadata associated with the operation.
:paramtype display: ~$(python-base-namespace).v2015_07_01.models.OperationDisplay
:keyword service_specification: One property of operation, include metric specifications.
:paramtype service_specification:
~$(python-base-namespace).v2015_07_01.models.ServiceSpecification
"""
super().__init__(**kwargs)
self.name = name
self.is_data_action = is_data_action
self.display = display
self.service_specification = service_specification
class OperationDisplay(_serialization.Model):
"""Display metadata associated with the operation.
:ivar publisher: The publisher of this operation.
:vartype publisher: str
:ivar provider: Service provider: Microsoft.Insights.
:vartype provider: str
:ivar resource: Resource on which the operation is performed: AlertRules, Autoscale, etc.
:vartype resource: str
:ivar operation: Operation type: Read, write, delete, etc.
:vartype operation: str
:ivar description: The description of the operation.
:vartype description: str
"""
_attribute_map = {
"publisher": {"key": "publisher", "type": "str"},
"provider": {"key": "provider", "type": "str"},
"resource": {"key": "resource", "type": "str"},
"operation": {"key": "operation", "type": "str"},
"description": {"key": "description", "type": "str"},
}
def __init__(
self,
*,
publisher: Optional[str] = None,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
"""
:keyword publisher: The publisher of this operation.
:paramtype publisher: str
:keyword provider: Service provider: Microsoft.Insights.
:paramtype provider: str
:keyword resource: Resource on which the operation is performed: AlertRules, Autoscale, etc.
:paramtype resource: str
:keyword operation: Operation type: Read, write, delete, etc.
:paramtype operation: str
:keyword description: The description of the operation.
:paramtype description: str
"""
super().__init__(**kwargs)
self.publisher = publisher
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
class OperationListResult(_serialization.Model):
"""Result of the request to list Microsoft.Insights operations. It contains a list of operations and a URL link to get the next set of results.
:ivar value: List of operations supported by the Microsoft.Insights provider.
:vartype value: list[~$(python-base-namespace).v2015_07_01.models.Operation]
:ivar next_link: URL to get the next set of operation list results if there are any.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[Operation]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.Operation"]] = None, next_link: Optional[str] = None, **kwargs):
"""
:keyword value: List of operations supported by the Microsoft.Insights provider.
:paramtype value: list[~$(python-base-namespace).v2015_07_01.models.Operation]
:keyword next_link: URL to get the next set of operation list results if there are any.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class RetentionPolicy(_serialization.Model):
"""Specifies the retention policy for the log.
All required parameters must be populated in order to send to Azure.
:ivar enabled: a value indicating whether the retention policy is enabled. Required.
:vartype enabled: bool
:ivar days: the number of days for the retention in days. A value of 0 will retain the events
indefinitely. Required.
:vartype days: int
"""
_validation = {
"enabled": {"required": True},
"days": {"required": True},
}
_attribute_map = {
"enabled": {"key": "enabled", "type": "bool"},
"days": {"key": "days", "type": "int"},
}
def __init__(self, *, enabled: bool, days: int, **kwargs):
"""
:keyword enabled: a value indicating whether the retention policy is enabled. Required.
:paramtype enabled: bool
:keyword days: the number of days for the retention in days. A value of 0 will retain the
events indefinitely. Required.
:paramtype days: int
"""
super().__init__(**kwargs)
self.enabled = enabled
self.days = days
class RuleAction(_serialization.Model):
"""The action that is performed when the alert rule becomes active, and when an alert condition is resolved.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
RuleEmailAction, RuleWebhookAction
All required parameters must be populated in order to send to Azure.
:ivar odata_type: specifies the type of the action. There are two types of actions:
RuleEmailAction and RuleWebhookAction. Required.
:vartype odata_type: str
"""
_validation = {
"odata_type": {"required": True},
}
_attribute_map = {
"odata_type": {"key": "odata\\.type", "type": "str"},
}
_subtype_map = {
"odata_type": {
"Microsoft.Azure.Management.Insights.Models.RuleEmailAction": "RuleEmailAction",
"Microsoft.Azure.Management.Insights.Models.RuleWebhookAction": "RuleWebhookAction",
}
}
def __init__(self, **kwargs):
""" """
super().__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class RuleDataSource(_serialization.Model):
"""The resource from which the rule collects its data.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
RuleManagementEventDataSource, RuleMetricDataSource
All required parameters must be populated in order to send to Azure.
:ivar odata_type: specifies the type of data source. There are two types of rule data sources:
RuleMetricDataSource and RuleManagementEventDataSource. Required.
:vartype odata_type: str
:ivar resource_uri: the resource identifier of the resource the rule monitors. **NOTE**\ : this
property cannot be updated for an existing rule.
:vartype resource_uri: str
:ivar legacy_resource_id: the legacy resource identifier of the resource the rule monitors.
**NOTE**\ : this property cannot be updated for an existing rule.
:vartype legacy_resource_id: str
:ivar resource_location: the location of the resource.
:vartype resource_location: str
:ivar metric_namespace: the namespace of the metric.
:vartype metric_namespace: str
"""
_validation = {
"odata_type": {"required": True},
}
_attribute_map = {
"odata_type": {"key": "odata\\.type", "type": "str"},
"resource_uri": {"key": "resourceUri", "type": "str"},
"legacy_resource_id": {"key": "legacyResourceId", "type": "str"},
"resource_location": {"key": "resourceLocation", "type": "str"},
"metric_namespace": {"key": "metricNamespace", "type": "str"},
}
_subtype_map = {
"odata_type": {
"Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource": "RuleManagementEventDataSource",
"Microsoft.Azure.Management.Insights.Models.RuleMetricDataSource": "RuleMetricDataSource",
}
}
def __init__(
self,
*,
resource_uri: Optional[str] = None,
legacy_resource_id: Optional[str] = None,
resource_location: Optional[str] = None,
metric_namespace: Optional[str] = None,
**kwargs
):
"""
:keyword resource_uri: the resource identifier of the resource the rule monitors. **NOTE**\ :
this property cannot be updated for an existing rule.
:paramtype resource_uri: str
:keyword legacy_resource_id: the legacy resource identifier of the resource the rule monitors.
**NOTE**\ : this property cannot be updated for an existing rule.
:paramtype legacy_resource_id: str
:keyword resource_location: the location of the resource.
:paramtype resource_location: str
:keyword metric_namespace: the namespace of the metric.
:paramtype metric_namespace: str
"""
super().__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.resource_uri = resource_uri
self.legacy_resource_id = legacy_resource_id
self.resource_location = resource_location
self.metric_namespace = metric_namespace
class RuleEmailAction(RuleAction):
"""Specifies the action to send email when the rule condition is evaluated. The discriminator is always RuleEmailAction in this case.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: specifies the type of the action. There are two types of actions:
RuleEmailAction and RuleWebhookAction. Required.
:vartype odata_type: str
:ivar send_to_service_owners: Whether the administrators (service and co-administrators) of the
service should be notified when the alert is activated.
:vartype send_to_service_owners: bool
:ivar custom_emails: the list of administrator's custom email addresses to notify of the
activation of the alert.
:vartype custom_emails: list[str]
"""
_validation = {
"odata_type": {"required": True},
}
_attribute_map = {
"odata_type": {"key": "odata\\.type", "type": "str"},
"send_to_service_owners": {"key": "sendToServiceOwners", "type": "bool"},
"custom_emails": {"key": "customEmails", "type": "[str]"},
}
def __init__(
self, *, send_to_service_owners: Optional[bool] = None, custom_emails: Optional[List[str]] = None, **kwargs
):
"""
:keyword send_to_service_owners: Whether the administrators (service and co-administrators) of
the service should be notified when the alert is activated.
:paramtype send_to_service_owners: bool
:keyword custom_emails: the list of administrator's custom email addresses to notify of the
activation of the alert.
:paramtype custom_emails: list[str]
"""
super().__init__(**kwargs)
self.odata_type = "Microsoft.Azure.Management.Insights.Models.RuleEmailAction" # type: str
self.send_to_service_owners = send_to_service_owners
self.custom_emails = custom_emails
class RuleManagementEventClaimsDataSource(_serialization.Model):
"""The claims for a rule management event data source.
:ivar email_address: the email address.
:vartype email_address: str
"""
_attribute_map = {
"email_address": {"key": "emailAddress", "type": "str"},
}
def __init__(self, *, email_address: Optional[str] = None, **kwargs):
"""
:keyword email_address: the email address.
:paramtype email_address: str
"""
super().__init__(**kwargs)
self.email_address = email_address
class RuleManagementEventDataSource(RuleDataSource): # pylint: disable=too-many-instance-attributes
"""A rule management event data source. The discriminator fields is always RuleManagementEventDataSource in this case.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: specifies the type of data source. There are two types of rule data sources:
RuleMetricDataSource and RuleManagementEventDataSource. Required.
:vartype odata_type: str
:ivar resource_uri: the resource identifier of the resource the rule monitors. **NOTE**\ : this
property cannot be updated for an existing rule.
:vartype resource_uri: str
:ivar legacy_resource_id: the legacy resource identifier of the resource the rule monitors.
**NOTE**\ : this property cannot be updated for an existing rule.
:vartype legacy_resource_id: str
:ivar resource_location: the location of the resource.
:vartype resource_location: str
:ivar metric_namespace: the namespace of the metric.
:vartype metric_namespace: str
:ivar event_name: the event name.
:vartype event_name: str
:ivar event_source: the event source.
:vartype event_source: str
:ivar level: the level.
:vartype level: str
:ivar operation_name: The name of the operation that should be checked for. If no name is
provided, any operation will match.
:vartype operation_name: str
:ivar resource_group_name: the resource group name.
:vartype resource_group_name: str
:ivar resource_provider_name: the resource provider name.
:vartype resource_provider_name: str
:ivar status: The status of the operation that should be checked for. If no status is provided,
any status will match.
:vartype status: str
:ivar sub_status: the substatus.
:vartype sub_status: str
:ivar claims: the claims.
:vartype claims:
~$(python-base-namespace).v2015_07_01.models.RuleManagementEventClaimsDataSource
"""
_validation = {
"odata_type": {"required": True},
}
_attribute_map = {
"odata_type": {"key": "odata\\.type", "type": "str"},
"resource_uri": {"key": "resourceUri", "type": "str"},
"legacy_resource_id": {"key": "legacyResourceId", "type": "str"},
"resource_location": {"key": "resourceLocation", "type": "str"},
"metric_namespace": {"key": "metricNamespace", "type": "str"},
"event_name": {"key": "eventName", "type": "str"},
"event_source": {"key": "eventSource", "type": "str"},
"level": {"key": "level", "type": "str"},
"operation_name": {"key": "operationName", "type": "str"},
"resource_group_name": {"key": "resourceGroupName", "type": "str"},
"resource_provider_name": {"key": "resourceProviderName", "type": "str"},
"status": {"key": "status", "type": "str"},
"sub_status": {"key": "subStatus", "type": "str"},
"claims": {"key": "claims", "type": "RuleManagementEventClaimsDataSource"},
}
def __init__(
self,
*,
resource_uri: Optional[str] = None,
legacy_resource_id: Optional[str] = None,
resource_location: Optional[str] = None,
metric_namespace: Optional[str] = None,
event_name: Optional[str] = None,
event_source: Optional[str] = None,
level: Optional[str] = None,
operation_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_provider_name: Optional[str] = None,
status: Optional[str] = None,
sub_status: Optional[str] = None,
claims: Optional["_models.RuleManagementEventClaimsDataSource"] = None,
**kwargs
):
"""
:keyword resource_uri: the resource identifier of the resource the rule monitors. **NOTE**\ :
this property cannot be updated for an existing rule.
:paramtype resource_uri: str
:keyword legacy_resource_id: the legacy resource identifier of the resource the rule monitors.
**NOTE**\ : this property cannot be updated for an existing rule.
:paramtype legacy_resource_id: str
:keyword resource_location: the location of the resource.
:paramtype resource_location: str
:keyword metric_namespace: the namespace of the metric.
:paramtype metric_namespace: str
:keyword event_name: the event name.
:paramtype event_name: str
:keyword event_source: the event source.
:paramtype event_source: str
:keyword level: the level.
:paramtype level: str
:keyword operation_name: The name of the operation that should be checked for. If no name is
provided, any operation will match.
:paramtype operation_name: str
:keyword resource_group_name: the resource group name.
:paramtype resource_group_name: str
:keyword resource_provider_name: the resource provider name.
:paramtype resource_provider_name: str
:keyword status: The status of the operation that should be checked for. If no status is
provided, any status will match.
:paramtype status: str
:keyword sub_status: the substatus.
:paramtype sub_status: str
:keyword claims: the claims.
:paramtype claims:
~$(python-base-namespace).v2015_07_01.models.RuleManagementEventClaimsDataSource
"""
super().__init__(
resource_uri=resource_uri,
legacy_resource_id=legacy_resource_id,
resource_location=resource_location,
metric_namespace=metric_namespace,
**kwargs
)
self.odata_type = "Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource" # type: str
self.event_name = event_name
self.event_source = event_source
self.level = level
self.operation_name = operation_name
self.resource_group_name = resource_group_name
self.resource_provider_name = resource_provider_name
self.status = status
self.sub_status = sub_status
self.claims = claims
class RuleMetricDataSource(RuleDataSource):
"""A rule metric data source. The discriminator value is always RuleMetricDataSource in this case.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: specifies the type of data source. There are two types of rule data sources:
RuleMetricDataSource and RuleManagementEventDataSource. Required.
:vartype odata_type: str
:ivar resource_uri: the resource identifier of the resource the rule monitors. **NOTE**\ : this
property cannot be updated for an existing rule.
:vartype resource_uri: str
:ivar legacy_resource_id: the legacy resource identifier of the resource the rule monitors.
**NOTE**\ : this property cannot be updated for an existing rule.
:vartype legacy_resource_id: str
:ivar resource_location: the location of the resource.
:vartype resource_location: str
:ivar metric_namespace: the namespace of the metric.
:vartype metric_namespace: str
:ivar metric_name: the name of the metric that defines what the rule monitors.
:vartype metric_name: str
"""
_validation = {
"odata_type": {"required": True},
}
_attribute_map = {
"odata_type": {"key": "odata\\.type", "type": "str"},
"resource_uri": {"key": "resourceUri", "type": "str"},
"legacy_resource_id": {"key": "legacyResourceId", "type": "str"},
"resource_location": {"key": "resourceLocation", "type": "str"},
"metric_namespace": {"key": "metricNamespace", "type": "str"},
"metric_name": {"key": "metricName", "type": "str"},
}
def __init__(
self,
*,
resource_uri: Optional[str] = None,
legacy_resource_id: Optional[str] = None,
resource_location: Optional[str] = None,
metric_namespace: Optional[str] = None,
metric_name: Optional[str] = None,
**kwargs
):
"""
:keyword resource_uri: the resource identifier of the resource the rule monitors. **NOTE**\ :
this property cannot be updated for an existing rule.
:paramtype resource_uri: str
:keyword legacy_resource_id: the legacy resource identifier of the resource the rule monitors.
**NOTE**\ : this property cannot be updated for an existing rule.
:paramtype legacy_resource_id: str
:keyword resource_location: the location of the resource.
:paramtype resource_location: str
:keyword metric_namespace: the namespace of the metric.
:paramtype metric_namespace: str
:keyword metric_name: the name of the metric that defines what the rule monitors.
:paramtype metric_name: str
"""
super().__init__(
resource_uri=resource_uri,
legacy_resource_id=legacy_resource_id,
resource_location=resource_location,
metric_namespace=metric_namespace,
**kwargs
)
self.odata_type = "Microsoft.Azure.Management.Insights.Models.RuleMetricDataSource" # type: str
self.metric_name = metric_name
class RuleWebhookAction(RuleAction):
"""Specifies the action to post to service when the rule condition is evaluated. The discriminator is always RuleWebhookAction in this case.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: specifies the type of the action. There are two types of actions:
RuleEmailAction and RuleWebhookAction. Required.
:vartype odata_type: str
:ivar service_uri: the service uri to Post the notification when the alert activates or
resolves.
:vartype service_uri: str
:ivar properties: the dictionary of custom properties to include with the post operation. These
data are appended to the webhook payload.
:vartype properties: dict[str, str]
"""
_validation = {
"odata_type": {"required": True},
}
_attribute_map = {
"odata_type": {"key": "odata\\.type", "type": "str"},
"service_uri": {"key": "serviceUri", "type": "str"},
"properties": {"key": "properties", "type": "{str}"},
}
def __init__(self, *, service_uri: Optional[str] = None, properties: Optional[Dict[str, str]] = None, **kwargs):
"""
:keyword service_uri: the service uri to Post the notification when the alert activates or
resolves.
:paramtype service_uri: str
:keyword properties: the dictionary of custom properties to include with the post operation.
These data are appended to the webhook payload.
:paramtype properties: dict[str, str]
"""
super().__init__(**kwargs)
self.odata_type = "Microsoft.Azure.Management.Insights.Models.RuleWebhookAction" # type: str
self.service_uri = service_uri
self.properties = properties
class ServiceDiagnosticSettingsResource(Resource):
"""Description of a service diagnostic setting.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Azure resource Id.
:vartype id: str
:ivar name: Azure resource name.
:vartype name: str
:ivar type: Azure resource type.
:vartype type: str
:ivar location: Resource location. Required.
:vartype location: str
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
:ivar storage_account_id: The resource ID of the storage account to which you would like to
send Diagnostic Logs.
:vartype storage_account_id: str
:ivar service_bus_rule_id: The service bus rule ID of the service bus namespace in which you
would like to have Event Hubs created for streaming Diagnostic Logs. The rule ID is of the
format: '{service bus resource ID}/authorizationrules/{key name}'.
:vartype service_bus_rule_id: str
:ivar metrics: the list of metric settings.
:vartype metrics: list[~$(python-base-namespace).v2015_07_01.models.MetricSettings]
:ivar logs: the list of logs settings.
:vartype logs: list[~$(python-base-namespace).v2015_07_01.models.LogSettings]
:ivar workspace_id: The workspace ID (resource ID of a Log Analytics workspace) for a Log
Analytics workspace to which you would like to send Diagnostic Logs. Example:
/subscriptions/4b9e8510-67ab-4e9a-95a9-e2f1e570ea9c/resourceGroups/insights-integration/providers/Microsoft.OperationalInsights/workspaces/viruela2.
:vartype workspace_id: str
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"location": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
"storage_account_id": {"key": "properties.storageAccountId", "type": "str"},
"service_bus_rule_id": {"key": "properties.serviceBusRuleId", "type": "str"},
"metrics": {"key": "properties.metrics", "type": "[MetricSettings]"},
"logs": {"key": "properties.logs", "type": "[LogSettings]"},
"workspace_id": {"key": "properties.workspaceId", "type": "str"},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
storage_account_id: Optional[str] = None,
service_bus_rule_id: Optional[str] = None,
metrics: Optional[List["_models.MetricSettings"]] = None,
logs: Optional[List["_models.LogSettings"]] = None,
workspace_id: Optional[str] = None,
**kwargs
):
"""
:keyword location: Resource location. Required.
:paramtype location: str
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
:keyword storage_account_id: The resource ID of the storage account to which you would like to
send Diagnostic Logs.
:paramtype storage_account_id: str
:keyword service_bus_rule_id: The service bus rule ID of the service bus namespace in which you
would like to have Event Hubs created for streaming Diagnostic Logs. The rule ID is of the
format: '{service bus resource ID}/authorizationrules/{key name}'.
:paramtype service_bus_rule_id: str
:keyword metrics: the list of metric settings.
:paramtype metrics: list[~$(python-base-namespace).v2015_07_01.models.MetricSettings]
:keyword logs: the list of logs settings.
:paramtype logs: list[~$(python-base-namespace).v2015_07_01.models.LogSettings]
:keyword workspace_id: The workspace ID (resource ID of a Log Analytics workspace) for a Log
Analytics workspace to which you would like to send Diagnostic Logs. Example:
/subscriptions/4b9e8510-67ab-4e9a-95a9-e2f1e570ea9c/resourceGroups/insights-integration/providers/Microsoft.OperationalInsights/workspaces/viruela2.
:paramtype workspace_id: str
"""
super().__init__(location=location, tags=tags, **kwargs)
self.storage_account_id = storage_account_id
self.service_bus_rule_id = service_bus_rule_id
self.metrics = metrics
self.logs = logs
self.workspace_id = workspace_id
class ServiceSpecification(_serialization.Model):
"""One property of operation, include log specifications.
:ivar log_specifications: Log specifications of operation.
:vartype log_specifications:
list[~$(python-base-namespace).v2015_07_01.models.LogSpecification]
:ivar metric_specifications: Metric specifications of operation.
:vartype metric_specifications:
list[~$(python-base-namespace).v2015_07_01.models.MetricSpecification]
:ivar legacy_metric_specifications: Legacy Metric specifications for operation. Deprecated, do
not use.
:vartype legacy_metric_specifications: JSON
"""
_attribute_map = {
"log_specifications": {"key": "logSpecifications", "type": "[LogSpecification]"},
"metric_specifications": {"key": "metricSpecifications", "type": "[MetricSpecification]"},
"legacy_metric_specifications": {"key": "legacyMetricSpecifications", "type": "object"},
}
def __init__(
self,
*,
log_specifications: Optional[List["_models.LogSpecification"]] = None,
metric_specifications: Optional[List["_models.MetricSpecification"]] = None,
legacy_metric_specifications: Optional[JSON] = None,
**kwargs
):
"""
:keyword log_specifications: Log specifications of operation.
:paramtype log_specifications:
list[~$(python-base-namespace).v2015_07_01.models.LogSpecification]
:keyword metric_specifications: Metric specifications of operation.
:paramtype metric_specifications:
list[~$(python-base-namespace).v2015_07_01.models.MetricSpecification]
:keyword legacy_metric_specifications: Legacy Metric specifications for operation. Deprecated,
do not use.
:paramtype legacy_metric_specifications: JSON
"""
super().__init__(**kwargs)
self.log_specifications = log_specifications
self.metric_specifications = metric_specifications
self.legacy_metric_specifications = legacy_metric_specifications
class TableInfoEntry(_serialization.Model):
"""Info about a storage table with metric data.
:ivar table_name: Name of a table with metric data for this resource.
:vartype table_name: str
:ivar start_time: Start time of the metrics in this table.
:vartype start_time: ~datetime.datetime
:ivar end_time: End time of the metrics in this table.
:vartype end_time: ~datetime.datetime
:ivar sas_token: For the storage account with metrics.
:vartype sas_token: str
:ivar sas_token_expiration_time: For the storage account with metrics.
:vartype sas_token_expiration_time: ~datetime.datetime
"""
_attribute_map = {
"table_name": {"key": "tableName", "type": "str"},
"start_time": {"key": "startTime", "type": "iso-8601"},
"end_time": {"key": "endTime", "type": "iso-8601"},
"sas_token": {"key": "sasToken", "type": "str"},
"sas_token_expiration_time": {"key": "sasTokenExpirationTime", "type": "iso-8601"},
}
def __init__(
self,
*,
table_name: Optional[str] = None,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
sas_token: Optional[str] = None,
sas_token_expiration_time: Optional[datetime.datetime] = None,
**kwargs
):
"""
:keyword table_name: Name of a table with metric data for this resource.
:paramtype table_name: str
:keyword start_time: Start time of the metrics in this table.
:paramtype start_time: ~datetime.datetime
:keyword end_time: End time of the metrics in this table.
:paramtype end_time: ~datetime.datetime
:keyword sas_token: For the storage account with metrics.
:paramtype sas_token: str
:keyword sas_token_expiration_time: For the storage account with metrics.
:paramtype sas_token_expiration_time: ~datetime.datetime
"""
super().__init__(**kwargs)
self.table_name = table_name
self.start_time = start_time
self.end_time = end_time
self.sas_token = sas_token
self.sas_token_expiration_time = sas_token_expiration_time
class ThresholdRuleCondition(RuleCondition):
"""A rule condition based on a metric crossing a threshold.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: specifies the type of condition. This can be one of three types:
ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition
(based on the number of failures of a web test), and ThresholdRuleCondition (based on the
threshold of a metric). Required.
:vartype odata_type: str
:ivar data_source: the resource from which the rule collects its data. For this type dataSource
will always be of type RuleMetricDataSource.
:vartype data_source: ~$(python-base-namespace).v2015_07_01.models.RuleDataSource
:ivar operator: the operator used to compare the data and the threshold. Required. Known values
are: "GreaterThan", "GreaterThanOrEqual", "LessThan", and "LessThanOrEqual".
:vartype operator: str or ~$(python-base-namespace).v2015_07_01.models.ConditionOperator
:ivar threshold: the threshold value that activates the alert. Required.
:vartype threshold: float
:ivar window_size: the period of time (in ISO 8601 duration format) that is used to monitor
alert activity based on the threshold. If specified then it must be between 5 minutes and 1
day.
:vartype window_size: ~datetime.timedelta
:ivar time_aggregation: the time aggregation operator. How the data that are collected should
be combined over time. The default value is the PrimaryAggregationType of the Metric. Known
values are: "Average", "Minimum", "Maximum", "Total", and "Last".
:vartype time_aggregation: str or
~$(python-base-namespace).v2015_07_01.models.TimeAggregationOperator
"""
_validation = {
"odata_type": {"required": True},
"operator": {"required": True},
"threshold": {"required": True},
}
_attribute_map = {
"odata_type": {"key": "odata\\.type", "type": "str"},
"data_source": {"key": "dataSource", "type": "RuleDataSource"},
"operator": {"key": "operator", "type": "str"},
"threshold": {"key": "threshold", "type": "float"},
"window_size": {"key": "windowSize", "type": "duration"},
"time_aggregation": {"key": "timeAggregation", "type": "str"},
}
def __init__(
self,
*,
operator: Union[str, "_models.ConditionOperator"],
threshold: float,
data_source: Optional["_models.RuleDataSource"] = None,
window_size: Optional[datetime.timedelta] = None,
time_aggregation: Optional[Union[str, "_models.TimeAggregationOperator"]] = None,
**kwargs
):
"""
:keyword data_source: the resource from which the rule collects its data. For this type
dataSource will always be of type RuleMetricDataSource.
:paramtype data_source: ~$(python-base-namespace).v2015_07_01.models.RuleDataSource
:keyword operator: the operator used to compare the data and the threshold. Required. Known
values are: "GreaterThan", "GreaterThanOrEqual", "LessThan", and "LessThanOrEqual".
:paramtype operator: str or ~$(python-base-namespace).v2015_07_01.models.ConditionOperator
:keyword threshold: the threshold value that activates the alert. Required.
:paramtype threshold: float
:keyword window_size: the period of time (in ISO 8601 duration format) that is used to monitor
alert activity based on the threshold. If specified then it must be between 5 minutes and 1
day.
:paramtype window_size: ~datetime.timedelta
:keyword time_aggregation: the time aggregation operator. How the data that are collected
should be combined over time. The default value is the PrimaryAggregationType of the Metric.
Known values are: "Average", "Minimum", "Maximum", "Total", and "Last".
:paramtype time_aggregation: str or
~$(python-base-namespace).v2015_07_01.models.TimeAggregationOperator
"""
super().__init__(data_source=data_source, **kwargs)
self.odata_type = "Microsoft.Azure.Management.Insights.Models.ThresholdRuleCondition" # type: str
self.operator = operator
self.threshold = threshold
self.window_size = window_size
self.time_aggregation = time_aggregation
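# Illustrative sketch (not part of the generated code): composing the models above
# into a metric-threshold alert condition. The metric name and resource URI are
# hypothetical placeholders; the operator and time_aggregation values come from the
# docstrings above.
#
#     data_source = RuleMetricDataSource(
#         resource_uri="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Compute/virtualMachines/<vm>",
#         metric_name="Percentage CPU",
#     )
#     condition = ThresholdRuleCondition(
#         operator="GreaterThan",
#         threshold=90.0,
#         data_source=data_source,
#         window_size=datetime.timedelta(minutes=5),
#         time_aggregation="Average",
#     )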
| {
"content_hash": "cf2ae5614b26cd3cb91744cc7dc929b3",
"timestamp": "",
"source": "github",
"line_count": 1948,
"max_line_length": 165,
"avg_line_length": 43.65092402464066,
"alnum_prop": 0.6520486405118073,
"repo_name": "Azure/azure-sdk-for-python",
"id": "9d9565b0d7a662ab0daf5fcd82e702eaf6381fe7",
"size": "85537",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2015_07_01/models/_models_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""Implementation of tenant API."""
from __future__ import absolute_import
from .. import admin
from .. import authz
from .. import context
from .. import schema
class API(object):
"""Treadmill Tenant REST api."""
def __init__(self):
def _admin_tnt():
"""Lazily return admin object."""
return admin.Tenant(context.GLOBAL.ldap.conn)
def _list():
"""List tenants."""
return _admin_tnt().list({})
@schema.schema({'$ref': 'tenant.json#/resource_id'})
def get(rsrc_id):
"""Get tenant configuration."""
result = _admin_tnt().get(rsrc_id)
result['_id'] = rsrc_id
del result['tenant']
return result
@schema.schema(
{'$ref': 'tenant.json#/resource_id'},
{'allOf': [{'$ref': 'tenant.json#/resource'},
{'$ref': 'tenant.json#/verbs/create'}]}
)
def create(rsrc_id, rsrc):
"""Create tenant."""
_admin_tnt().create(rsrc_id, rsrc)
return _admin_tnt().get(rsrc_id)
@schema.schema(
{'$ref': 'tenant.json#/resource_id'},
{'allOf': [{'$ref': 'tenant.json#/resource'},
{'$ref': 'tenant.json#/verbs/update'}]}
)
def update(rsrc_id, rsrc):
"""Update tenant."""
_admin_tnt().update(rsrc_id, rsrc)
return _admin_tnt().get(rsrc_id)
@schema.schema({'$ref': 'tenant.json#/resource_id'})
def delete(rsrc_id):
"""Delete tenant."""
_admin_tnt().delete(rsrc_id)
self.list = _list
self.get = get
self.create = create
self.update = update
self.delete = delete
def init(authorizer):
"""Returns module API wrapped with authorizer function."""
api = API()
return authz.wrap(api, authorizer)
| {
"content_hash": "e5e8ec113047e2a52052e5092596abf5",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 62,
"avg_line_length": 29.12121212121212,
"alnum_prop": 0.5098855359001041,
"repo_name": "toenuff/treadmill",
"id": "c54245ee1a5e72efa7b34880a24d749298911de8",
"size": "1922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/python/treadmill/api/tenant.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Prolog",
"bytes": "19323"
},
{
"name": "Python",
"bytes": "1511919"
},
{
"name": "Shell",
"bytes": "29014"
}
],
"symlink_target": ""
} |
from .models import Note
from rest_framework import serializers, viewsets, routers
class NoteSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Note
fields = ('id', 'content', 'time', 'update_time')
class NoteViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Note.objects.all()
serializer_class = NoteSerializer
router = routers.DefaultRouter()
router.register(r'notes', NoteViewSet)
| {
"content_hash": "b99a4dcb2842d3edc5fb36a58ef438ef",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 61,
"avg_line_length": 25.823529411764707,
"alnum_prop": 0.7380410022779044,
"repo_name": "kyunooh/JellyBlog",
"id": "78f725f957c395bba5aeda5d3502d8d87c37a56a",
"size": "439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jellyblog/serializer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24244"
},
{
"name": "HTML",
"bytes": "62411"
},
{
"name": "JavaScript",
"bytes": "86677"
},
{
"name": "Python",
"bytes": "42385"
}
],
"symlink_target": ""
} |
"""
This module defines various classes of supported actions. All actions are
implemented as static methods, but are defined using classes (as opposed to
modules) so that a set of well-defined actions can be namespaced easily.
"""
import os
import shutil
def get_nested_dict(input_dict, key):
"""
Helper function to interpret a nested dict input.
"""
current = input_dict
toks = key.split("->")
n = len(toks)
for i, tok in enumerate(toks):
if tok not in current and i < n - 1:
current[tok] = {}
elif i == n - 1:
return current, toks[-1]
current = current[tok]
return None
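# Illustrative example (behaviour inferred from the code above): the "->"-separated
# key walks (and creates) nested dicts and returns the innermost dict plus the final
# key, so callers can assign into it directly.
#
#     d = {"a": {"b": 1}}
#     sub, last = get_nested_dict(d, "a->c")   # -> ({"b": 1}, "c")
#     sub[last] = 2                            # d is now {"a": {"b": 1, "c": 2}}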
class DictActions:
"""
Class to implement the supported mongo-like modifications on a dict.
Supported keywords include the following Mongo-based keywords, with the
usual meanings (refer to Mongo documentation for information):
_inc
_set
_unset
_push
_push_all
_add_to_set (but _each is not supported)
_pop
_pull
_pull_all
_rename
However, note that "_set" does not support modification of nested dicts
using the mongo {"a.b":1} notation. This is because mongo does not allow
keys with "." to be inserted. Instead, nested dict modification is
supported using a special "->" keyword, e.g. {"a->b": 1}
"""
@staticmethod
def set(input_dict, settings):
"""
Sets a value using MongoDB syntax.
Args:
input_dict (dict): The input dictionary to be modified.
settings (dict): The specification of the modification to be made.
"""
for k, v in settings.items():
(d, key) = get_nested_dict(input_dict, k)
d[key] = v
@staticmethod
def unset(input_dict, settings):
"""
Unsets a value using MongoDB syntax.
Args:
input_dict (dict): The input dictionary to be modified.
settings (dict): The specification of the modification to be made.
"""
for k in settings.keys():
(d, key) = get_nested_dict(input_dict, k)
del d[key]
@staticmethod
def push(input_dict, settings):
"""
Push to a list using MongoDB syntax.
Args:
input_dict (dict): The input dictionary to be modified.
settings (dict): The specification of the modification to be made.
"""
for k, v in settings.items():
(d, key) = get_nested_dict(input_dict, k)
if key in d:
d[key].append(v)
else:
d[key] = [v]
@staticmethod
def push_all(input_dict, settings):
"""
Push multiple items to a list using MongoDB syntax.
Args:
input_dict (dict): The input dictionary to be modified.
settings (dict): The specification of the modification to be made.
"""
for k, v in settings.items():
(d, key) = get_nested_dict(input_dict, k)
if key in d:
d[key].extend(v)
else:
d[key] = v
@staticmethod
def inc(input_dict, settings):
"""
        Increment a value using MongoDB syntax.
Args:
input_dict (dict): The input dictionary to be modified.
settings (dict): The specification of the modification to be made.
"""
for k, v in settings.items():
(d, key) = get_nested_dict(input_dict, k)
if key in d:
d[key] += v
else:
d[key] = v
@staticmethod
def rename(input_dict, settings):
"""
Rename a key using MongoDB syntax.
Args:
input_dict (dict): The input dictionary to be modified.
settings (dict): The specification of the modification to be made.
"""
for k, v in settings.items():
if k in input_dict:
input_dict[v] = input_dict[k]
del input_dict[k]
@staticmethod
def add_to_set(input_dict, settings):
"""
Add to set using MongoDB syntax.
Args:
input_dict (dict): The input dictionary to be modified.
settings (dict): The specification of the modification to be made.
"""
for k, v in settings.items():
(d, key) = get_nested_dict(input_dict, k)
if key in d and (not isinstance(d[key], list)):
raise ValueError(f"Keyword {k} does not refer to an array.")
if key in d and v not in d[key]:
d[key].append(v)
elif key not in d:
d[key] = v
@staticmethod
def pull(input_dict, settings):
"""
Pull an item using MongoDB syntax.
Args:
input_dict (dict): The input dictionary to be modified.
settings (dict): The specification of the modification to be made.
"""
for k, v in settings.items():
(d, key) = get_nested_dict(input_dict, k)
if key in d and (not isinstance(d[key], list)):
raise ValueError(f"Keyword {k} does not refer to an array.")
if key in d:
d[key] = [i for i in d[key] if i != v]
@staticmethod
def pull_all(input_dict, settings):
"""
Pull multiple items to a list using MongoDB syntax.
Args:
input_dict (dict): The input dictionary to be modified.
settings (dict): The specification of the modification to be made.
"""
for k, v in settings.items():
if k in input_dict and (not isinstance(input_dict[k], list)):
raise ValueError(f"Keyword {k} does not refer to an array.")
for i in v:
DictActions.pull(input_dict, {k: i})
@staticmethod
def pop(input_dict, settings):
"""
Pop item from a list using MongoDB syntax.
Args:
input_dict (dict): The input dictionary to be modified.
settings (dict): The specification of the modification to be made.
"""
for k, v in settings.items():
(d, key) = get_nested_dict(input_dict, k)
if key in d and (not isinstance(d[key], list)):
raise ValueError(f"Keyword {k} does not refer to an array.")
if v == 1:
d[key].pop()
elif v == -1:
d[key].pop(0)
class FileActions:
"""
Class of supported file actions. For FileActions, the modder class takes in
a filename as a string. The filename should preferably be a full path to
avoid ambiguity.
"""
@staticmethod
def file_create(filename, settings):
"""
Creates a file.
Args:
filename (str): Filename.
settings (dict): Must be {"content": actual_content}
"""
if len(settings) != 1:
raise ValueError("Settings must only contain one item with key " "'content'.")
for k, v in settings.items():
if k == "content":
with open(filename, "w") as f:
f.write(v)
@staticmethod
def file_move(filename, settings):
"""
Moves a file. {'_file_move': {'dest': 'new_file_name'}}
Args:
filename (str): Filename.
settings (dict): Must be {"dest": path of new file}
"""
if len(settings) != 1:
raise ValueError("Settings must only contain one item with key " "'dest'.")
for k, v in settings.items():
if k == "dest":
shutil.move(filename, v)
@staticmethod
def file_delete(filename, settings):
"""
Deletes a file. {'_file_delete': {'mode': "actual"}}
Args:
filename (str): Filename.
settings (dict): Must be {"mode": actual/simulated}. Simulated
mode only prints the action without performing it.
"""
if len(settings) != 1:
raise ValueError("Settings must only contain one item with key " "'mode'.")
for k, v in settings.items():
if k == "mode" and v == "actual":
try:
os.remove(filename)
except OSError:
# Skip file not found error.
pass
elif k == "mode" and v == "simulated":
print(f"Simulated removal of {filename}")
@staticmethod
def file_copy(filename, settings):
"""
Copies a file. {'_file_copy': {'dest': 'new_file_name'}}
Args:
filename (str): Filename.
settings (dict): Must be {"dest": path of new file}
"""
for k, v in settings.items():
if k.startswith("dest"):
shutil.copyfile(filename, v)
@staticmethod
def file_modify(filename, settings):
"""
Modifies file access
Args:
filename (str): Filename.
settings (dict): Can be "mode" or "owners"
"""
for k, v in settings.items():
if k == "mode":
os.chmod(filename, v)
if k == "owners":
os.chown(filename, v)
| {
"content_hash": "3b8037d3b20eb2b506482ea09fc15d65",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 90,
"avg_line_length": 31.89041095890411,
"alnum_prop": 0.5343642611683849,
"repo_name": "materialsproject/custodian",
"id": "3e833ce38d4c4b7d08b67a1f42ab51e3588c6bd0",
"size": "9312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "custodian/ansible/actions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "1133"
},
{
"name": "HTML",
"bytes": "2710"
},
{
"name": "Makefile",
"bytes": "5577"
},
{
"name": "Python",
"bytes": "532313"
},
{
"name": "Roff",
"bytes": "1552939"
},
{
"name": "Shell",
"bytes": "7472"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import re
from .amp import AMPIE
from .common import InfoExtractor
from ..utils import (
parse_duration,
parse_iso8601,
try_get,
)
class AbcNewsVideoIE(AMPIE):
IE_NAME = 'abcnews:video'
_VALID_URL = r'''(?x)
https?://
(?:
abcnews\.go\.com/
(?:
(?:[^/]+/)*video/(?P<display_id>[0-9a-z-]+)-|
video/(?:embed|itemfeed)\?.*?\bid=
)|
fivethirtyeight\.abcnews\.go\.com/video/embed/\d+/
)
(?P<id>\d+)
'''
_TESTS = [{
'url': 'http://abcnews.go.com/ThisWeek/video/week-exclusive-irans-foreign-minister-zarif-20411932',
'info_dict': {
'id': '20411932',
'ext': 'mp4',
'display_id': 'week-exclusive-irans-foreign-minister-zarif',
'title': '\'This Week\' Exclusive: Iran\'s Foreign Minister Zarif',
'description': 'George Stephanopoulos goes one-on-one with Iranian Foreign Minister Dr. Javad Zarif.',
'duration': 180,
'thumbnail': r're:^https?://.*\.jpg$',
'timestamp': 1380454200,
'upload_date': '20130929',
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://abcnews.go.com/video/embed?id=46979033',
'only_matching': True,
}, {
'url': 'http://abcnews.go.com/2020/video/2020-husband-stands-teacher-jail-student-affairs-26119478',
'only_matching': True,
}, {
'url': 'http://abcnews.go.com/video/itemfeed?id=46979033',
'only_matching': True,
}, {
'url': 'https://abcnews.go.com/GMA/News/video/history-christmas-story-67894761',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('display_id')
video_id = mobj.group('id')
info_dict = self._extract_feed_info(
'http://abcnews.go.com/video/itemfeed?id=%s' % video_id)
info_dict.update({
'id': video_id,
'display_id': display_id,
})
return info_dict
class AbcNewsIE(InfoExtractor):
IE_NAME = 'abcnews'
_VALID_URL = r'https?://abcnews\.go\.com/(?:[^/]+/)+(?P<display_id>[0-9a-z-]+)/story\?id=(?P<id>\d+)'
_TESTS = [{
# Youtube Embeds
'url': 'https://abcnews.go.com/Entertainment/peter-billingsley-child-actor-christmas-story-hollywood-power/story?id=51286501',
'info_dict': {
'id': '51286501',
'title': "Peter Billingsley: From child actor in 'A Christmas Story' to Hollywood power player",
'description': 'Billingsley went from a child actor to Hollywood power player.',
},
'playlist_count': 5,
}, {
'url': 'http://abcnews.go.com/Entertainment/justin-timberlake-performs-stop-feeling-eurovision-2016/story?id=39125818',
'info_dict': {
'id': '38897857',
'ext': 'mp4',
'title': 'Justin Timberlake Drops Hints For Secret Single',
'description': 'Lara Spencer reports the buzziest stories of the day in "GMA" Pop News.',
'upload_date': '20160505',
'timestamp': 1462442280,
},
'params': {
# m3u8 download
'skip_download': True,
# The embedded YouTube video is blocked due to copyright issues
'playlist_items': '1',
},
'add_ie': ['AbcNewsVideo'],
}, {
'url': 'http://abcnews.go.com/Technology/exclusive-apple-ceo-tim-cook-iphone-cracking-software/story?id=37173343',
'only_matching': True,
}, {
# inline.type == 'video'
'url': 'http://abcnews.go.com/Technology/exclusive-apple-ceo-tim-cook-iphone-cracking-software/story?id=37173343',
'only_matching': True,
}]
def _real_extract(self, url):
story_id = self._match_id(url)
webpage = self._download_webpage(url, story_id)
story = self._parse_json(self._search_regex(
r"window\['__abcnews__'\]\s*=\s*({.+?});",
webpage, 'data'), story_id)['page']['content']['story']['everscroll'][0]
article_contents = story.get('articleContents') or {}
def entries():
featured_video = story.get('featuredVideo') or {}
feed = try_get(featured_video, lambda x: x['video']['feed'])
if feed:
yield {
'_type': 'url',
'id': featured_video.get('id'),
'title': featured_video.get('name'),
'url': feed,
'thumbnail': featured_video.get('images'),
'description': featured_video.get('description'),
'timestamp': parse_iso8601(featured_video.get('uploadDate')),
'duration': parse_duration(featured_video.get('duration')),
'ie_key': AbcNewsVideoIE.ie_key(),
}
for inline in (article_contents.get('inlines') or []):
inline_type = inline.get('type')
if inline_type == 'iframe':
iframe_url = try_get(inline, lambda x: x['attrs']['src'])
if iframe_url:
yield self.url_result(iframe_url)
elif inline_type == 'video':
video_id = inline.get('id')
if video_id:
yield {
'_type': 'url',
'id': video_id,
'url': 'http://abcnews.go.com/video/embed?id=' + video_id,
'thumbnail': inline.get('imgSrc') or inline.get('imgDefault'),
'description': inline.get('description'),
'duration': parse_duration(inline.get('duration')),
'ie_key': AbcNewsVideoIE.ie_key(),
}
return self.playlist_result(
entries(), story_id, article_contents.get('headline'),
article_contents.get('subHead'))
| {
"content_hash": "8e75545bd0a8a1b3042859905728cd4a",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 134,
"avg_line_length": 40.66242038216561,
"alnum_prop": 0.5007832080200502,
"repo_name": "yasoob/youtube-dl-GUI",
"id": "908c833770038840f6b073ef7a5f8f5bf61278fc",
"size": "6400",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "youtube_dl/extractor/abcnews.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Inno Setup",
"bytes": "7102"
},
{
"name": "Python",
"bytes": "1335226"
}
],
"symlink_target": ""
} |
from cogent.app.parameters import FlagParameter, ValuedParameter
from cogent.app.util import CommandLineApplication, ResultPath
"""Application controller for sfffile"""
__author__ = "Kyle Bittinger"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Kyle Bittinger"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Kyle Bittinger"
__email__ = "[email protected]"
__status__ = "Prototype"
class Sfffile(CommandLineApplication):
"""Simple sfffile application controller.
"""
_options = {
# output filepath
'-o': ValuedParameter('-', 'o', Delimiter=' '),
# file of accession numbers to be included
'-i': ValuedParameter('-', 'i', Delimiter=' '),
# file of accession numbers to be excluded
'-e': ValuedParameter('-', 'e', Delimiter=' '),
# file of custom trim points
'-t': ValuedParameter('-', 't', Delimiter=' '),
# number of cycles in output sff
'-c': ValuedParameter('-', 'c', Delimiter=' '),
# shortcut for -c 42
'-gs20': FlagParameter('-', 'gs20'),
# shortcut for -c 100
'-gsflx': FlagParameter('-', 'gsflx'),
# split multiplexed reads
'-s': ValuedParameter('-', 's', Delimiter=' '),
# custom MID configuration file
'-mcf': ValuedParameter('-', 'mcf', Delimiter=' '),
# prevent propagation of sff index
'-nmft': FlagParameter('-', 'nmft'),
}
_parameters = {}
_parameters.update(_options)
_input_handler = '_input_as_path'
_command = 'sfffile'
def _get_result_paths(self, data):
"""Collect the resultant SFF file in the results.
Because cogent.app.util.CommandLineAppResult opens output
files in text mode, this method may not be portable for
Windows users. A more portable solution would be to not use
the app controller results, but instead specify the output SFF
filepath manually via the '-o' parameter.
"""
if self.Parameters['-o'].isOn():
sff_path = self.Parameters['-o'].Value
else:
sff_path = '454Reads.sff'
return {'sff': ResultPath(sff_path)}
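    # Illustrative usage (a sketch; assumes the standard cogent Parameter API and
    # placeholder filenames): supplying the output path explicitly via '-o', as
    # suggested above, makes the result path deterministic.
    #
    #     app = Sfffile()
    #     app.Parameters['-o'].on('reads_subset.sff')
    #     app.Parameters['-i'].on('accessions.txt')
    #     result = app('input.sff')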
def _accept_exit_status(self, exit_status):
"""Accept an exit status of 0 for the sfffile program.
"""
return exit_status == 0
| {
"content_hash": "be09a82e01e8a2f04acf36b92eb960f8",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 70,
"avg_line_length": 35.92424242424242,
"alnum_prop": 0.5963728384647828,
"repo_name": "sauloal/cnidaria",
"id": "95e84099c1fe0b85383ea3a6865a7e983162704e",
"size": "2394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/venv/lib/python2.7/site-packages/cogent/app/sfffile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1696790"
},
{
"name": "C++",
"bytes": "3035466"
},
{
"name": "CSS",
"bytes": "20306"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "Groff",
"bytes": "32478"
},
{
"name": "HTML",
"bytes": "19658"
},
{
"name": "JavaScript",
"bytes": "250616"
},
{
"name": "Jupyter Notebook",
"bytes": "8401292"
},
{
"name": "M4",
"bytes": "3905"
},
{
"name": "Makefile",
"bytes": "177650"
},
{
"name": "Objective-C",
"bytes": "1701"
},
{
"name": "Python",
"bytes": "28122291"
},
{
"name": "R",
"bytes": "86108"
},
{
"name": "Shell",
"bytes": "676123"
}
],
"symlink_target": ""
} |
import os
import sys
import socket
import subprocess
import datetime
import signal
import StringIO
import math
import geopm_context
import geopmpy.launcher
from geopmpy.launcher import resource_manager
class TestLauncher(object):
def __init__(self, app_conf, ctl_conf, report_path,
trace_path=None, host_file=None, time_limit=600, region_barrier=False):
self._app_conf = app_conf
self._ctl_conf = ctl_conf
self._report_path = report_path
self._trace_path = trace_path
self._host_file = host_file
self._time_limit = time_limit
self._region_barrier = region_barrier
self._node_list = None
self._pmpi_ctl = 'process'
self._job_name = 'geopm_int_test'
self._timeout = 30
self.set_num_cpu()
self.set_num_rank(16)
self.set_num_node(4)
def set_node_list(self, node_list):
self._node_list = node_list
def set_num_node(self, num_node):
self._num_node = num_node
self.set_cpu_per_rank()
def set_num_rank(self, num_rank):
self._num_rank = num_rank
self.set_cpu_per_rank()
def set_pmpi_ctl(self, pmpi_ctl):
self._pmpi_ctl = pmpi_ctl
def check_run(self, test_name):
with open(test_name + '.log', 'a') as outfile:
argv = ['dummy', 'true']
launcher = geopmpy.launcher.factory(argv, self._num_rank, self._num_node,
self._cpu_per_rank, self._timeout,
self._time_limit, self._job_name,
self._node_list, self._host_file)
launcher.run(stdout=outfile, stderr=outfile)
def run(self, test_name):
self._app_conf.write()
self._ctl_conf.write()
with open(test_name + '.log', 'a') as outfile:
outfile.write(str(datetime.datetime.now()) + '\n')
outfile.flush()
script_dir = os.path.dirname(os.path.realpath(__file__))
# Using libtool causes sporadic issues with the Intel toolchain.
exec_path = os.path.join(script_dir, '.libs', 'geopm_test_integration')
argv = ['dummy', '--geopm-ctl', self._pmpi_ctl,
'--geopm-policy', self._ctl_conf.get_path(),
'--geopm-report', self._report_path,
'--geopm-profile', test_name]
if self._trace_path is not None:
argv.extend(['--geopm-trace', self._trace_path])
if self._region_barrier:
argv.append('--geopm-barrier')
argv.extend([exec_path, '--verbose', self._app_conf.get_path()])
launcher = geopmpy.launcher.factory(argv, self._num_rank, self._num_node, self._cpu_per_rank, self._timeout,
self._time_limit, test_name, self._node_list, self._host_file)
launcher.run(stdout=outfile, stderr=outfile)
def get_report(self):
return Report(self._report_path)
def get_trace(self):
return Trace(self._trace_path)
def get_idle_nodes(self):
argv = ['dummy', 'true']
launcher = geopmpy.launcher.factory(argv, 1, 1)
return launcher.get_idle_nodes()
def get_alloc_nodes(self):
argv = ['dummy', 'true']
launcher = geopmpy.launcher.factory(argv, 1, 1)
return launcher.get_alloc_nodes()
def write_log(self, test_name, message):
with open(test_name + '.log', 'a') as outfile:
outfile.write(message + '\n\n')
def set_num_cpu(self):
        # Figure out the number of CPUs per rank, leaving one for the
        # OS and one (which may or may not be used, depending on pmpi_ctl)
        # for the controller.
argv = ['dummy', 'lscpu']
launcher = geopmpy.launcher.factory(argv, 1, 1)
ostream = StringIO.StringIO()
launcher.run(stdout=ostream)
out = ostream.getvalue()
core_socket = [int(line.split(':')[1])
for line in out.splitlines()
if line.find('Core(s) per socket:') == 0 or
line.find('Socket(s):') == 0]
        # Multiply the number of cores per socket by the number of sockets
        # and remove one CPU for the BSP to calculate the number of CPUs
        # available to the application.  Don't use hyper-threads.
self._num_cpu = core_socket[0] * core_socket[1] - 1
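        # Example (illustrative numbers): a node with 2 sockets and 18 cores per
        # socket gives 2 * 18 - 1 = 35 CPUs available to the application.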
def set_cpu_per_rank(self):
try:
rank_per_node = int(math.ceil(float(self._num_rank) / float(self._num_node)))
self._cpu_per_rank = int(math.floor(self._num_cpu / rank_per_node))
except (AttributeError, TypeError):
pass
| {
"content_hash": "69f61707a89523ee7f7c6ffaea179134",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 120,
"avg_line_length": 39.30578512396694,
"alnum_prop": 0.5578216989066442,
"repo_name": "sssylvester/geopm",
"id": "5ba19a21422da209ef4a4174f82bccc5769e5d85",
"size": "6358",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "test_integration/geopm_test_launcher.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "431969"
},
{
"name": "C++",
"bytes": "961908"
},
{
"name": "Fortran",
"bytes": "497157"
},
{
"name": "M4",
"bytes": "33937"
},
{
"name": "Makefile",
"bytes": "90859"
},
{
"name": "Python",
"bytes": "215047"
},
{
"name": "Shell",
"bytes": "44641"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('distances', '0024_remove_exercise_testtime'),
]
operations = [
migrations.AlterField(
model_name='exercise',
name='sport',
field=models.CharField(choices=[('Running', 'Running'), ('Skiing', 'Skiing'), ('Walking', 'Walking'), ('Cycling', 'Cycling'), ('Swimming', 'Swimming'), ('Rowing', 'Rowing'), ('Other', 'Other')], default='Running', max_length=20),
),
]
| {
"content_hash": "316f32d486cc97a286ad37f17b9692db",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 241,
"avg_line_length": 32.111111111111114,
"alnum_prop": 0.596885813148789,
"repo_name": "tkettu/rokego",
"id": "fbf4b7c4b69a58f3ccff272569e8e435d6fd1935",
"size": "649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "distances/migrations/0025_auto_20170724_2006.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2016"
},
{
"name": "HTML",
"bytes": "22895"
},
{
"name": "JavaScript",
"bytes": "17518"
},
{
"name": "Python",
"bytes": "71608"
}
],
"symlink_target": ""
} |
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = tx_size * fee_per_kB / 1000
if fee < target_fee:
raise AssertionError("Fee of %s LCC too low! (Should be %s LCC)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s LCC too high! (Should be %s LCC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
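# Illustrative usage only (not part of the framework): a test might assert a specific
# RPC failure like this, where `node` and the exact error code/message are hypothetical:
#   assert_raises_rpc_error(-8, "Block height out of range",
#                           node.getblockhash, 1000000)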
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError("Expected substring not found:" + e.error['message'])
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hash256(byte_str):
sha256 = hashlib.sha256()
sha256.update(byte_str)
sha256d = hashlib.sha256()
sha256d.update(sha256.digest())
return sha256d.digest()[::-1]
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
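# Example (illustrative): satoshi_round truncates toward zero at 8 decimal places,
# so satoshi_round("0.123456789") == Decimal('0.12345678').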
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
timeout += time.time()
while attempt < attempts and time.time() < timeout:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.05)
# Print the cause of the timeout
assert_greater_than(attempts, attempt)
assert_greater_than(timeout, time.time())
raise RuntimeError('Unreachable')
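# Illustrative usage only: callers pass a zero-argument predicate, e.g.
#   wait_until(lambda: node.getblockcount() >= 200, timeout=30)
# where `node` is a hypothetical test node proxy.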
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
        AuthServiceProxy: a convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
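# Illustrative usage only (names are hypothetical): a proxy for node 0 could be built with
#   proxy = get_rpc_proxy(rpc_url(datadir, 0), 0, timeout=30, coveragedir=None)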
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
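# Worked example (illustrative): with PortSeed.n == 1, node 0 gets
#   p2p_port(0) == 11000 + 0 + (8 * 1) % 4991 == 11008
#   rpc_port(0) == 11000 + 5000 + 0 + (8 * 1) % 4991 == 16008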
def rpc_url(datadir, i, rpchost=None):
rpc_u, rpc_p = get_auth_cookie(datadir)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node" + str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "maza.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("listenonion=0\n")
return datadir
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def get_auth_cookie(datadir):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "maza.conf")):
with open(os.path.join(datadir, "maza.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
with open(os.path.join(datadir, "regtest", ".cookie"), 'r') as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node" + str(n_node), "regtest", logname)
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
from_connection.disconnectnode(nodeid=peer_id)
for _ in range(50):
if [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == []:
break
time.sleep(0.1)
else:
raise AssertionError("timed out waiting for disconnect")
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
    sync_blocks needs to be called with an rpc_connections set that has at least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
# Use getblockcount() instead of waitforblockheight() to determine the
# initial max height because the two RPCs look at different internal global
# variables (chainActive vs latestBlock) and the former gets updated
# earlier.
maxheight = max(x.getblockcount() for x in rpc_connections)
start_time = cur_time = time.time()
while cur_time <= start_time + timeout:
tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
if all(t["height"] == maxheight for t in tips):
if all(t["hash"] == tips[0]["hash"] for t in tips):
return
raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
"".join("\n {!r}".format(tip) for tip in tips)))
cur_time = time.time()
raise AssertionError("Block sync to height {} timed out:{}".format(
maxheight, "".join("\n {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same best block
"""
while timeout > 0:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash == [best_hash[0]] * len(best_hash):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while timeout > 0:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match + 1
if num_match == len(rpc_connections):
if flush_scheduler:
for r in rpc_connections:
r.syncwithvalidationinterfacequeue()
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
# Transaction/Block functions
#############################
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
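# Worked example (illustrative figures): with amount_in=20, amount_out=3 and fee=1 the
# spend is 4; the change of 16 exceeds twice the spend, so it is split into two outputs
# of 8 each, paid to fresh addresses obtained from from_node.getnewaddress().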
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
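# Illustrative usage only (the fee value is hypothetical): a mempool test could prepare
# 50 spendable outputs with
#   utxos = create_confirmed_utxos(Decimal("0.0002"), node, 50)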
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
def create_tx(node, coinbase, to_address, amount):
inputs = [{"txid": coinbase, "vout": 0}]
outputs = {to_address: amount}
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
# generate a 66k transaction,
    # and 14 of them together come close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
| {
"content_hash": "cadf7711c4a79bc3de84f6bb444a7352",
"timestamp": "",
"source": "github",
"line_count": 567,
"max_line_length": 119,
"avg_line_length": 38.045855379188716,
"alnum_prop": 0.6341553866122752,
"repo_name": "MazaCoin/maza",
"id": "2dbc60f88b266f89b2f3d733291c48e50a565e85",
"size": "21786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/test_framework/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "4236272"
},
{
"name": "C++",
"bytes": "7018978"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "195177"
},
{
"name": "Makefile",
"bytes": "116362"
},
{
"name": "Objective-C++",
"bytes": "6747"
},
{
"name": "Python",
"bytes": "1298620"
},
{
"name": "QMake",
"bytes": "782"
},
{
"name": "Sage",
"bytes": "30188"
},
{
"name": "Shell",
"bytes": "69703"
}
],
"symlink_target": ""
} |
"""
Tests for stuff in django.utils.datastructures.
"""
import copy
import pickle
from django.test import SimpleTestCase
from django.utils.datastructures import *
class SortedDictTests(SimpleTestCase):
def setUp(self):
self.d1 = SortedDict()
self.d1[7] = 'seven'
self.d1[1] = 'one'
self.d1[9] = 'nine'
self.d2 = SortedDict()
self.d2[1] = 'one'
self.d2[9] = 'nine'
self.d2[0] = 'nil'
self.d2[7] = 'seven'
def test_basic_methods(self):
self.assertEqual(self.d1.keys(), [7, 1, 9])
self.assertEqual(self.d1.values(), ['seven', 'one', 'nine'])
self.assertEqual(self.d1.items(), [(7, 'seven'), (1, 'one'), (9, 'nine')])
def test_overwrite_ordering(self):
""" Overwriting an item keeps it's place. """
self.d1[1] = 'ONE'
self.assertEqual(self.d1.values(), ['seven', 'ONE', 'nine'])
def test_append_items(self):
""" New items go to the end. """
self.d1[0] = 'nil'
self.assertEqual(self.d1.keys(), [7, 1, 9, 0])
def test_delete_and_insert(self):
"""
Deleting an item, then inserting the same key again will place it
at the end.
"""
del self.d2[7]
self.assertEqual(self.d2.keys(), [1, 9, 0])
self.d2[7] = 'lucky number 7'
self.assertEqual(self.d2.keys(), [1, 9, 0, 7])
def test_change_keys(self):
"""
        Changing the keys won't do anything; it's only a copy of the keys dict.
keys dict.
"""
k = self.d2.keys()
k.remove(9)
self.assertEqual(self.d2.keys(), [1, 9, 0, 7])
def test_init_keys(self):
"""
        Initialising a SortedDict with a duplicate key keeps the position of the first occurrence.
A real dict will actually take the second value so we will too, but
we'll keep the ordering from the first key found.
"""
tuples = ((2, 'two'), (1, 'one'), (2, 'second-two'))
d = SortedDict(tuples)
self.assertEqual(d.keys(), [2, 1])
real_dict = dict(tuples)
self.assertEqual(sorted(real_dict.values()), ['one', 'second-two'])
# Here the order of SortedDict values *is* what we are testing
self.assertEqual(d.values(), ['second-two', 'one'])
def test_overwrite(self):
self.d1[1] = 'not one'
self.assertEqual(self.d1[1], 'not one')
self.assertEqual(self.d1.keys(), self.d1.copy().keys())
def test_append(self):
self.d1[13] = 'thirteen'
self.assertEqual(
repr(self.d1),
"{7: 'seven', 1: 'one', 9: 'nine', 13: 'thirteen'}"
)
def test_pop(self):
self.assertEqual(self.d1.pop(1, 'missing'), 'one')
self.assertEqual(self.d1.pop(1, 'missing'), 'missing')
# We don't know which item will be popped in popitem(), so we'll
# just check that the number of keys has decreased.
l = len(self.d1)
self.d1.popitem()
self.assertEqual(l - len(self.d1), 1)
def test_dict_equality(self):
d = SortedDict((i, i) for i in xrange(3))
self.assertEqual(d, {0: 0, 1: 1, 2: 2})
def test_tuple_init(self):
d = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
self.assertEqual(repr(d), "{1: 'one', 0: 'zero', 2: 'two'}")
def test_pickle(self):
self.assertEqual(
pickle.loads(pickle.dumps(self.d1, 2)),
{7: 'seven', 1: 'one', 9: 'nine'}
)
def test_clear(self):
self.d1.clear()
self.assertEqual(self.d1, {})
self.assertEqual(self.d1.keyOrder, [])
class MergeDictTests(SimpleTestCase):
def test_simple_mergedict(self):
d1 = {'chris':'cool', 'camri':'cute', 'cotton':'adorable',
'tulip':'snuggable', 'twoofme':'firstone'}
d2 = {'chris2':'cool2', 'camri2':'cute2', 'cotton2':'adorable2',
'tulip2':'snuggable2'}
d3 = {'chris3':'cool3', 'camri3':'cute3', 'cotton3':'adorable3',
'tulip3':'snuggable3'}
d4 = {'twoofme': 'secondone'}
md = MergeDict(d1, d2, d3)
self.assertEqual(md['chris'], 'cool')
self.assertEqual(md['camri'], 'cute')
self.assertEqual(md['twoofme'], 'firstone')
md2 = md.copy()
self.assertEqual(md2['chris'], 'cool')
def test_mergedict_merges_multivaluedict(self):
""" MergeDict can merge MultiValueDicts """
multi1 = MultiValueDict({'key1': ['value1'],
'key2': ['value2', 'value3']})
multi2 = MultiValueDict({'key2': ['value4'],
'key4': ['value5', 'value6']})
mm = MergeDict(multi1, multi2)
# Although 'key2' appears in both dictionaries,
# only the first value is used.
self.assertEqual(mm.getlist('key2'), ['value2', 'value3'])
self.assertEqual(mm.getlist('key4'), ['value5', 'value6'])
self.assertEqual(mm.getlist('undefined'), [])
self.assertEqual(sorted(mm.keys()), ['key1', 'key2', 'key4'])
self.assertEqual(len(mm.values()), 3)
self.assertTrue('value1' in mm.values())
self.assertEqual(sorted(mm.items(), key=lambda k: k[0]),
[('key1', 'value1'), ('key2', 'value3'),
('key4', 'value6')])
self.assertEqual([(k,mm.getlist(k)) for k in sorted(mm)],
[('key1', ['value1']),
('key2', ['value2', 'value3']),
('key4', ['value5', 'value6'])])
class MultiValueDictTests(SimpleTestCase):
def test_multivaluedict(self):
d = MultiValueDict({'name': ['Adrian', 'Simon'],
'position': ['Developer']})
self.assertEqual(d['name'], 'Simon')
self.assertEqual(d.get('name'), 'Simon')
self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])
self.assertEqual(list(d.iteritems()),
[('position', 'Developer'), ('name', 'Simon')])
self.assertEqual(list(d.iterlists()),
[('position', ['Developer']),
('name', ['Adrian', 'Simon'])])
# MultiValueDictKeyError: "Key 'lastname' not found in
# <MultiValueDict: {'position': ['Developer'],
# 'name': ['Adrian', 'Simon']}>"
self.assertRaisesMessage(MultiValueDictKeyError,
'"Key \'lastname\' not found in <MultiValueDict: {\'position\':'\
' [\'Developer\'], \'name\': [\'Adrian\', \'Simon\']}>"',
d.__getitem__, 'lastname')
self.assertEqual(d.get('lastname'), None)
self.assertEqual(d.get('lastname', 'nonexistent'), 'nonexistent')
self.assertEqual(d.getlist('lastname'), [])
self.assertEqual(d.getlist('doesnotexist', ['Adrian', 'Simon']),
['Adrian', 'Simon'])
d.setlist('lastname', ['Holovaty', 'Willison'])
self.assertEqual(d.getlist('lastname'), ['Holovaty', 'Willison'])
self.assertEqual(d.values(), ['Developer', 'Simon', 'Willison'])
self.assertEqual(list(d.itervalues()),
['Developer', 'Simon', 'Willison'])
def test_copy(self):
for copy_func in [copy.copy, lambda d: d.copy()]:
d1 = MultiValueDict({
"developers": ["Carl", "Fred"]
})
self.assertEqual(d1["developers"], "Fred")
d2 = copy_func(d1)
d2.update({"developers": "Groucho"})
self.assertEqual(d2["developers"], "Groucho")
self.assertEqual(d1["developers"], "Fred")
d1 = MultiValueDict({
"key": [[]]
})
self.assertEqual(d1["key"], [])
d2 = copy_func(d1)
d2["key"].append("Penguin")
self.assertEqual(d1["key"], ["Penguin"])
self.assertEqual(d2["key"], ["Penguin"])
def test_dict_translation(self):
mvd = MultiValueDict({
'devs': ['Bob', 'Joe'],
'pm': ['Rory'],
})
d = mvd.dict()
self.assertEqual(d.keys(), mvd.keys())
for key in mvd.keys():
self.assertEqual(d[key], mvd[key])
self.assertEqual({}, MultiValueDict().dict())
class DotExpandedDictTests(SimpleTestCase):
def test_dotexpandeddict(self):
d = DotExpandedDict({'person.1.firstname': ['Simon'],
'person.1.lastname': ['Willison'],
'person.2.firstname': ['Adrian'],
'person.2.lastname': ['Holovaty']})
self.assertEqual(d['person']['1']['lastname'], ['Willison'])
self.assertEqual(d['person']['2']['lastname'], ['Holovaty'])
self.assertEqual(d['person']['2']['firstname'], ['Adrian'])
class ImmutableListTests(SimpleTestCase):
def test_sort(self):
d = ImmutableList(range(10))
# AttributeError: ImmutableList object is immutable.
self.assertRaisesMessage(AttributeError,
'ImmutableList object is immutable.', d.sort)
self.assertEqual(repr(d), '(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)')
def test_custom_warning(self):
d = ImmutableList(range(10), warning="Object is immutable!")
self.assertEqual(d[1], 1)
# AttributeError: Object is immutable!
self.assertRaisesMessage(AttributeError,
'Object is immutable!', d.__setitem__, 1, 'test')
class DictWrapperTests(SimpleTestCase):
def test_dictwrapper(self):
f = lambda x: "*%s" % x
d = DictWrapper({'a': 'a'}, f, 'xx_')
self.assertEqual("Normal: %(a)s. Modified: %(xx_a)s" % d,
'Normal: a. Modified: *a')
| {
"content_hash": "cb8b9a537edb3c7510db0a122ef9959c",
"timestamp": "",
"source": "github",
"line_count": 282,
"max_line_length": 82,
"avg_line_length": 34.709219858156025,
"alnum_prop": 0.530036779730282,
"repo_name": "disqus/django-old",
"id": "d86a3c8fb8bf63bd97ba34a8f6fed7785eead50e",
"size": "9788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/regressiontests/utils/datastructures.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "85749"
},
{
"name": "Python",
"bytes": "7413553"
},
{
"name": "Shell",
"bytes": "9076"
}
],
"symlink_target": ""
} |
import sys
import warnings
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from airflow.utils.context import Context
if sys.version_info >= (3, 8):
from functools import cached_property
else:
from cached_property import cached_property
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.glue_crawler import GlueCrawlerHook
class GlueCrawlerOperator(BaseOperator):
"""
Creates, updates and triggers an AWS Glue Crawler. AWS Glue Crawler is a serverless
service that manages a catalog of metadata tables that contain the inferred
schema, format and data types of data stores within the AWS cloud.
:param config: Configurations for the AWS Glue crawler
:type config: dict
:param aws_conn_id: aws connection to use
:type aws_conn_id: Optional[str]
:param poll_interval: Time (in seconds) to wait between two consecutive calls to check crawler status
:type poll_interval: Optional[int]
"""
ui_color = '#ededed'
def __init__(
self,
config,
aws_conn_id='aws_default',
poll_interval: int = 5,
**kwargs,
):
super().__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.poll_interval = poll_interval
self.config = config
@cached_property
def hook(self) -> GlueCrawlerHook:
"""Create and return an GlueCrawlerHook."""
return GlueCrawlerHook(self.aws_conn_id)
def execute(self, context: 'Context'):
"""
Executes AWS Glue Crawler from Airflow
:return: the name of the current glue crawler.
"""
crawler_name = self.config['Name']
if self.hook.has_crawler(crawler_name):
self.hook.update_crawler(**self.config)
else:
self.hook.create_crawler(**self.config)
self.log.info("Triggering AWS Glue Crawler")
self.hook.start_crawler(crawler_name)
self.log.info("Waiting for AWS Glue Crawler")
self.hook.wait_for_crawler_completion(crawler_name=crawler_name, poll_interval=self.poll_interval)
return crawler_name
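# Illustrative usage only; the task id, IAM role and S3 path below are hypothetical,
# not part of this module:
#
#   crawl_s3 = GlueCrawlerOperator(
#       task_id="crawl_example_bucket",
#       config={
#           "Name": "example-crawler",
#           "Role": "example-glue-role",
#           "Targets": {"S3Targets": [{"Path": "s3://example-bucket/raw/"}]},
#       },
#       poll_interval=10,
#   )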
class AwsGlueCrawlerOperator(GlueCrawlerOperator):
"""
This operator is deprecated.
Please use :class:`airflow.providers.amazon.aws.operators.glue_crawler.GlueCrawlerOperator`.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"This operator is deprecated. "
"Please use :class:`airflow.providers.amazon.aws.operators.glue_crawler.GlueCrawlerOperator`.",
DeprecationWarning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
| {
"content_hash": "88ebae410659aa45f7eeb40cc73d41c7",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 107,
"avg_line_length": 31.55952380952381,
"alnum_prop": 0.6593738211995473,
"repo_name": "mistercrunch/airflow",
"id": "b35e3d034231d54480b0e9866d19bc4624d31dfe",
"size": "3438",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/providers/amazon/aws/operators/glue_crawler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36341"
},
{
"name": "HTML",
"bytes": "99243"
},
{
"name": "JavaScript",
"bytes": "891460"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "773270"
},
{
"name": "Shell",
"bytes": "5659"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('product', '0003_sale_amount'),
('activity', '0005_conclusion'),
]
operations = [
migrations.CreateModel(
name='Itinerary',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('recorded_date', models.DateField(default=datetime.date.today)),
('places', models.TextField(blank=True)),
('rep', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.Rep')),
],
),
migrations.CreateModel(
name='Summary',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start_date', models.DateField()),
('end_date', models.DateField()),
('outstanding', models.IntegerField(default=0)),
('report', models.TextField(blank=True)),
('rep', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.Rep')),
],
),
]
| {
"content_hash": "9d215fec53a7521d4ed3d898d4022f30",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 114,
"avg_line_length": 37.02777777777778,
"alnum_prop": 0.5693923480870218,
"repo_name": "boyombo/pharmrep",
"id": "1ee7e027db7fd1a961723612651bcae676f9371d",
"size": "1405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pharmrep/activity/migrations/0006_itinerary_summary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12020"
},
{
"name": "HTML",
"bytes": "80266"
},
{
"name": "JavaScript",
"bytes": "65316"
},
{
"name": "Python",
"bytes": "59359"
}
],
"symlink_target": ""
} |