| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from django.db import migrations, models, connection
from django.utils.translation import activate, _trans
from tenant_extras.middleware import tenant_translation
from parler.models import TranslatableModelMixin
def remove_old_statistic_block_from_homepage(apps, schema_editor):
StatsContent = apps.get_model('cms', 'StatsContent')
ContentType = apps.get_model('contenttypes', 'ContentType')
for stats_content in StatsContent.objects.all():
if stats_content.placeholder and stats_content.placeholder.parent_type.model == 'homepage':
stats_content.stats.all().delete()
with connection.cursor() as c:
c.execute(
'delete from contentitem_cms_statscontent where contentitem_ptr_id = {};'.format(
stats_content.contentitem_ptr_id
)
)
class Migration(migrations.Migration):
dependencies = [
('cms', '0061_auto_20200812_1030'),
]
operations = [
migrations.RunPython(
remove_old_statistic_block_from_homepage,
migrations.RunPython.noop
)
]
| {
"content_hash": "a470945d9141ff2fa667b40e349a76be",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 101,
"avg_line_length": 34.388888888888886,
"alnum_prop": 0.6591276252019386,
"repo_name": "onepercentclub/bluebottle",
"id": "3bf3fa6da98891e9a57886ea84af2ef2e2540fc5",
"size": "1312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/cms/migrations/0062_auto_20200812_1514.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
} |
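A side note on the raw SQL in the migration above: Django cursors accept query parameters, so the same delete can be expressed without formatting the id into the SQL string. A minimal sketch of that variant (hypothetical, not part of the original file):

```python
from django.db import connection

def delete_stats_contentitem(contentitem_ptr_id):
    # Parameterized query: the database driver substitutes %s safely,
    # so the id is never interpolated into the SQL text itself.
    with connection.cursor() as cursor:
        cursor.execute(
            'DELETE FROM contentitem_cms_statscontent WHERE contentitem_ptr_id = %s',
            [contentitem_ptr_id],
        )
```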
from rope.base import (change, taskhandle, evaluate,
exceptions, pyobjects, pynames, ast)
from rope.refactor import restructure, sourceutils, similarfinder
class UseFunction(object):
"""Try to use a function wherever possible"""
def __init__(self, project, resource, offset):
self.project = project
self.offset = offset
this_pymodule = project.pycore.resource_to_pyobject(resource)
pyname = evaluate.eval_location(this_pymodule, offset)
if pyname is None:
raise exceptions.RefactoringError('Unresolvable name selected')
self.pyfunction = pyname.get_object()
if not isinstance(self.pyfunction, pyobjects.PyFunction) or \
not isinstance(self.pyfunction.parent, pyobjects.PyModule):
raise exceptions.RefactoringError(
'Use function works for global functions, only.')
self.resource = self.pyfunction.get_module().get_resource()
self._check_returns()
def _check_returns(self):
node = self.pyfunction.get_ast()
if _yield_count(node):
raise exceptions.RefactoringError('Use function should not '
'be used on generators.')
returns = _return_count(node)
if returns > 1:
raise exceptions.RefactoringError('usefunction: Function has more '
'than one return statement.')
if returns == 1 and not _returns_last(node):
raise exceptions.RefactoringError('usefunction: return should '
'be the last statement.')
def get_changes(self, resources=None,
task_handle=taskhandle.NullTaskHandle()):
if resources is None:
resources = self.project.pycore.get_python_files()
changes = change.ChangeSet('Using function <%s>' %
self.pyfunction.get_name())
if self.resource in resources:
newresources = list(resources)
newresources.remove(self.resource)
for c in self._restructure(newresources, task_handle).changes:
changes.add_change(c)
if self.resource in resources:
for c in self._restructure([self.resource], task_handle,
others=False).changes:
changes.add_change(c)
return changes
def get_function_name(self):
return self.pyfunction.get_name()
def _restructure(self, resources, task_handle, others=True):
pattern = self._make_pattern()
goal = self._make_goal(import_=others)
imports = None
if others:
imports = ['import %s' % self._module_name()]
body_region = sourceutils.get_body_region(self.pyfunction)
args_value = {'skip': (self.resource, body_region)}
args = {'': args_value}
restructuring = restructure.Restructure(
self.project, pattern, goal, args=args, imports=imports)
return restructuring.get_changes(resources=resources,
task_handle=task_handle)
def _find_temps(self):
return find_temps(self.project, self._get_body())
def _module_name(self):
return self.project.pycore.modname(self.resource)
def _make_pattern(self):
params = self.pyfunction.get_param_names()
body = self._get_body()
body = restructure.replace(body, 'return', 'pass')
wildcards = list(params)
wildcards.extend(self._find_temps())
if self._does_return():
if self._is_expression():
replacement = '${%s}' % self._rope_returned
else:
replacement = '%s = ${%s}' % (self._rope_result,
self._rope_returned)
body = restructure.replace(
body, 'return ${%s}' % self._rope_returned,
replacement)
wildcards.append(self._rope_result)
return similarfinder.make_pattern(body, wildcards)
def _get_body(self):
return sourceutils.get_body(self.pyfunction)
def _make_goal(self, import_=False):
params = self.pyfunction.get_param_names()
function_name = self.pyfunction.get_name()
if import_:
function_name = self._module_name() + '.' + function_name
goal = '%s(%s)' % (function_name,
', ' .join(('${%s}' % p) for p in params))
if self._does_return() and not self._is_expression():
goal = '${%s} = %s' % (self._rope_result, goal)
return goal
def _does_return(self):
body = self._get_body()
removed_return = restructure.replace(body, 'return ${result}', '')
return removed_return != body
def _is_expression(self):
return len(self.pyfunction.get_ast().body) == 1
_rope_result = '_rope__result'
_rope_returned = '_rope__returned'
def find_temps(project, code):
code = 'def f():\n' + sourceutils.indent_lines(code, 4)
pymodule = project.pycore.get_string_module(code)
result = []
function_scope = pymodule.get_scope().get_scopes()[0]
for name, pyname in function_scope.get_names().items():
if isinstance(pyname, pynames.AssignedName):
result.append(name)
return result
def _returns_last(node):
return node.body and isinstance(node.body[-1], ast.Return)
def _yield_count(node):
visitor = _ReturnOrYieldFinder()
visitor.start_walking(node)
return visitor.yields
def _return_count(node):
visitor = _ReturnOrYieldFinder()
visitor.start_walking(node)
return visitor.returns
class _ReturnOrYieldFinder(object):
def __init__(self):
self.returns = 0
self.yields = 0
def _Return(self, node):
self.returns += 1
def _Yield(self, node):
self.yields += 1
def _FunctionDef(self, node):
pass
def _ClassDef(self, node):
pass
def start_walking(self, node):
nodes = [node]
if isinstance(node, ast.FunctionDef):
nodes = ast.get_child_nodes(node)
for child in nodes:
ast.walk(child, self)
| {
"content_hash": "086ca12b7b080a52c5f8168c3749e5ad",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 79,
"avg_line_length": 36.29479768786127,
"alnum_prop": 0.5832138875617137,
"repo_name": "sreejithr/emacs.d",
"id": "1a147ab234f982eaecf2e3ce3e8002bfede04eb2",
"size": "6279",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyenv/emacs/lib/python2.7/site-packages/rope/refactor/usefunction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Emacs Lisp",
"bytes": "3742112"
},
{
"name": "Python",
"bytes": "1767564"
},
{
"name": "Shell",
"bytes": "251933"
}
],
"symlink_target": ""
} |
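For context, a minimal usage sketch of the refactoring defined above, assuming a rope project whose module mod.py defines a global function named double (the path, module name, and offset computation are illustrative):

```python
from rope.base.project import Project
from rope.refactor.usefunction import UseFunction

project = Project('.')                      # open a rope project rooted at the current directory
resource = project.get_resource('mod.py')   # module that defines the global function
offset = resource.read().index('def double') + len('def ')  # cursor on the function name

use_function = UseFunction(project, resource, offset)
changes = use_function.get_changes()        # rewrites matching code to call double() instead
project.do(changes)                         # apply the generated change set
project.close()
```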
from vatsystem.model import *
from vatsystem.util import const
from vatsystem.widgets.components import *
__all__ = [
"ohead_search_form",
"nhead_search_form",
"po_search_form",
"charge_search_form",
"variance_search_form"
]
class OheadSearchForm(RPACNoForm):
fields = [
RPACAjaxText("customer_code", label_text="Supplier Code"),
RPACAjaxText("customer_name", label_text="Supplier Name"),
RPACSelect("status",attrs={'VISIBILITY':'visible'}, label_text="Status", options=const.VAT_THEAD_STATUS_LIST),
RPACAjaxText("invoice_no", label_text="SI Number"),
RPACAjaxText("sales_contract_no", label_text="SO Number"),
RPACAjaxText("thead_ref", label_text="MSI/MSO Ref"),
RPACAjaxText("ref", label_text="Reference No"),
RPACAjaxText("vat_no", label_text="VAT No"),
RPACAjaxText("item_code", label_text="Item Code"),
RPACAjaxText("create_by_id", label_text="Created By"),
RPACCalendarPicker("date_from", attrs={'style':'width:80px','id':'theadStartDate'}, label_text="Date From"),
RPACCalendarPicker("date_to", attrs={'style':'width:80px','id':'theadEndDate'}, label_text="Date To"),
]
class NHeadSearchForm(RPACNoForm):
fields = [
RPACAjaxText("customer_code", label_text="Supplier Code"),
RPACAjaxText("customer_name", label_text="Supplier Name"),
RPACSelect("status", attrs={'VISIBILITY':'visible'}, label_text="Status", options=const.VAT_CHEAD_STATUS_LIST),
RPACAjaxText("invoice_no", label_text="SI Number"),
RPACAjaxText("sales_contract_no", label_text="SO Number"),
RPACAjaxText("chead_ref", label_text="MCN Ref"),
RPACAjaxText("ref", label_text="Reference No"),
RPACAjaxText("vat_no", label_text="VAT No"),
RPACAjaxText("item_code", label_text="Item Code"),
RPACAjaxText("create_by_id", label_text="Created By"),
RPACCalendarPicker("date_from", attrs={'style':'width:80px','id':'cheadStartDate'}, label_text="Date From"),
RPACCalendarPicker("date_to", attrs={'style':'width:80px','id':'cheadEndDate'}, label_text="Date To"),
]
class POSearchForm(RPACNoForm):
fields = [
RPACAjaxText("customer_code", label_text="Customer Code"),
RPACAjaxText("customer_name", label_text="Customer Name"),
RPACAjaxText("ref", label_text="Ref"),
RPACAjaxText("item_no", label_text="Item Code"),
RPACAjaxText("po_no", label_text="PO NO"),
RPACAjaxText("pi_no", label_text="PI NO"),
RPACAjaxText("sales_contract_no", label_text="SO Number"),
RPACAjaxText("invoice_no", label_text="SI Number"),
RPACCalendarPicker("date_from", attrs={'style':'width:80px','id':'poStartDate'}, label_text="Date From"),
RPACCalendarPicker("date_to", attrs={'style':'width:80px','id':'poEndDate'}, label_text="Date To"),
]
class ChargeSearchForm(RPACNoForm):
fields = [
RPACAjaxText("customer_code", label_text="Customer Code"),
RPACAjaxText("customer_name", label_text="Customer Name"),
RPACAjaxText("ref", label_text="Ref"),
RPACAjaxText("item_no", label_text="Item Code"),
RPACAjaxText("po_no", label_text="PO NO"),
RPACAjaxText("pi_no", label_text="PI NO"),
RPACAjaxText("sales_contract_no", label_text="SO Number"),
RPACAjaxText("invoice_no", label_text="SI Number"),
RPACCalendarPicker("date_from", attrs={'style':'width:80px','id':'chargeStartDate'}, label_text="Date From"),
RPACCalendarPicker("date_to", attrs={'style':'width:80px','id':'chargeEndDate'}, label_text="Date To"),
]
class VarianceSearchForm(RPACNoForm):
fields = [
RPACAjaxText("pi_no", label_text="PI No"),
RPACAjaxText("note_no", label_text="Note No"),
#RPACCalendarPicker("date_from", attrs={'style':'width:80px','id':'varianceStartDate'}, label_text="Date From"),
#RPACCalendarPicker("date_to", attrs={'style':'width:80px','id':'varianceEndDate'}, label_text="Date To"),
]
ohead_search_form = OheadSearchForm()
nhead_search_form = NHeadSearchForm()
po_search_form = POSearchForm()
charge_search_form = ChargeSearchForm()
variance_search_form = VarianceSearchForm()
| {
"content_hash": "d128ad42bbadea782a9e43b6785e88b2",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 120,
"avg_line_length": 50.741176470588236,
"alnum_prop": 0.6415488059355438,
"repo_name": "LamCiuLoeng/vat",
"id": "4560cd1cd577c322b6f058c07456cacc3649eec4",
"size": "4330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vatsystem/widgets/cost.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "49976"
},
{
"name": "Java",
"bytes": "8353"
},
{
"name": "JavaScript",
"bytes": "319365"
},
{
"name": "Python",
"bytes": "821950"
}
],
"symlink_target": ""
} |
from django.db import models
from django.conf import settings
from django.core.urlresolvers import reverse
class Phenotype(models.Model):
"""
Phenotype model, if possible links to AraPheno
"""
name = models.CharField(max_length=255) # name of phenotype
study_name = models.CharField(max_length=255, default = "")
description = models.TextField(blank=True, null=True) # short description
date = models.DateTimeField(blank= True, null=True) # date of creation/update
# to = models.CharField(max_length=255) # Trait ontology that regroups similar phenotypes TODO: add trait ontology to all phenotypes
arapheno_link = models.URLField(blank=True, null=True) # link to phenotype entry in AraPheno
trait_ontology_id = models.CharField(max_length=50, default="")
trait_ontology_name = models.CharField(max_length=255, default="")
trait_ontology_description = models.CharField(max_length=255, default="", null=True)
@property
def doi(self):
"""Returns the DOI"""
return '%s/phenotype:%s' % (settings.DATACITE_PREFIX, self.id)
def __str__(self):
return "Phenotype: %s (%s)" % (self.name, self.study_name)
class Study(models.Model):
"""
GWA Study model, associated with ONE phenotype, if possible links to easyGWAS
"""
name = models.CharField(max_length=255) # name of the study
transformation = models.CharField(max_length=255) # transformation used prior to GWAS (log, sqrt, box-cox, etc)
genotype = models.ForeignKey("Genotype") # foreign key to a Genotype
    phenotype = models.ForeignKey("Phenotype", null=True) # foreign key to the phenotype of interest
method = models.CharField(max_length=255) # method used to individuate associations (LM, KW, LMM, etc)
publication = models.URLField(blank=True, null=True) # link to a DOI for a published study
publication_name = models.CharField(max_length=255, blank=True, null=True) # internal name of the publication
publication_pmid = models.CharField(max_length=255, blank=True, null=True) # pubmed id of publication
publication_pmcid = models.CharField(max_length=255, blank=True, null=True) # pubmed central id of publication
number_samples = models.IntegerField(blank=True, null=True) # number of samples used in the GWAS
number_countries = models.IntegerField(blank=True, null=True) # number of countries of origin for the various accessions
n_hits_thr = models.IntegerField(blank=True, null=True) # number of hits with 1e-4 threshold
    n_hits_bonf = models.IntegerField(blank=True, null=True) # number of hits with Bonferroni threshold
n_hits_fdr = models.IntegerField(blank=True, null=True) # number of hits above FDR (benjamini-hochberg) threshold
n_hits_perm = models.IntegerField(blank=True, null=True) # number of hits with permutation threshold
bh_threshold = models.FloatField(blank=True, null=True) # FDR threshold
bonferroni_threshold = models.FloatField(blank=True, null=True) # bonferroni threshold
permutation_threshold = models.FloatField(blank=True, null=True) # permutation threshold
n_hits_total = models.IntegerField(blank=True, null=True) # total number of associations
create_date = models.DateTimeField(auto_now_add=True)
update_date = models.DateTimeField(default=None, null=True, blank=True)
def get_absolute_url(self):
"""returns the submission page or study detail page"""
url = reverse('index')
return url + "#/study/%s" % self.pk
@property
def doi(self):
"""Returns the DOI"""
return '%s/gwas:%s' % (settings.DATACITE_PREFIX, self.id)
@property
def doi_link(self):
"""Returns the DOI link to datacite"""
return '%s/%s' % (settings.DATACITE_DOI_URL, self.doi)
def __str__(self):
return "Study: %s" % (self.name)
# TODO add number of markers as field and DOI for publication
# how to deal with versioning (maybe via N:M table)
class Genotype(models.Model):
"""
Genotype model, specific to the dataset used for a particular GWAS
"""
name = models.CharField(max_length=255) # name of the genotype
description = models.TextField(blank=True, null=True) # short description
version = models.CharField(max_length=255) # version of the dataset
def __str__(self):
return u"{} {}".format(self.name, self.version)
| {
"content_hash": "89d5cb04a4cf158716ed9a5112ebf3aa",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 136,
"avg_line_length": 52.19047619047619,
"alnum_prop": 0.7057481751824818,
"repo_name": "1001genomes/AraGWAS",
"id": "79801bccb574d7e446f7f130e9aa5b5ff1851c3e",
"size": "4384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aragwas_server/gwasdb/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "376"
},
{
"name": "Dockerfile",
"bytes": "1310"
},
{
"name": "HTML",
"bytes": "407"
},
{
"name": "JavaScript",
"bytes": "212102"
},
{
"name": "Python",
"bytes": "218225"
},
{
"name": "Shell",
"bytes": "7351"
},
{
"name": "TypeScript",
"bytes": "20680"
},
{
"name": "Vue",
"bytes": "290140"
}
],
"symlink_target": ""
} |
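A small sketch of how the models above are typically queried and how the doi property composes; the import path follows the file's location in the repository, and the lookup value is illustrative:

```python
from gwasdb.models import Study

# Studies attached to a phenotype whose name mentions "flowering",
# most Bonferroni-significant hits first.
studies = (
    Study.objects
    .filter(phenotype__name__icontains='flowering')
    .order_by('-n_hits_bonf')
)

for study in studies:
    # doi is "<DATACITE_PREFIX>/gwas:<id>", as defined on the model.
    print(study.name, study.doi)
```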
from unittest.mock import Mock, patch
from django.test import TestCase
from data_refinery_common import utils
class UtilsTestCase(TestCase):
@patch('data_refinery_common.utils.requests.get')
def test_get_instance_id_cloud(self, mock_get):
"""Test that a request is made and the global value is stored"""
# Ensure utils.INSTANCE_ID hasn't been set yet in case the
# order the tests are run in ever changes
utils.INSTANCE_ID = None
mock_get.return_value = Mock(ok=True)
mock_get.return_value.text = "instance_id"
with self.settings(RUNNING_IN_CLOUD=True):
self.assertEqual(utils.get_instance_id(), "instance_id")
# Ensure that the second call uses the now-set global value.
# (By resetting the mocks, calling it again, and checking that
# the values didn't need to be set again).
mock_get.reset_mock()
utils.get_instance_id()
mock_get.assert_not_called()
def test_get_instance_id_local(self):
"""Test that local is used for instance id."""
# Ensure utils.INSTANCE_ID hasn't been set yet in case the
# order the tests are run in ever changes
utils.INSTANCE_ID = None
with self.settings(RUNNING_IN_CLOUD=False):
self.assertEqual(utils.get_instance_id(), "local")
# Ensure that the second call uses the now-set global value
# by changing what settings would tell it.
with self.settings(RUNNING_IN_CLOUD=True):
self.assertEqual(utils.get_instance_id(), "local")
def test_supported_microarray_platforms(self):
"""Test that supported microarray platforms setting is set correctly."""
supported_microarray_platforms = utils.get_supported_microarray_platforms()
has_equgene11st = False
has_A_AFFY_59 = False
has_GPL23026 = False
has_AGEOD23026 = False
for platform in supported_microarray_platforms:
if platform["platform_accession"] == "equgene11st" and platform["is_brainarray"]:
has_equgene11st = True
if platform["external_accession"] == "A-AFFY-59" and not platform["is_brainarray"]:
has_A_AFFY_59 = True
if platform["external_accession"] == "GPL23026" and not platform["is_brainarray"]:
has_GPL23026 = True
if platform["external_accession"] == "A-GEOD-23026" and not platform["is_brainarray"]:
has_AGEOD23026 = True
self.assertTrue(has_equgene11st)
self.assertTrue(has_A_AFFY_59)
self.assertTrue(has_GPL23026)
self.assertTrue(has_AGEOD23026)
def test_get_internal_microarray_accession(self):
"""Test that supported microarray platforms setting is set correctly."""
self.assertEqual(utils.get_internal_microarray_accession("hgu133a"), "hgu133a")
self.assertEqual(utils.get_internal_microarray_accession("A-AFFY-59"), "soybean")
self.assertEqual(utils.get_internal_microarray_accession("GPL23026"), "Illumina_HumanHT-12_V4.0")
def test_supported_rnaseq_platforms(self):
"""Test that supported RNASeq platforms setting is set correctly."""
self.assertTrue("Illumina HiSeq 1000" in utils.get_supported_rnaseq_platforms())
def test_readable_affymetrix_names(self):
"""Test that the setting for Affymetrix accessions to
human readable names is set correctly."""
readable_platform_names = utils.get_readable_affymetrix_names()
expected_readable_name = "[ChiGene-1_0-st] Affymetrix Chicken Gene 1.0 ST Array"
self.assertTrue(readable_platform_names["chigene10st"] == expected_readable_name)
expected_readable_name = "[Xenopus_laevis] Affymetrix Xenopus laevis Genome Array"
self.assertTrue(readable_platform_names["xenopuslaevis"] == expected_readable_name)
def test_get_normalized_platform(self):
""" Test a particular normaization we need to perform """
self.assertEqual(utils.get_normalized_platform("hugene10stv1"), "hugene10st")
self.assertEqual(utils.get_normalized_platform("hugene10stv2"), "hugene10st")
self.assertEqual(utils.get_normalized_platform("stv1hugene10"), "stv1hugene10")
def test_volume_index(self):
"""Test that supported RNASeq platforms setting is set correctly."""
self.assertEqual(utils.get_volume_index(), "0")
with open('/tmp/VOLUME_INDEX', 'wb') as f:
f.write("123".encode())
self.assertEqual(utils.get_volume_index(path='/tmp/VOLUME_INDEX'), "123")
| {
"content_hash": "e213ddb7790bf04703449dcc8c8a45a2",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 105,
"avg_line_length": 46.09,
"alnum_prop": 0.666088088522456,
"repo_name": "data-refinery/data_refinery",
"id": "8a01966a4c1c0b9ed1bc6f007c635d99cead85bc",
"size": "4609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/data_refinery_common/test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HCL",
"bytes": "15276"
},
{
"name": "Python",
"bytes": "307545"
},
{
"name": "R",
"bytes": "4988"
},
{
"name": "Shell",
"bytes": "9338"
}
],
"symlink_target": ""
} |
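The tests above reset utils.INSTANCE_ID and patch requests.get because get_instance_id caches its answer in a module-level global. A hypothetical sketch of that caching pattern, not the actual data_refinery_common.utils implementation:

```python
import requests
from django.conf import settings

INSTANCE_ID = None

def get_instance_id():
    """Return the instance id once, then serve the cached value."""
    global INSTANCE_ID
    if INSTANCE_ID is None:
        if settings.RUNNING_IN_CLOUD:
            # Assumed metadata endpoint; the real module may use a different URL.
            INSTANCE_ID = requests.get(
                "http://169.254.169.254/latest/meta-data/instance-id"
            ).text
        else:
            INSTANCE_ID = "local"
    return INSTANCE_ID
```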
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name="size",
parent_name="layout.ternary.aaxis.title.font",
**kwargs,
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
min=kwargs.pop("min", 1),
**kwargs,
)
| {
"content_hash": "50ceb71a0df82ae27e97f3c1f907f0ce",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 66,
"avg_line_length": 28.764705882352942,
"alnum_prop": 0.5603271983640081,
"repo_name": "plotly/plotly.py",
"id": "4f95af68bcb26ea5f9718515a26c70938c08a979",
"size": "489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/ternary/aaxis/title/font/_size.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""
Copy the text of a button from window A to window B via drag and drop (DND).
Tested environment:
Mac OS X 10.6.8
http://doc.qt.nokia.com/latest/dnd.html
"""
import sys
try:
from PySide import QtCore
from PySide import QtGui
except ImportError:
from PyQt4 import QtCore
from PyQt4 import QtGui
class DragWidget(QtGui.QPushButton):
def __init__(self, parent=None):
super(DragWidget, self).__init__('DND me', parent)
def mousePressEvent(self, evt):
if not self.geometry().contains(evt.pos()):
return
if evt.button() == QtCore.Qt.MouseButton.LeftButton:
mime_data = QtCore.QMimeData()
mime_data.setText(self.text())
drag = QtGui.QDrag(self)
drag.setMimeData(mime_data)
# drag.exec_() # show nothing while drag move
# drag.exec_(QtCore.Qt.CopyAction) # show a `Plus/Copy icon' while drag move
            # These flags allow dragging from inside the PySide application to external targets.
            # For example, dragging this onto the Finder on Mac OS X automatically creates a text
            # file whose name and contents are both 'DND me'.
drag.exec_(QtCore.Qt.CopyAction | QtCore.Qt.MoveAction)
class ChatWin(QtGui.QWidget):
def __init__(self, parent=None):
super(ChatWin, self).__init__()
self.demo = parent
x, y, w, h = 200, 200, 300, 400
self.setGeometry(x, y, w, h)
self.setAcceptDrops(True)
def show_and_raise(self):
self.show()
self.raise_()
def dragEnterEvent(self, evt):
evt.accept()
if evt.mimeData().hasFormat('text/plain'):
evt.accept()
else:
evt.ignore()
def dropEvent(self, evt):
evt.accept()
mime_data = evt.mimeData()
print 'text:', mime_data.data('text/plain')
class Demo(QtGui.QMainWindow):
def __init__(self):
super(Demo, self).__init__()
x, y, w, h = 500, 200, 300, 400
self.setGeometry(x, y, w, h)
self.btn = DragWidget(self)
self.btn.move(10, 10)
self.setAcceptDrops(True)
self.chat_win = ChatWin(self)
self.chat_win.show_and_raise()
def show_and_raise(self):
self.show()
self.raise_()
def dragEnterEvent(self, drag_enter_evt):
mime_data = drag_enter_evt.mimeData()
if mime_data.hasFormat('text/plain'):
drag_enter_evt.acceptProposedAction()
def dragMoveEvent(self, evt):
# print 'dragMoveEvent', evt.pos()
if self.btn.geometry().contains(evt.pos()):
evt.ignore()
def dropEvent(self, drop_evt):
mime_data = drop_evt.mimeData()
if not self.btn.geometry().contains(drop_evt.pos()) and \
mime_data.hasFormat('text/plain'):
print 'text:', mime_data.data('text/plain')
drop_evt.accept()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
demo = Demo()
demo.show_and_raise()
sys.exit(app.exec_()) | {
"content_hash": "301bd4bac0cd23a17c66e72a21040706",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 95,
"avg_line_length": 25.78813559322034,
"alnum_prop": 0.5836345711468945,
"repo_name": "alexlib/Qt-Python-Binding-Examples",
"id": "a90bd217ec06b90f5a8a12649c9a6338577b505a",
"size": "3087",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drag_and_drop/copy_txt_of_btn_between_wins_in_dnd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "251904"
}
],
"symlink_target": ""
} |
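One detail in the drop handlers above: mimeData().data('text/plain') returns a QByteArray, while mimeData().text() returns the decoded string. A minimal Qt4-style drop target using the text accessor (illustrative, not part of the original demo):

```python
from __future__ import print_function

try:
    from PySide import QtGui
except ImportError:
    from PyQt4 import QtGui

class DropLabel(QtGui.QLabel):
    """Minimal drop target that prints any dropped plain text."""

    def __init__(self, parent=None):
        super(DropLabel, self).__init__('drop text here', parent)
        self.setAcceptDrops(True)

    def dragEnterEvent(self, evt):
        if evt.mimeData().hasText():
            evt.acceptProposedAction()

    def dropEvent(self, evt):
        # text() decodes the 'text/plain' payload to a string,
        # instead of the raw QByteArray returned by data('text/plain').
        print('text:', evt.mimeData().text())
        evt.acceptProposedAction()
```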
from __future__ import print_function
import glob
import os.path
import shutil
import subprocess
import sys
import time
AWKS = [
'./goawk',
'./orig', # GoAWK without perf improvements
'original-awk',
'gawk',
'mawk',
]
NORM_INDEX = AWKS.index('original-awk')
TESTS_TO_MEAN = None # By default, calculate the mean of all tests
if False:
# Only get the mean of these tests because these are the only ones
# we show in the GoAWK article.
TESTS_TO_MEAN = [
'tt.01_print',
'tt.02_print_NR_NF',
'tt.02a_print_length',
'tt.03_sum_length',
'tt.03a_sum_field',
'tt.04_printf_fields',
'tt.05_concat_fields',
'tt.06_count_lengths',
'tt.07_even_fields',
'tt.big_complex_program',
'tt.x1_mandelbrot',
'tt.x2_sum_loop',
]
NUM_RUNS = 6
MIN_TIME = 0.5
PROGRAM_GLOB = 'testdata/tt.*'
if len(sys.argv) > 1:
PROGRAM_GLOB = 'testdata/' + sys.argv[1]
def repeat_file(input_file, repeated_file, n):
with open(input_file, 'rb') as fin, open(repeated_file, 'wb') as fout:
for i in range(n):
fin.seek(0)
shutil.copyfileobj(fin, fout)
print('Test ', end='')
for awk in AWKS:
display_awk = os.path.basename(awk)
display_awk = display_awk.replace('original-awk', 'awk')
print('| {:>8} '.format(display_awk), end='')
print()
print('-'*28 + ' | --------'*len(AWKS))
repeats_created = []
products = [1] * len(AWKS)
num_products = 0
programs = sorted(glob.glob(PROGRAM_GLOB))
for program in programs:
# First do a test run with GoAWK to see roughly how long it takes
cmdline = '{} -f {} testdata/foo.td >tt.out'.format(AWKS[0], program)
start = time.time()
status = subprocess.call(cmdline, shell=True)
elapsed = time.time() - start
# If test run took less than MIN_TIME seconds, scale/repeat input
# file accordingly
input_file = 'testdata/foo.td'
if elapsed < MIN_TIME:
multiplier = int(round(MIN_TIME / elapsed))
repeated_file = '{}.{}'.format(input_file, multiplier)
if not os.path.exists(repeated_file):
repeat_file(input_file, repeated_file, multiplier)
repeats_created.append(repeated_file)
input_file = repeated_file
    # Record time taken to run this test, running each NUM_RUNS times
# and taking the minimum elapsed time
awk_times = []
for awk in AWKS:
cmdline = '{} -f {} {} >tt.out'.format(awk, program, input_file)
times = []
for i in range(NUM_RUNS):
start = time.time()
status = subprocess.call(cmdline, shell=True)
elapsed = time.time() - start
times.append(elapsed)
if status != 0:
print('ERROR status {} from cmd: {}'.format(status, cmdline), file=sys.stderr)
min_time = min(sorted(times)[1:])
awk_times.append(min_time)
# Normalize to One True AWK time = 1.0
norm_time = awk_times[NORM_INDEX]
speeds = [norm_time/t for t in awk_times]
test_name = program.split('/')[1]
if TESTS_TO_MEAN is None or test_name in TESTS_TO_MEAN:
num_products += 1
for i in range(len(AWKS)):
products[i] *= speeds[i]
display_name = test_name.split('_')[0] + ' (' + ' '.join(test_name.split('_')[1:]) + ')'
print('{:28}'.format(display_name), end='')
for i, awk in enumerate(AWKS):
print(' | {:8.2f}'.format(speeds[i]), end='')
print()
print('-'*28 + ' | --------'*len(AWKS))
print('**Geo mean** ', end='')
for i, awk in enumerate(AWKS):
print(' | **{:.2f}**'.format(products[i] ** (1.0/num_products)), end='')
print()
# Delete temporary files created
os.remove('tt.out')
for repeated_file in repeats_created:
os.remove(repeated_file)
| {
"content_hash": "8716424d2e10cf9a63f8d65507bba2ec",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 94,
"avg_line_length": 31.710743801652892,
"alnum_prop": 0.5856137607505864,
"repo_name": "benhoyt/goawk",
"id": "7fbd919df5278975c28092ca2d2e511a3c556241",
"size": "3906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/benchmark_awks.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "429714"
},
{
"name": "Python",
"bytes": "4550"
},
{
"name": "Shell",
"bytes": "4697"
}
],
"symlink_target": ""
} |
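The summary row printed above is a geometric mean: the per-test speedups (normalized so that original-awk is 1.0) are multiplied together and the n-th root is taken, so one extreme test cannot dominate the average. A tiny worked example:

```python
# Geometric mean of normalized speedups: multiply, then take the n-th root.
speedups = [2.0, 0.5, 1.0]          # 2x faster, 2x slower, unchanged
product = 1.0
for s in speedups:
    product *= s
geo_mean = product ** (1.0 / len(speedups))
print(geo_mean)                     # 1.0 -- the 2x win and the 2x loss cancel out
```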
from django.conf import settings
from django.db import transaction
from django.db.models import Q
from celery import task
from venue.models import Venue
from venue.services import VenueService
@task
def update_venues():
venues = Venue.objects.select_for_update().filter(
street='',
).exclude(
~Q(zip='') |
~Q(phone='') |
~Q(website='') |
~Q(description='') |
~Q(capacity__isnull=True)
)[:settings.VENUES_COUNT]
venue_service = VenueService()
with transaction.atomic():
for venue in venues:
venue = venue_service.update_venue(venue)
venue.save()
| {
"content_hash": "be826e275468db7d981e114aae092284",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 54,
"avg_line_length": 23.25,
"alnum_prop": 0.6221198156682027,
"repo_name": "FedorSelitsky/eventrack",
"id": "2725c66f3c913e8d1db23a8647d7bc8d51beea11",
"size": "651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venue/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16128"
},
{
"name": "Dockerfile",
"bytes": "1061"
},
{
"name": "HTML",
"bytes": "62582"
},
{
"name": "JavaScript",
"bytes": "46270"
},
{
"name": "Python",
"bytes": "47384"
},
{
"name": "Shell",
"bytes": "127"
}
],
"symlink_target": ""
} |
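The exclude(~Q(...) | ~Q(...) | ...) call above drops every venue for which at least one of those fields is already populated; by De Morgan's law that is (NULL handling aside) the same as keeping only venues where all of them are still empty. A sketch of that equivalent positive reading, assuming the character fields store empty strings rather than NULL:

```python
from django.db.models import Q
from venue.models import Venue

# Keep venues whose street, zip, phone, website and description are all empty
# and whose capacity is unset -- i.e. the ones still worth enriching.
venues = Venue.objects.filter(
    Q(street='') &
    Q(zip='') &
    Q(phone='') &
    Q(website='') &
    Q(description='') &
    Q(capacity__isnull=True)
)
```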
"""Tests for feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column as fc_old
from tensorflow.python.feature_column import feature_column_v2 as fc
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import rmsprop
def _initialized_session(config=None):
sess = session.Session(config=config)
sess.run(variables_lib.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
return sess
def get_linear_model_bias(name='linear_model'):
with variable_scope.variable_scope(name, reuse=True):
return variable_scope.get_variable('bias_weights')
def get_linear_model_column_var(column, name='linear_model'):
return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
name + '/' + column.name)[0]
class BaseFeatureColumnForTests(fc.FeatureColumn):
"""A base FeatureColumn useful to avoid boiler-plate in tests.
Provides dummy implementations for abstract methods that raise ValueError in
order to avoid re-defining all abstract methods for each test sub-class.
"""
@property
def parents(self):
raise ValueError('Should not use this method.')
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
raise ValueError('Should not use this method.')
def _get_config(self):
raise ValueError('Should not use this method.')
class LazyColumnTest(test.TestCase):
def test_transformations_called_once(self):
class TransformCounter(BaseFeatureColumnForTests):
def __init__(self):
self.num_transform = 0
@property
def _is_v2_column(self):
return True
@property
def name(self):
return 'TransformCounter'
def transform_feature(self, transformation_cache, state_manager):
self.num_transform += 1 # Count transform calls.
return transformation_cache.get('a', state_manager)
@property
def parse_example_spec(self):
pass
transformation_cache = fc.FeatureTransformationCache(
features={'a': [[2], [3.]]})
column = TransformCounter()
self.assertEqual(0, column.num_transform)
transformation_cache.get(column, None)
self.assertEqual(1, column.num_transform)
transformation_cache.get(column, None)
self.assertEqual(1, column.num_transform)
def test_returns_transform_output(self):
class Transformer(BaseFeatureColumnForTests):
@property
def _is_v2_column(self):
return True
@property
def name(self):
return 'Transformer'
def transform_feature(self, transformation_cache, state_manager):
return 'Output'
@property
def parse_example_spec(self):
pass
transformation_cache = fc.FeatureTransformationCache(
features={'a': [[2], [3.]]})
column = Transformer()
self.assertEqual('Output', transformation_cache.get(column, None))
self.assertEqual('Output', transformation_cache.get(column, None))
def test_does_not_pollute_given_features_dict(self):
class Transformer(BaseFeatureColumnForTests):
@property
def _is_v2_column(self):
return True
@property
def name(self):
return 'Transformer'
def transform_feature(self, transformation_cache, state_manager):
return 'Output'
@property
def parse_example_spec(self):
pass
features = {'a': [[2], [3.]]}
transformation_cache = fc.FeatureTransformationCache(features=features)
transformation_cache.get(Transformer(), None)
self.assertEqual(['a'], list(features.keys()))
def test_error_if_feature_is_not_found(self):
transformation_cache = fc.FeatureTransformationCache(
features={'a': [[2], [3.]]})
with self.assertRaisesRegexp(ValueError,
'bbb is not in features dictionary'):
transformation_cache.get('bbb', None)
with self.assertRaisesRegexp(ValueError,
'bbb is not in features dictionary'):
transformation_cache.get(u'bbb', None)
def test_not_supported_feature_column(self):
class NotAProperColumn(BaseFeatureColumnForTests):
@property
def _is_v2_column(self):
return True
@property
def name(self):
return 'NotAProperColumn'
def transform_feature(self, transformation_cache, state_manager):
# It should return not None.
pass
@property
def parse_example_spec(self):
pass
transformation_cache = fc.FeatureTransformationCache(
features={'a': [[2], [3.]]})
with self.assertRaisesRegexp(ValueError,
'NotAProperColumn is not supported'):
transformation_cache.get(NotAProperColumn(), None)
def test_key_should_be_string_or_feature_colum(self):
class NotAFeatureColumn(object):
pass
transformation_cache = fc.FeatureTransformationCache(
features={'a': [[2], [3.]]})
with self.assertRaisesRegexp(
TypeError, '"key" must be either a "str" or "FeatureColumn".'):
transformation_cache.get(NotAFeatureColumn(), None)
def test_expand_dim_rank_1_sparse_tensor_empty_batch(self):
# empty 1-D sparse tensor:
transformation_cache = fc.FeatureTransformationCache(
features={
'a':
sparse_tensor.SparseTensor(
indices=np.reshape(np.array([], dtype=np.int64), (0, 1)),
dense_shape=[0],
values=np.array([]))
})
with self.cached_session():
spv = transformation_cache.get('a', None).eval()
self.assertAllEqual(np.array([0, 1], dtype=np.int64), spv.dense_shape)
self.assertAllEqual(
np.reshape(np.array([], dtype=np.int64), (0, 2)), spv.indices)
class NumericColumnTest(test.TestCase):
def test_defaults(self):
a = fc.numeric_column('aaa')
self.assertEqual('aaa', a.key)
self.assertEqual('aaa', a.name)
self.assertEqual((1,), a.shape)
self.assertIsNone(a.default_value)
self.assertEqual(dtypes.float32, a.dtype)
self.assertIsNone(a.normalizer_fn)
self.assertTrue(a._is_v2_column)
def test_key_should_be_string(self):
with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
fc.numeric_column(key=('aaa',))
def test_shape_saved_as_tuple(self):
a = fc.numeric_column('aaa', shape=[1, 2], default_value=[[3, 2.]])
self.assertEqual((1, 2), a.shape)
def test_default_value_saved_as_tuple(self):
a = fc.numeric_column('aaa', default_value=4.)
self.assertEqual((4.,), a.default_value)
a = fc.numeric_column('aaa', shape=[1, 2], default_value=[[3, 2.]])
self.assertEqual(((3., 2.),), a.default_value)
def test_shape_and_default_value_compatibility(self):
fc.numeric_column('aaa', shape=[2], default_value=[1, 2.])
with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
fc.numeric_column('aaa', shape=[2], default_value=[1, 2, 3.])
fc.numeric_column(
'aaa', shape=[3, 2], default_value=[[2, 3], [1, 2], [2, 3.]])
with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
fc.numeric_column(
'aaa', shape=[3, 1], default_value=[[2, 3], [1, 2], [2, 3.]])
with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
fc.numeric_column(
'aaa', shape=[3, 3], default_value=[[2, 3], [1, 2], [2, 3.]])
def test_default_value_type_check(self):
fc.numeric_column(
'aaa', shape=[2], default_value=[1, 2.], dtype=dtypes.float32)
fc.numeric_column(
'aaa', shape=[2], default_value=[1, 2], dtype=dtypes.int32)
with self.assertRaisesRegexp(TypeError, 'must be compatible with dtype'):
fc.numeric_column(
'aaa', shape=[2], default_value=[1, 2.], dtype=dtypes.int32)
with self.assertRaisesRegexp(TypeError,
'default_value must be compatible with dtype'):
fc.numeric_column('aaa', default_value=['string'])
def test_shape_must_be_positive_integer(self):
with self.assertRaisesRegexp(TypeError, 'shape dimensions must be integer'):
fc.numeric_column(
'aaa', shape=[
1.0,
])
with self.assertRaisesRegexp(ValueError,
'shape dimensions must be greater than 0'):
fc.numeric_column(
'aaa', shape=[
0,
])
def test_dtype_is_convertible_to_float(self):
with self.assertRaisesRegexp(ValueError,
'dtype must be convertible to float'):
fc.numeric_column('aaa', dtype=dtypes.string)
def test_scalar_default_value_fills_the_shape(self):
a = fc.numeric_column('aaa', shape=[2, 3], default_value=2.)
self.assertEqual(((2., 2., 2.), (2., 2., 2.)), a.default_value)
def test_parse_spec(self):
a = fc.numeric_column('aaa', shape=[2, 3], dtype=dtypes.int32)
self.assertEqual({
'aaa': parsing_ops.FixedLenFeature((2, 3), dtype=dtypes.int32)
}, a.parse_example_spec)
def test_parse_example_no_default_value(self):
price = fc.numeric_column('price', shape=[2])
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'price':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=[20., 110.]))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec_v2([price]))
self.assertIn('price', features)
with self.cached_session():
self.assertAllEqual([[20., 110.]], features['price'].eval())
def test_parse_example_with_default_value(self):
price = fc.numeric_column('price', shape=[2], default_value=11.)
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'price':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=[20., 110.]))
}))
no_data = example_pb2.Example(features=feature_pb2.Features(
feature={
'something_else':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=[20., 110.]))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString(),
no_data.SerializeToString()],
features=fc.make_parse_example_spec_v2([price]))
self.assertIn('price', features)
with self.cached_session():
self.assertAllEqual([[20., 110.], [11., 11.]], features['price'].eval())
def test_normalizer_fn_must_be_callable(self):
with self.assertRaisesRegexp(TypeError, 'must be a callable'):
fc.numeric_column('price', normalizer_fn='NotACallable')
def test_normalizer_fn_transform_feature(self):
def _increment_two(input_tensor):
return input_tensor + 2.
price = fc.numeric_column('price', shape=[2], normalizer_fn=_increment_two)
output = fc._transform_features_v2({
'price': [[1., 2.], [5., 6.]]
}, [price], None)
with self.cached_session():
self.assertAllEqual([[3., 4.], [7., 8.]], output[price].eval())
def test_get_dense_tensor(self):
def _increment_two(input_tensor):
return input_tensor + 2.
price = fc.numeric_column('price', shape=[2], normalizer_fn=_increment_two)
transformation_cache = fc.FeatureTransformationCache({
'price': [[1., 2.], [5., 6.]]
})
self.assertEqual(
transformation_cache.get(price, None),
price.get_dense_tensor(transformation_cache, None))
def test_sparse_tensor_not_supported(self):
price = fc.numeric_column('price')
transformation_cache = fc.FeatureTransformationCache({
'price':
sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[0.3], dense_shape=[1, 1])
})
with self.assertRaisesRegexp(ValueError, 'must be a Tensor'):
price.transform_feature(transformation_cache, None)
def test_deep_copy(self):
a = fc.numeric_column('aaa', shape=[1, 2], default_value=[[3., 2.]])
a_copy = copy.deepcopy(a)
self.assertEqual(a_copy.name, 'aaa')
self.assertEqual(a_copy.shape, (1, 2))
self.assertEqual(a_copy.default_value, ((3., 2.),))
def test_numpy_default_value(self):
a = fc.numeric_column(
'aaa', shape=[1, 2], default_value=np.array([[3., 2.]]))
self.assertEqual(a.default_value, ((3., 2.),))
def test_linear_model(self):
price = fc.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
model = fc.LinearModel([price])
predictions = model(features)
price_var, bias = model.variables
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
self.assertAllClose([[0.]], self.evaluate(price_var))
self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
sess.run(price_var.assign([[10.]]))
self.assertAllClose([[10.], [50.]], self.evaluate(predictions))
def test_old_linear_model(self):
price = fc.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
predictions = fc_old.linear_model(features, [price])
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
self.assertAllClose([[0.]], self.evaluate(price_var))
self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
sess.run(price_var.assign([[10.]]))
self.assertAllClose([[10.], [50.]], self.evaluate(predictions))
def test_serialization(self):
def _increment_two(input_tensor):
return input_tensor + 2.
price = fc.numeric_column('price', normalizer_fn=_increment_two)
self.assertEqual(['price'], price.parents)
config = price._get_config()
self.assertEqual({
'key': 'price',
'shape': (1,),
'default_value': None,
'dtype': 'float32',
'normalizer_fn': '_increment_two'
}, config)
self.assertEqual(
price,
fc.NumericColumn._from_config(
config, custom_objects={'_increment_two': _increment_two}))
class BucketizedColumnTest(test.TestCase):
def test_invalid_source_column_type(self):
a = fc.categorical_column_with_hash_bucket('aaa', hash_bucket_size=10)
with self.assertRaisesRegexp(
ValueError,
'source_column must be a column generated with numeric_column'):
fc.bucketized_column(a, boundaries=[0, 1])
def test_invalid_source_column_shape(self):
a = fc.numeric_column('aaa', shape=[2, 3])
with self.assertRaisesRegexp(
ValueError, 'source_column must be one-dimensional column'):
fc.bucketized_column(a, boundaries=[0, 1])
def test_invalid_boundaries(self):
a = fc.numeric_column('aaa')
with self.assertRaisesRegexp(
ValueError, 'boundaries must be a sorted list'):
fc.bucketized_column(a, boundaries=None)
with self.assertRaisesRegexp(
ValueError, 'boundaries must be a sorted list'):
fc.bucketized_column(a, boundaries=1.)
with self.assertRaisesRegexp(
ValueError, 'boundaries must be a sorted list'):
fc.bucketized_column(a, boundaries=[1, 0])
with self.assertRaisesRegexp(
ValueError, 'boundaries must be a sorted list'):
fc.bucketized_column(a, boundaries=[1, 1])
def test_name(self):
a = fc.numeric_column('aaa', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
self.assertTrue(b._is_v2_column)
self.assertEqual('aaa_bucketized', b.name)
def test_is_v2_column_old_numeric(self):
a = fc_old._numeric_column('aaa', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
self.assertFalse(b._is_v2_column)
self.assertEqual('aaa_bucketized', b.name)
def test_parse_spec(self):
a = fc.numeric_column('aaa', shape=[2], dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
self.assertEqual({
'aaa': parsing_ops.FixedLenFeature((2,), dtype=dtypes.int32)
}, b.parse_example_spec)
def test_variable_shape(self):
a = fc.numeric_column('aaa', shape=[2], dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
# Column 'aaa` has shape [2] times three buckets -> variable_shape=[2, 3].
self.assertAllEqual((2, 3), b.variable_shape)
def test_num_buckets(self):
a = fc.numeric_column('aaa', shape=[2], dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
# Column 'aaa` has shape [2] times three buckets -> num_buckets=6.
self.assertEqual(6, b.num_buckets)
def test_parse_example(self):
price = fc.numeric_column('price', shape=[2])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'price':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=[20., 110.]))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec_v2([bucketized_price]))
self.assertIn('price', features)
with self.cached_session():
self.assertAllEqual([[20., 110.]], features['price'].eval())
def test_transform_feature(self):
price = fc.numeric_column('price', shape=[2])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
with ops.Graph().as_default():
transformed_tensor = fc._transform_features_v2({
'price': [[-1., 1.], [5., 6.]]
}, [bucketized_price], None)
with _initialized_session():
self.assertAllEqual([[0, 1], [3, 4]],
transformed_tensor[bucketized_price].eval())
def test_get_dense_tensor_one_input_value(self):
"""Tests _get_dense_tensor() for input with shape=[1]."""
price = fc.numeric_column('price', shape=[1])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
with ops.Graph().as_default():
transformation_cache = fc.FeatureTransformationCache({
'price': [[-1.], [1.], [5.], [6.]]
})
with _initialized_session():
bucketized_price_tensor = bucketized_price.get_dense_tensor(
transformation_cache, None)
self.assertAllClose(
# One-hot tensor.
[[[1., 0., 0., 0., 0.]], [[0., 1., 0., 0., 0.]],
[[0., 0., 0., 1., 0.]], [[0., 0., 0., 0., 1.]]],
self.evaluate(bucketized_price_tensor))
def test_get_dense_tensor_two_input_values(self):
"""Tests _get_dense_tensor() for input with shape=[2]."""
price = fc.numeric_column('price', shape=[2])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
with ops.Graph().as_default():
transformation_cache = fc.FeatureTransformationCache({
'price': [[-1., 1.], [5., 6.]]
})
with _initialized_session():
bucketized_price_tensor = bucketized_price.get_dense_tensor(
transformation_cache, None)
self.assertAllClose(
# One-hot tensor.
[[[1., 0., 0., 0., 0.], [0., 1., 0., 0., 0.]],
[[0., 0., 0., 1., 0.], [0., 0., 0., 0., 1.]]],
self.evaluate(bucketized_price_tensor))
def test_get_sparse_tensors_one_input_value(self):
"""Tests _get_sparse_tensors() for input with shape=[1]."""
price = fc.numeric_column('price', shape=[1])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
with ops.Graph().as_default():
transformation_cache = fc.FeatureTransformationCache({
'price': [[-1.], [1.], [5.], [6.]]
})
with _initialized_session() as sess:
id_weight_pair = bucketized_price.get_sparse_tensors(
transformation_cache, None)
self.assertIsNone(id_weight_pair.weight_tensor)
id_tensor_value = sess.run(id_weight_pair.id_tensor)
self.assertAllEqual(
[[0, 0], [1, 0], [2, 0], [3, 0]], id_tensor_value.indices)
self.assertAllEqual([0, 1, 3, 4], id_tensor_value.values)
self.assertAllEqual([4, 1], id_tensor_value.dense_shape)
def test_get_sparse_tensors_two_input_values(self):
"""Tests _get_sparse_tensors() for input with shape=[2]."""
price = fc.numeric_column('price', shape=[2])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
with ops.Graph().as_default():
transformation_cache = fc.FeatureTransformationCache({
'price': [[-1., 1.], [5., 6.]]
})
with _initialized_session() as sess:
id_weight_pair = bucketized_price.get_sparse_tensors(
transformation_cache, None)
self.assertIsNone(id_weight_pair.weight_tensor)
id_tensor_value = sess.run(id_weight_pair.id_tensor)
self.assertAllEqual(
[[0, 0], [0, 1], [1, 0], [1, 1]], id_tensor_value.indices)
# Values 0-4 correspond to the first column of the input price.
# Values 5-9 correspond to the second column of the input price.
self.assertAllEqual([0, 6, 3, 9], id_tensor_value.values)
self.assertAllEqual([2, 2], id_tensor_value.dense_shape)
def test_sparse_tensor_input_not_supported(self):
price = fc.numeric_column('price')
bucketized_price = fc.bucketized_column(price, boundaries=[0, 1])
transformation_cache = fc.FeatureTransformationCache({
'price':
sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[0.3], dense_shape=[1, 1])
})
with self.assertRaisesRegexp(ValueError, 'must be a Tensor'):
bucketized_price.transform_feature(transformation_cache, None)
def test_deep_copy(self):
a = fc.numeric_column('aaa', shape=[2])
a_bucketized = fc.bucketized_column(a, boundaries=[0, 1])
a_bucketized_copy = copy.deepcopy(a_bucketized)
self.assertEqual(a_bucketized_copy.name, 'aaa_bucketized')
self.assertAllEqual(a_bucketized_copy.variable_shape, (2, 3))
self.assertEqual(a_bucketized_copy.boundaries, (0, 1))
def test_linear_model_one_input_value(self):
"""Tests linear_model() for input with shape=[1]."""
price = fc.numeric_column('price', shape=[1])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
with ops.Graph().as_default():
features = {'price': [[-1.], [1.], [5.], [6.]]}
model = fc.LinearModel([bucketized_price])
predictions = model(features)
bucketized_price_var, bias = model.variables
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
# One weight variable per bucket, all initialized to zero.
self.assertAllClose([[0.], [0.], [0.], [0.], [0.]],
self.evaluate(bucketized_price_var))
self.assertAllClose([[0.], [0.], [0.], [0.]],
self.evaluate(predictions))
sess.run(bucketized_price_var.assign(
[[10.], [20.], [30.], [40.], [50.]]))
# price -1. is in the 0th bucket, whose weight is 10.
# price 1. is in the 1st bucket, whose weight is 20.
# price 5. is in the 3rd bucket, whose weight is 40.
# price 6. is in the 4th bucket, whose weight is 50.
self.assertAllClose([[10.], [20.], [40.], [50.]],
self.evaluate(predictions))
sess.run(bias.assign([1.]))
self.assertAllClose([[11.], [21.], [41.], [51.]],
self.evaluate(predictions))
def test_linear_model_two_input_values(self):
"""Tests linear_model() for input with shape=[2]."""
price = fc.numeric_column('price', shape=[2])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
with ops.Graph().as_default():
features = {'price': [[-1., 1.], [5., 6.]]}
model = fc.LinearModel([bucketized_price])
predictions = model(features)
bucketized_price_var, bias = model.variables
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
# One weight per bucket per input column, all initialized to zero.
self.assertAllClose(
[[0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.]],
self.evaluate(bucketized_price_var))
self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
sess.run(bucketized_price_var.assign(
[[10.], [20.], [30.], [40.], [50.],
[60.], [70.], [80.], [90.], [100.]]))
# 1st example:
# price -1. is in the 0th bucket, whose weight is 10.
# price 1. is in the 6th bucket, whose weight is 70.
# 2nd example:
# price 5. is in the 3rd bucket, whose weight is 40.
# price 6. is in the 9th bucket, whose weight is 100.
self.assertAllClose([[80.], [140.]], self.evaluate(predictions))
sess.run(bias.assign([1.]))
self.assertAllClose([[81.], [141.]], self.evaluate(predictions))
def test_old_linear_model_one_input_value(self):
"""Tests linear_model() for input with shape=[1]."""
price = fc.numeric_column('price', shape=[1])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
with ops.Graph().as_default():
features = {'price': [[-1.], [1.], [5.], [6.]]}
predictions = fc_old.linear_model(features, [bucketized_price])
bias = get_linear_model_bias()
bucketized_price_var = get_linear_model_column_var(bucketized_price)
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
# One weight variable per bucket, all initialized to zero.
self.assertAllClose([[0.], [0.], [0.], [0.], [0.]],
self.evaluate(bucketized_price_var))
self.assertAllClose([[0.], [0.], [0.], [0.]],
self.evaluate(predictions))
sess.run(
bucketized_price_var.assign([[10.], [20.], [30.], [40.], [50.]]))
# price -1. is in the 0th bucket, whose weight is 10.
# price 1. is in the 1st bucket, whose weight is 20.
# price 5. is in the 3rd bucket, whose weight is 40.
# price 6. is in the 4th bucket, whose weight is 50.
self.assertAllClose([[10.], [20.], [40.], [50.]],
self.evaluate(predictions))
sess.run(bias.assign([1.]))
self.assertAllClose([[11.], [21.], [41.], [51.]],
self.evaluate(predictions))
def test_old_linear_model_two_input_values(self):
"""Tests linear_model() for input with shape=[2]."""
price = fc.numeric_column('price', shape=[2])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
with ops.Graph().as_default():
features = {'price': [[-1., 1.], [5., 6.]]}
predictions = fc_old.linear_model(features, [bucketized_price])
bias = get_linear_model_bias()
bucketized_price_var = get_linear_model_column_var(bucketized_price)
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
# One weight per bucket per input column, all initialized to zero.
self.assertAllClose(
[[0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.]],
self.evaluate(bucketized_price_var))
self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
sess.run(
bucketized_price_var.assign([[10.], [20.], [30.], [40.], [50.],
[60.], [70.], [80.], [90.], [100.]]))
# 1st example:
# price -1. is in the 0th bucket, whose weight is 10.
# price 1. is in the 6th bucket, whose weight is 70.
# 2nd example:
# price 5. is in the 3rd bucket, whose weight is 40.
# price 6. is in the 9th bucket, whose weight is 100.
self.assertAllClose([[80.], [140.]], self.evaluate(predictions))
sess.run(bias.assign([1.]))
self.assertAllClose([[81.], [141.]], self.evaluate(predictions))
def test_old_linear_model_one_input_value_old_numeric(self):
"""Tests linear_model() for input with shape=[1]."""
price = fc_old._numeric_column('price', shape=[1])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
with ops.Graph().as_default():
features = {'price': [[-1.], [1.], [5.], [6.]]}
predictions = fc_old.linear_model(features, [bucketized_price])
bias = get_linear_model_bias()
bucketized_price_var = get_linear_model_column_var(bucketized_price)
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
# One weight variable per bucket, all initialized to zero.
self.assertAllClose([[0.], [0.], [0.], [0.], [0.]],
self.evaluate(bucketized_price_var))
self.assertAllClose([[0.], [0.], [0.], [0.]],
self.evaluate(predictions))
sess.run(
bucketized_price_var.assign([[10.], [20.], [30.], [40.], [50.]]))
# price -1. is in the 0th bucket, whose weight is 10.
# price 1. is in the 1st bucket, whose weight is 20.
# price 5. is in the 3rd bucket, whose weight is 40.
# price 6. is in the 4th bucket, whose weight is 50.
self.assertAllClose([[10.], [20.], [40.], [50.]],
self.evaluate(predictions))
sess.run(bias.assign([1.]))
self.assertAllClose([[11.], [21.], [41.], [51.]],
self.evaluate(predictions))
def test_serialization(self):
price = fc.numeric_column('price', shape=[2])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
self.assertEqual([price], bucketized_price.parents)
config = bucketized_price._get_config()
self.assertEqual({
'source_column': {
'class_name': 'NumericColumn',
'config': {
'key': 'price',
'shape': (2,),
'default_value': None,
'dtype': 'float32',
'normalizer_fn': None
}
},
'boundaries': (0, 2, 4, 6)
}, config)
new_bucketized_price = fc.BucketizedColumn._from_config(config)
self.assertEqual(bucketized_price, new_bucketized_price)
self.assertIsNot(price, new_bucketized_price.source_column)
new_bucketized_price = fc.BucketizedColumn._from_config(
config, columns_by_name={price.name: price})
self.assertEqual(bucketized_price, new_bucketized_price)
self.assertIs(price, new_bucketized_price.source_column)
class HashedCategoricalColumnTest(test.TestCase):
def test_defaults(self):
a = fc.categorical_column_with_hash_bucket('aaa', 10)
self.assertEqual('aaa', a.name)
self.assertEqual('aaa', a.key)
self.assertEqual(10, a.hash_bucket_size)
self.assertEqual(dtypes.string, a.dtype)
self.assertTrue(a._is_v2_column)
def test_key_should_be_string(self):
with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
fc.categorical_column_with_hash_bucket(('key',), 10)
def test_bucket_size_should_be_given(self):
with self.assertRaisesRegexp(ValueError, 'hash_bucket_size must be set.'):
fc.categorical_column_with_hash_bucket('aaa', None)
def test_bucket_size_should_be_positive(self):
with self.assertRaisesRegexp(ValueError,
'hash_bucket_size must be at least 1'):
fc.categorical_column_with_hash_bucket('aaa', 0)
def test_dtype_should_be_string_or_integer(self):
fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.string)
fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.float32)
def test_deep_copy(self):
original = fc.categorical_column_with_hash_bucket('aaa', 10)
for column in (original, copy.deepcopy(original)):
self.assertEqual('aaa', column.name)
self.assertEqual(10, column.hash_bucket_size)
self.assertEqual(10, column.num_buckets)
self.assertEqual(dtypes.string, column.dtype)
def test_parse_spec_string(self):
a = fc.categorical_column_with_hash_bucket('aaa', 10)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.string)
}, a.parse_example_spec)
def test_parse_spec_int(self):
a = fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.int32)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int32)
}, a.parse_example_spec)
def test_parse_example(self):
a = fc.categorical_column_with_hash_bucket('aaa', 10)
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer']))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec_v2([a]))
self.assertIn('aaa', features)
with self.cached_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'omar', b'stringer'], dtype=np.object_),
dense_shape=[1, 2]),
features['aaa'].eval())
def test_strings_should_be_hashed(self):
hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
outputs = fc._transform_features_v2({
'wire': wire_tensor
}, [hashed_sparse], None)
output = outputs[hashed_sparse]
# Check exact hashed output. If hashing changes this test will break.
expected_values = [6, 4, 1]
with self.cached_session():
self.assertEqual(dtypes.int64, output.values.dtype)
self.assertAllEqual(expected_values, output.values.eval())
self.assertAllEqual(wire_tensor.indices.eval(), output.indices.eval())
self.assertAllEqual(wire_tensor.dense_shape.eval(),
output.dense_shape.eval())
def test_tensor_dtype_should_be_string_or_integer(self):
string_fc = fc.categorical_column_with_hash_bucket(
'a_string', 10, dtype=dtypes.string)
int_fc = fc.categorical_column_with_hash_bucket(
'a_int', 10, dtype=dtypes.int32)
float_fc = fc.categorical_column_with_hash_bucket(
'a_float', 10, dtype=dtypes.string)
int_tensor = sparse_tensor.SparseTensor(
values=[101],
indices=[[0, 0]],
dense_shape=[1, 1])
string_tensor = sparse_tensor.SparseTensor(
values=['101'],
indices=[[0, 0]],
dense_shape=[1, 1])
float_tensor = sparse_tensor.SparseTensor(
values=[101.],
indices=[[0, 0]],
dense_shape=[1, 1])
transformation_cache = fc.FeatureTransformationCache({
'a_int': int_tensor,
'a_string': string_tensor,
'a_float': float_tensor
})
transformation_cache.get(string_fc, None)
transformation_cache.get(int_fc, None)
with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
transformation_cache.get(float_fc, None)
def test_dtype_should_match_with_tensor(self):
hashed_sparse = fc.categorical_column_with_hash_bucket(
'wire', 10, dtype=dtypes.int64)
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
transformation_cache = fc.FeatureTransformationCache({'wire': wire_tensor})
with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
transformation_cache.get(hashed_sparse, None)
def test_ints_should_be_hashed(self):
hashed_sparse = fc.categorical_column_with_hash_bucket(
'wire', 10, dtype=dtypes.int64)
wire_tensor = sparse_tensor.SparseTensor(
values=[101, 201, 301],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
transformation_cache = fc.FeatureTransformationCache({'wire': wire_tensor})
output = transformation_cache.get(hashed_sparse, None)
# Check exact hashed output. If hashing changes this test will break.
expected_values = [3, 7, 5]
with self.cached_session():
self.assertAllEqual(expected_values, output.values.eval())
def test_int32_64_is_compatible(self):
hashed_sparse = fc.categorical_column_with_hash_bucket(
'wire', 10, dtype=dtypes.int64)
wire_tensor = sparse_tensor.SparseTensor(
values=constant_op.constant([101, 201, 301], dtype=dtypes.int32),
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
transformation_cache = fc.FeatureTransformationCache({'wire': wire_tensor})
output = transformation_cache.get(hashed_sparse, None)
# Check exact hashed output. If hashing changes this test will break.
expected_values = [3, 7, 5]
with self.cached_session():
self.assertAllEqual(expected_values, output.values.eval())
def test_get_sparse_tensors(self):
hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
transformation_cache = fc.FeatureTransformationCache({
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
})
id_weight_pair = hashed_sparse.get_sparse_tensors(transformation_cache,
None)
self.assertIsNone(id_weight_pair.weight_tensor)
self.assertEqual(
transformation_cache.get(hashed_sparse, None), id_weight_pair.id_tensor)
def test_get_sparse_tensors_dense_input(self):
hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
transformation_cache = fc.FeatureTransformationCache({
'wire': (('omar', ''), ('stringer', 'marlo'))
})
id_weight_pair = hashed_sparse.get_sparse_tensors(transformation_cache,
None)
self.assertIsNone(id_weight_pair.weight_tensor)
self.assertEqual(
transformation_cache.get(hashed_sparse, None), id_weight_pair.id_tensor)
def test_linear_model(self):
wire_column = fc.categorical_column_with_hash_bucket('wire', 4)
self.assertEqual(4, wire_column.num_buckets)
with ops.Graph().as_default():
model = fc.LinearModel((wire_column,))
predictions = model({
wire_column.name:
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
})
wire_var, bias = model.variables
with _initialized_session():
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
self.evaluate(wire_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
# 'marlo' -> 3: wire_var[3] = 4
# 'skywalker' -> 2, 'omar' -> 2: wire_var[2] + wire_var[2] = 3+3 = 6
self.assertAllClose(((4.,), (6.,)), self.evaluate(predictions))
def test_old_linear_model(self):
wire_column = fc.categorical_column_with_hash_bucket('wire', 4)
self.assertEqual(4, wire_column.num_buckets)
with ops.Graph().as_default():
predictions = fc_old.linear_model({
wire_column.name:
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
}, (wire_column,))
bias = get_linear_model_bias()
wire_var = get_linear_model_column_var(wire_column)
with _initialized_session():
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
self.evaluate(wire_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
# 'marlo' -> 3: wire_var[3] = 4
# 'skywalker' -> 2, 'omar' -> 2: wire_var[2] + wire_var[2] = 3+3 = 6
self.assertAllClose(((4.,), (6.,)), self.evaluate(predictions))
def test_serialization(self):
wire_column = fc.categorical_column_with_hash_bucket('wire', 4)
self.assertEqual(['wire'], wire_column.parents)
config = wire_column._get_config()
self.assertEqual({
'key': 'wire',
'hash_bucket_size': 4,
'dtype': 'string'
}, config)
self.assertEqual(wire_column,
fc.HashedCategoricalColumn._from_config(config))
class CrossedColumnTest(test.TestCase):
def test_keys_empty(self):
with self.assertRaisesRegexp(
ValueError, 'keys must be a list with length > 1'):
fc.crossed_column([], 10)
def test_keys_length_one(self):
with self.assertRaisesRegexp(
ValueError, 'keys must be a list with length > 1'):
fc.crossed_column(['a'], 10)
def test_key_type_unsupported(self):
with self.assertRaisesRegexp(ValueError, 'Unsupported key type'):
fc.crossed_column(['a', fc.numeric_column('c')], 10)
with self.assertRaisesRegexp(
ValueError, 'categorical_column_with_hash_bucket is not supported'):
fc.crossed_column(
['a', fc.categorical_column_with_hash_bucket('c', 10)], 10)
def test_hash_bucket_size_negative(self):
with self.assertRaisesRegexp(
ValueError, 'hash_bucket_size must be > 1'):
fc.crossed_column(['a', 'c'], -1)
def test_hash_bucket_size_zero(self):
with self.assertRaisesRegexp(
ValueError, 'hash_bucket_size must be > 1'):
fc.crossed_column(['a', 'c'], 0)
def test_hash_bucket_size_none(self):
with self.assertRaisesRegexp(
ValueError, 'hash_bucket_size must be > 1'):
fc.crossed_column(['a', 'c'], None)
def test_name(self):
a = fc.numeric_column('a', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed1 = fc.crossed_column(['d1', 'd2'], 10)
self.assertTrue(crossed1._is_v2_column)
crossed2 = fc.crossed_column([b, 'c', crossed1], 10)
self.assertTrue(crossed2._is_v2_column)
self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
def test_is_v2_column(self):
a = fc_old._numeric_column('a', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed1 = fc.crossed_column(['d1', 'd2'], 10)
self.assertTrue(crossed1._is_v2_column)
crossed2 = fc.crossed_column([b, 'c', crossed1], 10)
self.assertFalse(crossed2._is_v2_column)
self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
def test_name_ordered_alphabetically(self):
"""Tests that the name does not depend on the order of given columns."""
a = fc.numeric_column('a', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed1 = fc.crossed_column(['d1', 'd2'], 10)
crossed2 = fc.crossed_column([crossed1, 'c', b], 10)
self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
def test_name_leaf_keys_ordered_alphabetically(self):
"""Tests that the name does not depend on the order of given columns."""
a = fc.numeric_column('a', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed1 = fc.crossed_column(['d2', 'c'], 10)
crossed2 = fc.crossed_column([crossed1, 'd1', b], 10)
self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
def test_parse_spec(self):
a = fc.numeric_column('a', shape=[2], dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed = fc.crossed_column([b, 'c'], 10)
self.assertEqual({
'a': parsing_ops.FixedLenFeature((2,), dtype=dtypes.int32),
'c': parsing_ops.VarLenFeature(dtypes.string),
}, crossed.parse_example_spec)
def test_num_buckets(self):
a = fc.numeric_column('a', shape=[2], dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed = fc.crossed_column([b, 'c'], 15)
self.assertEqual(15, crossed.num_buckets)
def test_deep_copy(self):
a = fc.numeric_column('a', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed1 = fc.crossed_column(['d1', 'd2'], 10)
crossed2 = fc.crossed_column([b, 'c', crossed1], 15, hash_key=5)
crossed2_copy = copy.deepcopy(crossed2)
    self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2_copy.name)
self.assertEqual(15, crossed2_copy.hash_bucket_size)
self.assertEqual(5, crossed2_copy.hash_key)
def test_parse_example(self):
price = fc.numeric_column('price', shape=[2])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
price_cross_wire = fc.crossed_column([bucketized_price, 'wire'], 10)
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'price':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=[20., 110.])),
'wire':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer'])),
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec_v2([price_cross_wire]))
self.assertIn('price', features)
self.assertIn('wire', features)
with self.cached_session():
self.assertAllEqual([[20., 110.]], features['price'].eval())
wire_sparse = features['wire']
self.assertAllEqual([[0, 0], [0, 1]], wire_sparse.indices.eval())
# Use byte constants to pass the open-source test.
self.assertAllEqual([b'omar', b'stringer'], wire_sparse.values.eval())
self.assertAllEqual([1, 2], wire_sparse.dense_shape.eval())
def test_transform_feature(self):
price = fc.numeric_column('price', shape=[2])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
hash_bucket_size = 10
price_cross_wire = fc.crossed_column([bucketized_price, 'wire'],
hash_bucket_size)
features = {
'price': constant_op.constant([[1., 2.], [5., 6.]]),
'wire': sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2]),
}
outputs = fc._transform_features_v2(features, [price_cross_wire], None)
output = outputs[price_cross_wire]
with self.cached_session() as sess:
output_val = sess.run(output)
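      # Example 0 crosses 2 bucketized price values with 1 'wire' value
      # (2 crossed ids); example 1 crosses 2 values with 2 'wire' values
      # (4 crossed ids), hence dense_shape [2, 4] below.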
self.assertAllEqual(
[[0, 0], [0, 1], [1, 0], [1, 1], [1, 2], [1, 3]], output_val.indices)
for val in output_val.values:
self.assertIn(val, list(range(hash_bucket_size)))
self.assertAllEqual([2, 4], output_val.dense_shape)
def test_get_sparse_tensors(self):
a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
b = fc.bucketized_column(a, boundaries=(0, 1))
crossed1 = fc.crossed_column(['d1', 'd2'], 10)
crossed2 = fc.crossed_column([b, 'c', crossed1], 15, hash_key=5)
with ops.Graph().as_default():
transformation_cache = fc.FeatureTransformationCache({
'a':
constant_op.constant(((-1., .5), (.5, 1.))),
'c':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
'd1':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['d1A', 'd1B', 'd1C'],
dense_shape=(2, 2)),
'd2':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['d2A', 'd2B', 'd2C'],
dense_shape=(2, 2)),
})
id_weight_pair = crossed2.get_sparse_tensors(transformation_cache, None)
with _initialized_session():
id_tensor_eval = id_weight_pair.id_tensor.eval()
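        # Example 0 crosses 2 bucketized values with one value from each of
        # 'c', 'd1' and 'd2' (2 crossed ids); example 1 crosses 2 values
        # from every input (2 * 2 * 2 * 2 = 16 ids), hence dense_shape (2, 16).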
self.assertAllEqual(
((0, 0), (0, 1), (1, 0), (1, 1), (1, 2), (1, 3), (1, 4), (1, 5),
(1, 6), (1, 7), (1, 8), (1, 9), (1, 10), (1, 11), (1, 12), (1, 13),
(1, 14), (1, 15)),
id_tensor_eval.indices)
# Check exact hashed output. If hashing changes this test will break.
# All values are within [0, hash_bucket_size).
expected_values = (
6, 14, 0, 13, 8, 8, 10, 12, 2, 0, 1, 9, 8, 12, 2, 0, 10, 11)
self.assertAllEqual(expected_values, id_tensor_eval.values)
self.assertAllEqual((2, 16), id_tensor_eval.dense_shape)
def test_get_sparse_tensors_simple(self):
"""Same as test_get_sparse_tensors, but with simpler values."""
a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
b = fc.bucketized_column(a, boundaries=(0, 1))
crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
with ops.Graph().as_default():
transformation_cache = fc.FeatureTransformationCache({
'a':
constant_op.constant(((-1., .5), (.5, 1.))),
'c':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
})
id_weight_pair = crossed.get_sparse_tensors(transformation_cache, None)
with _initialized_session():
id_tensor_eval = id_weight_pair.id_tensor.eval()
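        # Example 0: 2 bucketized values x 1 'c' value = 2 crossed ids;
        # example 1: 2 x 2 = 4 crossed ids, hence dense_shape (2, 4).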
self.assertAllEqual(
((0, 0), (0, 1), (1, 0), (1, 1), (1, 2), (1, 3)),
id_tensor_eval.indices)
# Check exact hashed output. If hashing changes this test will break.
# All values are within [0, hash_bucket_size).
expected_values = (1, 0, 1, 3, 4, 2)
self.assertAllEqual(expected_values, id_tensor_eval.values)
self.assertAllEqual((2, 4), id_tensor_eval.dense_shape)
def test_linear_model(self):
"""Tests linear_model.
    Uses data from test_get_sparse_tensors_simple.
"""
a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
b = fc.bucketized_column(a, boundaries=(0, 1))
crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
with ops.Graph().as_default():
model = fc.LinearModel((crossed,))
predictions = model({
'a':
constant_op.constant(((-1., .5), (.5, 1.))),
'c':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
})
crossed_var, bias = model.variables
with _initialized_session() as sess:
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,), (0.,), (0.,)),
self.evaluate(crossed_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
sess.run(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,))))
# Expected ids after cross = (1, 0, 1, 3, 4, 2)
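        # Example 0 sums weights of ids (1, 0): 2 + 1 = 3; example 1 sums
        # weights of ids (1, 3, 4, 2): 2 + 4 + 5 + 3 = 14.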
self.assertAllClose(((3.,), (14.,)), self.evaluate(predictions))
sess.run(bias.assign((.1,)))
self.assertAllClose(((3.1,), (14.1,)), self.evaluate(predictions))
def test_linear_model_with_weights(self):
class _TestColumnWithWeights(BaseFeatureColumnForTests,
fc.CategoricalColumn):
"""Produces sparse IDs and sparse weights."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
return 'test_column'
@property
def parse_example_spec(self):
return {
self.name: parsing_ops.VarLenFeature(dtypes.int32),
'{}_weights'.format(self.name): parsing_ops.VarLenFeature(
dtypes.float32),
}
@property
def num_buckets(self):
return 5
def transform_feature(self, transformation_cache, state_manager):
return (transformation_cache.get(self.name, state_manager),
transformation_cache.get('{}_weights'.format(self.name),
state_manager))
def get_sparse_tensors(self, transformation_cache, state_manager):
"""Populates both id_tensor and weight_tensor."""
ids_and_weights = transformation_cache.get(self, state_manager)
return fc.CategoricalColumn.IdWeightPair(
id_tensor=ids_and_weights[0], weight_tensor=ids_and_weights[1])
t = _TestColumnWithWeights()
crossed = fc.crossed_column([t, 'c'], hash_bucket_size=5, hash_key=5)
with ops.Graph().as_default():
with self.assertRaisesRegexp(
ValueError,
'crossed_column does not support weight_tensor.*{}'.format(t.name)):
model = fc.LinearModel((crossed,))
model({
t.name:
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=[0, 1, 2],
dense_shape=(2, 2)),
'{}_weights'.format(t.name):
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=[1., 10., 2.],
dense_shape=(2, 2)),
'c':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
})
def test_old_linear_model(self):
"""Tests linear_model.
    Uses data from test_get_sparse_tensors_simple.
"""
a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
b = fc.bucketized_column(a, boundaries=(0, 1))
crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
with ops.Graph().as_default():
predictions = fc_old.linear_model({
'a':
constant_op.constant(((-1., .5), (.5, 1.))),
'c':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
}, (crossed,))
bias = get_linear_model_bias()
crossed_var = get_linear_model_column_var(crossed)
with _initialized_session() as sess:
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,), (0.,), (0.,)),
self.evaluate(crossed_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
sess.run(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,))))
# Expected ids after cross = (1, 0, 1, 3, 4, 2)
self.assertAllClose(((3.,), (14.,)), self.evaluate(predictions))
sess.run(bias.assign((.1,)))
self.assertAllClose(((3.1,), (14.1,)), self.evaluate(predictions))
def test_old_linear_model_with_weights(self):
class _TestColumnWithWeights(BaseFeatureColumnForTests,
fc.CategoricalColumn,
fc_old._CategoricalColumn):
"""Produces sparse IDs and sparse weights."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
return 'test_column'
@property
def parse_example_spec(self):
return {
self.name:
parsing_ops.VarLenFeature(dtypes.int32),
'{}_weights'.format(self.name):
parsing_ops.VarLenFeature(dtypes.float32),
}
@property
def _parse_example_spec(self):
return self.parse_example_spec
@property
def num_buckets(self):
return 5
@property
def _num_buckets(self):
return self.num_buckets
def transform_feature(self, transformation_cache, state_manager):
raise ValueError('Should not be called.')
def _transform_feature(self, inputs):
return (inputs.get(self.name),
inputs.get('{}_weights'.format(self.name)))
def get_sparse_tensors(self, transformation_cache, state_manager):
raise ValueError('Should not be called.')
def _get_sparse_tensors(self,
inputs,
weight_collections=None,
trainable=None):
"""Populates both id_tensor and weight_tensor."""
ids_and_weights = inputs.get(self)
return fc.CategoricalColumn.IdWeightPair(
id_tensor=ids_and_weights[0], weight_tensor=ids_and_weights[1])
t = _TestColumnWithWeights()
crossed = fc.crossed_column([t, 'c'], hash_bucket_size=5, hash_key=5)
with ops.Graph().as_default():
with self.assertRaisesRegexp(
ValueError,
'crossed_column does not support weight_tensor.*{}'.format(t.name)):
fc_old.linear_model({
t.name:
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=[0, 1, 2],
dense_shape=(2, 2)),
'{}_weights'.format(t.name):
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=[1., 10., 2.],
dense_shape=(2, 2)),
'c':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
}, (crossed,))
def test_old_linear_model_old_numeric(self):
"""Tests linear_model.
    Uses data from test_get_sparse_tensors_simple.
"""
a = fc_old._numeric_column('a', dtype=dtypes.int32, shape=(2,))
b = fc.bucketized_column(a, boundaries=(0, 1))
crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
with ops.Graph().as_default():
predictions = fc_old.linear_model({
'a':
constant_op.constant(((-1., .5), (.5, 1.))),
'c':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
}, (crossed,))
bias = get_linear_model_bias()
crossed_var = get_linear_model_column_var(crossed)
with _initialized_session() as sess:
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,), (0.,), (0.,)),
self.evaluate(crossed_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
sess.run(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,))))
# Expected ids after cross = (1, 0, 1, 3, 4, 2)
self.assertAllClose(((3.,), (14.,)), self.evaluate(predictions))
sess.run(bias.assign((.1,)))
self.assertAllClose(((3.1,), (14.1,)), self.evaluate(predictions))
def test_serialization(self):
a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
b = fc.bucketized_column(a, boundaries=(0, 1))
crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
self.assertEqual([b, 'c'], crossed.parents)
config = crossed._get_config()
self.assertEqual({
'hash_bucket_size':
5,
'hash_key':
5,
'keys': ({
'config': {
'boundaries': (0, 1),
'source_column': {
'config': {
'dtype': 'int32',
'default_value': None,
'key': 'a',
'normalizer_fn': None,
'shape': (2,)
},
'class_name': 'NumericColumn'
}
},
'class_name': 'BucketizedColumn'
}, 'c')
}, config)
new_crossed = fc.CrossedColumn._from_config(config)
self.assertEqual(crossed, new_crossed)
self.assertIsNot(b, new_crossed.keys[0])
new_crossed = fc.CrossedColumn._from_config(
config, columns_by_name={b.name: b})
self.assertEqual(crossed, new_crossed)
self.assertIs(b, new_crossed.keys[0])
class LinearModelTest(test.TestCase):
def test_raises_if_empty_feature_columns(self):
with self.assertRaisesRegexp(ValueError,
'feature_columns must not be empty'):
fc.LinearModel(feature_columns=[])
def test_should_be_feature_column(self):
with self.assertRaisesRegexp(ValueError, 'must be a FeatureColumn'):
fc.LinearModel(feature_columns='NotSupported')
def test_should_be_dense_or_categorical_column(self):
class NotSupportedColumn(BaseFeatureColumnForTests):
@property
def _is_v2_column(self):
return True
@property
def name(self):
return 'NotSupportedColumn'
def transform_feature(self, transformation_cache, state_manager):
pass
@property
def parse_example_spec(self):
pass
with self.assertRaisesRegexp(
ValueError, 'must be either a DenseColumn or CategoricalColumn'):
fc.LinearModel(feature_columns=[NotSupportedColumn()])
def test_does_not_support_dict_columns(self):
with self.assertRaisesRegexp(
ValueError, 'Expected feature_columns to be iterable, found dict.'):
fc.LinearModel(feature_columns={'a': fc.numeric_column('a')})
def test_raises_if_duplicate_name(self):
with self.assertRaisesRegexp(
ValueError, 'Duplicate feature column name found for columns'):
fc.LinearModel(
feature_columns=[fc.numeric_column('a'),
fc.numeric_column('a')])
def test_not_dict_input_features(self):
price = fc.numeric_column('price')
with ops.Graph().as_default():
features = [[1.], [5.]]
model = fc.LinearModel([price])
with self.assertRaisesRegexp(ValueError, 'We expected a dictionary here'):
model(features)
def test_dense_bias(self):
price = fc.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
model = fc.LinearModel([price])
predictions = model(features)
price_var, bias = model.variables
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
sess.run(price_var.assign([[10.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[15.], [55.]], self.evaluate(predictions))
def test_sparse_bias(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor}
model = fc.LinearModel([wire_cast])
predictions = model(features)
wire_cast_var, bias = model.variables
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
self.assertAllClose([[0.], [0.], [0.], [0.]],
self.evaluate(wire_cast_var))
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[1005.], [10015.]], self.evaluate(predictions))
def test_dense_and_sparse_bias(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
price = fc.numeric_column('price')
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor, 'price': [[1.], [5.]]}
model = fc.LinearModel([wire_cast, price])
predictions = model(features)
price_var, wire_cast_var, bias = model.variables
with _initialized_session() as sess:
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
sess.run(price_var.assign([[10.]]))
self.assertAllClose([[1015.], [10065.]], self.evaluate(predictions))
def test_dense_and_sparse_column(self):
"""When the column is both dense and sparse, uses sparse tensors."""
class _DenseAndSparseColumn(BaseFeatureColumnForTests, fc.DenseColumn,
fc.CategoricalColumn):
@property
def _is_v2_column(self):
return True
@property
def name(self):
return 'dense_and_sparse_column'
@property
def parse_example_spec(self):
return {self.name: parsing_ops.VarLenFeature(self.dtype)}
def transform_feature(self, transformation_cache, state_manager):
return transformation_cache.get(self.name, state_manager)
@property
def variable_shape(self):
raise ValueError('Should not use this method.')
def get_dense_tensor(self, transformation_cache, state_manager):
raise ValueError('Should not use this method.')
@property
def num_buckets(self):
return 4
def get_sparse_tensors(self, transformation_cache, state_manager):
sp_tensor = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 0], [1, 1]],
values=[2, 0, 3],
dense_shape=[2, 2])
return fc.CategoricalColumn.IdWeightPair(sp_tensor, None)
dense_and_sparse_column = _DenseAndSparseColumn()
with ops.Graph().as_default():
sp_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {dense_and_sparse_column.name: sp_tensor}
model = fc.LinearModel([dense_and_sparse_column])
predictions = model(features)
dense_and_sparse_column_var, bias = model.variables
with _initialized_session() as sess:
sess.run(dense_and_sparse_column_var.assign(
[[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[1005.], [10015.]], self.evaluate(predictions))
def test_dense_multi_output(self):
price = fc.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
model = fc.LinearModel([price], units=3)
predictions = model(features)
price_var, bias = model.variables
with _initialized_session() as sess:
self.assertAllClose(np.zeros((3,)), self.evaluate(bias))
self.assertAllClose(np.zeros((1, 3)), self.evaluate(price_var))
sess.run(price_var.assign([[10., 100., 1000.]]))
sess.run(bias.assign([5., 6., 7.]))
self.assertAllClose([[15., 106., 1007.], [55., 506., 5007.]],
self.evaluate(predictions))
def test_sparse_multi_output(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor}
model = fc.LinearModel([wire_cast], units=3)
predictions = model(features)
wire_cast_var, bias = model.variables
with _initialized_session() as sess:
self.assertAllClose(np.zeros((3,)), self.evaluate(bias))
self.assertAllClose(np.zeros((4, 3)), self.evaluate(wire_cast_var))
sess.run(
wire_cast_var.assign([[10., 11., 12.], [100., 110., 120.], [
1000., 1100., 1200.
], [10000., 11000., 12000.]]))
sess.run(bias.assign([5., 6., 7.]))
self.assertAllClose([[1005., 1106., 1207.], [10015., 11017., 12019.]],
self.evaluate(predictions))
def test_dense_multi_dimension(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1., 2.], [5., 6.]]}
model = fc.LinearModel([price])
predictions = model(features)
price_var, _ = model.variables
with _initialized_session() as sess:
self.assertAllClose([[0.], [0.]], self.evaluate(price_var))
sess.run(price_var.assign([[10.], [100.]]))
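        # [1., 2.] . [10., 100.] = 210 and [5., 6.] . [10., 100.] = 650.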
self.assertAllClose([[210.], [650.]], self.evaluate(predictions))
def test_sparse_multi_rank(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = array_ops.sparse_placeholder(dtypes.string)
wire_value = sparse_tensor.SparseTensorValue(
values=['omar', 'stringer', 'marlo', 'omar'], # hashed = [2, 0, 3, 2]
indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 1]],
dense_shape=[2, 2, 2])
features = {'wire_cast': wire_tensor}
model = fc.LinearModel([wire_cast])
predictions = model(features)
wire_cast_var, _ = model.variables
with _initialized_session() as sess:
self.assertAllClose(np.zeros((4, 1)), self.evaluate(wire_cast_var))
self.assertAllClose(
np.zeros((2, 1)),
predictions.eval(feed_dict={wire_tensor: wire_value}))
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
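        # Example 0 has ids [2, 0] -> 1000 + 10 = 1010; example 1 has ids
        # [3, 2] -> 10000 + 1000 = 11000.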
self.assertAllClose(
[[1010.], [11000.]],
predictions.eval(feed_dict={wire_tensor: wire_value}))
def test_sparse_combiner(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor}
model = fc.LinearModel([wire_cast], sparse_combiner='mean')
predictions = model(features)
wire_cast_var, bias = model.variables
with _initialized_session() as sess:
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
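        # 'mean' combiner: example 0 has a single id (2) -> 1000 + 5 = 1005;
        # example 1 averages ids (0, 3) -> (10 + 10000) / 2 + 5 = 5010.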
self.assertAllClose([[1005.], [5010.]], self.evaluate(predictions))
def test_sparse_combiner_with_negative_weights(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
wire_cast_weights = fc.weighted_categorical_column(wire_cast, 'weights')
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {
'wire_cast': wire_tensor,
'weights': constant_op.constant([[1., 1., -1.0]])
}
model = fc.LinearModel([wire_cast_weights], sparse_combiner='sum')
predictions = model(features)
wire_cast_var, bias = model.variables
with _initialized_session() as sess:
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
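        # Weights (1., 1., -1.): example 0 -> 1 * 1000 + 5 = 1005;
        # example 1 -> 1 * 10 + (-1) * 10000 + 5 = -9985.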
self.assertAllClose([[1005.], [-9985.]], self.evaluate(predictions))
def test_dense_multi_dimension_multi_output(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1., 2.], [5., 6.]]}
model = fc.LinearModel([price], units=3)
predictions = model(features)
price_var, bias = model.variables
with _initialized_session() as sess:
self.assertAllClose(np.zeros((3,)), self.evaluate(bias))
self.assertAllClose(np.zeros((2, 3)), self.evaluate(price_var))
sess.run(price_var.assign([[1., 2., 3.], [10., 100., 1000.]]))
sess.run(bias.assign([2., 3., 4.]))
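        # Example 0: 1 * [1, 2, 3] + 2 * [10, 100, 1000] + [2, 3, 4]
        # = [23, 205, 2007]; example 1: 5 * [1, 2, 3] + 6 * [10, 100, 1000]
        # + [2, 3, 4] = [67, 613, 6019].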
self.assertAllClose([[23., 205., 2007.], [67., 613., 6019.]],
self.evaluate(predictions))
def test_raises_if_shape_mismatch(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
with self.assertRaisesRegexp(
Exception,
r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
model = fc.LinearModel([price])
model(features)
def test_dense_reshaping(self):
price = fc.numeric_column('price', shape=[1, 2])
with ops.Graph().as_default():
features = {'price': [[[1., 2.]], [[5., 6.]]]}
model = fc.LinearModel([price])
predictions = model(features)
price_var, bias = model.variables
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
self.assertAllClose([[0.], [0.]], self.evaluate(price_var))
self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
sess.run(price_var.assign([[10.], [100.]]))
self.assertAllClose([[210.], [650.]], self.evaluate(predictions))
def test_dense_multi_column(self):
price1 = fc.numeric_column('price1', shape=2)
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': [[1., 2.], [5., 6.]],
'price2': [[3.], [4.]]
}
model = fc.LinearModel([price1, price2])
predictions = model(features)
price1_var, price2_var, bias = model.variables
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
self.assertAllClose([[0.], [0.]], self.evaluate(price1_var))
self.assertAllClose([[0.]], self.evaluate(price2_var))
self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
sess.run(price1_var.assign([[10.], [100.]]))
sess.run(price2_var.assign([[1000.]]))
sess.run(bias.assign([7.]))
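        # Example 0: 1 * 10 + 2 * 100 + 3 * 1000 + 7 = 3217;
        # example 1: 5 * 10 + 6 * 100 + 4 * 1000 + 7 = 4657.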
self.assertAllClose([[3217.], [4657.]], self.evaluate(predictions))
def test_dense_trainable_default(self):
price = fc.numeric_column('price')
with ops.Graph().as_default() as g:
features = {'price': [[1.], [5.]]}
model = fc.LinearModel([price])
model(features)
price_var, bias = model.variables
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertIn(bias, trainable_vars)
self.assertIn(price_var, trainable_vars)
def test_sparse_trainable_default(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
features = {'wire_cast': wire_tensor}
model = fc.LinearModel([wire_cast])
model(features)
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
wire_cast_var, bias = model.variables
self.assertIn(bias, trainable_vars)
self.assertIn(wire_cast_var, trainable_vars)
def test_dense_trainable_false(self):
price = fc.numeric_column('price')
with ops.Graph().as_default() as g:
features = {'price': [[1.], [5.]]}
model = fc.LinearModel([price], trainable=False)
model(features)
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual([], trainable_vars)
def test_sparse_trainable_false(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
features = {'wire_cast': wire_tensor}
model = fc.LinearModel([wire_cast], trainable=False)
model(features)
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual([], trainable_vars)
def test_column_order(self):
price_a = fc.numeric_column('price_a')
price_b = fc.numeric_column('price_b')
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
features = {
'price_a': [[1.]],
'price_b': [[3.]],
'wire_cast':
sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
}
model = fc.LinearModel([price_a, wire_cast, price_b])
model(features)
my_vars = model.variables
self.assertIn('price_a', my_vars[0].name)
self.assertIn('price_b', my_vars[1].name)
self.assertIn('wire_cast', my_vars[2].name)
with ops.Graph().as_default():
features = {
'price_a': [[1.]],
'price_b': [[3.]],
'wire_cast':
sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
}
model = fc.LinearModel([wire_cast, price_b, price_a])
model(features)
my_vars = model.variables
self.assertIn('price_a', my_vars[0].name)
self.assertIn('price_b', my_vars[1].name)
self.assertIn('wire_cast', my_vars[2].name)
def test_variable_names(self):
price1 = fc.numeric_column('price1')
dense_feature = fc.numeric_column('dense_feature')
dense_feature_bucketized = fc.bucketized_column(
dense_feature, boundaries=[0.])
some_sparse_column = fc.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5)
some_embedding_column = fc.embedding_column(
some_sparse_column, dimension=10)
all_cols = [price1, dense_feature_bucketized, some_embedding_column]
with ops.Graph().as_default():
model = fc.LinearModel(all_cols)
features = {
'price1': [[3.], [4.]],
'dense_feature': [[-1.], [4.]],
'sparse_feature': [['a'], ['x']],
}
model(features)
for var in model.variables:
self.assertTrue(isinstance(var, variables_lib.RefVariable))
variable_names = [var.name for var in model.variables]
self.assertItemsEqual([
'linear_model/dense_feature_bucketized/weights:0',
'linear_model/price1/weights:0',
'linear_model/sparse_feature_embedding/embedding_weights:0',
'linear_model/sparse_feature_embedding/weights:0',
'linear_model/bias_weights:0',
], variable_names)
def test_fit_and_predict(self):
columns = [fc.numeric_column('a')]
model = fc.LinearModel(columns)
model.compile(
optimizer=rmsprop.RMSPropOptimizer(1e-3),
loss='categorical_crossentropy',
metrics=['accuracy'])
x = {'a': np.random.random((10, 1))}
y = np.random.randint(20, size=(10, 1))
y = keras.utils.to_categorical(y, num_classes=20)
model.fit(x, y, epochs=1, batch_size=5)
model.fit(x, y, epochs=1, batch_size=5)
model.evaluate(x, y, batch_size=5)
model.predict(x, batch_size=5)
def test_static_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': [[1.], [5.], [7.]], # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
with self.assertRaisesRegexp(
ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):
model = fc.LinearModel([price1, price2])
model(features)
def test_subset_of_static_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
price3 = fc.numeric_column('price3')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]], # batchsize = 2
'price3': [[3.], [4.], [5.]] # batchsize = 3
}
with self.assertRaisesRegexp(
ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):
model = fc.LinearModel([price1, price2, price3])
model(features)
def test_runtime_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
model = fc.LinearModel([price1, price2])
predictions = model(features)
with _initialized_session() as sess:
with self.assertRaisesRegexp(errors.OpError,
'must have the same size and shape'):
sess.run(
predictions, feed_dict={features['price1']: [[1.], [5.], [7.]]})
def test_runtime_batch_size_matches(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
}
model = fc.LinearModel([price1, price2])
predictions = model(features)
with _initialized_session() as sess:
sess.run(
predictions,
feed_dict={
features['price1']: [[1.], [5.]],
features['price2']: [[1.], [5.]],
})
def test_with_numpy_input_fn(self):
price = fc.numeric_column('price')
price_buckets = fc.bucketized_column(
price, boundaries=[
0.,
10.,
100.,
])
body_style = fc.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
input_fn = numpy_io.numpy_input_fn(
x={
'price': np.array([-1., 2., 13., 104.]),
'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']),
},
batch_size=2,
shuffle=False)
features = input_fn()
model = fc.LinearModel([price_buckets, body_style])
net = model(features)
# self.assertEqual(1 + 3 + 5, net.shape[1])
with _initialized_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
body_style_var, price_buckets_var, bias = model.variables
sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[10 - 1000 + 5.], [100 - 10 + 5.]], sess.run(net))
coord.request_stop()
coord.join(threads)
def test_with_1d_sparse_tensor(self):
price = fc.numeric_column('price')
price_buckets = fc.bucketized_column(
price, boundaries=[
0.,
10.,
100.,
])
body_style = fc.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    # Provides a 1-dim dense tensor and a 1-dim sparse tensor.
features = {
'price': constant_op.constant([-1., 12.,]),
'body-style': sparse_tensor.SparseTensor(
indices=((0,), (1,)),
values=('sedan', 'hardtop'),
dense_shape=(2,)),
}
self.assertEqual(1, features['price'].shape.ndims)
self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
model = fc.LinearModel([price_buckets, body_style])
net = model(features)
with _initialized_session() as sess:
body_style_var, price_buckets_var, bias = model.variables
sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]], sess.run(net))
def test_with_1d_unknown_shape_sparse_tensor(self):
price = fc.numeric_column('price')
price_buckets = fc.bucketized_column(
price, boundaries=[
0.,
10.,
100.,
])
body_style = fc.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
country = fc.categorical_column_with_vocabulary_list(
'country', vocabulary_list=['US', 'JP', 'CA'])
    # Provides unknown-shape placeholders that are fed 1-dim dense and sparse data.
features = {
'price': array_ops.placeholder(dtypes.float32),
'body-style': array_ops.sparse_placeholder(dtypes.string),
'country': array_ops.placeholder(dtypes.string),
}
self.assertIsNone(features['price'].shape.ndims)
self.assertIsNone(features['body-style'].get_shape().ndims)
price_data = np.array([-1., 12.])
body_style_data = sparse_tensor.SparseTensorValue(
indices=((0,), (1,)),
values=('sedan', 'hardtop'),
dense_shape=(2,))
country_data = np.array(['US', 'CA'])
model = fc.LinearModel([price_buckets, body_style, country])
net = model(features)
body_style_var, _, price_buckets_var, bias = model.variables
with _initialized_session() as sess:
sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]],
sess.run(
net,
feed_dict={
features['price']: price_data,
features['body-style']: body_style_data,
features['country']: country_data
}))
def test_with_rank_0_feature(self):
price = fc.numeric_column('price')
features = {
'price': constant_op.constant(0),
}
self.assertEqual(0, features['price'].shape.ndims)
# Static rank 0 should fail
with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
model = fc.LinearModel([price])
model(features)
# Dynamic rank 0 should fail
features = {
'price': array_ops.placeholder(dtypes.float32),
}
model = fc.LinearModel([price])
net = model(features)
self.assertEqual(1, net.shape[1])
with _initialized_session() as sess:
with self.assertRaisesOpError('Feature .* cannot have rank 0'):
sess.run(net, feed_dict={features['price']: np.array(1)})
def test_multiple_linear_models(self):
price = fc.numeric_column('price')
with ops.Graph().as_default():
features1 = {'price': [[1.], [5.]]}
features2 = {'price': [[2.], [10.]]}
model1 = fc.LinearModel([price])
model2 = fc.LinearModel([price])
predictions1 = model1(features1)
predictions2 = model2(features2)
price_var1, bias1 = model1.variables
price_var2, bias2 = model2.variables
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias1))
sess.run(price_var1.assign([[10.]]))
sess.run(bias1.assign([5.]))
self.assertAllClose([[15.], [55.]], self.evaluate(predictions1))
self.assertAllClose([0.], self.evaluate(bias2))
sess.run(price_var2.assign([[10.]]))
sess.run(bias2.assign([5.]))
self.assertAllClose([[25.], [105.]], self.evaluate(predictions2))
class OldLinearModelTest(test.TestCase):
def test_raises_if_empty_feature_columns(self):
with self.assertRaisesRegexp(ValueError,
'feature_columns must not be empty'):
fc_old.linear_model(features={}, feature_columns=[])
def test_should_be_feature_column(self):
with self.assertRaisesRegexp(ValueError, 'must be a _FeatureColumn'):
fc_old.linear_model(features={'a': [[0]]}, feature_columns='NotSupported')
def test_should_be_dense_or_categorical_column(self):
class NotSupportedColumn(BaseFeatureColumnForTests, fc.FeatureColumn,
fc_old._FeatureColumn):
@property
def _is_v2_column(self):
return True
@property
def name(self):
return 'NotSupportedColumn'
def transform_feature(self, transformation_cache, state_manager):
pass
def _transform_feature(self, inputs):
pass
@property
def parse_example_spec(self):
pass
@property
def _parse_example_spec(self):
pass
with self.assertRaisesRegexp(
ValueError, 'must be either a _DenseColumn or _CategoricalColumn'):
fc_old.linear_model(
features={'a': [[0]]}, feature_columns=[NotSupportedColumn()])
def test_does_not_support_dict_columns(self):
with self.assertRaisesRegexp(
ValueError, 'Expected feature_columns to be iterable, found dict.'):
fc_old.linear_model(
features={'a': [[0]]}, feature_columns={'a': fc.numeric_column('a')})
def test_raises_if_duplicate_name(self):
with self.assertRaisesRegexp(
ValueError, 'Duplicate feature column name found for columns'):
fc_old.linear_model(
features={'a': [[0]]},
feature_columns=[fc.numeric_column('a'),
fc.numeric_column('a')])
def test_dense_bias(self):
price = fc.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
predictions = fc_old.linear_model(features, [price])
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
sess.run(price_var.assign([[10.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[15.], [55.]], self.evaluate(predictions))
def test_sparse_bias(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor}
predictions = fc_old.linear_model(features, [wire_cast])
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
self.assertAllClose([[0.], [0.], [0.], [0.]],
self.evaluate(wire_cast_var))
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[1005.], [10015.]], self.evaluate(predictions))
def test_dense_and_sparse_bias(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
price = fc.numeric_column('price')
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor, 'price': [[1.], [5.]]}
predictions = fc_old.linear_model(features, [wire_cast, price])
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
sess.run(price_var.assign([[10.]]))
self.assertAllClose([[1015.], [10065.]], self.evaluate(predictions))
def test_dense_and_sparse_column(self):
"""When the column is both dense and sparse, uses sparse tensors."""
class _DenseAndSparseColumn(BaseFeatureColumnForTests, fc.DenseColumn,
fc.CategoricalColumn, fc_old._DenseColumn,
fc_old._CategoricalColumn):
@property
def _is_v2_column(self):
return True
@property
def name(self):
return 'dense_and_sparse_column'
@property
def parse_example_spec(self):
return {self.name: parsing_ops.VarLenFeature(self.dtype)}
@property
def _parse_example_spec(self):
return self.parse_example_spec
def transform_feature(self, transformation_cache, state_manager):
raise ValueError('Should not use this method.')
def _transform_feature(self, inputs):
return inputs.get(self.name)
@property
def variable_shape(self):
        raise ValueError('Should not use this method.')
@property
def _variable_shape(self):
return self.variable_shape
def get_dense_tensor(self, transformation_cache, state_manager):
raise ValueError('Should not use this method.')
def _get_dense_tensor(self, inputs):
raise ValueError('Should not use this method.')
@property
def num_buckets(self):
return 4
@property
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
raise ValueError('Should not use this method.')
def _get_sparse_tensors(self,
inputs,
weight_collections=None,
trainable=None):
sp_tensor = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 0], [1, 1]],
values=[2, 0, 3],
dense_shape=[2, 2])
return fc.CategoricalColumn.IdWeightPair(sp_tensor, None)
dense_and_sparse_column = _DenseAndSparseColumn()
with ops.Graph().as_default():
sp_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {dense_and_sparse_column.name: sp_tensor}
predictions = fc_old.linear_model(features, [dense_and_sparse_column])
bias = get_linear_model_bias()
dense_and_sparse_column_var = get_linear_model_column_var(
dense_and_sparse_column)
with _initialized_session() as sess:
sess.run(
dense_and_sparse_column_var.assign([[10.], [100.], [1000.],
[10000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[1005.], [10015.]], self.evaluate(predictions))
def test_dense_multi_output(self):
price = fc.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
predictions = fc_old.linear_model(features, [price], units=3)
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose(np.zeros((3,)), self.evaluate(bias))
self.assertAllClose(np.zeros((1, 3)), self.evaluate(price_var))
sess.run(price_var.assign([[10., 100., 1000.]]))
sess.run(bias.assign([5., 6., 7.]))
self.assertAllClose([[15., 106., 1007.], [55., 506., 5007.]],
self.evaluate(predictions))
def test_sparse_multi_output(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor}
predictions = fc_old.linear_model(features, [wire_cast], units=3)
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
self.assertAllClose(np.zeros((3,)), self.evaluate(bias))
self.assertAllClose(np.zeros((4, 3)), self.evaluate(wire_cast_var))
sess.run(
wire_cast_var.assign([[10., 11., 12.], [100., 110., 120.],
[1000., 1100., 1200.],
[10000., 11000., 12000.]]))
sess.run(bias.assign([5., 6., 7.]))
self.assertAllClose([[1005., 1106., 1207.], [10015., 11017., 12019.]],
self.evaluate(predictions))
def test_dense_multi_dimension(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1., 2.], [5., 6.]]}
predictions = fc_old.linear_model(features, [price])
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose([[0.], [0.]], self.evaluate(price_var))
sess.run(price_var.assign([[10.], [100.]]))
self.assertAllClose([[210.], [650.]], self.evaluate(predictions))
def test_sparse_multi_rank(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = array_ops.sparse_placeholder(dtypes.string)
wire_value = sparse_tensor.SparseTensorValue(
values=['omar', 'stringer', 'marlo', 'omar'], # hashed = [2, 0, 3, 2]
indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 1]],
dense_shape=[2, 2, 2])
features = {'wire_cast': wire_tensor}
predictions = fc_old.linear_model(features, [wire_cast])
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
self.assertAllClose(np.zeros((4, 1)), self.evaluate(wire_cast_var))
self.assertAllClose(
np.zeros((2, 1)),
predictions.eval(feed_dict={wire_tensor: wire_value}))
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
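        # Example 0 contains ids 2 and 0 (1000 + 10); example 1 contains ids 3
        # and 2 (10000 + 1000). The bias is still 0.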
self.assertAllClose(
[[1010.], [11000.]],
predictions.eval(feed_dict={wire_tensor: wire_value}))
def test_sparse_combiner(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor}
predictions = fc_old.linear_model(
features, [wire_cast], sparse_combiner='mean')
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
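        # With the 'mean' combiner, row 0 has a single id (2 -> 1000.) while
        # row 1 averages ids 0 and 3: (10 + 10000) / 2 = 5005. The bias adds 5.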
self.assertAllClose([[1005.], [5010.]], self.evaluate(predictions))
def test_sparse_combiner_with_negative_weights(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
wire_cast_weights = fc.weighted_categorical_column(wire_cast, 'weights')
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {
'wire_cast': wire_tensor,
'weights': constant_op.constant([[1., 1., -1.0]])
}
predictions = fc_old.linear_model(
features, [wire_cast_weights], sparse_combiner='sum')
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
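        # The weights line up with the sparse values: id 2 gets weight 1.,
        # id 0 gets 1. and id 3 gets -1., so row 1 is 10 - 10000 + 5 = -9985.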
self.assertAllClose([[1005.], [-9985.]], self.evaluate(predictions))
def test_dense_multi_dimension_multi_output(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1., 2.], [5., 6.]]}
predictions = fc_old.linear_model(features, [price], units=3)
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose(np.zeros((3,)), self.evaluate(bias))
self.assertAllClose(np.zeros((2, 3)), self.evaluate(price_var))
sess.run(price_var.assign([[1., 2., 3.], [10., 100., 1000.]]))
sess.run(bias.assign([2., 3., 4.]))
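        # Output j for a row [p0, p1] is p0*w[0, j] + p1*w[1, j] + bias[j],
        # e.g. row 0, unit 0: 1*1 + 2*10 + 2 = 23.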
self.assertAllClose([[23., 205., 2007.], [67., 613., 6019.]],
self.evaluate(predictions))
def test_raises_if_shape_mismatch(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
with self.assertRaisesRegexp(
Exception,
r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
fc_old.linear_model(features, [price])
def test_dense_reshaping(self):
price = fc.numeric_column('price', shape=[1, 2])
with ops.Graph().as_default():
features = {'price': [[[1., 2.]], [[5., 6.]]]}
predictions = fc_old.linear_model(features, [price])
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
self.assertAllClose([[0.], [0.]], self.evaluate(price_var))
self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
sess.run(price_var.assign([[10.], [100.]]))
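        # The [1, 2]-shaped price is flattened per example, so predictions are
        # 1*10 + 2*100 = 210 and 5*10 + 6*100 = 650 (bias stays 0).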
self.assertAllClose([[210.], [650.]], self.evaluate(predictions))
def test_dense_multi_column(self):
price1 = fc.numeric_column('price1', shape=2)
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
predictions = fc_old.linear_model(features, [price1, price2])
bias = get_linear_model_bias()
price1_var = get_linear_model_column_var(price1)
price2_var = get_linear_model_column_var(price2)
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
self.assertAllClose([[0.], [0.]], self.evaluate(price1_var))
self.assertAllClose([[0.]], self.evaluate(price2_var))
self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
sess.run(price1_var.assign([[10.], [100.]]))
sess.run(price2_var.assign([[1000.]]))
sess.run(bias.assign([7.]))
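        # Row 0: 1*10 + 2*100 + 3*1000 + 7 = 3217;
        # row 1: 5*10 + 6*100 + 4*1000 + 7 = 4657.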
self.assertAllClose([[3217.], [4657.]], self.evaluate(predictions))
def test_fills_cols_to_vars(self):
price1 = fc.numeric_column('price1', shape=2)
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
cols_to_vars = {}
fc_old.linear_model(features, [price1, price2], cols_to_vars=cols_to_vars)
bias = get_linear_model_bias()
price1_var = get_linear_model_column_var(price1)
price2_var = get_linear_model_column_var(price2)
self.assertAllEqual(cols_to_vars['bias'], [bias])
self.assertAllEqual(cols_to_vars[price1], [price1_var])
self.assertAllEqual(cols_to_vars[price2], [price2_var])
def test_fills_cols_to_vars_partitioned_variables(self):
price1 = fc.numeric_column('price1', shape=2)
price2 = fc.numeric_column('price2', shape=3)
with ops.Graph().as_default():
features = {
'price1': [[1., 2.], [6., 7.]],
'price2': [[3., 4., 5.], [8., 9., 10.]]
}
cols_to_vars = {}
with variable_scope.variable_scope(
'linear',
partitioner=partitioned_variables.fixed_size_partitioner(2, axis=0)):
fc_old.linear_model(
features, [price1, price2], cols_to_vars=cols_to_vars)
with _initialized_session():
self.assertEqual([0.], cols_to_vars['bias'][0].eval())
# Partitioning shards the [2, 1] price1 var into 2 [1, 1] Variables.
self.assertAllEqual([[0.]], cols_to_vars[price1][0].eval())
self.assertAllEqual([[0.]], cols_to_vars[price1][1].eval())
# Partitioning shards the [3, 1] price2 var into a [2, 1] Variable and
# a [1, 1] Variable.
self.assertAllEqual([[0.], [0.]], cols_to_vars[price2][0].eval())
self.assertAllEqual([[0.]], cols_to_vars[price2][1].eval())
def test_fills_cols_to_output_tensors(self):
    # Provide three _DenseColumns to input_layer: a _NumericColumn, a
    # _BucketizedColumn, and an _EmbeddingColumn. Only the _EmbeddingColumn
    # creates a Variable.
apple_numeric_column = fc.numeric_column('apple_numeric_column')
banana_dense_feature = fc.numeric_column('banana_dense_feature')
banana_dense_feature_bucketized = fc.bucketized_column(
banana_dense_feature, boundaries=[0.])
cherry_sparse_column = fc.categorical_column_with_hash_bucket(
'cherry_sparse_feature', hash_bucket_size=5)
dragonfruit_embedding_column = fc.embedding_column(
cherry_sparse_column, dimension=10)
with ops.Graph().as_default():
features = {
'apple_numeric_column': [[3.], [4.]],
'banana_dense_feature': [[-1.], [4.]],
'cherry_sparse_feature': [['a'], ['x']],
}
cols_to_output_tensors = {}
all_cols = [
apple_numeric_column, banana_dense_feature_bucketized,
dragonfruit_embedding_column
]
input_layer = fc_old.input_layer(
features, all_cols, cols_to_output_tensors=cols_to_output_tensors)
      # We verify the mapping by checking that we have the right keys and that
      # the values (output_tensors) are indeed the ones used to form the input
      # layer.
self.assertItemsEqual(all_cols, cols_to_output_tensors.keys())
input_layer_inputs = [tensor for tensor in input_layer.op.inputs[:-1]]
output_tensors = [tensor for tensor in cols_to_output_tensors.values()]
self.assertItemsEqual(input_layer_inputs, output_tensors)
def test_dense_collection(self):
price = fc.numeric_column('price')
with ops.Graph().as_default() as g:
features = {'price': [[1.], [5.]]}
fc_old.linear_model(features, [price], weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
self.assertIn(bias, my_vars)
self.assertIn(price_var, my_vars)
def test_sparse_collection(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
features = {'wire_cast': wire_tensor}
fc_old.linear_model(features, [wire_cast], weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
self.assertIn(bias, my_vars)
self.assertIn(wire_cast_var, my_vars)
def test_dense_trainable_default(self):
price = fc.numeric_column('price')
with ops.Graph().as_default() as g:
features = {'price': [[1.], [5.]]}
fc_old.linear_model(features, [price])
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertIn(bias, trainable_vars)
self.assertIn(price_var, trainable_vars)
def test_sparse_trainable_default(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
features = {'wire_cast': wire_tensor}
fc_old.linear_model(features, [wire_cast])
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
self.assertIn(bias, trainable_vars)
self.assertIn(wire_cast_var, trainable_vars)
def test_dense_trainable_false(self):
price = fc.numeric_column('price')
with ops.Graph().as_default() as g:
features = {'price': [[1.], [5.]]}
fc_old.linear_model(features, [price], trainable=False)
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual([], trainable_vars)
def test_sparse_trainable_false(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
features = {'wire_cast': wire_tensor}
fc_old.linear_model(features, [wire_cast], trainable=False)
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual([], trainable_vars)
def test_column_order(self):
price_a = fc.numeric_column('price_a')
price_b = fc.numeric_column('price_b')
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
features = {
'price_a': [[1.]],
'price_b': [[3.]],
'wire_cast':
sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
}
fc_old.linear_model(
features, [price_a, wire_cast, price_b],
weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
self.assertIn('price_a', my_vars[0].name)
self.assertIn('price_b', my_vars[1].name)
self.assertIn('wire_cast', my_vars[2].name)
with ops.Graph().as_default() as g:
features = {
'price_a': [[1.]],
'price_b': [[3.]],
'wire_cast':
sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
}
fc_old.linear_model(
features, [wire_cast, price_b, price_a],
weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
self.assertIn('price_a', my_vars[0].name)
self.assertIn('price_b', my_vars[1].name)
self.assertIn('wire_cast', my_vars[2].name)
def test_static_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': [[1.], [5.], [7.]], # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
      with self.assertRaisesRegexp(
          ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):
fc_old.linear_model(features, [price1, price2])
def test_subset_of_static_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
price3 = fc.numeric_column('price3')
with ops.Graph().as_default():
features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 3 (dynamic)
'price2': [[3.], [4.]], # batchsize = 2
'price3': [[3.], [4.], [5.]] # batchsize = 3
}
      with self.assertRaisesRegexp(
          ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):
fc_old.linear_model(features, [price1, price2, price3])
def test_runtime_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
predictions = fc_old.linear_model(features, [price1, price2])
with _initialized_session() as sess:
with self.assertRaisesRegexp(errors.OpError,
'must have the same size and shape'):
sess.run(
predictions, feed_dict={features['price1']: [[1.], [5.], [7.]]})
def test_runtime_batch_size_matches(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
}
predictions = fc_old.linear_model(features, [price1, price2])
with _initialized_session() as sess:
sess.run(
predictions,
feed_dict={
features['price1']: [[1.], [5.]],
features['price2']: [[1.], [5.]],
})
def test_with_1d_sparse_tensor(self):
price = fc.numeric_column('price')
price_buckets = fc.bucketized_column(
price, boundaries=[
0.,
10.,
100.,
])
body_style = fc.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    # Provides 1-dim dense and sparse tensors.
features = {
'price':
constant_op.constant([
-1.,
12.,
]),
'body-style':
sparse_tensor.SparseTensor(
indices=((0,), (1,)),
values=('sedan', 'hardtop'),
dense_shape=(2,)),
}
self.assertEqual(1, features['price'].shape.ndims)
self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
net = fc_old.linear_model(features, [price_buckets, body_style])
with _initialized_session() as sess:
bias = get_linear_model_bias()
price_buckets_var = get_linear_model_column_var(price_buckets)
body_style_var = get_linear_model_column_var(body_style)
sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
sess.run(bias.assign([5.]))
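      # price -1. falls in bucket 0 (weight 10.) and 12. in bucket 2 (1000.);
      # 'sedan' is vocabulary index 2 (-1000.) and 'hardtop' index 0 (-10.).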
self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]], sess.run(net))
def test_with_1d_unknown_shape_sparse_tensor(self):
price = fc.numeric_column('price')
price_buckets = fc.bucketized_column(
price, boundaries=[
0.,
10.,
100.,
])
body_style = fc.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
country = fc.categorical_column_with_vocabulary_list(
'country', vocabulary_list=['US', 'JP', 'CA'])
    # Provides 1-dim dense and sparse tensors.
features = {
'price': array_ops.placeholder(dtypes.float32),
'body-style': array_ops.sparse_placeholder(dtypes.string),
'country': array_ops.placeholder(dtypes.string),
}
self.assertIsNone(features['price'].shape.ndims)
self.assertIsNone(features['body-style'].get_shape().ndims)
price_data = np.array([-1., 12.])
body_style_data = sparse_tensor.SparseTensorValue(
indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,))
country_data = np.array(['US', 'CA'])
net = fc_old.linear_model(features, [price_buckets, body_style, country])
bias = get_linear_model_bias()
price_buckets_var = get_linear_model_column_var(price_buckets)
body_style_var = get_linear_model_column_var(body_style)
with _initialized_session() as sess:
sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
sess.run(bias.assign([5.]))
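      # The country weights are left at their zero initialization, so only the
      # price bucket and body-style weights (plus the bias) contribute.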
self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]],
sess.run(
net,
feed_dict={
features['price']: price_data,
features['body-style']: body_style_data,
features['country']: country_data
}))
def test_with_rank_0_feature(self):
price = fc.numeric_column('price')
features = {
'price': constant_op.constant(0),
}
self.assertEqual(0, features['price'].shape.ndims)
# Static rank 0 should fail
with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
fc_old.linear_model(features, [price])
# Dynamic rank 0 should fail
features = {
'price': array_ops.placeholder(dtypes.float32),
}
net = fc_old.linear_model(features, [price])
self.assertEqual(1, net.shape[1])
with _initialized_session() as sess:
with self.assertRaisesOpError('Feature .* cannot have rank 0'):
sess.run(net, feed_dict={features['price']: np.array(1)})
def test_multiple_linear_models(self):
price = fc.numeric_column('price')
with ops.Graph().as_default():
features1 = {'price': [[1.], [5.]]}
features2 = {'price': [[2.], [10.]]}
predictions1 = fc_old.linear_model(features1, [price])
predictions2 = fc_old.linear_model(features2, [price])
bias1 = get_linear_model_bias(name='linear_model')
bias2 = get_linear_model_bias(name='linear_model_1')
price_var1 = get_linear_model_column_var(price, name='linear_model')
price_var2 = get_linear_model_column_var(price, name='linear_model_1')
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias1))
sess.run(price_var1.assign([[10.]]))
sess.run(bias1.assign([5.]))
self.assertAllClose([[15.], [55.]], self.evaluate(predictions1))
self.assertAllClose([0.], self.evaluate(bias2))
sess.run(price_var2.assign([[10.]]))
sess.run(bias2.assign([5.]))
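        # The second model has its own variables: 2*10 + 5 = 25, 10*10 + 5 = 105.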
self.assertAllClose([[25.], [105.]], self.evaluate(predictions2))
def test_linear_model_v1_shared_embedding_all_other_v2(self):
price = fc.numeric_column('price') # v2
some_sparse_column = fc.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5) # v2
some_embedding_column = fc.embedding_column(
some_sparse_column, dimension=10) # v2
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3) # v2
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3) # v2
shared_embedding_a, shared_embedding_b = fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b], dimension=2) # v1
all_cols = [
price, some_embedding_column, shared_embedding_a, shared_embedding_b
]
with ops.Graph().as_default():
features = {
'price': [[3.], [4.]],
'sparse_feature': [['a'], ['x']],
'aaa':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
fc_old.linear_model(features, all_cols)
bias = get_linear_model_bias()
with _initialized_session():
self.assertAllClose([0.], self.evaluate(bias))
def test_linear_model_v1_shared_embedding_with_v2_cat_all_other_v2(self):
price = fc.numeric_column('price') # v2
some_sparse_column = fc.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5) # v2
some_embedding_column = fc.embedding_column(
some_sparse_column, dimension=10) # v2
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3) # v2
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3) # v2
shared_embedding_a, shared_embedding_b = fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b], dimension=2) # v1
all_cols = [
price, some_embedding_column, shared_embedding_a, shared_embedding_b
]
with ops.Graph().as_default():
features = {
'price': [[3.], [4.]],
'sparse_feature': [['a'], ['x']],
'aaa':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
fc_old.linear_model(features, all_cols)
bias = get_linear_model_bias()
with _initialized_session():
self.assertAllClose([0.], self.evaluate(bias))
def test_linear_model_v1_v2_mix(self):
price = fc.numeric_column('price') # v2
some_sparse_column = fc.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5) # v1
some_embedding_column = fc.embedding_column(
some_sparse_column, dimension=10) # v1
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3) # v2
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3) # v2
shared_embedding_a, shared_embedding_b = fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b], dimension=2) # v1
all_cols = [
price, some_embedding_column, shared_embedding_a, shared_embedding_b
]
with ops.Graph().as_default():
features = {
'price': [[3.], [4.]],
'sparse_feature': [['a'], ['x']],
'aaa':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
fc_old.linear_model(features, all_cols)
bias = get_linear_model_bias()
with _initialized_session():
self.assertAllClose([0.], self.evaluate(bias))
def test_linear_model_v2_shared_embedding_all_other_v1(self):
price = fc.numeric_column('price') # v1
some_sparse_column = fc.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5) # v1
some_embedding_column = fc.embedding_column(
some_sparse_column, dimension=10) # v1
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3) # v2
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3) # v2
shared_embedding_a, shared_embedding_b = fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b], dimension=2) # v2
all_cols = [
price, some_embedding_column, shared_embedding_a, shared_embedding_b
]
with ops.Graph().as_default():
features = {
'price': [[3.], [4.]],
'sparse_feature': [['a'], ['x']],
'aaa':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
with self.assertRaisesRegexp(ValueError,
'SharedEmbeddingColumns are not supported'):
fc_old.linear_model(features, all_cols)
class DenseFeaturesTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_retrieving_input(self):
features = {'a': [0.]}
dense_features = fc.DenseFeatures(fc.numeric_column('a'))
inputs = self.evaluate(dense_features(features))
self.assertAllClose([[0.]], inputs)
def test_reuses_variables(self):
with context.eager_mode():
sparse_input = sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (2, 0)),
values=(0, 1, 2),
dense_shape=(3, 3))
# Create feature columns (categorical and embedding).
categorical_column = fc.categorical_column_with_identity(
key='a', num_buckets=3)
embedding_dimension = 2
def _embedding_column_initializer(shape, dtype, partition_info):
del shape # unused
del dtype # unused
del partition_info # unused
embedding_values = (
(1, 0), # id 0
(0, 1), # id 1
(1, 1)) # id 2
return embedding_values
embedding_column = fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_embedding_column_initializer)
dense_features = fc.DenseFeatures([embedding_column])
features = {'a': sparse_input}
inputs = dense_features(features)
variables = dense_features.variables
# Sanity check: test that the inputs are correct.
self.assertAllEqual([[1, 0], [0, 1], [1, 1]], inputs)
# Check that only one variable was created.
self.assertEqual(1, len(variables))
# Check that invoking dense_features on the same features does not create
# additional variables
_ = dense_features(features)
self.assertEqual(1, len(variables))
self.assertEqual(variables[0], dense_features.variables[0])
def test_feature_column_dense_features_gradient(self):
with context.eager_mode():
sparse_input = sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (2, 0)),
values=(0, 1, 2),
dense_shape=(3, 3))
# Create feature columns (categorical and embedding).
categorical_column = fc.categorical_column_with_identity(
key='a', num_buckets=3)
embedding_dimension = 2
def _embedding_column_initializer(shape, dtype, partition_info):
del shape # unused
del dtype # unused
del partition_info # unused
embedding_values = (
(1, 0), # id 0
(0, 1), # id 1
(1, 1)) # id 2
return embedding_values
embedding_column = fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_embedding_column_initializer)
dense_features = fc.DenseFeatures([embedding_column])
features = {'a': sparse_input}
def scale_matrix():
matrix = dense_features(features)
return 2 * matrix
# Sanity check: Verify that scale_matrix returns the correct output.
self.assertAllEqual([[2, 0], [0, 2], [2, 2]], scale_matrix())
# Check that the returned gradient is correct.
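      # scale_matrix multiplies the looked-up embeddings by 2, so the gradient
      # with respect to each looked-up embedding row is 2 in every position.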
grad_function = backprop.implicit_grad(scale_matrix)
grads_and_vars = grad_function()
indexed_slice = grads_and_vars[0][0]
gradient = grads_and_vars[0][0].values
self.assertAllEqual([0, 1, 2], indexed_slice.indices)
self.assertAllEqual([[2, 2], [2, 2], [2, 2]], gradient)
def test_raises_if_empty_feature_columns(self):
with self.assertRaisesRegexp(ValueError,
'feature_columns must not be empty'):
fc.DenseFeatures(feature_columns=[])(features={})
def test_should_be_dense_column(self):
with self.assertRaisesRegexp(ValueError, 'must be a DenseColumn'):
fc.DenseFeatures(feature_columns=[
fc.categorical_column_with_hash_bucket('wire_cast', 4)
])(
features={
'a': [[0]]
})
def test_does_not_support_dict_columns(self):
with self.assertRaisesRegexp(
ValueError, 'Expected feature_columns to be iterable, found dict.'):
fc.DenseFeatures(feature_columns={'a': fc.numeric_column('a')})(
features={
'a': [[0]]
})
def test_bare_column(self):
with ops.Graph().as_default():
      features = {'a': [0.]}
net = fc.DenseFeatures(fc.numeric_column('a'))(features)
with _initialized_session():
self.assertAllClose([[0.]], self.evaluate(net))
def test_column_generator(self):
with ops.Graph().as_default():
      features = {'a': [0.], 'b': [1.]}
columns = (fc.numeric_column(key) for key in features)
net = fc.DenseFeatures(columns)(features)
with _initialized_session():
self.assertAllClose([[0., 1.]], self.evaluate(net))
def test_raises_if_duplicate_name(self):
with self.assertRaisesRegexp(
ValueError, 'Duplicate feature column name found for columns'):
fc.DenseFeatures(
feature_columns=[fc.numeric_column('a'),
fc.numeric_column('a')])(
features={
'a': [[0]]
})
def test_one_column(self):
price = fc.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
net = fc.DenseFeatures([price])(features)
with _initialized_session():
self.assertAllClose([[1.], [5.]], self.evaluate(net))
def test_multi_dimension(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1., 2.], [5., 6.]]}
net = fc.DenseFeatures([price])(features)
with _initialized_session():
self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))
def test_compute_output_shape(self):
price1 = fc.numeric_column('price1', shape=2)
price2 = fc.numeric_column('price2', shape=4)
with ops.Graph().as_default():
features = {
'price1': [[1., 2.], [5., 6.]],
'price2': [[3., 4., 5., 6.], [7., 8., 9., 10.]]
}
dense_features = fc.DenseFeatures([price1, price2])
self.assertEqual((None, 6), dense_features.compute_output_shape((None,)))
net = dense_features(features)
with _initialized_session():
self.assertAllClose(
[[1., 2., 3., 4., 5., 6.], [5., 6., 7., 8., 9., 10.]],
self.evaluate(net))
def test_raises_if_shape_mismatch(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
with self.assertRaisesRegexp(
Exception,
r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
fc.DenseFeatures([price])(features)
def test_reshaping(self):
price = fc.numeric_column('price', shape=[1, 2])
with ops.Graph().as_default():
features = {'price': [[[1., 2.]], [[5., 6.]]]}
net = fc.DenseFeatures([price])(features)
with _initialized_session():
self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))
def test_multi_column(self):
price1 = fc.numeric_column('price1', shape=2)
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': [[1., 2.], [5., 6.]],
'price2': [[3.], [4.]]
}
net = fc.DenseFeatures([price1, price2])(features)
with _initialized_session():
self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], self.evaluate(net))
def test_cols_to_output_tensors(self):
price1 = fc.numeric_column('price1', shape=2)
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
cols_dict = {}
features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
dense_features = fc.DenseFeatures([price1, price2])
net = dense_features(features, cols_dict)
with _initialized_session():
self.assertAllClose([[1., 2.], [5., 6.]], cols_dict[price1].eval())
self.assertAllClose([[3.], [4.]], cols_dict[price2].eval())
self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], self.evaluate(net))
def test_column_order(self):
price_a = fc.numeric_column('price_a')
price_b = fc.numeric_column('price_b')
with ops.Graph().as_default():
features = {
'price_a': [[1.]],
'price_b': [[3.]],
}
net1 = fc.DenseFeatures([price_a, price_b])(features)
net2 = fc.DenseFeatures([price_b, price_a])(features)
with _initialized_session():
self.assertAllClose([[1., 3.]], self.evaluate(net1))
self.assertAllClose([[1., 3.]], self.evaluate(net2))
def test_fails_for_categorical_column(self):
animal = fc.categorical_column_with_identity('animal', num_buckets=4)
with ops.Graph().as_default():
features = {
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
}
with self.assertRaisesRegexp(Exception, 'must be a DenseColumn'):
fc.DenseFeatures([animal])(features)
def test_static_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': [[1.], [5.], [7.]], # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
      with self.assertRaisesRegexp(
          ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):
fc.DenseFeatures([price1, price2])(features)
def test_subset_of_static_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
price3 = fc.numeric_column('price3')
with ops.Graph().as_default():
features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 3 (dynamic)
'price2': [[3.], [4.]], # batchsize = 2
'price3': [[3.], [4.], [5.]] # batchsize = 3
}
      with self.assertRaisesRegexp(
          ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):
fc.DenseFeatures([price1, price2, price3])(features)
def test_runtime_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
net = fc.DenseFeatures([price1, price2])(features)
with _initialized_session() as sess:
with self.assertRaisesRegexp(errors.OpError,
'Dimensions of inputs should match'):
sess.run(net, feed_dict={features['price1']: [[1.], [5.], [7.]]})
def test_runtime_batch_size_matches(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
}
net = fc.DenseFeatures([price1, price2])(features)
with _initialized_session() as sess:
sess.run(
net,
feed_dict={
features['price1']: [[1.], [5.]],
features['price2']: [[1.], [5.]],
})
def test_multiple_layers_with_same_embedding_column(self):
some_sparse_column = fc.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5)
some_embedding_column = fc.embedding_column(
some_sparse_column, dimension=10)
with ops.Graph().as_default():
features = {
'sparse_feature': [['a'], ['x']],
}
all_cols = [some_embedding_column]
fc.DenseFeatures(all_cols)(features)
fc.DenseFeatures(all_cols)(features)
# Make sure that 2 variables get created in this case.
self.assertEqual(2, len(
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
expected_var_names = [
'dense_features/sparse_feature_embedding/embedding_weights:0',
'dense_features_1/sparse_feature_embedding/embedding_weights:0'
]
self.assertItemsEqual(
expected_var_names,
[v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
def test_multiple_layers_with_same_shared_embedding_column(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_dimension = 2
embedding_column_b, embedding_column_a = fc.shared_embedding_columns_v2(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension)
with ops.Graph().as_default():
features = {
'aaa':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
all_cols = [embedding_column_a, embedding_column_b]
fc.DenseFeatures(all_cols)(features)
fc.DenseFeatures(all_cols)(features)
# Make sure that only 1 variable gets created in this case.
self.assertEqual(1, len(
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
self.assertItemsEqual(
['aaa_bbb_shared_embedding:0'],
[v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
def test_multiple_layers_with_same_shared_embedding_column_diff_graphs(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_dimension = 2
embedding_column_b, embedding_column_a = fc.shared_embedding_columns_v2(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension)
all_cols = [embedding_column_a, embedding_column_b]
with ops.Graph().as_default():
features = {
'aaa':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
fc.DenseFeatures(all_cols)(features)
# Make sure that only 1 variable gets created in this case.
self.assertEqual(1, len(
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
with ops.Graph().as_default():
features1 = {
'aaa':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
fc.DenseFeatures(all_cols)(features1)
# Make sure that only 1 variable gets created in this case.
self.assertEqual(1, len(
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
self.assertItemsEqual(
['aaa_bbb_shared_embedding:0'],
[v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
def test_with_numpy_input_fn(self):
embedding_values = (
(1., 2., 3., 4., 5.), # id 0
(6., 7., 8., 9., 10.), # id 1
(11., 12., 13., 14., 15.) # id 2
)
def _initializer(shape, dtype, partition_info):
del shape, dtype, partition_info
return embedding_values
# price has 1 dimension in dense_features
price = fc.numeric_column('price')
body_style = fc.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
# one_hot_body_style has 3 dims in dense_features.
one_hot_body_style = fc.indicator_column(body_style)
# embedded_body_style has 5 dims in dense_features.
embedded_body_style = fc.embedding_column(
body_style, dimension=5, initializer=_initializer)
input_fn = numpy_io.numpy_input_fn(
x={
'price': np.array([11., 12., 13., 14.]),
'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']),
},
batch_size=2,
shuffle=False)
features = input_fn()
net = fc.DenseFeatures([price, one_hot_body_style, embedded_body_style])(
features)
self.assertEqual(1 + 3 + 5, net.shape[1])
with _initialized_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
# Each row is formed by concatenating `embedded_body_style`,
# `one_hot_body_style`, and `price` in order.
self.assertAllEqual(
[[11., 12., 13., 14., 15., 0., 0., 1., 11.],
[1., 2., 3., 4., 5., 1., 0., 0., 12]],
sess.run(net))
coord.request_stop()
coord.join(threads)
def test_with_1d_sparse_tensor(self):
embedding_values = (
(1., 2., 3., 4., 5.), # id 0
(6., 7., 8., 9., 10.), # id 1
(11., 12., 13., 14., 15.) # id 2
)
def _initializer(shape, dtype, partition_info):
del shape, dtype, partition_info
return embedding_values
# price has 1 dimension in dense_features
price = fc.numeric_column('price')
# one_hot_body_style has 3 dims in dense_features.
body_style = fc.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
one_hot_body_style = fc.indicator_column(body_style)
    # embedded_country has 5 dims in dense_features.
country = fc.categorical_column_with_vocabulary_list(
'country', vocabulary_list=['US', 'JP', 'CA'])
embedded_country = fc.embedding_column(
country, dimension=5, initializer=_initializer)
    # Provides 1-dim dense and sparse tensors.
features = {
'price': constant_op.constant([11., 12.,]),
'body-style': sparse_tensor.SparseTensor(
indices=((0,), (1,)),
values=('sedan', 'hardtop'),
dense_shape=(2,)),
# This is dense tensor for the categorical_column.
'country': constant_op.constant(['CA', 'US']),
}
self.assertEqual(1, features['price'].shape.ndims)
self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
self.assertEqual(1, features['country'].shape.ndims)
net = fc.DenseFeatures([price, one_hot_body_style, embedded_country])(
features)
self.assertEqual(1 + 3 + 5, net.shape[1])
with _initialized_session() as sess:
      # Each row is formed by concatenating `one_hot_body_style`,
      # `embedded_country`, and `price` in order.
self.assertAllEqual(
[[0., 0., 1., 11., 12., 13., 14., 15., 11.],
[1., 0., 0., 1., 2., 3., 4., 5., 12.]],
sess.run(net))
def test_with_1d_unknown_shape_sparse_tensor(self):
embedding_values = (
(1., 2.), # id 0
(6., 7.), # id 1
(11., 12.) # id 2
)
def _initializer(shape, dtype, partition_info):
del shape, dtype, partition_info
return embedding_values
# price has 1 dimension in dense_features
price = fc.numeric_column('price')
# one_hot_body_style has 3 dims in dense_features.
body_style = fc.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
one_hot_body_style = fc.indicator_column(body_style)
    # embedded_country has 2 dims in dense_features.
country = fc.categorical_column_with_vocabulary_list(
'country', vocabulary_list=['US', 'JP', 'CA'])
embedded_country = fc.embedding_column(
country, dimension=2, initializer=_initializer)
    # Provides 1-dim dense and sparse tensors.
features = {
'price': array_ops.placeholder(dtypes.float32),
'body-style': array_ops.sparse_placeholder(dtypes.string),
# This is dense tensor for the categorical_column.
'country': array_ops.placeholder(dtypes.string),
}
self.assertIsNone(features['price'].shape.ndims)
self.assertIsNone(features['body-style'].get_shape().ndims)
self.assertIsNone(features['country'].shape.ndims)
price_data = np.array([11., 12.])
body_style_data = sparse_tensor.SparseTensorValue(
indices=((0,), (1,)),
values=('sedan', 'hardtop'),
dense_shape=(2,))
country_data = np.array([['US'], ['CA']])
net = fc.DenseFeatures([price, one_hot_body_style, embedded_country])(
features)
self.assertEqual(1 + 3 + 2, net.shape[1])
with _initialized_session() as sess:
      # Each row is formed by concatenating `one_hot_body_style`,
      # `embedded_country`, and `price` in order.
self.assertAllEqual(
[[0., 0., 1., 1., 2., 11.], [1., 0., 0., 11., 12., 12.]],
sess.run(
net,
feed_dict={
features['price']: price_data,
features['body-style']: body_style_data,
features['country']: country_data
}))
def test_with_rank_0_feature(self):
# price has 1 dimension in dense_features
price = fc.numeric_column('price')
features = {
'price': constant_op.constant(0),
}
self.assertEqual(0, features['price'].shape.ndims)
# Static rank 0 should fail
with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
fc.DenseFeatures([price])(features)
# Dynamic rank 0 should fail
features = {
'price': array_ops.placeholder(dtypes.float32),
}
net = fc.DenseFeatures([price])(features)
self.assertEqual(1, net.shape[1])
with _initialized_session() as sess:
with self.assertRaisesOpError('Feature .* cannot have rank 0'):
sess.run(net, feed_dict={features['price']: np.array(1)})
class InputLayerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_retrieving_input(self):
features = {'a': [0.]}
input_layer = fc_old.InputLayer(fc.numeric_column('a'))
inputs = self.evaluate(input_layer(features))
self.assertAllClose([[0.]], inputs)
def test_reuses_variables(self):
with context.eager_mode():
sparse_input = sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (2, 0)),
values=(0, 1, 2),
dense_shape=(3, 3))
# Create feature columns (categorical and embedding).
categorical_column = fc.categorical_column_with_identity(
key='a', num_buckets=3)
embedding_dimension = 2
def _embedding_column_initializer(shape, dtype, partition_info):
del shape # unused
del dtype # unused
del partition_info # unused
embedding_values = (
(1, 0), # id 0
(0, 1), # id 1
(1, 1)) # id 2
return embedding_values
embedding_column = fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_embedding_column_initializer)
input_layer = fc_old.InputLayer([embedding_column])
features = {'a': sparse_input}
inputs = input_layer(features)
variables = input_layer.variables
# Sanity check: test that the inputs are correct.
self.assertAllEqual([[1, 0], [0, 1], [1, 1]], inputs)
# Check that only one variable was created.
self.assertEqual(1, len(variables))
# Check that invoking input_layer on the same features does not create
# additional variables
_ = input_layer(features)
self.assertEqual(1, len(variables))
self.assertEqual(variables[0], input_layer.variables[0])
def test_feature_column_input_layer_gradient(self):
with context.eager_mode():
sparse_input = sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (2, 0)),
values=(0, 1, 2),
dense_shape=(3, 3))
# Create feature columns (categorical and embedding).
categorical_column = fc.categorical_column_with_identity(
key='a', num_buckets=3)
embedding_dimension = 2
def _embedding_column_initializer(shape, dtype, partition_info):
del shape # unused
del dtype # unused
del partition_info # unused
embedding_values = (
(1, 0), # id 0
(0, 1), # id 1
(1, 1)) # id 2
return embedding_values
embedding_column = fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_embedding_column_initializer)
input_layer = fc_old.InputLayer([embedding_column])
features = {'a': sparse_input}
def scale_matrix():
matrix = input_layer(features)
return 2 * matrix
# Sanity check: Verify that scale_matrix returns the correct output.
self.assertAllEqual([[2, 0], [0, 2], [2, 2]], scale_matrix())
# Check that the returned gradient is correct.
grad_function = backprop.implicit_grad(scale_matrix)
grads_and_vars = grad_function()
indexed_slice = grads_and_vars[0][0]
gradient = grads_and_vars[0][0].values
self.assertAllEqual([0, 1, 2], indexed_slice.indices)
self.assertAllEqual([[2, 2], [2, 2], [2, 2]], gradient)
class FunctionalInputLayerTest(test.TestCase):
def test_raises_if_empty_feature_columns(self):
with self.assertRaisesRegexp(ValueError,
'feature_columns must not be empty'):
fc_old.input_layer(features={}, feature_columns=[])
def test_should_be_dense_column(self):
with self.assertRaisesRegexp(ValueError, 'must be a _DenseColumn'):
fc_old.input_layer(
features={'a': [[0]]},
feature_columns=[
fc.categorical_column_with_hash_bucket('wire_cast', 4)
])
def test_does_not_support_dict_columns(self):
with self.assertRaisesRegexp(
ValueError, 'Expected feature_columns to be iterable, found dict.'):
fc_old.input_layer(
features={'a': [[0]]}, feature_columns={'a': fc.numeric_column('a')})
def test_bare_column(self):
with ops.Graph().as_default():
      features = {'a': [0.]}
net = fc_old.input_layer(features, fc.numeric_column('a'))
with _initialized_session():
self.assertAllClose([[0.]], self.evaluate(net))
def test_column_generator(self):
with ops.Graph().as_default():
      features = {'a': [0.], 'b': [1.]}
columns = (fc.numeric_column(key) for key in features)
net = fc_old.input_layer(features, columns)
with _initialized_session():
self.assertAllClose([[0., 1.]], self.evaluate(net))
def test_raises_if_duplicate_name(self):
with self.assertRaisesRegexp(
ValueError, 'Duplicate feature column name found for columns'):
fc_old.input_layer(
features={'a': [[0]]},
feature_columns=[fc.numeric_column('a'),
fc.numeric_column('a')])
def test_one_column(self):
price = fc.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
net = fc_old.input_layer(features, [price])
with _initialized_session():
self.assertAllClose([[1.], [5.]], self.evaluate(net))
def test_multi_dimension(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1., 2.], [5., 6.]]}
net = fc_old.input_layer(features, [price])
with _initialized_session():
self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))
def test_raises_if_shape_mismatch(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
with self.assertRaisesRegexp(
Exception,
r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
fc_old.input_layer(features, [price])
def test_reshaping(self):
price = fc.numeric_column('price', shape=[1, 2])
with ops.Graph().as_default():
features = {'price': [[[1., 2.]], [[5., 6.]]]}
net = fc_old.input_layer(features, [price])
with _initialized_session():
self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))
def test_multi_column(self):
price1 = fc.numeric_column('price1', shape=2)
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
net = fc_old.input_layer(features, [price1, price2])
with _initialized_session():
self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], self.evaluate(net))
def test_fills_cols_to_vars(self):
    # Provide three _DenseColumns to input_layer: a _NumericColumn, a
    # _BucketizedColumn, and an _EmbeddingColumn. Only the _EmbeddingColumn
    # creates a Variable.
price1 = fc.numeric_column('price1')
dense_feature = fc.numeric_column('dense_feature')
dense_feature_bucketized = fc.bucketized_column(
dense_feature, boundaries=[0.])
some_sparse_column = fc.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5)
some_embedding_column = fc.embedding_column(
some_sparse_column, dimension=10)
with ops.Graph().as_default():
features = {
'price1': [[3.], [4.]],
'dense_feature': [[-1.], [4.]],
'sparse_feature': [['a'], ['x']],
}
cols_to_vars = {}
all_cols = [price1, dense_feature_bucketized, some_embedding_column]
fc_old.input_layer(features, all_cols, cols_to_vars=cols_to_vars)
self.assertItemsEqual(list(cols_to_vars.keys()), all_cols)
self.assertEqual(0, len(cols_to_vars[price1]))
self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized]))
self.assertEqual(1, len(cols_to_vars[some_embedding_column]))
self.assertIsInstance(cols_to_vars[some_embedding_column][0],
variables_lib.Variable)
self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape, [5, 10])
def test_fills_cols_to_vars_shared_embedding(self):
    # Provide five DenseColumns to input_layer: a NumericColumn, a
    # BucketizedColumn, an EmbeddingColumn, and two SharedEmbeddingColumns.
    # The EmbeddingColumn creates a Variable and the two SharedEmbeddingColumns
    # share one variable.
price1 = fc.numeric_column('price1')
dense_feature = fc.numeric_column('dense_feature')
dense_feature_bucketized = fc.bucketized_column(
dense_feature, boundaries=[0.])
some_sparse_column = fc.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5)
some_embedding_column = fc.embedding_column(
some_sparse_column, dimension=10)
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
shared_embedding_a, shared_embedding_b = fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b], dimension=2)
with ops.Graph().as_default():
features = {
'price1': [[3.], [4.]],
'dense_feature': [[-1.], [4.]],
'sparse_feature': [['a'], ['x']],
'aaa':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
cols_to_vars = {}
all_cols = [
price1, dense_feature_bucketized, some_embedding_column,
shared_embedding_a, shared_embedding_b
]
fc_old.input_layer(features, all_cols, cols_to_vars=cols_to_vars)
self.assertItemsEqual(list(cols_to_vars.keys()), all_cols)
self.assertEqual(0, len(cols_to_vars[price1]))
self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized]))
self.assertEqual(1, len(cols_to_vars[some_embedding_column]))
self.assertEqual(1, len(cols_to_vars[shared_embedding_a]))
# This is a bug in the current implementation and should be fixed in the
# new one.
self.assertEqual(0, len(cols_to_vars[shared_embedding_b]))
self.assertIsInstance(cols_to_vars[some_embedding_column][0],
variables_lib.Variable)
self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape, [5, 10])
self.assertIsInstance(cols_to_vars[shared_embedding_a][0],
variables_lib.Variable)
self.assertAllEqual(cols_to_vars[shared_embedding_a][0].shape, [3, 2])
def test_fills_cols_to_vars_partitioned_variables(self):
price1 = fc.numeric_column('price1')
dense_feature = fc.numeric_column('dense_feature')
dense_feature_bucketized = fc.bucketized_column(
dense_feature, boundaries=[0.])
some_sparse_column = fc.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5)
some_embedding_column = fc.embedding_column(
some_sparse_column, dimension=10)
with ops.Graph().as_default():
features = {
'price1': [[3.], [4.]],
'dense_feature': [[-1.], [4.]],
'sparse_feature': [['a'], ['x']],
}
cols_to_vars = {}
all_cols = [price1, dense_feature_bucketized, some_embedding_column]
with variable_scope.variable_scope(
'input_from_feature_columns',
partitioner=partitioned_variables.fixed_size_partitioner(3, axis=0)):
fc_old.input_layer(features, all_cols, cols_to_vars=cols_to_vars)
self.assertItemsEqual(list(cols_to_vars.keys()), all_cols)
self.assertEqual(0, len(cols_to_vars[price1]))
self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized]))
self.assertEqual(3, len(cols_to_vars[some_embedding_column]))
self.assertEqual(
'input_from_feature_columns/input_layer/sparse_feature_embedding/'
'embedding_weights/part_0:0',
cols_to_vars[some_embedding_column][0].name)
self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape, [2, 10])
self.assertAllEqual(cols_to_vars[some_embedding_column][1].shape, [2, 10])
self.assertAllEqual(cols_to_vars[some_embedding_column][2].shape, [1, 10])
def test_column_order(self):
price_a = fc.numeric_column('price_a')
price_b = fc.numeric_column('price_b')
with ops.Graph().as_default():
features = {
'price_a': [[1.]],
'price_b': [[3.]],
}
net1 = fc_old.input_layer(features, [price_a, price_b])
net2 = fc_old.input_layer(features, [price_b, price_a])
with _initialized_session():
self.assertAllClose([[1., 3.]], self.evaluate(net1))
self.assertAllClose([[1., 3.]], self.evaluate(net2))
def test_fails_for_categorical_column(self):
animal = fc.categorical_column_with_identity('animal', num_buckets=4)
with ops.Graph().as_default():
features = {
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
}
with self.assertRaisesRegexp(Exception, 'must be a _DenseColumn'):
fc_old.input_layer(features, [animal])
def test_static_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': [[1.], [5.], [7.]], # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
      with self.assertRaisesRegexp(
          ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):
fc_old.input_layer(features, [price1, price2])
def test_subset_of_static_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
price3 = fc.numeric_column('price3')
with ops.Graph().as_default():
features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 3 (dynamic)
'price2': [[3.], [4.]], # batchsize = 2
'price3': [[3.], [4.], [5.]] # batchsize = 3
}
      with self.assertRaisesRegexp(
          ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):
fc_old.input_layer(features, [price1, price2, price3])
def test_runtime_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
net = fc_old.input_layer(features, [price1, price2])
with _initialized_session() as sess:
with self.assertRaisesRegexp(errors.OpError,
'Dimensions of inputs should match'):
sess.run(net, feed_dict={features['price1']: [[1.], [5.], [7.]]})
def test_runtime_batch_size_matches(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
}
net = fc_old.input_layer(features, [price1, price2])
with _initialized_session() as sess:
sess.run(
net,
feed_dict={
features['price1']: [[1.], [5.]],
features['price2']: [[1.], [5.]],
})
def test_multiple_layers_with_same_embedding_column(self):
some_sparse_column = fc.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5)
some_embedding_column = fc.embedding_column(
some_sparse_column, dimension=10)
with ops.Graph().as_default():
features = {
'sparse_feature': [['a'], ['x']],
}
all_cols = [some_embedding_column]
fc_old.input_layer(features, all_cols)
fc_old.input_layer(features, all_cols)
# Make sure that 2 variables get created in this case.
self.assertEqual(2, len(
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
expected_var_names = [
'input_layer/sparse_feature_embedding/embedding_weights:0',
'input_layer_1/sparse_feature_embedding/embedding_weights:0'
]
self.assertItemsEqual(
expected_var_names,
[v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
def test_with_1d_sparse_tensor(self):
embedding_values = (
(1., 2., 3., 4., 5.), # id 0
(6., 7., 8., 9., 10.), # id 1
(11., 12., 13., 14., 15.) # id 2
)
def _initializer(shape, dtype, partition_info):
del shape, dtype, partition_info
return embedding_values
# price has 1 dimension in input_layer
price = fc.numeric_column('price')
# one_hot_body_style has 3 dims in input_layer.
body_style = fc.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
one_hot_body_style = fc.indicator_column(body_style)
    # embedded_country has 5 dims in input_layer.
country = fc.categorical_column_with_vocabulary_list(
'country', vocabulary_list=['US', 'JP', 'CA'])
embedded_country = fc.embedding_column(
country, dimension=5, initializer=_initializer)
    # Provides 1-dim dense and sparse tensors.
features = {
'price':
constant_op.constant([
11.,
12.,
]),
'body-style':
sparse_tensor.SparseTensor(
indices=((0,), (1,)),
values=('sedan', 'hardtop'),
dense_shape=(2,)),
# This is dense tensor for the categorical_column.
'country':
constant_op.constant(['CA', 'US']),
}
self.assertEqual(1, features['price'].shape.ndims)
self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
self.assertEqual(1, features['country'].shape.ndims)
net = fc_old.input_layer(features,
[price, one_hot_body_style, embedded_country])
self.assertEqual(1 + 3 + 5, net.shape[1])
with _initialized_session() as sess:
      # Each row is formed by concatenating `one_hot_body_style`,
      # `embedded_country`, and `price` in order.
self.assertAllEqual([[0., 0., 1., 11., 12., 13., 14., 15., 11.],
[1., 0., 0., 1., 2., 3., 4., 5., 12.]],
sess.run(net))
def test_with_1d_unknown_shape_sparse_tensor(self):
embedding_values = (
(1., 2.), # id 0
(6., 7.), # id 1
(11., 12.) # id 2
)
def _initializer(shape, dtype, partition_info):
del shape, dtype, partition_info
return embedding_values
# price has 1 dimension in input_layer
price = fc.numeric_column('price')
# one_hot_body_style has 3 dims in input_layer.
body_style = fc.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
one_hot_body_style = fc.indicator_column(body_style)
    # embedded_country has 2 dims in input_layer.
country = fc.categorical_column_with_vocabulary_list(
'country', vocabulary_list=['US', 'JP', 'CA'])
embedded_country = fc.embedding_column(
country, dimension=2, initializer=_initializer)
# Provides 1-dim tensor and dense tensor.
features = {
'price': array_ops.placeholder(dtypes.float32),
'body-style': array_ops.sparse_placeholder(dtypes.string),
# This is dense tensor for the categorical_column.
'country': array_ops.placeholder(dtypes.string),
}
self.assertIsNone(features['price'].shape.ndims)
self.assertIsNone(features['body-style'].get_shape().ndims)
self.assertIsNone(features['country'].shape.ndims)
price_data = np.array([11., 12.])
body_style_data = sparse_tensor.SparseTensorValue(
indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,))
country_data = np.array([['US'], ['CA']])
net = fc_old.input_layer(features,
[price, one_hot_body_style, embedded_country])
self.assertEqual(1 + 3 + 2, net.shape[1])
with _initialized_session() as sess:
      # Each row is formed by concatenating `one_hot_body_style`,
      # `embedded_country`, and `price` in order.
self.assertAllEqual(
[[0., 0., 1., 1., 2., 11.], [1., 0., 0., 11., 12., 12.]],
sess.run(
net,
feed_dict={
features['price']: price_data,
features['body-style']: body_style_data,
features['country']: country_data
}))
def test_with_rank_0_feature(self):
# price has 1 dimension in input_layer
price = fc.numeric_column('price')
features = {
'price': constant_op.constant(0),
}
self.assertEqual(0, features['price'].shape.ndims)
# Static rank 0 should fail
with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
fc_old.input_layer(features, [price])
# Dynamic rank 0 should fail
features = {
'price': array_ops.placeholder(dtypes.float32),
}
net = fc_old.input_layer(features, [price])
self.assertEqual(1, net.shape[1])
with _initialized_session() as sess:
with self.assertRaisesOpError('Feature .* cannot have rank 0'):
sess.run(net, feed_dict={features['price']: np.array(1)})
class MakeParseExampleSpecTest(test.TestCase):
class _TestFeatureColumn(BaseFeatureColumnForTests,
collections.namedtuple('_TestFeatureColumn',
('parse_spec'))):
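    # Note: ('parse_spec') is just the string 'parse_spec' (not a tuple);
    # namedtuple accepts it as a single field name.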
@property
def _is_v2_column(self):
return True
@property
def name(self):
return '_TestFeatureColumn'
def transform_feature(self, transformation_cache, state_manager):
pass
def _transform_feature(self, inputs):
pass
@property
def parse_example_spec(self):
return self.parse_spec
@property
def _parse_example_spec(self):
return self.parse_spec
def test_no_feature_columns(self):
actual = fc.make_parse_example_spec_v2([])
self.assertDictEqual({}, actual)
def test_invalid_type(self):
key1 = 'key1'
parse_spec1 = parsing_ops.FixedLenFeature(
shape=(2,), dtype=dtypes.float32, default_value=0.)
with self.assertRaisesRegexp(
ValueError,
'All feature_columns must be FeatureColumn instances.*invalid_column'):
fc.make_parse_example_spec_v2((self._TestFeatureColumn({
key1: parse_spec1
}), 'invalid_column'))
def test_one_feature_column(self):
key1 = 'key1'
parse_spec1 = parsing_ops.FixedLenFeature(
shape=(2,), dtype=dtypes.float32, default_value=0.)
actual = fc.make_parse_example_spec_v2((self._TestFeatureColumn({
key1: parse_spec1
}),))
self.assertDictEqual({key1: parse_spec1}, actual)
def test_two_feature_columns(self):
key1 = 'key1'
parse_spec1 = parsing_ops.FixedLenFeature(
shape=(2,), dtype=dtypes.float32, default_value=0.)
key2 = 'key2'
parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
actual = fc.make_parse_example_spec_v2((self._TestFeatureColumn({
key1: parse_spec1
}), self._TestFeatureColumn({
key2: parse_spec2
})))
self.assertDictEqual({key1: parse_spec1, key2: parse_spec2}, actual)
def test_equal_keys_different_parse_spec(self):
key1 = 'key1'
parse_spec1 = parsing_ops.FixedLenFeature(
shape=(2,), dtype=dtypes.float32, default_value=0.)
parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
with self.assertRaisesRegexp(
ValueError,
'feature_columns contain different parse_spec for key key1'):
fc.make_parse_example_spec_v2((self._TestFeatureColumn({
key1: parse_spec1
}), self._TestFeatureColumn({
key1: parse_spec2
})))
def test_equal_keys_equal_parse_spec(self):
key1 = 'key1'
parse_spec1 = parsing_ops.FixedLenFeature(
shape=(2,), dtype=dtypes.float32, default_value=0.)
actual = fc.make_parse_example_spec_v2((self._TestFeatureColumn({
key1: parse_spec1
}), self._TestFeatureColumn({
key1: parse_spec1
})))
self.assertDictEqual({key1: parse_spec1}, actual)
def test_multiple_features_dict(self):
"""parse_spc for one column is a dict with length > 1."""
key1 = 'key1'
parse_spec1 = parsing_ops.FixedLenFeature(
shape=(2,), dtype=dtypes.float32, default_value=0.)
key2 = 'key2'
parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
key3 = 'key3'
parse_spec3 = parsing_ops.VarLenFeature(dtype=dtypes.int32)
actual = fc.make_parse_example_spec_v2((self._TestFeatureColumn({
key1: parse_spec1
}), self._TestFeatureColumn({
key2: parse_spec2,
key3: parse_spec3
})))
self.assertDictEqual(
{key1: parse_spec1, key2: parse_spec2, key3: parse_spec3}, actual)
def _assert_sparse_tensor_value(test_case, expected, actual):
test_case.assertEqual(np.int64, np.array(actual.indices).dtype)
test_case.assertAllEqual(expected.indices, actual.indices)
test_case.assertEqual(
np.array(expected.values).dtype, np.array(actual.values).dtype)
test_case.assertAllEqual(expected.values, actual.values)
test_case.assertEqual(np.int64, np.array(actual.dense_shape).dtype)
test_case.assertAllEqual(expected.dense_shape, actual.dense_shape)
class VocabularyFileCategoricalColumnTest(test.TestCase):
def setUp(self):
super(VocabularyFileCategoricalColumnTest, self).setUp()
# Contains ints, Golden State Warriors jersey numbers: 30, 35, 11, 23, 22
self._warriors_vocabulary_file_name = test.test_src_dir_path(
'python/feature_column/testdata/warriors_vocabulary.txt')
self._warriors_vocabulary_size = 5
# Contains strings, character names from 'The Wire': omar, stringer, marlo
self._wire_vocabulary_file_name = test.test_src_dir_path(
'python/feature_column/testdata/wire_vocabulary.txt')
self._wire_vocabulary_size = 3
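    # Per the comments above, ids follow file order: wire file gives
    # 'omar' -> 0, 'stringer' -> 1, 'marlo' -> 2; warriors file gives
    # 30 -> 0, 35 -> 1, 11 -> 2, 23 -> 3, 22 -> 4. The assertions below rely
    # on this ordering.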
def test_defaults(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file='path_to_file', vocabulary_size=3)
self.assertEqual('aaa', column.name)
self.assertEqual('aaa', column.key)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.string)
}, column.parse_example_spec)
self.assertTrue(column._is_v2_column)
def test_key_should_be_string(self):
with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
fc.categorical_column_with_vocabulary_file(
key=('aaa',), vocabulary_file='path_to_file', vocabulary_size=3)
def test_all_constructor_args(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file='path_to_file',
vocabulary_size=3,
num_oov_buckets=4,
dtype=dtypes.int32)
self.assertEqual(7, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int32)
}, column.parse_example_spec)
def test_deep_copy(self):
original = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file='path_to_file',
vocabulary_size=3,
num_oov_buckets=4,
dtype=dtypes.int32)
for column in (original, copy.deepcopy(original)):
self.assertEqual('aaa', column.name)
self.assertEqual(7, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int32)
}, column.parse_example_spec)
def test_vocabulary_file_none(self):
with self.assertRaisesRegexp(ValueError, 'Missing vocabulary_file'):
fc.categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file=None, vocabulary_size=3)
def test_vocabulary_file_empty_string(self):
with self.assertRaisesRegexp(ValueError, 'Missing vocabulary_file'):
fc.categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file='', vocabulary_size=3)
def test_invalid_vocabulary_file(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file='file_does_not_exist', vocabulary_size=10)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
with self.assertRaisesRegexp(errors.OpError, 'file_does_not_exist'):
with self.cached_session():
lookup_ops.tables_initializer().run()
def test_invalid_vocabulary_size(self):
with self.assertRaisesRegexp(ValueError, 'Invalid vocabulary_size'):
fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=-1)
with self.assertRaisesRegexp(ValueError, 'Invalid vocabulary_size'):
fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=0)
def test_too_large_vocabulary_size(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size + 1)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
with self.assertRaisesRegexp(errors.OpError, 'Invalid vocab_size'):
with self.cached_session():
lookup_ops.tables_initializer().run()
def test_invalid_num_oov_buckets(self):
with self.assertRaisesRegexp(ValueError, 'Invalid num_oov_buckets'):
fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file='path',
vocabulary_size=3,
num_oov_buckets=-1)
def test_invalid_dtype(self):
with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file='path',
vocabulary_size=3,
dtype=dtypes.float64)
def test_invalid_buckets_and_default_value(self):
with self.assertRaisesRegexp(
ValueError, 'both num_oov_buckets and default_value'):
fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size,
num_oov_buckets=100,
default_value=2)
def test_invalid_input_dtype_int32(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size,
dtype=dtypes.string)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(12, 24, 36),
dense_shape=(2, 2))
with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
def test_invalid_input_dtype_string(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._warriors_vocabulary_file_name,
vocabulary_size=self._warriors_vocabulary_size,
dtype=dtypes.int32)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('omar', 'stringer', 'marlo'),
dense_shape=(2, 2))
with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
def test_parse_example(self):
a = fc.categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file='path_to_file', vocabulary_size=3)
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer']))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec_v2([a]))
self.assertIn('aaa', features)
with self.cached_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'omar', b'stringer'], dtype=np.object_),
dense_shape=[1, 2]),
features['aaa'].eval())
def test_get_sparse_tensors(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
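    # Expected lookup: 'marlo' -> 2 and 'omar' -> 0 (file order), while the
    # out-of-vocabulary 'skywalker' maps to the default id -1.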
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, -1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_none_vocabulary_size(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file=self._wire_vocabulary_file_name)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array(
(2, -1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_transform_feature(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_tensor = fc._transform_features_v2({
'aaa': inputs
}, [column], None)[column]
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, -1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape), self.evaluate(id_tensor))
def test_get_sparse_tensors_dense_input(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size)
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': (('marlo', ''), ('skywalker', 'omar'))
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=np.array((2, -1, 0), dtype=np.int64),
dense_shape=(2, 2)),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_default_value_in_vocabulary(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size,
default_value=2)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, 2, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_with_oov_buckets(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size,
num_oov_buckets=100)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (1, 2)),
values=('marlo', 'skywalker', 'omar', 'heisenberg'),
dense_shape=(2, 3))
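    # In-vocabulary values keep their file-order ids ('marlo' -> 2,
    # 'omar' -> 0); out-of-vocabulary values ('skywalker', 'heisenberg') are
    # hashed into one of the 100 extra buckets, i.e. ids in [3, 103). The
    # specific ids asserted below (33 and 62) come from that hash.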
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, 33, 0, 62), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_small_vocabulary_size(self):
    # 'marlo' is the last entry in our vocabulary file, so by setting
    # `vocabulary_size` to 1 less than the number of entries in the file, we
    # take 'marlo' out of the vocabulary.
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size - 1)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((-1, -1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_int32(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._warriors_vocabulary_file_name,
vocabulary_size=self._warriors_vocabulary_size,
dtype=dtypes.int32)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (2, 2)),
values=(11, 100, 30, 22),
dense_shape=(3, 3))
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, -1, 0, 4), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_int32_dense_input(self):
default_value = -100
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._warriors_vocabulary_file_name,
vocabulary_size=self._warriors_vocabulary_size,
dtype=dtypes.int32,
default_value=default_value)
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': ((11, -1, -1), (100, 30, -1), (-1, -1, 22))
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (2, 2)),
values=np.array((2, default_value, 0, 4), dtype=np.int64),
dense_shape=(3, 3)),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_int32_with_oov_buckets(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._warriors_vocabulary_file_name,
vocabulary_size=self._warriors_vocabulary_size,
dtype=dtypes.int32,
num_oov_buckets=100)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (2, 2)),
values=(11, 100, 30, 22),
dense_shape=(3, 3))
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, 60, 0, 4), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_linear_model(self):
wire_column = fc.categorical_column_with_vocabulary_file(
key='wire',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size,
num_oov_buckets=1)
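    # num_buckets = vocabulary_size (3) + num_oov_buckets (1) = 4.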
self.assertEqual(4, wire_column.num_buckets)
with ops.Graph().as_default():
model = fc.LinearModel((wire_column,))
predictions = model({
wire_column.name:
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
})
wire_var, bias = model.variables
with _initialized_session():
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
self.evaluate(wire_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
# 'marlo' -> 2: wire_var[2] = 3
# 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
self.assertAllClose(((3.,), (5.,)), self.evaluate(predictions))
def test_old_linear_model(self):
wire_column = fc.categorical_column_with_vocabulary_file(
key='wire',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size,
num_oov_buckets=1)
self.assertEqual(4, wire_column.num_buckets)
with ops.Graph().as_default():
predictions = fc_old.linear_model({
wire_column.name:
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
}, (wire_column,))
bias = get_linear_model_bias()
wire_var = get_linear_model_column_var(wire_column)
with _initialized_session():
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
self.evaluate(wire_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
# 'marlo' -> 2: wire_var[2] = 3
# 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
self.assertAllClose(((3.,), (5.,)), self.evaluate(predictions))
def test_serialization(self):
wire_column = fc.categorical_column_with_vocabulary_file(
key='wire',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size,
num_oov_buckets=1)
self.assertEqual(['wire'], wire_column.parents)
config = wire_column._get_config()
self.assertEqual({
'default_value': -1,
'dtype': 'string',
'key': 'wire',
'num_oov_buckets': 1,
'vocabulary_file': self._wire_vocabulary_file_name,
'vocabulary_size': 3
}, config)
self.assertEqual(wire_column,
fc.VocabularyFileCategoricalColumn._from_config(config))
class VocabularyListCategoricalColumnTest(test.TestCase):
def test_defaults_string(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
self.assertEqual('aaa', column.name)
self.assertEqual('aaa', column.key)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.string)
}, column.parse_example_spec)
self.assertTrue(column._is_v2_column)
def test_key_should_be_string(self):
with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
fc.categorical_column_with_vocabulary_list(
key=('aaa',), vocabulary_list=('omar', 'stringer', 'marlo'))
def test_defaults_int(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36))
self.assertEqual('aaa', column.name)
self.assertEqual('aaa', column.key)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, column.parse_example_spec)
def test_all_constructor_args(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=(12, 24, 36),
dtype=dtypes.int32,
default_value=-99)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int32)
}, column.parse_example_spec)
def test_deep_copy(self):
original = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.int32)
for column in (original, copy.deepcopy(original)):
self.assertEqual('aaa', column.name)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int32)
}, column.parse_example_spec)
def test_invalid_dtype(self):
with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'),
dtype=dtypes.float32)
def test_invalid_mapping_dtype(self):
with self.assertRaisesRegexp(
ValueError, r'vocabulary dtype must be string or integer'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12., 24., 36.))
def test_mismatched_int_dtype(self):
with self.assertRaisesRegexp(
ValueError, r'dtype.*and vocabulary dtype.*do not match'):
fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'),
dtype=dtypes.int32)
def test_mismatched_string_dtype(self):
with self.assertRaisesRegexp(
ValueError, r'dtype.*and vocabulary dtype.*do not match'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.string)
def test_none_mapping(self):
with self.assertRaisesRegexp(
ValueError, r'vocabulary_list.*must be non-empty'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=None)
def test_empty_mapping(self):
with self.assertRaisesRegexp(
ValueError, r'vocabulary_list.*must be non-empty'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=tuple([]))
def test_duplicate_mapping(self):
with self.assertRaisesRegexp(ValueError, 'Duplicate keys'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 12))
def test_invalid_num_oov_buckets(self):
with self.assertRaisesRegexp(ValueError, 'Invalid num_oov_buckets'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36), num_oov_buckets=-1)
def test_invalid_buckets_and_default_value(self):
with self.assertRaisesRegexp(
ValueError, 'both num_oov_buckets and default_value'):
fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=(12, 24, 36),
num_oov_buckets=100,
default_value=2)
def test_invalid_input_dtype_int32(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(12, 24, 36),
dense_shape=(2, 2))
with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
def test_invalid_input_dtype_string(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36))
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('omar', 'stringer', 'marlo'),
dense_shape=(2, 2))
with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
def test_parse_example_string(self):
a = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer']))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec_v2([a]))
self.assertIn('aaa', features)
with self.cached_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'omar', b'stringer'], dtype=np.object_),
dense_shape=[1, 2]),
features['aaa'].eval())
def test_parse_example_int(self):
a = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(11, 21, 31))
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[11, 21]))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec_v2([a]))
self.assertIn('aaa', features)
with self.cached_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=[11, 21],
dense_shape=[1, 2]),
features['aaa'].eval())
def test_get_sparse_tensors(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, -1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_transform_feature(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_tensor = fc._transform_features_v2({
'aaa': inputs
}, [column], None)[column]
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, -1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape), self.evaluate(id_tensor))
def test_get_sparse_tensors_dense_input(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': (('marlo', ''), ('skywalker', 'omar'))
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=np.array((2, -1, 0), dtype=np.int64),
dense_shape=(2, 2)),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_default_value_in_vocabulary(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'),
default_value=2)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, 2, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_with_oov_buckets(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'),
num_oov_buckets=100)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (1, 2)),
values=('marlo', 'skywalker', 'omar', 'heisenberg'),
dense_shape=(2, 3))
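    # 'marlo' -> 2 and 'omar' -> 0 come from the vocabulary list; the
    # out-of-vocabulary values are hashed into the 100 extra buckets
    # (ids in [3, 103)), which is where the 33 and 62 below come from.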
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, 33, 0, 62), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_int32(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
dtype=dtypes.int32)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (2, 2)),
values=np.array((11, 100, 30, 22), dtype=np.int32),
dense_shape=(3, 3))
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, -1, 0, 4), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_int32_dense_input(self):
default_value = -100
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
dtype=dtypes.int32,
default_value=default_value)
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa':
np.array(((11, -1, -1), (100, 30, -1), (-1, -1, 22)),
dtype=np.int32)
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (2, 2)),
values=np.array((2, default_value, 0, 4), dtype=np.int64),
dense_shape=(3, 3)),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_int32_with_oov_buckets(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
dtype=dtypes.int32,
num_oov_buckets=100)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (2, 2)),
values=(11, 100, 30, 22),
dense_shape=(3, 3))
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, 60, 0, 4), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_linear_model(self):
wire_column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'),
num_oov_buckets=1)
self.assertEqual(4, wire_column.num_buckets)
with ops.Graph().as_default():
model = fc.LinearModel((wire_column,))
predictions = model({
wire_column.name:
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
})
wire_var, bias = model.variables
with _initialized_session():
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
self.evaluate(wire_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
# 'marlo' -> 2: wire_var[2] = 3
# 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
self.assertAllClose(((3.,), (5.,)), self.evaluate(predictions))
def test_old_linear_model(self):
wire_column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'),
num_oov_buckets=1)
self.assertEqual(4, wire_column.num_buckets)
with ops.Graph().as_default():
predictions = fc_old.linear_model({
wire_column.name:
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
}, (wire_column,))
bias = get_linear_model_bias()
wire_var = get_linear_model_column_var(wire_column)
with _initialized_session():
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
self.evaluate(wire_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
# 'marlo' -> 2: wire_var[2] = 3
# 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
self.assertAllClose(((3.,), (5.,)), self.evaluate(predictions))
def test_serialization(self):
wire_column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'),
num_oov_buckets=1)
self.assertEqual(['aaa'], wire_column.parents)
config = wire_column._get_config()
self.assertEqual({
'default_value': -1,
'dtype': 'string',
'key': 'aaa',
'num_oov_buckets': 1,
'vocabulary_list': ('omar', 'stringer', 'marlo')
}, config)
self.assertEqual(wire_column,
fc.VocabularyListCategoricalColumn._from_config(config))
class IdentityCategoricalColumnTest(test.TestCase):
def test_constructor(self):
column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
self.assertEqual('aaa', column.name)
self.assertEqual('aaa', column.key)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, column.parse_example_spec)
self.assertTrue(column._is_v2_column)
def test_key_should_be_string(self):
with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
fc.categorical_column_with_identity(key=('aaa',), num_buckets=3)
def test_deep_copy(self):
original = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
for column in (original, copy.deepcopy(original)):
self.assertEqual('aaa', column.name)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, column.parse_example_spec)
def test_invalid_num_buckets_zero(self):
with self.assertRaisesRegexp(ValueError, 'num_buckets 0 < 1'):
fc.categorical_column_with_identity(key='aaa', num_buckets=0)
def test_invalid_num_buckets_negative(self):
with self.assertRaisesRegexp(ValueError, 'num_buckets -1 < 1'):
fc.categorical_column_with_identity(key='aaa', num_buckets=-1)
def test_invalid_default_value_too_small(self):
with self.assertRaisesRegexp(ValueError, 'default_value -1 not in range'):
fc.categorical_column_with_identity(
key='aaa', num_buckets=3, default_value=-1)
def test_invalid_default_value_too_big(self):
with self.assertRaisesRegexp(ValueError, 'default_value 3 not in range'):
fc.categorical_column_with_identity(
key='aaa', num_buckets=3, default_value=3)
def test_invalid_input_dtype(self):
column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('omar', 'stringer', 'marlo'),
dense_shape=(2, 2))
with self.assertRaisesRegexp(ValueError, 'Invalid input, not integer'):
column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
def test_parse_example(self):
a = fc.categorical_column_with_identity(key='aaa', num_buckets=30)
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[11, 21]))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec_v2([a]))
self.assertIn('aaa', features)
with self.cached_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([11, 21], dtype=np.int64),
dense_shape=[1, 2]),
features['aaa'].eval())
def test_get_sparse_tensors(self):
column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2))
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((0, 1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_transform_feature(self):
column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2))
id_tensor = fc._transform_features_v2({
'aaa': inputs
}, [column], None)[column]
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((0, 1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape), self.evaluate(id_tensor))
def test_get_sparse_tensors_dense_input(self):
column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': ((0, -1), (1, 0))
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=np.array((0, 1, 0), dtype=np.int64),
dense_shape=(2, 2)),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_with_inputs_too_small(self):
column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, -1, 0),
dense_shape=(2, 2))
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
with self.assertRaisesRegexp(
errors.OpError, 'assert_greater_or_equal_0'):
id_weight_pair.id_tensor.eval()
def test_get_sparse_tensors_with_inputs_too_big(self):
column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 99, 0),
dense_shape=(2, 2))
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
with self.assertRaisesRegexp(
errors.OpError, 'assert_less_than_num_buckets'):
id_weight_pair.id_tensor.eval()
def test_get_sparse_tensors_with_default_value(self):
column = fc.categorical_column_with_identity(
key='aaa', num_buckets=4, default_value=3)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, -1, 99),
dense_shape=(2, 2))
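    # Values outside [0, num_buckets) (-1 and 99 here) are replaced with
    # default_value=3, so the expected ids are (1, 3, 3).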
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((1, 3, 3), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_with_default_value_and_placeholder_inputs(self):
column = fc.categorical_column_with_identity(
key='aaa', num_buckets=4, default_value=3)
input_indices = array_ops.placeholder(dtype=dtypes.int64)
input_values = array_ops.placeholder(dtype=dtypes.int32)
input_shape = array_ops.placeholder(dtype=dtypes.int64)
inputs = sparse_tensor.SparseTensorValue(
indices=input_indices,
values=input_values,
dense_shape=input_shape)
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=np.array(((0, 0), (1, 0), (1, 1)), dtype=np.int64),
values=np.array((1, 3, 3), dtype=np.int64),
dense_shape=np.array((2, 2), dtype=np.int64)),
id_weight_pair.id_tensor.eval(feed_dict={
input_indices: ((0, 0), (1, 0), (1, 1)),
input_values: (1, -1, 99),
input_shape: (2, 2),
}))
def test_linear_model(self):
column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
self.assertEqual(3, column.num_buckets)
with ops.Graph().as_default():
model = fc.LinearModel((column,))
predictions = model({
column.name:
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2))
})
weight_var, bias = model.variables
with _initialized_session():
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
weight_var.assign(((1.,), (2.,), (3.,))).eval()
# weight_var[0] = 1
# weight_var[2] + weight_var[1] = 3+2 = 5
self.assertAllClose(((1.,), (5.,)), self.evaluate(predictions))
def test_old_linear_model(self):
column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
self.assertEqual(3, column.num_buckets)
with ops.Graph().as_default():
predictions = fc_old.linear_model({
column.name:
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2))
}, (column,))
bias = get_linear_model_bias()
weight_var = get_linear_model_column_var(column)
with _initialized_session():
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
weight_var.assign(((1.,), (2.,), (3.,))).eval()
# weight_var[0] = 1
# weight_var[2] + weight_var[1] = 3+2 = 5
self.assertAllClose(((1.,), (5.,)), self.evaluate(predictions))
def test_serialization(self):
column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
self.assertEqual(['aaa'], column.parents)
config = column._get_config()
self.assertEqual({
'default_value': None,
'key': 'aaa',
'number_buckets': 3
}, config)
self.assertEqual(column, fc.IdentityCategoricalColumn._from_config(config))
class TransformFeaturesTest(test.TestCase):
  # Most transform tests live in the individual column tests above.
  # Here we only test the multi-column case and output tensor naming.
def transform_multi_column(self):
bucketized_price = fc.bucketized_column(
fc.numeric_column('price'), boundaries=[0, 2, 4, 6])
hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
with ops.Graph().as_default():
features = {
'price': [[-1.], [5.]],
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
}
transformed = fc._transform_features_v2(
features, [bucketized_price, hashed_sparse], None)
with _initialized_session():
self.assertIn(bucketized_price.name, transformed[bucketized_price].name)
self.assertAllEqual([[0], [3]], transformed[bucketized_price].eval())
self.assertIn(hashed_sparse.name, transformed[hashed_sparse].name)
self.assertAllEqual([6, 4, 1], transformed[hashed_sparse].values.eval())
def test_column_order(self):
"""When the column is both dense and sparse, uses sparse tensors."""
class _LoggerColumn(BaseFeatureColumnForTests):
def __init__(self, name):
self._name = name
@property
def _is_v2_column(self):
return True
@property
def name(self):
return self._name
def transform_feature(self, transformation_cache, state_manager):
self.call_order = call_logger['count']
call_logger['count'] += 1
return 'Anything'
@property
def parse_example_spec(self):
pass
with ops.Graph().as_default():
column1 = _LoggerColumn('1')
column2 = _LoggerColumn('2')
call_logger = {'count': 0}
fc._transform_features_v2({}, [column1, column2], None)
self.assertEqual(0, column1.call_order)
self.assertEqual(1, column2.call_order)
call_logger = {'count': 0}
fc._transform_features_v2({}, [column2, column1], None)
self.assertEqual(0, column1.call_order)
self.assertEqual(1, column2.call_order)
class IndicatorColumnTest(test.TestCase):
def test_indicator_column(self):
a = fc.categorical_column_with_hash_bucket('a', 4)
indicator_a = fc.indicator_column(a)
self.assertEqual(indicator_a.categorical_column.name, 'a')
self.assertEqual(indicator_a.name, 'a_indicator')
self.assertEqual(indicator_a.variable_shape, [1, 4])
self.assertTrue(indicator_a._is_v2_column)
b = fc_old._categorical_column_with_hash_bucket('b', hash_bucket_size=100)
indicator_b = fc.indicator_column(b)
self.assertEqual(indicator_b.categorical_column.name, 'b')
self.assertEqual(indicator_b.name, 'b_indicator')
self.assertEqual(indicator_b.variable_shape, [1, 100])
self.assertFalse(indicator_b._is_v2_column)
def test_1D_shape_succeeds(self):
animal = fc.indicator_column(
fc.categorical_column_with_hash_bucket('animal', 4))
transformation_cache = fc.FeatureTransformationCache({
'animal': ['fox', 'fox']
})
output = transformation_cache.get(animal, None)
with self.cached_session():
self.assertAllEqual([[0., 0., 1., 0.], [0., 0., 1., 0.]],
self.evaluate(output))
def test_2D_shape_succeeds(self):
    # TODO(ispir/cassandrax): Switch to categorical_column_with_keys when ready.
animal = fc.indicator_column(
fc.categorical_column_with_hash_bucket('animal', 4))
transformation_cache = fc.FeatureTransformationCache({
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 0]],
values=['fox', 'fox'],
dense_shape=[2, 1])
})
output = transformation_cache.get(animal, None)
with self.cached_session():
self.assertAllEqual([[0., 0., 1., 0.], [0., 0., 1., 0.]],
self.evaluate(output))
def test_multi_hot(self):
animal = fc.indicator_column(
fc.categorical_column_with_identity('animal', num_buckets=4))
transformation_cache = fc.FeatureTransformationCache({
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 1], dense_shape=[1, 2])
})
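    # Both entries in the example have id 1, so the indicator output counts
    # them: position 1 gets the value 2.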
output = transformation_cache.get(animal, None)
with self.cached_session():
self.assertAllEqual([[0., 2., 0., 0.]], self.evaluate(output))
def test_multi_hot2(self):
animal = fc.indicator_column(
fc.categorical_column_with_identity('animal', num_buckets=4))
transformation_cache = fc.FeatureTransformationCache({
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
})
output = transformation_cache.get(animal, None)
with self.cached_session():
self.assertAllEqual([[0., 1., 1., 0.]], self.evaluate(output))
def test_deep_copy(self):
a = fc.categorical_column_with_hash_bucket('a', 4)
column = fc.indicator_column(a)
column_copy = copy.deepcopy(column)
self.assertEqual(column_copy.categorical_column.name, 'a')
    self.assertEqual(column_copy.name, 'a_indicator')
    self.assertEqual(column_copy.variable_shape, [1, 4])
def test_parse_example(self):
a = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
a_indicator = fc.indicator_column(a)
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer']))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec_v2([a_indicator]))
self.assertIn('aaa', features)
with self.cached_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'omar', b'stringer'], dtype=np.object_),
dense_shape=[1, 2]),
features['aaa'].eval())
def test_transform(self):
a = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
a_indicator = fc.indicator_column(a)
features = {
'aaa': sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
}
indicator_tensor = fc._transform_features_v2(features, [a_indicator],
None)[a_indicator]
with _initialized_session():
self.assertAllEqual([[0, 0, 1], [1, 0, 0]],
self.evaluate(indicator_tensor))
def test_transform_with_weighted_column(self):
# Github issue 12557
ids = fc.categorical_column_with_vocabulary_list(
key='ids', vocabulary_list=('a', 'b', 'c'))
weights = fc.weighted_categorical_column(ids, 'weights')
indicator = fc.indicator_column(weights)
features = {
'ids': constant_op.constant([['c', 'b', 'a', 'c']]),
'weights': constant_op.constant([[2., 4., 6., 1.]])
}
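    # Output columns follow vocabulary order (a, b, c): 'a' -> 6., 'b' -> 4.,
    # and the two 'c' entries have their weights summed (2. + 1. = 3.).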
indicator_tensor = fc._transform_features_v2(features, [indicator],
None)[indicator]
with _initialized_session():
self.assertAllEqual([[6., 4., 3.]], self.evaluate(indicator_tensor))
def test_transform_with_missing_value_in_weighted_column(self):
# Github issue 12583
ids = fc.categorical_column_with_vocabulary_list(
key='ids', vocabulary_list=('a', 'b', 'c'))
weights = fc.weighted_categorical_column(ids, 'weights')
indicator = fc.indicator_column(weights)
features = {
'ids': constant_op.constant([['c', 'b', 'unknown']]),
'weights': constant_op.constant([[2., 4., 6.]])
}
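    # 'unknown' is out of vocabulary, so its weight (6.) is dropped; the
    # remaining columns are a=0., b=4., c=2.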
indicator_tensor = fc._transform_features_v2(features, [indicator],
None)[indicator]
with _initialized_session():
self.assertAllEqual([[0., 4., 2.]], self.evaluate(indicator_tensor))
def test_transform_with_missing_value_in_categorical_column(self):
# Github issue 12583
ids = fc.categorical_column_with_vocabulary_list(
key='ids', vocabulary_list=('a', 'b', 'c'))
indicator = fc.indicator_column(ids)
features = {
'ids': constant_op.constant([['c', 'b', 'unknown']]),
}
indicator_tensor = fc._transform_features_v2(features, [indicator],
None)[indicator]
with _initialized_session():
self.assertAllEqual([[0., 1., 1.]], self.evaluate(indicator_tensor))
def test_linear_model(self):
animal = fc.indicator_column(
fc.categorical_column_with_identity('animal', num_buckets=4))
with ops.Graph().as_default():
features = {
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
}
model = fc.LinearModel([animal])
predictions = model(features)
weight_var, _ = model.variables
with _initialized_session():
# All should be zero-initialized.
self.assertAllClose([[0.], [0.], [0.], [0.]], self.evaluate(weight_var))
self.assertAllClose([[0.]], self.evaluate(predictions))
weight_var.assign([[1.], [2.], [3.], [4.]]).eval()
self.assertAllClose([[2. + 3.]], self.evaluate(predictions))
def test_old_linear_model(self):
animal = fc.indicator_column(
fc.categorical_column_with_identity('animal', num_buckets=4))
with ops.Graph().as_default():
features = {
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
}
predictions = fc_old.linear_model(features, [animal])
weight_var = get_linear_model_column_var(animal)
with _initialized_session():
# All should be zero-initialized.
self.assertAllClose([[0.], [0.], [0.], [0.]], self.evaluate(weight_var))
self.assertAllClose([[0.]], self.evaluate(predictions))
weight_var.assign([[1.], [2.], [3.], [4.]]).eval()
self.assertAllClose([[2. + 3.]], self.evaluate(predictions))
def test_old_linear_model_old_categorical(self):
animal = fc.indicator_column(
fc_old._categorical_column_with_identity('animal', num_buckets=4))
with ops.Graph().as_default():
features = {
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
}
predictions = fc_old.linear_model(features, [animal])
weight_var = get_linear_model_column_var(animal)
with _initialized_session():
# All should be zero-initialized.
self.assertAllClose([[0.], [0.], [0.], [0.]], self.evaluate(weight_var))
self.assertAllClose([[0.]], self.evaluate(predictions))
weight_var.assign([[1.], [2.], [3.], [4.]]).eval()
self.assertAllClose([[2. + 3.]], self.evaluate(predictions))
def test_dense_features(self):
animal = fc.indicator_column(
fc.categorical_column_with_identity('animal', num_buckets=4))
with ops.Graph().as_default():
features = {
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
}
net = fc.DenseFeatures([animal])(features)
with _initialized_session():
self.assertAllClose([[0., 1., 1., 0.]], self.evaluate(net))
def test_input_layer(self):
animal = fc.indicator_column(
fc.categorical_column_with_identity('animal', num_buckets=4))
with ops.Graph().as_default():
features = {
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
}
net = fc_old.input_layer(features, [animal])
with _initialized_session():
self.assertAllClose([[0., 1., 1., 0.]], self.evaluate(net))
def test_input_layer_old_categorical(self):
animal = fc.indicator_column(
fc_old._categorical_column_with_identity('animal', num_buckets=4))
with ops.Graph().as_default():
features = {
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
}
net = fc_old.input_layer(features, [animal])
with _initialized_session():
self.assertAllClose([[0., 1., 1., 0.]], self.evaluate(net))
def test_serialization(self):
parent = fc.categorical_column_with_identity('animal', num_buckets=4)
animal = fc.indicator_column(parent)
self.assertEqual([parent], animal.parents)
config = animal._get_config()
self.assertEqual({
'categorical_column': {
'class_name': 'IdentityCategoricalColumn',
'config': {
'key': 'animal',
'default_value': None,
'number_buckets': 4
}
}
}, config)
new_animal = fc.IndicatorColumn._from_config(config)
self.assertEqual(animal, new_animal)
self.assertIsNot(parent, new_animal.categorical_column)
new_animal = fc.IndicatorColumn._from_config(
config, columns_by_name={parent.name: parent})
self.assertEqual(animal, new_animal)
self.assertIs(parent, new_animal.categorical_column)
class _TestStateManager(fc.StateManager):
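  # Minimal StateManager for the embedding tests below: variables are created
  # lazily via variable_scope.get_variable and memoized per
  # (feature_column, name) pair.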
def __init__(self, trainable=True):
# Dict of feature_column to a dict of variables.
self._all_variables = {}
self._trainable = trainable
def create_variable(self,
feature_column,
name,
shape,
dtype=None,
trainable=True,
use_resource=True,
initializer=None):
if feature_column not in self._all_variables:
self._all_variables[feature_column] = {}
var_dict = self._all_variables[feature_column]
if name in var_dict:
return var_dict[name]
else:
var = variable_scope.get_variable(
name=name,
shape=shape,
dtype=dtype,
trainable=self._trainable and trainable,
use_resource=use_resource,
initializer=initializer)
var_dict[name] = var
return var
def get_variable(self, feature_column, name):
if feature_column not in self._all_variables:
raise ValueError('Do not recognize FeatureColumn.')
if name in self._all_variables[feature_column]:
return self._all_variables[feature_column][name]
raise ValueError('Could not find variable.')
class EmbeddingColumnTest(test.TestCase):
def test_defaults(self):
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = fc.embedding_column(
categorical_column, dimension=embedding_dimension)
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('mean', embedding_column.combiner)
self.assertIsNone(embedding_column.ckpt_to_load_from)
self.assertIsNone(embedding_column.tensor_name_in_ckpt)
self.assertIsNone(embedding_column.max_norm)
self.assertTrue(embedding_column.trainable)
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column.parse_example_spec)
self.assertTrue(embedding_column._is_v2_column)
def test_is_v2_column(self):
categorical_column = fc_old._categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = fc.embedding_column(
categorical_column, dimension=embedding_dimension)
self.assertFalse(embedding_column._is_v2_column)
def test_all_constructor_args(self):
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer',
ckpt_to_load_from='my_ckpt',
tensor_name_in_ckpt='my_ckpt_tensor',
max_norm=42.,
trainable=False)
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('my_combiner', embedding_column.combiner)
self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from)
self.assertEqual('my_ckpt_tensor', embedding_column.tensor_name_in_ckpt)
self.assertEqual(42., embedding_column.max_norm)
self.assertFalse(embedding_column.trainable)
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column.parse_example_spec)
def test_deep_copy(self):
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
original = fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer',
ckpt_to_load_from='my_ckpt',
tensor_name_in_ckpt='my_ckpt_tensor',
max_norm=42.,
trainable=False)
for embedding_column in (original, copy.deepcopy(original)):
self.assertEqual('aaa', embedding_column.categorical_column.name)
self.assertEqual(3, embedding_column.categorical_column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column.categorical_column.parse_example_spec)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('my_combiner', embedding_column.combiner)
self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from)
self.assertEqual('my_ckpt_tensor', embedding_column.tensor_name_in_ckpt)
self.assertEqual(42., embedding_column.max_norm)
self.assertFalse(embedding_column.trainable)
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column.parse_example_spec)
def test_invalid_initializer(self):
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
with self.assertRaisesRegexp(ValueError, 'initializer must be callable'):
fc.embedding_column(categorical_column, dimension=2, initializer='not_fn')
def test_parse_example(self):
a = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
a_embedded = fc.embedding_column(a, dimension=2)
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer']))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec_v2([a_embedded]))
self.assertIn('aaa', features)
with self.cached_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'omar', b'stringer'], dtype=np.object_),
dense_shape=[1, 2]),
features['aaa'].eval())
def test_transform_feature(self):
a = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
a_embedded = fc.embedding_column(a, dimension=2)
features = {
'aaa': sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2))
}
outputs = fc._transform_features_v2(features, [a, a_embedded], None)
output_a = outputs[a]
output_embedded = outputs[a_embedded]
with _initialized_session():
_assert_sparse_tensor_value(self, self.evaluate(output_a),
self.evaluate(output_embedded))
def test_get_dense_tensor(self):
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
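      # Deterministic initializer so the lookups below are predictable; also
      # verifies the requested shape and dtype.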
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
state_manager = _TestStateManager()
embedding_column.create_state(state_manager)
# Provide sparse input and get dense result.
embedding_lookup = embedding_column.get_dense_tensor(
fc.FeatureTransformationCache({
'aaa': sparse_input
}), state_manager)
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('embedding_weights:0',),
tuple([v.name for v in global_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0].eval())
self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup))
def test_get_dense_tensor_old_categorical(self):
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc_old._categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
# Provide sparse input and get dense result.
embedding_lookup = embedding_column._get_dense_tensor(
fc_old._LazyBuilder({
'aaa': sparse_input
}))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('embedding_weights:0',),
tuple([v.name for v in global_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0].eval())
self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup))
def test_get_dense_tensor_3d(self):
# Inputs.
vocabulary_size = 4
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0, 0), (1, 1, 0), (1, 1, 4), (3, 0, 0), (3, 1, 2)),
values=(2, 0, 1, 1, 2),
dense_shape=(4, 2, 5))
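    # Note: rank-3 input; the middle dimension indexes positions within each
    # example, so each example yields two embedding vectors.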
# Embedding variable.
embedding_dimension = 3
embedding_values = (
(1., 2., 4.), # id 0
(3., 5., 1.), # id 1
(7., 11., 2.), # id 2
(2., 7., 12.) # id 3
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [[2], []], embedding = [[7, 11, 2], [0, 0, 0]]
((7., 11., 2.), (0., 0., 0.)),
# example 1, ids [[], [0, 1]], embedding
# = mean([[], [1, 2, 4] + [3, 5, 1]]) = [[0, 0, 0], [2, 3.5, 2.5]]
((0., 0., 0.), (2., 3.5, 2.5)),
# example 2, ids [[], []], embedding = [[0, 0, 0], [0, 0, 0]]
((0., 0., 0.), (0., 0., 0.)),
# example 3, ids [[1], [2]], embedding = [[3, 5, 1], [7, 11, 2]]
((3., 5., 1.), (7., 11., 2.)),
)
# Build columns.
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
state_manager = _TestStateManager()
embedding_column.create_state(state_manager)
# Provide sparse input and get dense result.
embedding_lookup = embedding_column.get_dense_tensor(
fc.FeatureTransformationCache({
'aaa': sparse_input
}), state_manager)
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('embedding_weights:0',),
tuple([v.name for v in global_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0].eval())
self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup))
def test_get_dense_tensor_placeholder_inputs(self):
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
state_manager = _TestStateManager()
embedding_column.create_state(state_manager)
# Provide sparse input and get dense result.
input_indices = array_ops.placeholder(dtype=dtypes.int64)
input_values = array_ops.placeholder(dtype=dtypes.int64)
input_shape = array_ops.placeholder(dtype=dtypes.int64)
embedding_lookup = embedding_column.get_dense_tensor(
fc.FeatureTransformationCache({
'aaa':
sparse_tensor.SparseTensorValue(
indices=input_indices,
values=input_values,
dense_shape=input_shape)
}), state_manager)
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('embedding_weights:0',), tuple([v.name for v in global_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0].eval())
self.assertAllEqual(expected_lookups, embedding_lookup.eval(
feed_dict={
input_indices: sparse_input.indices,
input_values: sparse_input.values,
input_shape: sparse_input.dense_shape,
}))
def test_get_dense_tensor_restore_from_ckpt(self):
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
    # Embedding variable. The checkpoint file contains these embedding_values.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
ckpt_path = test.test_src_dir_path(
'python/feature_column/testdata/embedding.ckpt')
ckpt_tensor = 'my_embedding'
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
ckpt_to_load_from=ckpt_path,
tensor_name_in_ckpt=ckpt_tensor)
state_manager = _TestStateManager()
embedding_column.create_state(state_manager)
# Provide sparse input and get dense result.
embedding_lookup = embedding_column.get_dense_tensor(
fc.FeatureTransformationCache({
'aaa': sparse_input
}), state_manager)
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('embedding_weights:0',), tuple([v.name for v in global_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0].eval())
self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup))
def test_linear_model(self):
# Inputs.
batch_size = 4
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(batch_size, 5))
# Embedding variable.
embedding_dimension = 2
embedding_shape = (vocabulary_size, embedding_dimension)
zeros_embedding_values = np.zeros(embedding_shape)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual(embedding_shape, shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return zeros_embedding_values
# Build columns.
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
with ops.Graph().as_default():
model = fc.LinearModel((embedding_column,))
predictions = model({categorical_column.name: sparse_input})
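      # The linear model creates the embedding weights plus a separate set of
      # per-dimension linear weights applied to the embedded values; see the
      # prediction math below.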
expected_var_names = (
'linear_model/bias_weights:0',
'linear_model/aaa_embedding/weights:0',
'linear_model/aaa_embedding/embedding_weights:0',
)
self.assertItemsEqual(
expected_var_names,
[v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
trainable_vars = {
v.name: v for v in ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
}
self.assertItemsEqual(expected_var_names, trainable_vars.keys())
bias = trainable_vars['linear_model/bias_weights:0']
embedding_weights = trainable_vars[
'linear_model/aaa_embedding/embedding_weights:0']
linear_weights = trainable_vars['linear_model/aaa_embedding/weights:0']
with _initialized_session():
# Predictions with all zero weights.
self.assertAllClose(np.zeros((1,)), self.evaluate(bias))
self.assertAllClose(zeros_embedding_values,
self.evaluate(embedding_weights))
self.assertAllClose(
np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights))
self.assertAllClose(
np.zeros((batch_size, 1)), self.evaluate(predictions))
# Predictions with all non-zero weights.
embedding_weights.assign((
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)).eval()
linear_weights.assign(((4.,), (6.,))).eval()
# example 0, ids [2], embedding[0] = [7, 11]
# example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
# example 2, ids [], embedding[2] = [0, 0]
# example 3, ids [1], embedding[3] = [3, 5]
# sum(embeddings * linear_weights)
# = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42]
self.assertAllClose(((94.,), (29.,), (0.,), (42.,)),
self.evaluate(predictions))
def test_dense_features(self):
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
# Provide sparse input and get dense result.
l = fc.DenseFeatures((embedding_column,))
dense_features = l({'aaa': sparse_input})
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('dense_features/aaa_embedding/embedding_weights:0',),
tuple([v.name for v in global_vars]))
for v in global_vars:
      self.assertIsInstance(v, variables_lib.RefVariable)
trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(('dense_features/aaa_embedding/embedding_weights:0',),
tuple([v.name for v in trainable_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, trainable_vars[0].eval())
self.assertAllEqual(expected_lookups, self.evaluate(dense_features))
def test_dense_features_not_trainable(self):
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer,
trainable=False)
# Provide sparse input and get dense result.
dense_features = fc.DenseFeatures((embedding_column,))({
'aaa': sparse_input
})
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('dense_features/aaa_embedding/embedding_weights:0',),
tuple([v.name for v in global_vars]))
self.assertItemsEqual(
[], ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0].eval())
self.assertAllEqual(expected_lookups, self.evaluate(dense_features))
def test_input_layer(self):
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
# Provide sparse input and get dense result.
feature_layer = fc_old.input_layer({
'aaa': sparse_input
}, (embedding_column,))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('input_layer/aaa_embedding/embedding_weights:0',),
tuple([v.name for v in global_vars]))
trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(('input_layer/aaa_embedding/embedding_weights:0',),
tuple([v.name for v in trainable_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, trainable_vars[0].eval())
self.assertAllEqual(expected_lookups, self.evaluate(feature_layer))
def test_old_linear_model(self):
# Inputs.
batch_size = 4
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(batch_size, 5))
# Embedding variable.
embedding_dimension = 2
embedding_shape = (vocabulary_size, embedding_dimension)
zeros_embedding_values = np.zeros(embedding_shape)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual(embedding_shape, shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return zeros_embedding_values
# Build columns.
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
with ops.Graph().as_default():
predictions = fc_old.linear_model({
categorical_column.name: sparse_input
}, (embedding_column,))
expected_var_names = (
'linear_model/bias_weights:0',
'linear_model/aaa_embedding/weights:0',
'linear_model/aaa_embedding/embedding_weights:0',
)
self.assertItemsEqual(
expected_var_names,
[v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
trainable_vars = {
v.name: v
for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
}
self.assertItemsEqual(expected_var_names, trainable_vars.keys())
bias = trainable_vars['linear_model/bias_weights:0']
embedding_weights = trainable_vars[
'linear_model/aaa_embedding/embedding_weights:0']
linear_weights = trainable_vars['linear_model/aaa_embedding/weights:0']
with _initialized_session():
# Predictions with all zero weights.
self.assertAllClose(np.zeros((1,)), self.evaluate(bias))
self.assertAllClose(zeros_embedding_values,
self.evaluate(embedding_weights))
self.assertAllClose(
np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights))
self.assertAllClose(
np.zeros((batch_size, 1)), self.evaluate(predictions))
# Predictions with all non-zero weights.
embedding_weights.assign((
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)).eval()
linear_weights.assign(((4.,), (6.,))).eval()
# example 0, ids [2], embedding[0] = [7, 11]
# example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
# example 2, ids [], embedding[2] = [0, 0]
# example 3, ids [1], embedding[3] = [3, 5]
# sum(embeddings * linear_weights)
# = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42]
self.assertAllClose(((94.,), (29.,), (0.,), (42.,)),
self.evaluate(predictions))
def test_old_linear_model_old_categorical(self):
# Inputs.
batch_size = 4
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(batch_size, 5))
# Embedding variable.
embedding_dimension = 2
embedding_shape = (vocabulary_size, embedding_dimension)
zeros_embedding_values = np.zeros(embedding_shape)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual(embedding_shape, shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return zeros_embedding_values
# Build columns.
categorical_column = fc_old._categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
with ops.Graph().as_default():
predictions = fc_old.linear_model({
categorical_column.name: sparse_input
}, (embedding_column,))
expected_var_names = (
'linear_model/bias_weights:0',
'linear_model/aaa_embedding/weights:0',
'linear_model/aaa_embedding/embedding_weights:0',
)
self.assertItemsEqual(
expected_var_names,
[v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
trainable_vars = {
v.name: v
for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
}
self.assertItemsEqual(expected_var_names, trainable_vars.keys())
bias = trainable_vars['linear_model/bias_weights:0']
embedding_weights = trainable_vars[
'linear_model/aaa_embedding/embedding_weights:0']
linear_weights = trainable_vars['linear_model/aaa_embedding/weights:0']
with _initialized_session():
# Predictions with all zero weights.
self.assertAllClose(np.zeros((1,)), self.evaluate(bias))
self.assertAllClose(zeros_embedding_values,
self.evaluate(embedding_weights))
self.assertAllClose(
np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights))
self.assertAllClose(
np.zeros((batch_size, 1)), self.evaluate(predictions))
# Predictions with all non-zero weights.
embedding_weights.assign((
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)).eval()
linear_weights.assign(((4.,), (6.,))).eval()
# example 0, ids [2], embedding[0] = [7, 11]
# example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
# example 2, ids [], embedding[2] = [0, 0]
# example 3, ids [1], embedding[3] = [3, 5]
# sum(embeddings * linear_weights)
# = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42]
self.assertAllClose(((94.,), (29.,), (0.,), (42.,)),
self.evaluate(predictions))
def test_serialization(self):
def _initializer(shape, dtype, partition_info):
del shape, dtype, partition_info
      raise ValueError('Not expected to be called')
# Build columns.
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_column = fc.embedding_column(
categorical_column, dimension=2, initializer=_initializer)
self.assertEqual([categorical_column], embedding_column.parents)
config = embedding_column._get_config()
self.assertEqual({
'categorical_column': {
'class_name': 'IdentityCategoricalColumn',
'config': {
'number_buckets': 3,
'key': 'aaa',
'default_value': None
}
},
'ckpt_to_load_from': None,
'combiner': 'mean',
'dimension': 2,
'initializer': '_initializer',
'max_norm': None,
'tensor_name_in_ckpt': None,
'trainable': True
}, config)
custom_objects = {
'_initializer': _initializer,
}
new_embedding_column = fc.EmbeddingColumn._from_config(
config, custom_objects=custom_objects)
self.assertEqual(embedding_column, new_embedding_column)
self.assertIsNot(categorical_column,
new_embedding_column.categorical_column)
new_embedding_column = fc.EmbeddingColumn._from_config(
config,
custom_objects=custom_objects,
columns_by_name={categorical_column.name: categorical_column})
self.assertEqual(embedding_column, new_embedding_column)
self.assertIs(categorical_column, new_embedding_column.categorical_column)
class SharedEmbeddingColumnTest(test.TestCase):
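  """Tests for fc.shared_embedding_columns_v2."""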
def test_defaults(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_dimension = 2
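    # The returned columns follow the input order, so passing [b, a] yields
    # (embedding_column_b, embedding_column_a).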
embedding_column_b, embedding_column_a = fc.shared_embedding_columns_v2(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension)
self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
self.assertIsNone(embedding_column_a.max_norm)
self.assertIsNone(embedding_column_b.max_norm)
self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape)
self.assertEqual((embedding_dimension,), embedding_column_b.variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_a.parse_example_spec)
self.assertEqual({
'bbb': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_b.parse_example_spec)
def test_all_constructor_args(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_dimension = 2
embedding_column_a, embedding_column_b = fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer',
shared_embedding_collection_name='shared_embedding_collection_name',
ckpt_to_load_from='my_ckpt',
tensor_name_in_ckpt='my_ckpt_tensor',
max_norm=42.,
trainable=False)
self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
self.assertEqual(42., embedding_column_a.max_norm)
self.assertEqual(42., embedding_column_b.max_norm)
self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape)
self.assertEqual((embedding_dimension,), embedding_column_b.variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_a.parse_example_spec)
self.assertEqual({
'bbb': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_b.parse_example_spec)
def test_deep_copy(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_dimension = 2
original_a, _ = fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer',
shared_embedding_collection_name='shared_embedding_collection_name',
ckpt_to_load_from='my_ckpt',
tensor_name_in_ckpt='my_ckpt_tensor',
max_norm=42.,
trainable=False)
for embedding_column_a in (original_a, copy.deepcopy(original_a)):
self.assertEqual('aaa', embedding_column_a.categorical_column.name)
self.assertEqual(3, embedding_column_a.categorical_column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_a.categorical_column.parse_example_spec)
self.assertEqual(42., embedding_column_a.max_norm)
self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
self.assertEqual((embedding_dimension,),
embedding_column_a.variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_a.parse_example_spec)
def test_invalid_initializer(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
with self.assertRaisesRegexp(ValueError, 'initializer must be callable'):
fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b],
dimension=2,
initializer='not_fn')
def test_incompatible_column_type(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
categorical_column_c = fc.categorical_column_with_hash_bucket(
key='ccc', hash_bucket_size=3)
with self.assertRaisesRegexp(
ValueError, 'all categorical_columns must have the same type.*'
'IdentityCategoricalColumn.*HashedCategoricalColumn'):
fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b, categorical_column_c],
dimension=2)
def test_weighted_categorical_column_ok(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
weighted_categorical_column_a = fc.weighted_categorical_column(
categorical_column_a, weight_feature_key='aaa_weights')
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
weighted_categorical_column_b = fc.weighted_categorical_column(
categorical_column_b, weight_feature_key='bbb_weights')
fc.shared_embedding_columns_v2(
[weighted_categorical_column_a, categorical_column_b], dimension=2)
fc.shared_embedding_columns_v2(
[categorical_column_a, weighted_categorical_column_b], dimension=2)
fc.shared_embedding_columns_v2(
[weighted_categorical_column_a, weighted_categorical_column_b],
dimension=2)
def test_parse_example(self):
a = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
b = fc.categorical_column_with_vocabulary_list(
key='bbb', vocabulary_list=('omar', 'stringer', 'marlo'))
a_embedded, b_embedded = fc.shared_embedding_columns_v2([a, b], dimension=2)
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer'])),
'bbb':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'stringer', b'marlo'])),
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec_v2([a_embedded, b_embedded]))
self.assertIn('aaa', features)
self.assertIn('bbb', features)
with self.cached_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'omar', b'stringer'], dtype=np.object_),
dense_shape=[1, 2]),
features['aaa'].eval())
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'stringer', b'marlo'], dtype=np.object_),
dense_shape=[1, 2]),
features['bbb'].eval())
def test_transform_feature(self):
a = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
b = fc.categorical_column_with_identity(key='bbb', num_buckets=3)
a_embedded, b_embedded = fc.shared_embedding_columns_v2([a, b], dimension=2)
features = {
'aaa': sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb': sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
outputs = fc._transform_features_v2(features,
[a, a_embedded, b, b_embedded], None)
output_a = outputs[a]
output_a_embedded = outputs[a_embedded]
output_b = outputs[b]
output_b_embedded = outputs[b_embedded]
with _initialized_session():
_assert_sparse_tensor_value(self, self.evaluate(output_a),
self.evaluate(output_a_embedded))
_assert_sparse_tensor_value(self, self.evaluate(output_b),
self.evaluate(output_b_embedded))
def test_get_dense_tensor(self):
# Inputs.
vocabulary_size = 3
# -1 values are ignored.
input_a = np.array(
[[2, -1, -1], # example 0, ids [2]
[0, 1, -1]]) # example 1, ids [0, 1]
input_b = np.array(
[[0, -1, -1], # example 0, ids [0]
[-1, -1, -1]]) # example 1, ids []
input_features = {
'aaa': input_a,
'bbb': input_b
}
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups_a = (
# example 0:
(7., 11.), # ids [2], embedding = [7, 11]
# example 1:
(2., 3.5), # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
)
expected_lookups_b = (
# example 0:
(1., 2.), # ids [0], embedding = [1, 2]
# example 1:
(0., 0.), # ids [], embedding = [0, 0]
)
# Build columns.
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_column_a, embedding_column_b = fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
initializer=_initializer)
# Provide sparse input and get dense result.
embedding_lookup_a = embedding_column_a.get_dense_tensor(
fc.FeatureTransformationCache(input_features), None)
embedding_lookup_b = embedding_column_b.get_dense_tensor(
fc.FeatureTransformationCache(input_features), None)
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('aaa_bbb_shared_embedding:0',),
tuple([v.name for v in global_vars]))
embedding_var = global_vars[0]
with _initialized_session():
self.assertAllEqual(embedding_values, self.evaluate(embedding_var))
self.assertAllEqual(expected_lookups_a, self.evaluate(embedding_lookup_a))
self.assertAllEqual(expected_lookups_b, self.evaluate(embedding_lookup_b))
def test_get_dense_tensor_placeholder_inputs(self):
# Inputs.
vocabulary_size = 3
# -1 values are ignored.
input_a = np.array(
[[2, -1, -1], # example 0, ids [2]
[0, 1, -1]]) # example 1, ids [0, 1]
input_b = np.array(
[[0, -1, -1], # example 0, ids [0]
[-1, -1, -1]]) # example 1, ids []
# Specify shape, because dense input must have rank specified.
input_a_placeholder = array_ops.placeholder(
dtype=dtypes.int64, shape=[None, 3])
input_b_placeholder = array_ops.placeholder(
dtype=dtypes.int64, shape=[None, 3])
input_features = {
'aaa': input_a_placeholder,
'bbb': input_b_placeholder,
}
feed_dict = {
input_a_placeholder: input_a,
input_b_placeholder: input_b,
}
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Build columns.
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_column_a, embedding_column_b = fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
initializer=_initializer)
# Provide sparse input and get dense result.
embedding_lookup_a = embedding_column_a.get_dense_tensor(
fc.FeatureTransformationCache(input_features), None)
embedding_lookup_b = embedding_column_b.get_dense_tensor(
fc.FeatureTransformationCache(input_features), None)
with _initialized_session() as sess:
sess.run([embedding_lookup_a, embedding_lookup_b], feed_dict=feed_dict)
def test_linear_model(self):
# Inputs.
batch_size = 2
vocabulary_size = 3
# -1 values are ignored.
input_a = np.array(
[[2, -1, -1], # example 0, ids [2]
[0, 1, -1]]) # example 1, ids [0, 1]
input_b = np.array(
[[0, -1, -1], # example 0, ids [0]
[-1, -1, -1]]) # example 1, ids []
# Embedding variable.
embedding_dimension = 2
embedding_shape = (vocabulary_size, embedding_dimension)
zeros_embedding_values = np.zeros(embedding_shape)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual(embedding_shape, shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return zeros_embedding_values
# Build columns.
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_column_a, embedding_column_b = fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
initializer=_initializer)
with ops.Graph().as_default():
model = fc.LinearModel((embedding_column_a, embedding_column_b))
predictions = model({
categorical_column_a.name: input_a,
categorical_column_b.name: input_b
})
# Linear weights do not follow the column name. But this is a rare use
# case, and fixing it would add too much complexity to the code.
expected_var_names = (
'linear_model/bias_weights:0',
'linear_model/aaa_shared_embedding/weights:0',
'aaa_bbb_shared_embedding:0',
'linear_model/bbb_shared_embedding/weights:0',
)
self.assertItemsEqual(
expected_var_names,
[v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
trainable_vars = {
v.name: v for v in ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
}
self.assertItemsEqual(expected_var_names, trainable_vars.keys())
bias = trainable_vars['linear_model/bias_weights:0']
embedding_weights = trainable_vars['aaa_bbb_shared_embedding:0']
linear_weights_a = trainable_vars[
'linear_model/aaa_shared_embedding/weights:0']
linear_weights_b = trainable_vars[
'linear_model/bbb_shared_embedding/weights:0']
with _initialized_session():
# Predictions with all zero weights.
self.assertAllClose(np.zeros((1,)), self.evaluate(bias))
self.assertAllClose(zeros_embedding_values,
self.evaluate(embedding_weights))
self.assertAllClose(
np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights_a))
self.assertAllClose(
np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights_b))
self.assertAllClose(
np.zeros((batch_size, 1)), self.evaluate(predictions))
# Predictions with all non-zero weights.
embedding_weights.assign((
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)).eval()
linear_weights_a.assign(((4.,), (6.,))).eval()
# example 0, ids [2], embedding[0] = [7, 11]
# example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
# sum(embeddings * linear_weights)
# = [4*7 + 6*11, 4*2 + 6*3.5] = [94, 29]
linear_weights_b.assign(((3.,), (5.,))).eval()
# example 0, ids [0], embedding[0] = [1, 2]
        # example 1, ids [], embedding[1] = [0, 0]
        # sum(embeddings * linear_weights)
        # = [3*1 + 5*2, 3*0 + 5*0] = [13, 0]
self.assertAllClose([[94. + 13.], [29.]], self.evaluate(predictions))
def _test_dense_features(self, trainable=True):
# Inputs.
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 4)),
values=(2, 0, 1),
dense_shape=(2, 5))
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [0]
# example 1, ids []
indices=((0, 0),),
values=(0,),
dense_shape=(2, 5))
sparse_input_c = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 1), (1, 1), (1, 3)),
values=(2, 0, 1),
dense_shape=(2, 5))
sparse_input_d = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids []
indices=((0, 1),),
values=(2,),
dense_shape=(2, 5))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0:
# A ids [2], embedding = [7, 11]
# B ids [0], embedding = [1, 2]
# C ids [2], embedding = [7, 11]
# D ids [2], embedding = [7, 11]
(7., 11., 1., 2., 7., 11., 7., 11.),
# example 1:
# A ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
# B ids [], embedding = [0, 0]
# C ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
# D ids [], embedding = [0, 0]
(2., 3.5, 0., 0., 2., 3.5, 0., 0.),
)
# Build columns.
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
categorical_column_c = fc.categorical_column_with_identity(
key='ccc', num_buckets=vocabulary_size)
categorical_column_d = fc.categorical_column_with_identity(
key='ddd', num_buckets=vocabulary_size)
embedding_column_a, embedding_column_b = fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
initializer=_initializer,
trainable=trainable)
embedding_column_c, embedding_column_d = fc.shared_embedding_columns_v2(
[categorical_column_c, categorical_column_d],
dimension=embedding_dimension,
initializer=_initializer,
trainable=trainable)
features = {
'aaa': sparse_input_a,
'bbb': sparse_input_b,
'ccc': sparse_input_c,
'ddd': sparse_input_d
}
# Provide sparse input and get dense result.
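    # DenseFeatures orders the columns by name, so the output concatenates the
    # A, B, C, D embeddings even though the columns are passed as (b, a, c, d).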
dense_features = fc.DenseFeatures(
feature_columns=(embedding_column_b, embedding_column_a,
embedding_column_c, embedding_column_d))(
features)
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
['aaa_bbb_shared_embedding:0', 'ccc_ddd_shared_embedding:0'],
tuple([v.name for v in global_vars]))
for v in global_vars:
      self.assertIsInstance(v, variables_lib.RefVariable)
trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
if trainable:
self.assertItemsEqual(
['aaa_bbb_shared_embedding:0', 'ccc_ddd_shared_embedding:0'],
tuple([v.name for v in trainable_vars]))
else:
self.assertItemsEqual([], tuple([v.name for v in trainable_vars]))
shared_embedding_vars = global_vars
with _initialized_session():
self.assertAllEqual(embedding_values, shared_embedding_vars[0].eval())
self.assertAllEqual(expected_lookups, self.evaluate(dense_features))
def test_dense_features(self):
self._test_dense_features()
def test_dense_features_no_trainable(self):
self._test_dense_features(trainable=False)
def test_serialization(self):
def _initializer(shape, dtype, partition_info):
del shape, dtype, partition_info
      raise ValueError('Not expected to be called')
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_column_a, embedding_column_b = fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b],
dimension=2,
initializer=_initializer)
self.assertEqual([categorical_column_a], embedding_column_a.parents)
self.assertEqual([categorical_column_b], embedding_column_b.parents)
# TODO(rohanj): Add tests for (from|get)_config once implemented
class WeightedCategoricalColumnTest(test.TestCase):
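  """Tests for fc.weighted_categorical_column."""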
def test_defaults(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
self.assertEqual('ids_weighted_by_values', column.name)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'ids': parsing_ops.VarLenFeature(dtypes.int64),
'values': parsing_ops.VarLenFeature(dtypes.float32)
}, column.parse_example_spec)
self.assertTrue(column._is_v2_column)
def test_is_v2_column(self):
column = fc.weighted_categorical_column(
categorical_column=fc_old._categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
self.assertFalse(column._is_v2_column)
def test_deep_copy(self):
"""Tests deepcopy of categorical_column_with_hash_bucket."""
original = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
for column in (original, copy.deepcopy(original)):
self.assertEqual('ids_weighted_by_values', column.name)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'ids': parsing_ops.VarLenFeature(dtypes.int64),
'values': parsing_ops.VarLenFeature(dtypes.float32)
}, column.parse_example_spec)
def test_invalid_dtype_none(self):
with self.assertRaisesRegexp(ValueError, 'is not convertible to float'):
fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values',
dtype=None)
def test_invalid_dtype_string(self):
with self.assertRaisesRegexp(ValueError, 'is not convertible to float'):
fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values',
dtype=dtypes.string)
def test_invalid_input_dtype(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
strings = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('omar', 'stringer', 'marlo'),
dense_shape=(2, 2))
with self.assertRaisesRegexp(ValueError, 'Bad dtype'):
fc._transform_features_v2({
'ids': strings,
'values': strings
}, (column,), None)
def test_column_name_collision(self):
with self.assertRaisesRegexp(ValueError, r'Parse config.*already exists'):
fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='aaa', num_buckets=3),
weight_feature_key='aaa').parse_example_spec()
def test_missing_weights(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('omar', 'stringer', 'marlo'),
dense_shape=(2, 2))
with self.assertRaisesRegexp(
ValueError, 'values is not in features dictionary'):
fc._transform_features_v2({'ids': inputs}, (column,), None)
def test_parse_example(self):
a = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
a_weighted = fc.weighted_categorical_column(a, weight_feature_key='weights')
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer'])),
'weights':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=[1., 10.]))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec_v2([a_weighted]))
self.assertIn('aaa', features)
self.assertIn('weights', features)
with self.cached_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'omar', b'stringer'], dtype=np.object_),
dense_shape=[1, 2]),
features['aaa'].eval())
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([1., 10.], dtype=np.float32),
dense_shape=[1, 2]),
features['weights'].eval())
def test_transform_features(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2))
weights = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0.5, 1.0, 0.1),
dense_shape=(2, 2))
id_tensor, weight_tensor = fc._transform_features_v2({
'ids': inputs,
'values': weights,
}, (column,), None)[column]
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array(inputs.values, dtype=np.int64),
dense_shape=inputs.dense_shape), self.evaluate(id_tensor))
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=weights.indices,
values=np.array(weights.values, dtype=np.float32),
dense_shape=weights.dense_shape), self.evaluate(weight_tensor))
def test_transform_features_dense_input(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
weights = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0.5, 1.0, 0.1),
dense_shape=(2, 2))
id_tensor, weight_tensor = fc._transform_features_v2({
'ids': ((0, -1), (1, 0)),
'values': weights,
}, (column,), None)[column]
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=np.array((0, 1, 0), dtype=np.int64),
dense_shape=(2, 2)), self.evaluate(id_tensor))
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=weights.indices,
values=np.array(weights.values, dtype=np.float32),
dense_shape=weights.dense_shape), self.evaluate(weight_tensor))
def test_transform_features_dense_weights(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 1, 0),
dense_shape=(2, 2))
id_tensor, weight_tensor = fc._transform_features_v2({
'ids': inputs,
'values': ((.5, 0.), (1., .1)),
}, (column,), None)[column]
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array(inputs.values, dtype=np.int64),
dense_shape=inputs.dense_shape), self.evaluate(id_tensor))
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=np.array((.5, 1., .1), dtype=np.float32),
dense_shape=(2, 2)), self.evaluate(weight_tensor))
def test_linear_model(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
model = fc.LinearModel((column,))
predictions = model({
'ids':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(.5, 1., .1),
dense_shape=(2, 2))
})
weight_var, bias = model.variables
with _initialized_session():
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
weight_var.assign(((1.,), (2.,), (3.,))).eval()
# weight_var[0] * weights[0, 0] = 1 * .5 = .5
# weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
# = 3*1 + 2*.1 = 3+.2 = 3.2
self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions))
def test_linear_model_mismatched_shape(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError,
r'Dimensions.*are not compatible'):
model = fc.LinearModel((column,))
model({
'ids':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (0, 1), (1, 0), (1, 1)),
values=(.5, 11., 1., .1),
dense_shape=(2, 2))
})
def test_linear_model_mismatched_dense_values(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
model = fc.LinearModel((column,), sparse_combiner='mean')
predictions = model({
'ids':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values': ((.5,), (1.,))
})
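      # 'values' supplies a single dense weight per example, but example 1 has
      # two ids, so evaluating the predictions fails with an incompatible-shapes
      # error below.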
      # Disable the constant-folding optimizer here, since with it enabled the
      # error message differs between CPU and GPU.
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
with _initialized_session(config):
with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'):
self.evaluate(predictions)
def test_linear_model_mismatched_dense_shape(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
model = fc.LinearModel((column,))
predictions = model({
'ids':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values': ((.5,), (1.,), (.1,))
})
weight_var, bias = model.variables
with _initialized_session():
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
weight_var.assign(((1.,), (2.,), (3.,))).eval()
# weight_var[0] * weights[0, 0] = 1 * .5 = .5
# weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
# = 3*1 + 2*.1 = 3+.2 = 3.2
self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions))
def test_old_linear_model(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
predictions = fc_old.linear_model({
'ids':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(.5, 1., .1),
dense_shape=(2, 2))
}, (column,))
bias = get_linear_model_bias()
weight_var = get_linear_model_column_var(column)
with _initialized_session():
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
weight_var.assign(((1.,), (2.,), (3.,))).eval()
# weight_var[0] * weights[0, 0] = 1 * .5 = .5
# weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
# = 3*1 + 2*.1 = 3+.2 = 3.2
self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions))
def test_old_linear_model_mismatched_shape(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError,
r'Dimensions.*are not compatible'):
fc_old.linear_model({
'ids':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (0, 1), (1, 0), (1, 1)),
values=(.5, 11., 1., .1),
dense_shape=(2, 2))
}, (column,))
def test_old_linear_model_mismatched_dense_values(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
predictions = fc_old.linear_model({
'ids':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values': ((.5,), (1.,))
}, (column,),
sparse_combiner='mean')
# Disabling the constant folding optimizer here since it changes the
# error message differently on CPU and GPU.
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
with _initialized_session(config):
with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'):
self.evaluate(predictions)
def test_old_linear_model_mismatched_dense_shape(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
predictions = fc_old.linear_model({
'ids':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values': ((.5,), (1.,), (.1,))
}, (column,))
bias = get_linear_model_bias()
weight_var = get_linear_model_column_var(column)
with _initialized_session():
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
weight_var.assign(((1.,), (2.,), (3.,))).eval()
# weight_var[0] * weights[0, 0] = 1 * .5 = .5
# weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
# = 3*1 + 2*.1 = 3+.2 = 3.2
self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions))
def test_old_linear_model_old_categorical(self):
column = fc.weighted_categorical_column(
categorical_column=fc_old._categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
predictions = fc_old.linear_model({
'ids':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(.5, 1., .1),
dense_shape=(2, 2))
}, (column,))
bias = get_linear_model_bias()
weight_var = get_linear_model_column_var(column)
with _initialized_session():
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
weight_var.assign(((1.,), (2.,), (3.,))).eval()
# weight_var[0] * weights[0, 0] = 1 * .5 = .5
# weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
# = 3*1 + 2*.1 = 3+.2 = 3.2
self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions))
# TODO(ptucker): Add test with embedding of weighted categorical.
def test_serialization(self):
categorical_column = fc.categorical_column_with_identity(
key='ids', num_buckets=3)
column = fc.weighted_categorical_column(
categorical_column=categorical_column, weight_feature_key='weight')
self.assertEqual([categorical_column, 'weight'], column.parents)
config = column._get_config()
self.assertEqual({
'categorical_column': {
'config': {
'key': 'ids',
'number_buckets': 3,
'default_value': None
},
'class_name': 'IdentityCategoricalColumn'
},
'dtype': 'float32',
'weight_feature_key': 'weight'
}, config)
self.assertEqual(column, fc.WeightedCategoricalColumn._from_config(config))
new_column = fc.WeightedCategoricalColumn._from_config(
config, columns_by_name={categorical_column.name: categorical_column})
self.assertEqual(column, new_column)
self.assertIs(categorical_column, new_column.categorical_column)
class FeatureColumnForSerializationTest(BaseFeatureColumnForTests):
@property
def _is_v2_column(self):
return True
@property
def name(self):
return 'BadParentsFeatureColumn'
def transform_feature(self, transformation_cache, state_manager):
return 'Output'
@property
def parse_example_spec(self):
pass
class SerializationTest(test.TestCase):
"""Tests for serialization, deserialization helpers."""
def test_serialize_non_feature_column(self):
class NotAFeatureColumn(object):
pass
with self.assertRaisesRegexp(ValueError, 'is not a FeatureColumn'):
fc.serialize_feature_column(NotAFeatureColumn())
def test_deserialize_invalid_config(self):
with self.assertRaisesRegexp(ValueError, 'Improper config format: {}'):
fc.deserialize_feature_column({})
def test_deserialize_config_missing_key(self):
config_missing_key = {
'config': {
# Dtype is missing and should cause a failure.
# 'dtype': 'int32',
'default_value': None,
'key': 'a',
'normalizer_fn': None,
'shape': (2,)
},
'class_name': 'NumericColumn'
}
with self.assertRaisesRegexp(ValueError, 'Invalid config:'):
fc.deserialize_feature_column(config_missing_key)
def test_deserialize_invalid_class(self):
with self.assertRaisesRegexp(
ValueError, 'Unknown feature_column_v2: NotExistingFeatureColumnClass'):
fc.deserialize_feature_column({
'class_name': 'NotExistingFeatureColumnClass',
'config': {}
})
def test_deserialization_deduping(self):
price = fc.numeric_column('price')
bucketized_price = fc.bucketized_column(price, boundaries=[0, 1])
configs = fc.serialize_feature_columns([price, bucketized_price])
deserialized_feature_columns = fc.deserialize_feature_columns(configs)
self.assertEqual(2, len(deserialized_feature_columns))
new_price = deserialized_feature_columns[0]
new_bucketized_price = deserialized_feature_columns[1]
# Ensure these are not the original objects:
self.assertIsNot(price, new_price)
self.assertIsNot(bucketized_price, new_bucketized_price)
# But they are equivalent:
self.assertEquals(price, new_price)
self.assertEquals(bucketized_price, new_bucketized_price)
# Check that deduping worked:
self.assertIs(new_bucketized_price.source_column, new_price)
  def test_deserialization_custom_objects(self):
    # Note that custom_objects is also tested extensively above per class; this
    # test ensures that the public wrappers also handle it correctly.
def _custom_fn(input_tensor):
return input_tensor + 42.
price = fc.numeric_column('price', normalizer_fn=_custom_fn)
configs = fc.serialize_feature_columns([price])
deserialized_feature_columns = fc.deserialize_feature_columns(configs)
self.assertEqual(1, len(deserialized_feature_columns))
new_price = deserialized_feature_columns[0]
# Ensure these are not the original objects:
self.assertIsNot(price, new_price)
# But they are equivalent:
self.assertEquals(price, new_price)
# Check that normalizer_fn points to the correct function.
self.assertIs(new_price.normalizer_fn, _custom_fn)
if __name__ == '__main__':
test.main()
| {
"content_hash": "be8eb632375da443dd9c2acf692dc85a",
"timestamp": "",
"source": "github",
"line_count": 7939,
"max_line_length": 123,
"avg_line_length": 39.89318553974052,
"alnum_prop": 0.5963462072797999,
"repo_name": "hehongliang/tensorflow",
"id": "115763f656e93eed5a2e82f7b1ac848c9f81ad83",
"size": "317401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/feature_column/feature_column_v2_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7666"
},
{
"name": "C",
"bytes": "194748"
},
{
"name": "C++",
"bytes": "26947133"
},
{
"name": "CMake",
"bytes": "174938"
},
{
"name": "Go",
"bytes": "908627"
},
{
"name": "Java",
"bytes": "323804"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37293"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Protocol Buffer",
"bytes": "249901"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "22872386"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "336334"
}
],
"symlink_target": ""
} |
"""Optionnal shortcuts module for DAL.
This module exports all the public classes for the project. It imports dal
module classes and extension module classes by checking the INSTALLED_APPS
setting:
- if dal_select2 is present, import classes from dal_select2_*,
- with dal_queryset_sequence, import views and fields from it,
- with dal_queryset_sequence and dal_select2, import from
dal_select2_queryset_sequence,
- with django.contrib.contenttypes, import dal_contenttypes,
- with genericm2m, import dal_genericm2m,
- with gm2m, import dal_gm2m,
- with taggit, import dal_taggit,
- with tagulous, import dal_tagulous.
Note that using this module is optional.
"""
from django.conf import settings as django_settings
from .forms import FutureModelForm
from .views import ViewMixin
from .widgets import (
Select,
SelectMultiple,
)
def _installed(*apps):
for app in apps:
if app not in django_settings.INSTALLED_APPS:
return False
return True
if _installed('dal_select2'):
from dal_select2.widgets import (
Select2,
Select2Multiple,
ModelSelect2,
ModelSelect2Multiple,
TagSelect2,
ListSelect2
)
from dal_select2.views import (
Select2QuerySetView,
Select2GroupQuerySetView,
Select2ListView,
Select2GroupListView
)
from dal_select2.fields import (
Select2ListChoiceField,
Select2ListCreateChoiceField
)
if _installed('dal_queryset_sequence'):
from dal_queryset_sequence.fields import (
QuerySetSequenceModelField,
QuerySetSequenceModelMultipleField,
GenericForeignKeyModelField,
)
from dal_queryset_sequence.views import (
BaseQuerySetSequenceView,
)
from queryset_sequence import QuerySetSequence
if _installed('dal_select2', 'dal_queryset_sequence'):
from dal_select2_queryset_sequence.views import (
Select2QuerySetSequenceView,
)
from dal_select2_queryset_sequence.widgets import (
QuerySetSequenceSelect2,
QuerySetSequenceSelect2Multiple,
)
from dal_select2_queryset_sequence.fields import (
Select2GenericForeignKeyModelField,
)
if _installed('dal_select2') and _installed('taggit'):
from dal_select2_taggit.widgets import TaggitSelect2
if _installed('dal_select2') and _installed('tagging'):
from dal_select2_tagging.widgets import TaggingSelect2
if _installed('genericm2m') and _installed('dal_queryset_sequence'):
from dal_genericm2m_queryset_sequence.fields import (
GenericM2MQuerySetSequenceField
)
if _installed('gm2m') and _installed('dal_queryset_sequence'):
from dal_gm2m_queryset_sequence.fields import GM2MQuerySetSequenceField
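

# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# The model `myapp.models.Person`, its `friend` field and the 'person-autocomplete'
# URL name are illustrative assumptions; only the ModelSelect2 widget imported above
# (when dal_select2 is installed) comes from this module.
def _example_person_form():
    """Return a ModelForm class wired to a Select2 autocomplete, as a sketch."""
    from django import forms
    from myapp.models import Person  # hypothetical model with a 'friend' FK

    class PersonForm(forms.ModelForm):
        class Meta:
            model = Person
            fields = ('friend',)
            widgets = {
                'friend': ModelSelect2(url='person-autocomplete'),
            }

    return PersonForm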
| {
"content_hash": "5e71885197307e72746fff9aa805b43c",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 75,
"avg_line_length": 29.902173913043477,
"alnum_prop": 0.7230098146128681,
"repo_name": "yourlabs/django-autocomplete-light",
"id": "7f201b893103258868917e08cf2fa4fd7594f512",
"size": "2751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/dal/autocomplete.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11205"
},
{
"name": "HTML",
"bytes": "5709"
},
{
"name": "JavaScript",
"bytes": "27379"
},
{
"name": "Python",
"bytes": "210537"
},
{
"name": "Shell",
"bytes": "1950"
}
],
"symlink_target": ""
} |
import agate
import agateremote
class TestArchive(agate.AgateTestCase):
def setUp(self):
self.archive = agateremote.Archive('https://github.com/vincentarelbundock/Rdatasets/raw/master/csv/')
def test_get_table(self):
table = self.archive.get_table('sandwich/PublicSchools.csv')
self.assertColumnNames(table, ('a', 'Expenditure', 'Income'))
self.assertColumnTypes(table, [agate.Text, agate.Number, agate.Number])
self.assertEqual(len(table.rows), 51)
| {
"content_hash": "2b2bbbc9bb35709b881055a56f4784c6",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 109,
"avg_line_length": 33.53333333333333,
"alnum_prop": 0.7017892644135189,
"repo_name": "wireservice/agate-remote",
"id": "47cdad4dfea5c7ad487271e304305c54a3f04f47",
"size": "549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_archive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6612"
}
],
"symlink_target": ""
} |
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='sanneal',
version='0.1.dev1',
packages=find_packages(),
entry_points = {
'console_scripts': [
'sanneal-demo = sanneal.sanneal_demo:main',
],
},
install_requires = [
'docutils>=0.3',
'pillow',
],
package_data = {
'': ['*.txt', '*.rst', 'LICENSE'],
},
author='Yakov Shklarov',
author_email='[email protected]',
description='An implementation of the simulated annealing algorithm',
license='MIT',
keywords='sanneal anneal simulated annealing optimization',
url='https://github.com/yshklarov/sanneal',
)
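
# Hedged usage note (editor's addition): given the entry_points declared above,
# installing the package is expected to expose a `sanneal-demo` console script that
# dispatches to sanneal.sanneal_demo:main. Example shell session (paths illustrative):
#   pip install .
#   sanneal-demo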
| {
"content_hash": "48a591b95440ec6f41430314a20012b7",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 73,
"avg_line_length": 23.903225806451612,
"alnum_prop": 0.5964912280701754,
"repo_name": "yshklarov/sanneal",
"id": "d07e08325dab8cc75ce7ea99fcdc797c070ef06d",
"size": "741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18809"
}
],
"symlink_target": ""
} |
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class StartupModel(IdentifiedObject):
"""Unit start up characteristics depending on how long the unit has been off lineUnit start up characteristics depending on how long the unit has been off line
"""
def __init__(self, startupCost=0.0, stbyAuxP=0.0, minimumRunTime=0.0, riskFactorCost=0.0, minimumDownTime=0.0, fixedMaintCost=0.0, startupDate='', hotStandbyHeat=0.0, startupPriority=0, incrementalMaintCost=0.0, StartRampCurve=None, StartMainFuelCurve=None, StartIgnFuelCurve=None, ThermalGeneratingUnit=None, *args, **kw_args):
"""Initialises a new 'StartupModel' instance.
@param startupCost: Total miscellaneous start up costs
@param stbyAuxP: The unit's auxiliary active power consumption to maintain standby mode
@param minimumRunTime: The minimum number of hours the unit must be operating before being allowed to shut down
@param riskFactorCost: The opportunity cost associated with the return in monetary unit. This represents the restart's 'share' of the unit depreciation and risk of an event which would damage the unit.
@param minimumDownTime: The minimum number of hours the unit must be down before restart
@param fixedMaintCost: Fixed Maintenance Cost
@param startupDate: The date and time of the most recent generating unit startup
        @param hotStandbyHeat: The amount of heat input per time unit required for hot standby operation
@param startupPriority: Startup priority within control area where lower numbers indicate higher priorities. More than one unit in an area may be assigned the same priority.
@param incrementalMaintCost: Incremental Maintenance Cost
@param StartRampCurve: The unit's startup model may have a startup ramp curve
@param StartMainFuelCurve: The unit's startup model may have a startup main fuel curve
@param StartIgnFuelCurve: The unit's startup model may have a startup ignition fuel curve
@param ThermalGeneratingUnit: A thermal generating unit may have a startup model
"""
#: Total miscellaneous start up costs
self.startupCost = startupCost
#: The unit's auxiliary active power consumption to maintain standby mode
self.stbyAuxP = stbyAuxP
#: The minimum number of hours the unit must be operating before being allowed to shut down
self.minimumRunTime = minimumRunTime
#: The opportunity cost associated with the return in monetary unit. This represents the restart's 'share' of the unit depreciation and risk of an event which would damage the unit.
self.riskFactorCost = riskFactorCost
#: The minimum number of hours the unit must be down before restart
self.minimumDownTime = minimumDownTime
#: Fixed Maintenance Cost
self.fixedMaintCost = fixedMaintCost
#: The date and time of the most recent generating unit startup
self.startupDate = startupDate
        #: The amount of heat input per time unit required for hot standby operation
self.hotStandbyHeat = hotStandbyHeat
#: Startup priority within control area where lower numbers indicate higher priorities. More than one unit in an area may be assigned the same priority.
self.startupPriority = startupPriority
#: Incremental Maintenance Cost
self.incrementalMaintCost = incrementalMaintCost
self._StartRampCurve = None
self.StartRampCurve = StartRampCurve
self._StartMainFuelCurve = None
self.StartMainFuelCurve = StartMainFuelCurve
self._StartIgnFuelCurve = None
self.StartIgnFuelCurve = StartIgnFuelCurve
self._ThermalGeneratingUnit = None
self.ThermalGeneratingUnit = ThermalGeneratingUnit
super(StartupModel, self).__init__(*args, **kw_args)
_attrs = ["startupCost", "stbyAuxP", "minimumRunTime", "riskFactorCost", "minimumDownTime", "fixedMaintCost", "startupDate", "hotStandbyHeat", "startupPriority", "incrementalMaintCost"]
_attr_types = {"startupCost": float, "stbyAuxP": float, "minimumRunTime": float, "riskFactorCost": float, "minimumDownTime": float, "fixedMaintCost": float, "startupDate": str, "hotStandbyHeat": float, "startupPriority": int, "incrementalMaintCost": float}
_defaults = {"startupCost": 0.0, "stbyAuxP": 0.0, "minimumRunTime": 0.0, "riskFactorCost": 0.0, "minimumDownTime": 0.0, "fixedMaintCost": 0.0, "startupDate": '', "hotStandbyHeat": 0.0, "startupPriority": 0, "incrementalMaintCost": 0.0}
_enums = {}
_refs = ["StartRampCurve", "StartMainFuelCurve", "StartIgnFuelCurve", "ThermalGeneratingUnit"]
_many_refs = []
def getStartRampCurve(self):
"""The unit's startup model may have a startup ramp curve
"""
return self._StartRampCurve
def setStartRampCurve(self, value):
if self._StartRampCurve is not None:
self._StartRampCurve._StartupModel = None
self._StartRampCurve = value
if self._StartRampCurve is not None:
self._StartRampCurve.StartupModel = None
self._StartRampCurve._StartupModel = self
StartRampCurve = property(getStartRampCurve, setStartRampCurve)
def getStartMainFuelCurve(self):
"""The unit's startup model may have a startup main fuel curve
"""
return self._StartMainFuelCurve
def setStartMainFuelCurve(self, value):
if self._StartMainFuelCurve is not None:
self._StartMainFuelCurve._StartupModel = None
self._StartMainFuelCurve = value
if self._StartMainFuelCurve is not None:
self._StartMainFuelCurve.StartupModel = None
self._StartMainFuelCurve._StartupModel = self
StartMainFuelCurve = property(getStartMainFuelCurve, setStartMainFuelCurve)
def getStartIgnFuelCurve(self):
"""The unit's startup model may have a startup ignition fuel curve
"""
return self._StartIgnFuelCurve
def setStartIgnFuelCurve(self, value):
if self._StartIgnFuelCurve is not None:
self._StartIgnFuelCurve._StartupModel = None
self._StartIgnFuelCurve = value
if self._StartIgnFuelCurve is not None:
self._StartIgnFuelCurve.StartupModel = None
self._StartIgnFuelCurve._StartupModel = self
StartIgnFuelCurve = property(getStartIgnFuelCurve, setStartIgnFuelCurve)
def getThermalGeneratingUnit(self):
"""A thermal generating unit may have a startup model
"""
return self._ThermalGeneratingUnit
def setThermalGeneratingUnit(self, value):
if self._ThermalGeneratingUnit is not None:
self._ThermalGeneratingUnit._StartupModel = None
self._ThermalGeneratingUnit = value
if self._ThermalGeneratingUnit is not None:
self._ThermalGeneratingUnit.StartupModel = None
self._ThermalGeneratingUnit._StartupModel = self
ThermalGeneratingUnit = property(getThermalGeneratingUnit, setThermalGeneratingUnit)
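

# --- Hedged usage sketch (editor's addition, not part of the original class) ---
# A minimal illustration of the bidirectional reference handling implemented by the
# property setters above; all attribute values are arbitrary examples.
def _example_startup_model():
    model = StartupModel(startupCost=1500.0, minimumDownTime=4.0,
                         minimumRunTime=8.0, startupPriority=2)
    # Linking a ThermalGeneratingUnit (hypothetical import path below) also sets the
    # reverse reference, because setThermalGeneratingUnit assigns
    # unit._StartupModel = model:
    #     from CIM15.IEC61970.Generation.Production.ThermalGeneratingUnit import \
    #         ThermalGeneratingUnit
    #     unit = ThermalGeneratingUnit()
    #     model.ThermalGeneratingUnit = unit
    return model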
| {
"content_hash": "0218659ca7ffcb795094700baeafb923",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 332,
"avg_line_length": 51.10071942446043,
"alnum_prop": 0.7132197662959313,
"repo_name": "rwl/PyCIM",
"id": "41dbf7440449e18ad8b5e7da216aaa1cbbacb549",
"size": "8203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM15/IEC61970/Generation/Production/StartupModel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
} |
"""Helper script for running endtoend tests."""
import unittest
import logging
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order
from grr.endtoend_tests import base
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import startup
flags.DEFINE_bool("local_client", True,
"The target client(s) are running locally.")
flags.DEFINE_bool("local_worker", False,
"Run tests with a local worker.")
flags.DEFINE_list("client_ids", [],
"List of client ids to test. If unset we use "
"Test.end_to_end_client_ids from the config.")
flags.DEFINE_list("hostnames", [],
"List of client hostnames to test. If unset we use "
"Test.end_to_end_client_hostnames from the config.")
flags.DEFINE_list("testnames", [],
"List of test names to run. If unset we run all "
"relevant tests.")
def RunEndToEndTests():
runner = unittest.TextTestRunner()
# We are running a test so let the config system know that.
config_lib.CONFIG.AddContext(
"Test Context", "Context applied when we run tests.")
startup.Init()
token = access_control.ACLToken(username="test",
reason="Running end to end client tests.")
client_id_set = base.GetClientTestTargets(
client_ids=flags.FLAGS.client_ids,
hostnames=flags.FLAGS.hostnames,
checkin_duration_threshold="1h")
for cls in base.ClientTestBase.classes.values():
for p in cls.platforms:
if p not in set(["Linux", "Darwin", "Windows"]):
raise ValueError(
"Unsupported platform: %s in class %s" % (p, cls.__name__))
if not client_id_set:
print ("No clients to test on. Define Test.end_to_end_client* config "
"options, or pass them as parameters.")
for client in aff4.FACTORY.MultiOpen(client_id_set, token=token):
client = client.GetSummary()
if hasattr(client, "system_info"):
sysinfo = client.system_info
else:
raise RuntimeError("Unknown system type, likely waiting on interrogate"
" to complete.")
for cls in base.ClientTestBase.classes.values():
if flags.FLAGS.testnames and (
cls.__name__ not in flags.FLAGS.testnames):
continue
if not aff4.issubclass(cls, base.ClientTestBase):
continue
if cls.__name__.startswith("Abstract"):
continue
# Fix the call method so we can use the test runner. See doco in
# base.ClientTestBase
def _RealCall(testcase, *args, **kwds):
return testcase.run(*args, **kwds)
cls.__call__ = _RealCall
if sysinfo.system in cls.platforms:
print "Running %s on %s (%s: %s, %s, %s)" % (
cls.__name__, client.client_id, sysinfo.fqdn,
sysinfo.system, sysinfo.version,
sysinfo.machine)
try:
# Mixin the unittest framework so we can use the test runner to run
          # the test and get nice output. We don't want to depend on unittest
# code in the tests themselves.
testcase = cls(client_id=client.client_id,
platform=sysinfo.system, token=token,
local_client=flags.FLAGS.local_client,
local_worker=flags.FLAGS.local_worker)
runner.run(testcase)
except Exception: # pylint: disable=broad-except
logging.exception("Failed to run test %s", cls)
def main(unused_argv):
RunEndToEndTests()
if __name__ == "__main__":
flags.StartMain(main)
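
# Hedged usage examples (editor's addition): invocations using the flags defined
# above; client ids, hostnames and test names are placeholders.
#   python end_to_end_tests.py --hostnames host1.example.com,host2.example.com
#   python end_to_end_tests.py --client_ids C.1234567890abcdef --testnames SomeClientTest
#   python end_to_end_tests.py --local_worker --testnames SomeClientTest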
| {
"content_hash": "b984b529dcac1a37e46e6db57cb2911e",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 79,
"avg_line_length": 32.70434782608696,
"alnum_prop": 0.6216431800053177,
"repo_name": "defaultnamehere/grr",
"id": "12ee6279bb5c351631bb0b977fd297c4259c64cb",
"size": "3783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/end_to_end_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "C++",
"bytes": "55149"
},
{
"name": "CSS",
"bytes": "36345"
},
{
"name": "JavaScript",
"bytes": "831633"
},
{
"name": "Makefile",
"bytes": "5939"
},
{
"name": "Python",
"bytes": "4541648"
},
{
"name": "Shell",
"bytes": "31077"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Lighting(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "cone"
_path_str = "cone.lighting"
_valid_props = {
"ambient",
"diffuse",
"facenormalsepsilon",
"fresnel",
"roughness",
"specular",
"vertexnormalsepsilon",
}
# ambient
# -------
@property
def ambient(self):
"""
Ambient light increases overall color visibility but can wash
out the image.
The 'ambient' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["ambient"]
@ambient.setter
def ambient(self, val):
self["ambient"] = val
# diffuse
# -------
@property
def diffuse(self):
"""
Represents the extent that incident rays are reflected in a
range of angles.
The 'diffuse' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["diffuse"]
@diffuse.setter
def diffuse(self, val):
self["diffuse"] = val
# facenormalsepsilon
# ------------------
@property
def facenormalsepsilon(self):
"""
Epsilon for face normals calculation avoids math issues arising
from degenerate geometry.
The 'facenormalsepsilon' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["facenormalsepsilon"]
@facenormalsepsilon.setter
def facenormalsepsilon(self, val):
self["facenormalsepsilon"] = val
# fresnel
# -------
@property
def fresnel(self):
"""
Represents the reflectance as a dependency of the viewing
angle; e.g. paper is reflective when viewing it from the edge
of the paper (almost 90 degrees), causing shine.
The 'fresnel' property is a number and may be specified as:
- An int or float in the interval [0, 5]
Returns
-------
int|float
"""
return self["fresnel"]
@fresnel.setter
def fresnel(self, val):
self["fresnel"] = val
# roughness
# ---------
@property
def roughness(self):
"""
Alters specular reflection; the rougher the surface, the wider
and less contrasty the shine.
The 'roughness' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["roughness"]
@roughness.setter
def roughness(self, val):
self["roughness"] = val
# specular
# --------
@property
def specular(self):
"""
Represents the level that incident rays are reflected in a
single direction, causing shine.
The 'specular' property is a number and may be specified as:
- An int or float in the interval [0, 2]
Returns
-------
int|float
"""
return self["specular"]
@specular.setter
def specular(self, val):
self["specular"] = val
# vertexnormalsepsilon
# --------------------
@property
def vertexnormalsepsilon(self):
"""
Epsilon for vertex normals calculation avoids math issues
arising from degenerate geometry.
The 'vertexnormalsepsilon' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["vertexnormalsepsilon"]
@vertexnormalsepsilon.setter
def vertexnormalsepsilon(self, val):
self["vertexnormalsepsilon"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
ambient
Ambient light increases overall color visibility but
can wash out the image.
diffuse
Represents the extent that incident rays are reflected
in a range of angles.
facenormalsepsilon
Epsilon for face normals calculation avoids math issues
arising from degenerate geometry.
fresnel
Represents the reflectance as a dependency of the
viewing angle; e.g. paper is reflective when viewing it
from the edge of the paper (almost 90 degrees), causing
shine.
roughness
Alters specular reflection; the rougher the surface,
the wider and less contrasty the shine.
specular
Represents the level that incident rays are reflected
in a single direction, causing shine.
vertexnormalsepsilon
Epsilon for vertex normals calculation avoids math
issues arising from degenerate geometry.
"""
def __init__(
self,
arg=None,
ambient=None,
diffuse=None,
facenormalsepsilon=None,
fresnel=None,
roughness=None,
specular=None,
vertexnormalsepsilon=None,
**kwargs
):
"""
Construct a new Lighting object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.cone.Lighting`
ambient
Ambient light increases overall color visibility but
can wash out the image.
diffuse
Represents the extent that incident rays are reflected
in a range of angles.
facenormalsepsilon
Epsilon for face normals calculation avoids math issues
arising from degenerate geometry.
fresnel
Represents the reflectance as a dependency of the
viewing angle; e.g. paper is reflective when viewing it
from the edge of the paper (almost 90 degrees), causing
shine.
roughness
Alters specular reflection; the rougher the surface,
the wider and less contrasty the shine.
specular
Represents the level that incident rays are reflected
in a single direction, causing shine.
vertexnormalsepsilon
Epsilon for vertex normals calculation avoids math
issues arising from degenerate geometry.
Returns
-------
Lighting
"""
super(Lighting, self).__init__("lighting")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.cone.Lighting
constructor must be a dict or
an instance of :class:`plotly.graph_objs.cone.Lighting`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("ambient", None)
_v = ambient if ambient is not None else _v
if _v is not None:
self["ambient"] = _v
_v = arg.pop("diffuse", None)
_v = diffuse if diffuse is not None else _v
if _v is not None:
self["diffuse"] = _v
_v = arg.pop("facenormalsepsilon", None)
_v = facenormalsepsilon if facenormalsepsilon is not None else _v
if _v is not None:
self["facenormalsepsilon"] = _v
_v = arg.pop("fresnel", None)
_v = fresnel if fresnel is not None else _v
if _v is not None:
self["fresnel"] = _v
_v = arg.pop("roughness", None)
_v = roughness if roughness is not None else _v
if _v is not None:
self["roughness"] = _v
_v = arg.pop("specular", None)
_v = specular if specular is not None else _v
if _v is not None:
self["specular"] = _v
_v = arg.pop("vertexnormalsepsilon", None)
_v = vertexnormalsepsilon if vertexnormalsepsilon is not None else _v
if _v is not None:
self["vertexnormalsepsilon"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
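

# --- Hedged usage sketch (editor's addition, not part of the generated class) ---
# A minimal example of attaching a Lighting instance to a cone trace; the data
# values are arbitrary and `plotly.graph_objects` is assumed to be importable.
def _example_cone_lighting():
    import plotly.graph_objects as go

    lighting = Lighting(ambient=0.6, diffuse=0.8, specular=0.3,
                        roughness=0.5, fresnel=0.2)
    return go.Figure(go.Cone(x=[1], y=[1], z=[1], u=[1], v=[1], w=[0],
                             lighting=lighting))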
| {
"content_hash": "a79c28176f1d7c7017173c2be2a5e5bc",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 82,
"avg_line_length": 29.315112540192928,
"alnum_prop": 0.5517165734342437,
"repo_name": "plotly/python-api",
"id": "2d6093bf247fc2e8d68ff6791dfb6cfdd3874ddb",
"size": "9117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/cone/_lighting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import discord
import pam
from pam.utils import checks
from discord.ext import commands
class StallmanModule:
def __init__(self, bot):
pam.log("GNU/Stallman initialized", tag="PAM COG")
self._bot = bot
@property
def module_name(self):
return "GNU/Stallman"
def _embed(self, ctx, title, description, cat="info"):
clr = 0x00007FFF
if cat == "error":
clr = 0x00FF7F00
elif cat == "success":
clr = 0x007FFF00
elif cat == "warning":
clr = 0x00FFFF00
embed = discord.Embed(title=title, description=description, colour=discord.Colour(clr))
embed.set_thumbnail(url=ctx.bot.user.avatar_url)
return embed
async def disable_for(self, bot, guild):
await bot.set_setting(pam.GuildSetting(guild.id, "stallman", False))
async def enable_for(self, bot, guild):
await bot.set_setting(pam.GuildSetting(guild.id, "stallman", True))
def is_enabled_for(self, guild):
st = self._bot.get_setting(guild.id, "stallman")
if st is None or str(st.setting_value).lower() == "true":
return True
return False
async def on_message(self, message):
ct = message.content.lower()
if self.is_enabled_for(message.channel.server) and "linux" in ct and "gnu/linux" not in ct and "gnu\\linux" not in ct and "gnu+linux" not in ct and "linux kernel" not in ct:
await self._bot.send_message(message.channel,
"I'd just like to interject for a moment. What you're referring to as Linux, is in fact, GNU/Linux, or as I've recently taken to calling it, GNU plus Linux. "
"Linux is not an operating system unto itself, but rather another free component of a fully functioning GNU system made useful by the GNU corelibs, shell "
"utilities and vital system components comprising a full OS as defined by POSIX.\n\nMany computer users run a modified version of the GNU system every day, "
"without realizing it.Through a peculiar turn of events, the version of GNU which is widely used today is often called \"Linux\", and many of its users are "
"not aware that it is basically the GNU system, developed by the GNU Project. There really is a Linux, and these people are using it, but it is just a part "
"of the system they use.\n\nLinux is the kernel: the program in the system that allocates the machine's resources to the other programs that you run. The "
"kernel is an essential part of an operating system, but useless by itself; it can only function in the context of a complete operating system. Linux is "
"normally used in combination with the GNU operating system: the whole system is basically GNU with Linux added, or GNU/Linux. All the so-called \"Linux\" "
"distributions are really distributions of GNU/Linux.")
@commands.group(name="stallman", pass_context=True, description="GNU/Stallman module management")
@checks.has_permissions("manage_server")
async def stallman(self, ctx):
"""
GNU/Stallman module management commands
"""
if ctx.invoked_subcommand is None:
await ctx.bot.say(embed=self._embed(ctx, "Unrecognized subcommand", "`{}` is not a valid subcommand.".format(ctx.subcommand_passed), "error"))
@stallman.command(name="disable", pass_context=True, description="Disables the GNU/Stallman module", aliases=["off"])
@checks.has_permissions("manage_server")
async def _stallman_disable(self, ctx):
"""
Disables GNU/Stallman module on invoked guild
"""
await self.disable_for(ctx.bot, ctx.message.channel.server)
await ctx.bot.say(embed=self._embed(ctx, "Disabled successfully", "GNU/Stallman module is now disabled in this guild.", "success"))
@stallman.command(name="enable", pass_context=True, description="Enables the GNU/Stallman module", aliases=["on"])
@checks.has_permissions("manage_server")
async def _stallman_enable(self, ctx):
"""
Enables GNU/Stallman module on invoked guild
"""
await self.enable_for(ctx.bot, ctx.message.channel.server)
await ctx.bot.say(embed=self._embed(ctx, "Enabled successfully", "GNU/Stallman module is now enabled in this guild.", "success"))
def setup(bot):
bot.add_cog(StallmanModule(bot))
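
# Hedged usage note (editor's addition): the cog is loaded through the standard
# discord.py extension mechanism, which calls setup() above; the extension path is
# inferred from this file's location and may differ in practice:
#   bot.load_extension('pam.modules.stallman')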
| {
"content_hash": "157d934a90b402fb27af8725c5876f89",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 199,
"avg_line_length": 55.70238095238095,
"alnum_prop": 0.6326138063688822,
"repo_name": "Emzi0767/Discord-PAM-Bot",
"id": "4365eef3efff6e2bc96599bbddf922e2003142db",
"size": "4679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pam/modules/stallman.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "170222"
},
{
"name": "Shell",
"bytes": "91"
}
],
"symlink_target": ""
} |
import logging
import uuid
from spacel.provision.cloudformation import BaseCloudFormationFactory
logger = logging.getLogger('spacel')
class SpaceElevatorAppFactory(BaseCloudFormationFactory):
def __init__(self, clients, change_sets, uploader, app_template):
super(SpaceElevatorAppFactory, self).__init__(clients, change_sets,
uploader)
self._app_template = app_template
def app(self, app, force_redeploy=False):
"""
Provision an app in all regions.
:param app: App to provision.
:param force_redeploy: Force redeploying this application.
:returns True if updates completed.
"""
app_name = app.full_name
updates = {}
unique_token = str(uuid.uuid4())
params = {}
if force_redeploy:
# New token: force redeploy according to UpdatePolicy
params['UniqueToken'] = unique_token
for region, app_region in app.regions.items():
template, secret_params = self._app_template.app(app_region)
if not template and not secret_params:
logger.warning('App %s will not be updated, invalid syntax!',
app_name)
continue
secret_params = secret_params or {}
# Treat token as a secret: re-use existing value if possible.
secret_params['UniqueToken'] = lambda: unique_token
updates[region] = self._stack(app_name, region, template,
parameters=params,
secret_parameters=secret_params)
return self._wait_for_updates(app_name, updates)
def delete_app(self, app):
"""
Delete an app in all regions.
:param app: App to delete.
"""
app_name = app.full_name
updates = {}
for region in app.regions:
updates[region] = self._delete_stack(app_name, region)
self._wait_for_updates(app_name, updates)
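

# Hedged usage sketch (editor's addition): the factory dependencies (clients,
# change_sets, uploader, app_template) are project-internal objects assumed to be
# built elsewhere; `app` is an application descriptor exposing `full_name` and
# `regions` as used above.
#     factory = SpaceElevatorAppFactory(clients, change_sets, uploader, app_template)
#     factory.app(app, force_redeploy=True)   # new UniqueToken forces a redeploy
#     factory.delete_app(app)                 # deletes the app stack in every region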
| {
"content_hash": "4522b5942a9b54d11511882589451a1a",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 77,
"avg_line_length": 37.125,
"alnum_prop": 0.5733525733525734,
"repo_name": "pebble/spacel-provision",
"id": "7fea79a4d52ad2486d74b00b56db8023c6d2abf6",
"size": "2079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/spacel/provision/app/space.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1502"
},
{
"name": "Makefile",
"bytes": "1225"
},
{
"name": "Python",
"bytes": "390261"
},
{
"name": "Shell",
"bytes": "1644"
}
],
"symlink_target": ""
} |
import argparse
import os
import pathlib
import sys
import subprocess
def banner_execute() -> pathlib.Path:
script_path = pathlib.Path(os.path.realpath(__file__))
sep = "-" * 79
print("{}\nExecuting: {}\n{}".format(sep, script_path.name, sep))
return script_path
def file_exists(path_str: str, check_absolute: bool = False) -> bool:
path = pathlib.Path(path_str)
if not path.exists() or not path.is_file():
print("Provided path '{}' doesn't exists or is not a file."
.format(path_str), file=sys.stderr)
return False
if check_absolute:
if not path.is_absolute():
print("Provided path '{}' is not an absolute path."
.format(path_str), file=sys.stderr)
return False
return True
def main(args):
script_path = banner_execute()
if not file_exists(args.dll_path, True):
return -1
pdb2mdb_path = "pdb2mdb"
if args.bin_path:
if not file_exists(args.bin_path, True):
return -1
pdb2mdb_path = args.bin_path
else:
# try to build a full absolute path for pdb2mdb binary
pdb2mdb_path = str(script_path.parent.joinpath(pdb2mdb_path))
print("pdb2mdb binary path: '{}'".format(pdb2mdb_path))
print("Generating mdb file")
# run pdb2mdb
process = subprocess.run([pdb2mdb_path, args.dll_path],
stdout=subprocess.PIPE)
if process.stdout != b'':
print(process.stdout)
if process.returncode != 0:
print("An error occured from pdb2mdb. Return code: {}".format(
process.returncode), file=sys.stderr)
else:
print("pdb2mdb success. Return code: {}".format(process.returncode))
# return pdb2mdb return code to caller.
return process.returncode
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser(description='Pdb2mdb starter script.')
arg_parser.add_argument(
'dll_path', type=str, action="store",
help='Full path to the DLL for which to generate the mdb file')
arg_parser.add_argument(
'-p', action="store", dest="bin_path",
help="Full path to pdb2mdb (default: use current dir)")
parsed_args = arg_parser.parse_args()
sys.exit(main(parsed_args))
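
# Hedged usage examples (editor's addition): paths are placeholders; the positional
# argument and the -p option match the argparse definitions above.
#   python pdb2mdb.py /full/path/to/Assembly.dll
#   python pdb2mdb.py /full/path/to/Assembly.dll -p /full/path/to/pdb2mdb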
| {
"content_hash": "1e58b48fa9646a46d599be79ce877509",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 79,
"avg_line_length": 30.93243243243243,
"alnum_prop": 0.6190476190476191,
"repo_name": "neitsa/PrepareLanding",
"id": "81c3fbf9a1a07bbeae3b204829fd6cd1f86d8db0",
"size": "2351",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tools/pdb2mdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "550528"
},
{
"name": "Python",
"bytes": "28570"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Mail-Merge for Scribus. This file provides the backend.
#
# For further information (manual, description, etc.) please visit:
# https://github.com/berteh/ScribusGenerator/
#
# v2.9.1 (2021-01-22): update port to Python3 for Scribus 1.5.6+, various DOC updates
#
"""
The MIT License
Copyright (c) 2010-2014 Ekkehard Will (www.ekkehardwill.de), 2014-2021 Berteh (https://github.com/berteh/)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import csv
import os
import platform
import logging
import logging.config
import sys
import xml.etree.ElementTree as ET
import json
import re
import string
class CONST:
# Constants for general usage
TRUE = 1
FALSE = 0
EMPTY = ''
APP_NAME = 'Scribus Generator'
FORMAT_PDF = 'PDF'
FORMAT_SLA = 'Scribus'
FILE_EXTENSION_PDF = 'pdf'
FILE_EXTENSION_SCRIBUS = 'sla'
SEP_PATH = '/' # In any case we use '/' as path separator on any platform
SEP_EXT = os.extsep
# CSV entry separator, comma by default; tab: " " is also common if using Excel.
CSV_SEP = ","
# indent the generated SLA code for more readability, aka "XML pretty print". set to 1 if you want to edit generated SLA manually.
INDENT_SLA = 0
CONTRIB_TEXT = "\npowered by ScribusGenerator - https://github.com/berteh/ScribusGenerator/"
STORAGE_NAME = "ScribusGeneratorDefaultSettings"
# set to 0 to prevent removal of un-subsituted variables, along with their empty containing itext
CLEAN_UNUSED_EMPTY_VARS = 1
# set to 0 to keep the separating element before an unused/empty variable, typicaly a linefeed (<para>) or list syntax token (,;-.)
REMOVE_CLEANED_ELEMENT_PREFIX = 1
# set to 0 to replace all tabs and linebreaks in csv data by simple spaces.
KEEP_TAB_LINEBREAK = 1
SG_VERSION = '2.9.1 python2'
# set to any word you'd like to use to trigger a jump to the next data record. using a name similar to the variables %VAR_ ... % will ensure it is cleaned after generation, and not show in the final document(s).
NEXT_RECORD = '%SG_NEXT-RECORD%'
class ScribusGenerator:
# The Generator Module has all the logic and will do all the work
def __init__(self, dataObject):
self.__dataObject = dataObject
logging.config.fileConfig(os.path.join(
os.path.abspath(os.path.dirname(__file__)), 'logging.conf'))
# todo: check if logging works, if not warn user to configure log file path and disable.
logging.info("ScribusGenerator initialized")
logging.debug("OS: %s - Python: %s - ScribusGenerator v%s" %
(os.name, platform.python_version(), CONST.SG_VERSION))
def run(self):
        # Read CSV data and replace the variables in the Scribus File with the corresponding data. Finally export to the specified format.
# may throw exceptions if errors are met, use traceback to get all error details
# log options
optionsTxt = self.__dataObject.toString()
logging.debug("active options: %s%s" %
(optionsTxt[:1], optionsTxt[172:]))
# output file name
        if(self.__dataObject.getSingleOutput() and (self.__dataObject.getOutputFileName() == CONST.EMPTY)):
self.__dataObject.setOutputFileName(os.path.split(os.path.splitext(
self.__dataObject.getScribusSourceFile())[0])[1] + '__single')
# source sla
logging.info("parsing scribus source file %s" %
(self.__dataObject.getScribusSourceFile()))
try:
tree = ET.parse(self.__dataObject.getScribusSourceFile())
except IOError as exception:
logging.error("Scribus file not found: %s" %
(self.__dataObject.getScribusSourceFile()))
raise
root = tree.getroot()
version = root.get('Version')
logging.debug("Scribus SLA template version is %s" % (version))
# save settings
if (self.__dataObject.getSaveSettings()):
serial = self.__dataObject.toString()
# as: %s"%serial)
logging.debug(
"saving current Scribus Generator settings in your source file")
docElt = root.find('DOCUMENT')
storageElt = docElt.find('./JAVA[@NAME="'+CONST.STORAGE_NAME+'"]')
if (storageElt is None):
colorElt = docElt.find('./COLOR[1]')
scriptPos = docElt.getchildren().index(colorElt)
logging.debug(
"creating new storage element in SLA template at position %s" % scriptPos)
storageElt = ET.Element("JAVA", {"NAME": CONST.STORAGE_NAME})
docElt.insert(scriptPos, storageElt)
storageElt.set("SCRIPT", serial)
# todo check if scribus reloads (or overwrites :/ ) when doc is opened, opt use API to add a script if there's an open doc.
tree.write(self.__dataObject.getScribusSourceFile())
# data
logging.info("parsing data source file %s" %
(self.__dataObject.getDataSourceFile()))
try:
csvData = self.getCsvData(self.__dataObject.getDataSourceFile())
except IOError as exception:
logging.error("CSV file not found: %s" %
(self.__dataObject.getDataSourceFile()))
raise
if(len(csvData) < 1):
logging.error("Data file %s is empty. At least a header line and a line of data is needed. Halting." % (
self.__dataObject.getDataSourceFile()))
return -1
if(len(csvData) < 2):
logging.error("Data file %s has only one line. At least a header line and a line of data is needed. Halting." % (
self.__dataObject.getDataSourceFile()))
return -1
# range
firstElement = 1
if(self.__dataObject.getFirstRow() != CONST.EMPTY):
try:
newFirstElementValue = int(self.__dataObject.getFirstRow())
# Guard against 0 or negative numbers
firstElement = max(newFirstElementValue, 1)
except:
logging.warning(
"Could not parse value of 'first row' as an integer, using default value instead")
lastElement = len(csvData)
if(self.__dataObject.getLastRow() != CONST.EMPTY):
try:
newLastElementValue = int(self.__dataObject.getLastRow())
# Guard against numbers higher than the length of csvData
lastElement = min(newLastElementValue + 1, lastElement)
except:
logging.warning(
"Could not parse value of 'last row' as an integer, using default value instead")
if ((firstElement != 1) or (lastElement != len(csvData))):
csvData = csvData[0:1] + csvData[firstElement: lastElement]
logging.debug("custom data range is: %s - %s" %
(firstElement, lastElement))
else:
logging.debug("full data range will be used")
# generation
dataC = len(csvData)-1
fillCount = len(str(dataC))
# XML-Content/Text-Content of the Source Scribus File (List of Lines)
template = []
outputFileNames = []
index = 0 # current data record
rootStr = ET.tostring(root, encoding='utf8', method='xml')
# number of data records appearing in source document
recordsInDocument = 1 + string.count(rootStr, CONST.NEXT_RECORD)
logging.info("source document consumes %s data record(s) from %s." %
(recordsInDocument, dataC))
dataBuffer = []
for row in csvData:
if(index == 0): # first line is the Header-Row of the CSV-File
varNamesForFileName = row
varNamesForReplacingVariables = self.encodeScribusXML([row])[0]
# overwrite attributes from their /*/ItemAttribute[Parameter=SGAttribute] sibling, when applicable.
templateElt = self.overwriteAttributesFromSGAttributes(root)
else: # index > 0, row is one data entry
# accumulate row in buffer
dataBuffer.append(row)
# buffered data for all document records OR reached last data record
if (index % recordsInDocument == 0) or index == dataC:
# subsitute
outContent = self.substituteData(varNamesForReplacingVariables, self.encodeScribusXML(dataBuffer),
ET.tostring(templateElt, method='xml').split('\n'), keepTabsLF=CONST.KEEP_TAB_LINEBREAK)
if (self.__dataObject.getSingleOutput()):
# first substitution, update DOCUMENT properties
if (index == min(recordsInDocument,dataC)):
logging.debug(
"generating reference content from dataBuffer #1")
outputElt = ET.fromstring(outContent)
docElt = outputElt.find('DOCUMENT')
pagescount = int(docElt.get('ANZPAGES'))
pageheight = float(docElt.get('PAGEHEIGHT'))
vgap = float(docElt.get('GapVertical'))
groupscount = int(docElt.get('GROUPC'))
objscount = len(outputElt.findall('.//PAGEOBJECT'))
logging.debug(
"current template has #%s pageobjects" % (objscount))
# if version.startswith('1.4'):
# docElt.set('GROUPC', str(groupscount*dataC))
# todo replace +1 by roundup()
docElt.set('ANZPAGES', str(
pagescount*dataC//recordsInDocument + 1))
docElt.set('DOCCONTRIB', docElt.get(
'DOCCONTRIB')+CONST.CONTRIB_TEXT)
else: # not first substitution, append DOCUMENT content
logging.debug(
"merging content from dataBuffer #%s" % (index))
tmpElt = ET.fromstring(outContent).find('DOCUMENT')
shiftedElts = self.shiftPagesAndObjects(
tmpElt, pagescount, pageheight, vgap, index-1, recordsInDocument, groupscount, objscount, version)
docElt.extend(shiftedElts)
else: # write one of multiple sla
outputFileName = self.createOutputFileName(
index, self.__dataObject.getOutputFileName(), varNamesForFileName, dataBuffer, fillCount)
self.writeSLA(ET.fromstring(
outContent), outputFileName)
outputFileNames.append(outputFileName)
dataBuffer = []
index = index + 1
# clean & write single sla
if (self.__dataObject.getSingleOutput()):
self.writeSLA(outputElt, self.__dataObject.getOutputFileName())
outputFileNames.append(self.__dataObject.getOutputFileName())
# Export the generated Scribus Files as PDF
if(CONST.FORMAT_PDF == self.__dataObject.getOutputFormat()):
for outputFileName in outputFileNames:
pdfOutputFilePath = self.createOutputFilePath(
self.__dataObject.getOutputDirectory(), outputFileName, CONST.FILE_EXTENSION_PDF)
scribusOutputFilePath = self.createOutputFilePath(
self.__dataObject.getOutputDirectory(), outputFileName, CONST.FILE_EXTENSION_SCRIBUS)
self.exportPDF(scribusOutputFilePath, pdfOutputFilePath)
logging.info("pdf file created: %s" % (pdfOutputFilePath))
# Cleanup the generated Scribus Files
if(not (CONST.FORMAT_SLA == self.__dataObject.getOutputFormat()) and CONST.FALSE == self.__dataObject.getKeepGeneratedScribusFiles()):
for outputFileName in outputFileNames:
scribusOutputFilePath = self.createOutputFilePath(
self.__dataObject.getOutputDirectory(), outputFileName, CONST.FILE_EXTENSION_SCRIBUS)
self.deleteFile(scribusOutputFilePath)
return 1
def exportPDF(self, scribusFilePath, pdfFilePath):
import scribus
d = os.path.dirname(pdfFilePath)
if not os.path.exists(d):
os.makedirs(d)
# Export to PDF
scribus.openDoc(scribusFilePath)
listOfPages = []
i = 0
while (i < scribus.pageCount()):
i = i + 1
listOfPages.append(i)
pdfExport = scribus.PDFfile()
pdfExport.info = CONST.APP_NAME
pdfExport.file = str(pdfFilePath)
pdfExport.pages = listOfPages
pdfExport.save()
scribus.closeDoc()
def writeSLA(self, slaET, outFileName, clean=CONST.CLEAN_UNUSED_EMPTY_VARS, indentSLA=CONST.INDENT_SLA):
# write SLA to filepath computed from given elements, optionnaly cleaning empty ITEXT elements and their empty PAGEOBJECTS
scribusOutputFilePath = self.createOutputFilePath(
self.__dataObject.getOutputDirectory(), outFileName, CONST.FILE_EXTENSION_SCRIBUS)
d = os.path.dirname(scribusOutputFilePath)
if not os.path.exists(d):
os.makedirs(d)
outTree = ET.ElementTree(slaET)
if (clean):
self.removeEmptyTexts(outTree.getroot())
if (indentSLA):
from xml.dom import minidom
xmlstr = minidom.parseString(ET.tostring(outTree.getroot())).toprettyxml(indent=" ")
with open(scribusOutputFilePath, "w") as f:
f.write(xmlstr.encode('utf-8'))
else:
outTree.write(scribusOutputFilePath, encoding="UTF-8")
logging.info("scribus file created: %s" % (scribusOutputFilePath))
return scribusOutputFilePath
def overwriteAttributesFromSGAttributes(self, root):
# modifies root such that
# attributes have been rewritten from their /*/ItemAttribute[Parameter=SGAttribute] sibling, when applicable.
#
# allows to use %VAR_<var-name>% in Item Attribute to overwrite internal attributes (eg FONT)
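        # Hedged illustration (editor's addition): an Item Attribute shaped like the
        # following would overwrite FONT on the nodes matched by RelationshipTo
        # (an empty RelationshipTo targets the page object itself); only the Name,
        # Value, Parameter and RelationshipTo attributes are read by the code below.
        #   <ItemAttribute Name="FONT" Parameter="SGAttribute"
        #                  Value="%VAR_font%" RelationshipTo=".//ITEXT"/>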
for pageobject in root.findall(".//ItemAttribute[@Parameter='SGAttribute']/../.."):
for sga in pageobject.findall(".//ItemAttribute[@Parameter='SGAttribute']"):
attribute = sga.get('Name')
value = sga.get('Value')
ref = sga.get('RelationshipTo')
if ref is "": # Cannot use 'default' on .get() as it is "" by default in SLA file.
# target is pageobject by default. Cannot use ".|*" as not supported by ET.
ref = "."
elif ref.startswith("/"): # ET cannot use absolute path on element
ref = "."+ref
try:
targets = pageobject.findall(ref)
if targets:
for target in targets:
logging.debug('overwriting value of %s in %s with "%s"' % (
attribute, target.tag, value))
target.set(attribute, value)
else:
logging.error('Target "%s" could be parsed but designated no node. Check it out as it is probably not what you expected to replace %s.' % (
ref, attribute)) # todo message to user
except SyntaxError:
logging.error('XPATH expression "%s" could not be parsed by ElementTree to overwrite %s. Skipping.' % (
ref, attribute)) # todo message to user
return root
def shiftPagesAndObjects(self, docElt, pagescount, pageheight, vgap, index, recordsInDocument, groupscount, objscount, version):
shifted = []
voffset = (float(pageheight)+float(vgap)) * \
(index // recordsInDocument)
for page in docElt.findall('PAGE'):
page.set('PAGEYPOS', str(float(page.get('PAGEYPOS')) + voffset))
page.set('NUM', str(int(page.get('NUM')) + pagescount))
shifted.append(page)
for obj in docElt.findall('PAGEOBJECT'):
obj.set('YPOS', str(float(obj.get('YPOS')) + voffset))
obj.set('OwnPage', str(int(obj.get('OwnPage')) + pagescount))
# update ID and links
if version.startswith('1.4'):
# if not (int(obj.get('NUMGROUP')) == 0):
# obj.set('NUMGROUP', str(int(obj.get('NUMGROUP')) + groupscount * index))
# next linked frame by position
if (obj.get('NEXTITEM') != None and (str(obj.get('NEXTITEM')) != "-1")):
obj.set('NEXTITEM', str(
int(obj.get('NEXTITEM')) + (objscount * index)))
# previous linked frame by position
if (obj.get('BACKITEM') != None and (str(obj.get('BACKITEM')) != "-1")):
obj.set('BACKITEM', str(
int(obj.get('BACKITEM')) + (objscount * index)))
else: # 1.5, 1.6
logging.debug("shifting object %s (#%s)" %
(obj.tag, obj.get('ItemID')))
# todo update ID with something unlikely allocated, TODO ensure unique ID instead of 6:, issue #101
obj.set('ItemID', str(objscount * index) +
str(int(obj.get('ItemID')))[7:])
# next linked frame by ItemID
if (obj.get('NEXTITEM') != None and (str(obj.get('NEXTITEM')) != "-1")):
obj.set('NEXTITEM', str(objscount * index) +
str(int(obj.get('NEXTITEM')))[7:])
# previous linked frame by ItemID
if (obj.get('BACKITEM') != None and (str(obj.get('BACKITEM')) != "-1")):
obj.set('BACKITEM', str(objscount * index) +
str(int(obj.get('BACKITEM')))[7:])
shifted.append(obj)
        logging.debug("shifted page elements for record %s by %s" % (index, voffset))
return shifted
def removeEmptyTexts(self, root):
# *modifies* root ElementTree by removing empty text elements and their empty placeholders.
# returns number of ITEXT elements deleted.
# 1. clean text in which some variable-like text is not substituted (ie: known or unknown variable):
# <ITEXT CH="empty %VAR_empty% variable should not show" FONT="Arial Regular" />
# 2. remove <ITEXT> with empty @CH and precedings <para/> if any
# 3. remove any <PAGEOBJECT> that has no <ITEXT> child left
emptyXPath = "ITEXT[@CH='']"
d = 0
        # a little obscure: the parent element is needed to remove a child, and ElementTree has no parent() method.
for page in root.findall(".//%s/../.." % emptyXPath):
            # collect emptyXPath matches and any preceding <para> for removal; iteration is needed because ElementTree has no previous-sibling navigation
for po in page.findall(".//%s/.." % emptyXPath):
trash = []
for pos, item in enumerate(po):
if (item.tag == "ITEXT") and (item.get("CH") == ""):
logging.debug(
"cleaning 1 empty ITEXT and preceding linefeed (opt.)")
if (CONST.REMOVE_CLEANED_ELEMENT_PREFIX and po[pos-1].tag == "para"):
trash.append(pos-1)
trash.append(pos)
d += 1
trash.reverse()
# remove trashed elements as stack (lifo order), to preserve positions validity
for i in trash:
po.remove(po[i])
            if (len(po.findall("ITEXT")) == 0):
logging.debug("cleaning 1 empty PAGEOBJECT")
page.remove(po)
        logging.info("removed %d empty text items" % d)
return d
def deleteFile(self, outputFilePath):
        # Delete the temporarily generated files from the file system
os.remove(outputFilePath)
def createOutputFilePath(self, outputDirectory, outputFileName, fileExtension):
# Build the absolute path, like C:/tmp/template.sla
return outputDirectory + CONST.SEP_PATH + outputFileName + CONST.SEP_EXT + fileExtension
def createOutputFileName(self, index, outputFileName, varNames, rows, fillCount):
# If the User has not set an Output File Name, an internal unique file name
# will be generated which is the index of the loop.
result = str(index)
result = result.zfill(fillCount)
# Following characters are not allowed for File-Names on WINDOWS: < > ? " : | \ / *
# Note / is still allowed in filename as it allows dynamic subdirectory in Linux (issue 102); todo check & fix for Windows
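        # Illustrative (hypothetical data): with fillCount=4 and no output file name set,
        # record index 7 yields "0007"; with outputFileName "%VAR_name%_letter" the result
        # would become e.g. "Smith_letter" after variable substitution.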
if(CONST.EMPTY != outputFileName):
table = {
# ord(u'ä'): u'ae',
# ord(u'Ä'): u'Ae',
# ord(u'ö'): u'oe',
# ord(u'Ö'): u'Oe',
# ord(u'ü'): u'ue',
# ord(u'Ü'): u'Ue',
# ord(u'ß'): u'ss',
ord(u'<'): u'_',
ord(u'>'): u'_',
ord(u'?'): u'_',
ord(u'"'): u'_',
ord(u':'): u'_',
ord(u'|'): u'_',
ord(u'\\'): u'_',
# ord(u'/'): u'_',
ord(u'*'): u'_'
}
result = self.substituteData(varNames, rows, [outputFileName])
result = result.decode('utf_8')
result = result.translate(table)
logging.debug("output file name is %s" % result)
return result
def copyScribusContent(self, src):
# Returns a plain copy of src where src is expected to be a list (of text lines)
result = []
for line in src:
result.append(line)
return result
def readFileContent(self, src):
# Returns the list of lines (as strings) of the text-file
tmp = open(src, 'r')
result = tmp.readlines()
tmp.close()
return result
def encodeScribusXML(self, rows):
# Encode some characters that can be found in CSV into XML entities
# not all are needed as Scribus handles most UTF8 characters just fine.
result = []
        replacements = {'&': '&amp;', '"': '&quot;', '<': '&lt;'}
for row in rows:
res1 = []
for i in row:
res1.append(self.multiple_replace(i, replacements))
result.append(res1)
return result
def multiple_replace(self, string, rep_dict):
        # multiple simultaneous string replacements, per http://stackoverflow.com/a/15448887/1694411
# combine with dictionary = dict(zip(keys, values)) to use on arrays
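        # e.g. multiple_replace('Tom & "Jerry"', {'&': '&amp;', '"': '&quot;'})
        # returns 'Tom &amp; &quot;Jerry&quot;' (illustrative).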
pattern = re.compile("|".join([re.escape(k)
for k in rep_dict.keys()]), re.M)
return pattern.sub(lambda x: rep_dict[x.group(0)], string)
# lines as list of strings
def substituteData(self, varNames, rows, lines, clean=CONST.CLEAN_UNUSED_EMPTY_VARS, keepTabsLF=0):
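        # Illustrative sketch (hypothetical data): with varNames = ['name'] and
        # rows = [['Ada'], ['Bob']], a line '<ITEXT CH="Dear %VAR_name%,"/>' becomes
        # '<ITEXT CH="Dear Ada,"/>'; after a CONST.NEXT_RECORD marker the replacements
        # switch to the next row ('Bob').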
result = ''
currentRecord = 0
replacements = dict(
zip(['%VAR_'+i+'%' for i in varNames], rows[currentRecord]))
#logging.debug("replacements is: %s"%replacements)
# done in string instead of XML for lack of efficient attribute-value-based substring-search in ElementTree
for idx, line in enumerate(lines):
# logging.debug("replacing vars in (out of %s): %s"%(len(line), line[:30]))
# skip un-needed computations and preserve colors declarations
if (re.search('%VAR_|'+CONST.NEXT_RECORD, line) == None) or (re.search('\s*<COLOR\s+', line) != None):
result = result + line
# logging.debug(" keeping intact %s"%line[:30])
continue
# detect NEXT_RECORD
if CONST.NEXT_RECORD in line:
currentRecord += 1
if currentRecord < len(rows):
logging.debug("loading next record")
replacements = dict(
zip(['%VAR_'+i+'%' for i in varNames], rows[currentRecord]))
                else:  # last record reached, leave remaining variables to be cleaned
replacements = {
"END-OF-REPLACEMENTS": "END-OF-REPLACEMENTS"}
logging.debug("next record reached last data entry")
# replace with data
logging.debug("replacing VARS_* in %s" % line[:30].strip())
line = self.multiple_replace(line, replacements)
#logging.debug("replaced in line: %s" % line)
# remove (& trim) any (unused) %VAR_\w*% like string.
if (clean):
if (CONST.REMOVE_CLEANED_ELEMENT_PREFIX):
(line, d) = re.subn('\s*[,;-]*\s*%VAR_\w*%\s*', '', line)
else:
(line, d) = re.subn('\s*%VAR_\w*%\s*', '', line)
if (d > 0):
logging.debug("cleaned %d empty variable" % d)
(line, d) = re.subn('\s*%s\w*\s*' %
CONST.NEXT_RECORD, '', line)
# convert \t and \n into scribus <tab/> and <linebreak/>
if (keepTabsLF == 1) and (re.search('[\t\n]+', line, flags=re.MULTILINE)):
m = re.search(
'(<ITEXT.* CH=")([^"]+)(".*/>)', line, flags=re.MULTILINE | re.DOTALL)
if m:
begm = m.group(1)
endm = m.group(3)
# logging.debug("converting tabs and linebreaks in line: %s"%(line))
line = re.sub('([\t\n]+)', endm + '\g<1>' +
begm, line, flags=re.MULTILINE)
# replace \t and \n
line = re.sub('\t', '<tab />', line)
line = re.sub('\n', '<breakline />',
line, flags=re.MULTILINE)
logging.debug(
"converted tabs and linebreaks in line: %s" % line)
else:
                    logging.warning(
                        "could not convert tabs and linebreaks in this line, kindly report this to the developers: %s" % (line))
result = result + line
return result
def getCsvData(self, csvfile):
        # Read the CSV file and return a 2-dimensional list containing the data.
# TODO check to replace with https://docs.python.org/3/library/csv.html#csv.DictReader
reader = csv.reader(file(csvfile), delimiter=self.__dataObject.getCsvSeparator(
), skipinitialspace=True, doublequote=True)
result = []
for row in reader:
if(len(row) > 0): # strip empty lines in source CSV
rowlist = []
for col in row:
rowlist.append(col)
result.append(rowlist)
return result
def getLog(self):
return logging
def getSavedSettings(self):
logging.debug("parsing scribus source file %s for user settings" % (
self.__dataObject.getScribusSourceFile()))
try:
t = ET.parse(self.__dataObject.getScribusSourceFile())
r = t.getroot()
doc = r.find('DOCUMENT')
storage = doc.find('./JAVA[@NAME="'+CONST.STORAGE_NAME+'"]')
return storage.get("SCRIPT")
except SyntaxError as exception:
            logging.error(
                "Loading settings is only possible with Python 2.7 and later, please update your system: %s" % exception)
return None
except Exception as exception:
logging.debug("could not load the user settings: %s" % exception)
return None
class GeneratorDataObject:
    # Data Object for transferring the settings made by the user on the UI / CLI
def __init__(self,
scribusSourceFile=CONST.EMPTY,
dataSourceFile=CONST.EMPTY,
outputDirectory=CONST.EMPTY,
outputFileName=CONST.EMPTY,
outputFormat=CONST.EMPTY,
keepGeneratedScribusFiles=CONST.FALSE,
csvSeparator=CONST.CSV_SEP,
singleOutput=CONST.FALSE,
firstRow=CONST.EMPTY,
lastRow=CONST.EMPTY,
saveSettings=CONST.TRUE,
closeDialog=CONST.FALSE):
self.__scribusSourceFile = scribusSourceFile
self.__dataSourceFile = dataSourceFile
self.__outputDirectory = outputDirectory
self.__outputFileName = outputFileName
self.__outputFormat = outputFormat
self.__keepGeneratedScribusFiles = keepGeneratedScribusFiles
self.__csvSeparator = csvSeparator
self.__singleOutput = singleOutput
self.__firstRow = firstRow
self.__lastRow = lastRow
self.__saveSettings = saveSettings
self.__closeDialog = closeDialog
# Get
def getScribusSourceFile(self):
return self.__scribusSourceFile
def getDataSourceFile(self):
return self.__dataSourceFile
def getOutputDirectory(self):
return self.__outputDirectory
def getOutputFileName(self):
return self.__outputFileName
def getOutputFormat(self):
return self.__outputFormat
def getKeepGeneratedScribusFiles(self):
return self.__keepGeneratedScribusFiles
def getCsvSeparator(self):
return self.__csvSeparator
def getSingleOutput(self):
return self.__singleOutput
def getFirstRow(self):
return self.__firstRow
def getLastRow(self):
return self.__lastRow
def getSaveSettings(self):
return self.__saveSettings
def getCloseDialog(self):
return self.__closeDialog
# Set
def setScribusSourceFile(self, fileName):
self.__scribusSourceFile = fileName
def setDataSourceFile(self, fileName):
self.__dataSourceFile = fileName
def setOutputDirectory(self, directory):
self.__outputDirectory = directory
def setOutputFileName(self, fileName):
self.__outputFileName = fileName
def setOutputFormat(self, outputFormat):
self.__outputFormat = outputFormat
def setKeepGeneratedScribusFiles(self, value):
self.__keepGeneratedScribusFiles = value
def setCsvSeparator(self, value):
self.__csvSeparator = value
def setSingleOutput(self, value):
self.__singleOutput = value
def setFirstRow(self, value):
self.__firstRow = value
def setLastRow(self, value):
self.__lastRow = value
def setSaveSettings(self, value):
self.__saveSettings = value
def setCloseDialog(self, value):
self.__closeDialog = value
# (de)Serialize all options but scribusSourceFile and saveSettings
def toString(self):
return json.dumps({
'_comment': "this is an automated placeholder for ScribusGenerator default settings. more info at https://github.com/berteh/ScribusGenerator/. modify at your own risks.",
# 'scribusfile':self.__scribusSourceFile NOT saved
'csvfile': self.__dataSourceFile,
'outdir': self.__outputDirectory,
'outname': self.__outputFileName,
'outformat': self.__outputFormat,
'keepsla': self.__keepGeneratedScribusFiles,
'separator': self.__csvSeparator,
'single': self.__singleOutput,
'from': self.__firstRow,
'to': self.__lastRow,
'close': self.__closeDialog
# 'savesettings':self.__saveSettings NOT saved
}, sort_keys=True)
# todo add validity/plausibility checks on all values?
def loadFromString(self, string):
j = json.loads(string)
for k, v in j.iteritems():
if v == None:
j[k] = CONST.EMPTY
# self.__scribusSourceFile NOT loaded
self.__dataSourceFile = j['csvfile']
self.__outputDirectory = j['outdir']
self.__outputFileName = j['outname']
self.__outputFormat = j['outformat']
self.__keepGeneratedScribusFiles = j['keepsla']
        # str() to prevent TypeError: "delimiter" must be string, not unicode, in the csv.reader() call
self.__csvSeparator = str(j['separator'])
self.__singleOutput = j["single"]
self.__firstRow = j["from"]
self.__lastRow = j["to"]
self.__closeDialog = j["close"]
# self.__saveSettings NOT loaded
logging.debug("loaded %d user settings" %
(len(j)-1)) # -1 for the artificial "comment"
return j
| {
"content_hash": "4dffa62959b7a31f468299d31acb471e",
"timestamp": "",
"source": "github",
"line_count": 721,
"max_line_length": 559,
"avg_line_length": 48.697642163661584,
"alnum_prop": 0.5599384808179773,
"repo_name": "berteh/ScribusGenerator",
"id": "b6bcb6eb18bca2489bb289a9610ce1606004a383",
"size": "35118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ScribusGeneratorBackend.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "68467"
}
],
"symlink_target": ""
} |
__author__ = 'Daniel'
import sys
from PySide.QtCore import *
from PySide.QtGui import *
import GUI.ui_menu
import GUI.ui_startwidget
import GUI.ui_gamedialog
import GUI.ui_historywidget
class MenuWidget(QWidget, GUI.ui_menu.Ui_Menu):
def __init__(self):
super(MenuWidget, self).__init__()
self.setupUi(self)
model = QStringListModel(list(map(str, range(100))))
self.myListView.setModel(model)
class GameDialog(QDialog, GUI.ui_gamedialog.Ui_Dialog):
def __init__(self, parent=None):
super(GameDialog, self).__init__(parent)
self.parent_component = parent
self.setupUi(self)
self.game = parent.game
self.t = QTimer()
self.lag = 1
def start(self):
x, ok = QInputDialog.getInt(self, "Rounds", "Rounds", 10)
if not (ok and isinstance(x, int)):
return
self.questions_left = x
self.lcdNumber.display(self.lag)
self.t.timeout.connect(self.on_timeout)
self.t.start(1000)
self.userInputLineEdit.textEdited.connect(self.on_userInputLineEdit_textEdited)
self.userInputLineEdit.returnPressed.connect(self.on_userInputLineEdit_returnPressed)
self.show()
def on_userInputLineEdit_returnPressed(self):
txt = self.userInputLineEdit.text()
self.submit_answer(txt)
def on_userInputLineEdit_textEdited(self, txt):
if self.parent_component.checkAns(txt):
self.submit_answer(txt)
def submit_answer(self, txt):
self.game.end_timing()
_, solution, time_taken = self.game.solve_question(int(txt))
self.lcdNumber.display(time_taken)
self.questions_left -= 1
if self.questions_left == 0:
self.stop_game()
else:
self.start_game()
def on_timeout(self):
self.lag -= 1
self.lcdNumber.display(self.lag)
if self.lag <= 0:
self.t.stop()
self.start_game()
def start_game(self):
self.game.gen_next_question()
self.game.start_timing()
self.userInputLineEdit.setText("")
self.questionLabel.setText(self.game.current_question.query)
def stop_game(self):
self.done()
def done(self, *args):
# Cleanup as well
try:
self.game.user_score.save_db()
QMessageBox.information(self, "Statistics", "blah\nblah\nblah")
self.t.stop()
except:
pass
super(GameDialog, self).done(0)
class StartWidget(QWidget, GUI.ui_startwidget.Ui_Form):
def __init__(self, parent=None):
super(StartWidget, self).__init__(parent)
self.setupUi(self)
self.quickGameBtn.clicked.connect(parent.start_quick_game)
self.historyOrSavedBtn.clicked.connect(parent.display_history)
self.trainingBtn.clicked.connect(parent.start_training)
self.settingsBtn.clicked.connect(parent.display_settings)
class HistoryDlg(QDialog, GUI.ui_historywidget.Ui_Dialog):
def __init__(self, parent=None):
super(HistoryDlg, self).__init__(parent)
self.parent_component = parent
self.setupUi(self)
self.historyTableView.resizeColumnsToContents()
self.historyTableView.setSortingEnabled(True)
def set_up_history(self, model):
self.historyTableView.setModel(model) | {
"content_hash": "53508e5a1b16b4f1dd4114a231f62554",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 93,
"avg_line_length": 32.35576923076923,
"alnum_prop": 0.6350668647845468,
"repo_name": "daniellowtw/MentalMaths",
"id": "0265ff4a0431f63a6dcd4e33be60b0d99fd61e73",
"size": "3365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GUI/component.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39396"
}
],
"symlink_target": ""
} |
"""This example gets a forecast for a prospective line item.
The prospective line item targets the entire network. The targeting can be
modified to determine forecasts for other criteria such as custom criteria
targeting (in addition to targeting the whole network). See
create_line_items.py for an example of how to create more complex targeting.
"""
import datetime
# Import appropriate modules from the client library.
from googleads import ad_manager
import pytz
# Set the ID of the advertiser (company) to forecast for. Setting an advertiser
# will cause the forecast to apply the appropriate unified blocking rules.
ADVERTISER_ID = 'INSERT_ADVERTISER_ID_HERE'
def main(client, advertiser_id):
# Initialize appropriate service.
forecast_service = client.GetService('ForecastService', version='v201811')
network_service = client.GetService('NetworkService', version='v201811')
# get the root ad unit ID to target the entire network.
root_ad_unit_id = network_service.getCurrentNetwork()['effectiveRootAdUnitId']
now_datetime = datetime.datetime.now(tz=pytz.timezone('America/New_York'))
end_datetime = now_datetime + datetime.timedelta(days=5)
# Create prospective line item.
line_item = {
'targeting': {
'inventoryTargeting': {
'targetedAdUnits': [
{
'includeDescendants': True,
'adUnitId': root_ad_unit_id,
}
]
}
},
'creativePlaceholders': [
{
'size': {
'width': '300',
'height': '250'
},
'isAmpOnly': True,
},
{
'size': {
'width': '120',
'height': '600'
}
}
],
'lineItemType': 'SPONSORSHIP',
'startDateTimeType': 'IMMEDIATELY',
'endDateTime': end_datetime,
'costType': 'CPM',
'costPerUnit': {
'currencyCode': 'USD',
'microAmount': '2000000'
},
'primaryGoal': {
'units': '50',
'unitType': 'IMPRESSIONS',
'goalType': 'DAILY'
},
'contractedUnitsBought': '100',
'creativeRotationType': 'EVEN',
'discountType': 'PERCENTAGE',
}
prospective_line_item = {
'lineItem': line_item,
'advertiserId': advertiser_id
}
# Set forecasting options.
forecast_options = {
'includeContendingLineItems': True,
# The field includeTargetingCriteriaBreakdown can only be set if
# breakdowns are not manually specified.
# 'includeTargetingCriteriaBreakdown': True,
'breakdown': {
'timeWindows': [
now_datetime,
now_datetime + datetime.timedelta(days=1),
now_datetime + datetime.timedelta(days=2),
now_datetime + datetime.timedelta(days=3),
now_datetime + datetime.timedelta(days=4),
end_datetime
],
'targets': [
{
# Optional, the name field is only used to identify this
# breakdown in the response.
'name': 'United States',
'targeting': {
'inventoryTargeting': {
'targetedAdUnits': [
{
'includeDescendants': True,
'adUnitId': root_ad_unit_id,
}
]
},
'geoTargeting': {
'targetedLocations': [
{
'id': '2840',
'displayName': 'US'
}
]
}
}
},
{
# Optional, the name field is only used to identify this
# breakdown in the response.
'name': 'Geneva',
'targeting': {
'inventoryTargeting': {
'targetedAdUnits': [
{
'includeDescendants': True,
'adUnitId': root_ad_unit_id,
}
]
},
'geoTargeting': {
'targetedLocations': [
{
'id': '20133',
'displayName': 'Geneva'
}
]
}
}
}
]
}
}
# Get forecast.
forecast = forecast_service.getAvailabilityForecast(
prospective_line_item, forecast_options)
matched = forecast['matchedUnits']
available = forecast['availableUnits']
possible = forecast['possibleUnits'] if 'possibleUnits' in forecast else None
unit_type = forecast['unitType'].lower()
available_percent_overall, possible_percent_overall = CalculateForecastStats(
matched, available, possible)
contending_line_items = getattr(forecast, 'contendingLineItems', [])
# Display results.
print '%s %s matched overall.' % (matched, unit_type)
print '%s%% %s available overall.' % (available_percent_overall, unit_type)
print '%d contending line items overall.' % len(contending_line_items)
if possible:
print '%s%% %s possible overall.' % (possible_percent_overall, unit_type)
if 'breakdowns' in forecast and len(forecast['breakdowns']):
for breakdown in forecast['breakdowns']:
print 'For breakdown time period %s - %s:' % (
FormatSOAPDateTime(breakdown['startTime']),
FormatSOAPDateTime(breakdown['endTime']))
for breakdown_entry in breakdown['breakdownEntries']:
matched = breakdown_entry['forecast']['matched']
available = breakdown_entry['forecast']['available']
possible = (breakdown_entry['forecast']['possible']
if 'possible' in breakdown_entry['forecast'] else None)
name = breakdown_entry['name'] if 'name' in breakdown_entry else None
if name:
print '\tFor targeting breakdown named \'%s\'' % name
available_percent, possible_percent = CalculateForecastStats(
matched, available, possible)
print '\t\t%s %s matched.' % (matched, unit_type)
print '\t\t%s%% %s available.' % (available_percent, unit_type)
if possible:
print '\t\t%s%% %s possible.' % (possible_percent, unit_type)
def FormatSOAPDateTime(value):
"""Format a SOAP DateTime object for printing.
Args:
value: The DateTime object to format.
Returns:
A string representing the value.
"""
value_date = value['date']
return '%s-%s-%s %s:%s:%s (%s)' % (
value_date['year'], value_date['month'], value_date['day'],
value['hour'], value['minute'], value['second'], value['timeZoneId'])
def CalculateForecastStats(matched, available, possible=None):
"""Calculate forecast percentage stats.
Args:
matched: The number of matched impressions.
available: The number of available impressions.
possible: The optional number of possible impressions.
Returns:
The percentage of impressions that are available and possible.
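    Illustrative example: matched=200 and available=150 give 75.0 percent available.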
"""
if matched > 0:
available_percent = (float(available) / matched) * 100.
else:
available_percent = 0
if possible is not None:
if matched > 0:
possible_percent = (possible/float(matched)) * 100.
else:
possible_percent = 0
else:
possible_percent = None
return available_percent, possible_percent
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, ADVERTISER_ID)
| {
"content_hash": "44ed8356927325060afc537512d512a6",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 80,
"avg_line_length": 34.11063829787234,
"alnum_prop": 0.5415419161676647,
"repo_name": "Aloomaio/googleads-python-lib",
"id": "96980310ea1a9613aaa10efad5fa9e800809ecd1",
"size": "8638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/ad_manager/v201811/forecast_service/get_availability_forecast.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "491015"
}
],
"symlink_target": ""
} |
"""
Volume driver for NetApp Data ONTAP (7-mode) FibreChannel storage systems.
"""
from oslo_log import log as logging
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap import block_7mode
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
class NetApp7modeFibreChannelDriver(driver.BaseVD,
driver.ManageableVD,
driver.ExtendVD,
driver.TransferVD,
driver.SnapshotVD):
"""NetApp 7-mode FibreChannel volume driver."""
DRIVER_NAME = 'NetApp_FibreChannel_7mode_direct'
def __init__(self, *args, **kwargs):
super(NetApp7modeFibreChannelDriver, self).__init__(*args, **kwargs)
self.library = block_7mode.NetAppBlockStorage7modeLibrary(
self.DRIVER_NAME, 'FC', **kwargs)
def do_setup(self, context):
self.library.do_setup(context)
def check_for_setup_error(self):
self.library.check_for_setup_error()
def create_volume(self, volume):
self.library.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
self.library.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
self.library.create_cloned_volume(volume, src_vref)
def delete_volume(self, volume):
self.library.delete_volume(volume)
def create_snapshot(self, snapshot):
self.library.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
self.library.delete_snapshot(snapshot)
def get_volume_stats(self, refresh=False):
return self.library.get_volume_stats(refresh,
self.get_filter_function(),
self.get_goodness_function())
def get_default_filter_function(self):
return self.library.get_default_filter_function()
def get_default_goodness_function(self):
return self.library.get_default_goodness_function()
def extend_volume(self, volume, new_size):
self.library.extend_volume(volume, new_size)
def ensure_export(self, context, volume):
return self.library.ensure_export(context, volume)
def create_export(self, context, volume, connector):
return self.library.create_export(context, volume)
def remove_export(self, context, volume):
self.library.remove_export(context, volume)
def manage_existing(self, volume, existing_ref):
return self.library.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
return self.library.manage_existing_get_size(volume, existing_ref)
def unmanage(self, volume):
return self.library.unmanage(volume)
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
return self.library.initialize_connection_fc(volume, connector)
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
return self.library.terminate_connection_fc(volume, connector,
**kwargs)
def get_pool(self, volume):
return self.library.get_pool(volume)
| {
"content_hash": "75a0139ab297d57a367a9f269f0a12e2",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 76,
"avg_line_length": 35.09473684210526,
"alnum_prop": 0.6475704859028194,
"repo_name": "dims/cinder",
"id": "9efe27c6922571bdce8147b1701b5f6ecc16f4bd",
"size": "3970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/netapp/dataontap/fc_7mode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14784553"
},
{
"name": "Shell",
"bytes": "8222"
}
],
"symlink_target": ""
} |
"""Create auth tokens for existing users"""
from __future__ import unicode_literals
from django.db import models, migrations
def no_op(apps, schema_editor):
pass
def create_auth_tokens(apps, schema_editor):
User = apps.get_model('email_user', 'EmailUser')
Token = apps.get_model('authtoken', 'Token')
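    # get_or_create keeps this migration idempotent if some users already have tokens.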
for user in User.objects.all():
Token.objects.get_or_create(user=user)
class Migration(migrations.Migration):
dependencies = [
('email_user', '0001_initial'),
('authtoken', '0001_initial')
]
operations = [
migrations.RunPython(
code=create_auth_tokens,
reverse_code=no_op,
)
]
| {
"content_hash": "328ddb20a7f246eb92f739dcb072df4e",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 52,
"avg_line_length": 22.06451612903226,
"alnum_prop": 0.6242690058479532,
"repo_name": "theirc/ServiceInfo",
"id": "2d7e70305202c8239fa31499bc0a03147396f3f5",
"size": "708",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "email_user/migrations/0002_create_auth_tokens.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "91208"
},
{
"name": "HTML",
"bytes": "169211"
},
{
"name": "JavaScript",
"bytes": "126261"
},
{
"name": "Python",
"bytes": "486647"
},
{
"name": "Shell",
"bytes": "141"
}
],
"symlink_target": ""
} |
from io import BytesIO
from unittest import TestCase, SkipTest
from os.path import join
from gzip import GzipFile
from scrapy.spiders import Spider
from scrapy.http import Response, Request, HtmlResponse
from scrapy.downloadermiddlewares.httpcompression import HttpCompressionMiddleware, \
ACCEPTED_ENCODINGS
from scrapy.responsetypes import responsetypes
from scrapy.utils.gz import gunzip
from tests import tests_datadir
from w3lib.encoding import resolve_encoding
SAMPLEDIR = join(tests_datadir, 'compressed')
FORMAT = {
'gzip': ('html-gzip.bin', 'gzip'),
'x-gzip': ('html-gzip.bin', 'gzip'),
'rawdeflate': ('html-rawdeflate.bin', 'deflate'),
'zlibdeflate': ('html-zlibdeflate.bin', 'deflate'),
'br': ('html-br.bin', 'br')
}
class HttpCompressionTest(TestCase):
def setUp(self):
self.spider = Spider('foo')
self.mw = HttpCompressionMiddleware()
def _getresponse(self, coding):
if coding not in FORMAT:
raise ValueError()
samplefile, contentencoding = FORMAT[coding]
with open(join(SAMPLEDIR, samplefile), 'rb') as sample:
body = sample.read()
headers = {
'Server': 'Yaws/1.49 Yet Another Web Server',
'Date': 'Sun, 08 Mar 2009 00:41:03 GMT',
'Content-Length': len(body),
'Content-Type': 'text/html',
'Content-Encoding': contentencoding,
}
response = Response('http://scrapytest.org/', body=body, headers=headers)
response.request = Request('http://scrapytest.org', headers={'Accept-Encoding': 'gzip, deflate'})
return response
def test_process_request(self):
request = Request('http://scrapytest.org')
assert 'Accept-Encoding' not in request.headers
self.mw.process_request(request, self.spider)
self.assertEqual(request.headers.get('Accept-Encoding'),
b', '.join(ACCEPTED_ENCODINGS))
def test_process_response_gzip(self):
response = self._getresponse('gzip')
request = response.request
self.assertEqual(response.headers['Content-Encoding'], b'gzip')
newresponse = self.mw.process_response(request, response, self.spider)
assert newresponse is not response
assert newresponse.body.startswith(b'<!DOCTYPE')
assert 'Content-Encoding' not in newresponse.headers
def test_process_response_br(self):
try:
import brotli # noqa: F401
except ImportError:
raise SkipTest("no brotli")
response = self._getresponse('br')
request = response.request
self.assertEqual(response.headers['Content-Encoding'], b'br')
newresponse = self.mw.process_response(request, response, self.spider)
assert newresponse is not response
assert newresponse.body.startswith(b"<!DOCTYPE")
assert 'Content-Encoding' not in newresponse.headers
def test_process_response_rawdeflate(self):
response = self._getresponse('rawdeflate')
request = response.request
self.assertEqual(response.headers['Content-Encoding'], b'deflate')
newresponse = self.mw.process_response(request, response, self.spider)
assert newresponse is not response
assert newresponse.body.startswith(b'<!DOCTYPE')
assert 'Content-Encoding' not in newresponse.headers
    def test_process_response_zlibdeflate(self):
response = self._getresponse('zlibdeflate')
request = response.request
self.assertEqual(response.headers['Content-Encoding'], b'deflate')
newresponse = self.mw.process_response(request, response, self.spider)
assert newresponse is not response
assert newresponse.body.startswith(b'<!DOCTYPE')
assert 'Content-Encoding' not in newresponse.headers
def test_process_response_plain(self):
response = Response('http://scrapytest.org', body=b'<!DOCTYPE...')
request = Request('http://scrapytest.org')
assert not response.headers.get('Content-Encoding')
newresponse = self.mw.process_response(request, response, self.spider)
assert newresponse is response
assert newresponse.body.startswith(b'<!DOCTYPE')
def test_multipleencodings(self):
response = self._getresponse('gzip')
response.headers['Content-Encoding'] = ['uuencode', 'gzip']
request = response.request
newresponse = self.mw.process_response(request, response, self.spider)
assert newresponse is not response
self.assertEqual(newresponse.headers.getlist('Content-Encoding'), [b'uuencode'])
def test_process_response_encoding_inside_body(self):
headers = {
'Content-Type': 'text/html',
'Content-Encoding': 'gzip',
}
f = BytesIO()
plainbody = b"""<html><head><title>Some page</title><meta http-equiv="Content-Type" content="text/html; charset=gb2312">"""
zf = GzipFile(fileobj=f, mode='wb')
zf.write(plainbody)
zf.close()
        response = Response("http://www.example.com/", headers=headers, body=f.getvalue())
request = Request("http://www.example.com/")
newresponse = self.mw.process_response(request, response, self.spider)
assert isinstance(newresponse, HtmlResponse)
self.assertEqual(newresponse.body, plainbody)
self.assertEqual(newresponse.encoding, resolve_encoding('gb2312'))
def test_process_response_force_recalculate_encoding(self):
headers = {
'Content-Type': 'text/html',
'Content-Encoding': 'gzip',
}
f = BytesIO()
plainbody = b"""<html><head><title>Some page</title><meta http-equiv="Content-Type" content="text/html; charset=gb2312">"""
zf = GzipFile(fileobj=f, mode='wb')
zf.write(plainbody)
zf.close()
response = HtmlResponse("http;//www.example.com/page.html", headers=headers, body=f.getvalue())
request = Request("http://www.example.com/")
newresponse = self.mw.process_response(request, response, self.spider)
assert isinstance(newresponse, HtmlResponse)
self.assertEqual(newresponse.body, plainbody)
self.assertEqual(newresponse.encoding, resolve_encoding('gb2312'))
def test_process_response_no_content_type_header(self):
headers = {
'Content-Encoding': 'identity',
}
plainbody = b"""<html><head><title>Some page</title><meta http-equiv="Content-Type" content="text/html; charset=gb2312">"""
respcls = responsetypes.from_args(url="http://www.example.com/index", headers=headers, body=plainbody)
response = respcls("http://www.example.com/index", headers=headers, body=plainbody)
request = Request("http://www.example.com/index")
newresponse = self.mw.process_response(request, response, self.spider)
assert isinstance(newresponse, respcls)
self.assertEqual(newresponse.body, plainbody)
self.assertEqual(newresponse.encoding, resolve_encoding('gb2312'))
def test_process_response_gzipped_contenttype(self):
response = self._getresponse('gzip')
response.headers['Content-Type'] = 'application/gzip'
request = response.request
newresponse = self.mw.process_response(request, response, self.spider)
self.assertIsNot(newresponse, response)
self.assertTrue(newresponse.body.startswith(b'<!DOCTYPE'))
self.assertNotIn('Content-Encoding', newresponse.headers)
def test_process_response_gzip_app_octetstream_contenttype(self):
response = self._getresponse('gzip')
response.headers['Content-Type'] = 'application/octet-stream'
request = response.request
newresponse = self.mw.process_response(request, response, self.spider)
self.assertIsNot(newresponse, response)
self.assertTrue(newresponse.body.startswith(b'<!DOCTYPE'))
self.assertNotIn('Content-Encoding', newresponse.headers)
def test_process_response_gzip_binary_octetstream_contenttype(self):
response = self._getresponse('x-gzip')
response.headers['Content-Type'] = 'binary/octet-stream'
request = response.request
newresponse = self.mw.process_response(request, response, self.spider)
self.assertIsNot(newresponse, response)
self.assertTrue(newresponse.body.startswith(b'<!DOCTYPE'))
self.assertNotIn('Content-Encoding', newresponse.headers)
def test_process_response_gzipped_gzip_file(self):
"""Test that a gzip Content-Encoded .gz file is gunzipped
only once by the middleware, leaving gunzipping of the file
to upper layers.
"""
headers = {
'Content-Type': 'application/gzip',
'Content-Encoding': 'gzip',
}
# build a gzipped file (here, a sitemap)
f = BytesIO()
plainbody = b"""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.google.com/schemas/sitemap/0.84">
<url>
<loc>http://www.example.com/</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>daily</changefreq>
<priority>1</priority>
</url>
<url>
<loc>http://www.example.com/Special-Offers.html</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
</urlset>"""
gz_file = GzipFile(fileobj=f, mode='wb')
gz_file.write(plainbody)
gz_file.close()
# build a gzipped response body containing this gzipped file
r = BytesIO()
gz_resp = GzipFile(fileobj=r, mode='wb')
gz_resp.write(f.getvalue())
gz_resp.close()
        response = Response("http://www.example.com/", headers=headers, body=r.getvalue())
request = Request("http://www.example.com/")
newresponse = self.mw.process_response(request, response, self.spider)
self.assertEqual(gunzip(newresponse.body), plainbody)
def test_process_response_head_request_no_decode_required(self):
response = self._getresponse('gzip')
response.headers['Content-Type'] = 'application/gzip'
request = response.request
request.method = 'HEAD'
        response = response.replace(body=None)
newresponse = self.mw.process_response(request, response, self.spider)
self.assertIs(newresponse, response)
self.assertEqual(response.body, b'')
| {
"content_hash": "cb4ea916d0078eb5e3e169e6e389178b",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 131,
"avg_line_length": 41.8804780876494,
"alnum_prop": 0.653824200913242,
"repo_name": "eLRuLL/scrapy",
"id": "64488841a299e37e1967973b62069b6acaa70acd",
"size": "10512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_downloadermiddleware_httpcompression.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2800"
},
{
"name": "Python",
"bytes": "1518287"
},
{
"name": "Roff",
"bytes": "2010"
},
{
"name": "Shell",
"bytes": "259"
}
],
"symlink_target": ""
} |
def break_words(stuff):
"""This function will break up words for us."""
words = stuff.split(' ')
return words
def sort_words(words):
"""Sorts the words."""
return sorted(words)
def print_first_word(words):
"""Prints the first word after popping it off."""
word = words.pop(0)
print word
def print_last_word(words):
"""Prints the last word after popping it off."""
word = words.pop(-1)
print word
def sort_sentence(sentence):
"""Takes in a full sentence and returns the sorted words."""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""Prints the first and last words of the sentence."""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""Sorts the words then prints the first and last one."""
words = sort_sentence(sentence)
print_first_word(words)
print_last_word(words)
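# Illustrative usage (hypothetical sentence, run after importing these functions):
#   words = break_words("All good things come to those who wait.")
#   print_first_word(words)   # prints "All"
#   print_last_word(words)    # prints "wait."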
| {
"content_hash": "bb684940b3c6356a7012f196b6fe3f31",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 64,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.6646341463414634,
"repo_name": "darthbinamira/learn_python",
"id": "2534942dad78526a7d684012562fb2063edbc4a4",
"size": "984",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lpthw/25/ex25.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "29338"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from django.core.handlers.wsgi import WSGIRequest
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.test.client import FakePayload
from itertools import chain
import inspect
import six
import warnings
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
Taken directly from Django.
"""
if not six.PY3:
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
class Sequence(list):
"""
Represents a column sequence, e.g. ``("first_name", "...", "last_name")``
This is used to represent `.Table.Meta.sequence` or the `.Table`
constructors's *sequence* keyword argument.
The sequence must be a list of column names and is used to specify the
order of the columns on a table. Optionally a "..." item can be inserted,
which is treated as a *catch-all* for column names that aren't explicitly
specified.
"""
def expand(self, columns):
"""
Expands the ``"..."`` item in the sequence into the appropriate column
names that should be placed there.
:raises: `ValueError` if the sequence is invalid for the columns.
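        Illustrative: ``Sequence(('a', '...', 'z')).expand(['a', 'b', 'z'])`` rearranges
        the sequence in place to ``['a', 'b', 'z']``, with ``'b'`` filling the ``"..."`` slot.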
"""
ellipses = self.count("...")
if ellipses > 1:
raise ValueError("'...' must be used at most once in a sequence.")
elif ellipses == 0:
self.append("...")
# everything looks good, let's expand the "..." item
columns = list(columns) # take a copy and exhaust the generator
head = []
tail = []
target = head # start by adding things to the head
for name in self:
if name == "...":
# now we'll start adding elements to the tail
target = tail
continue
target.append(name)
if name in columns:
columns.pop(columns.index(name))
self[:] = chain(head, columns, tail)
class OrderBy(six.text_type):
"""
A single item in an `.OrderByTuple` object. This class is
essentially just a `str` with some extra properties.
"""
@property
def bare(self):
"""
Return the bare form.
The *bare form* is the non-prefixed form. Typically the bare form is
just the ascending form.
Example: ``age`` is the bare form of ``-age``
:rtype: `.OrderBy` object
"""
return OrderBy(self[1:]) if self[:1] == '-' else self
@property
def opposite(self):
"""
Return an `.OrderBy` object with an opposite sort influence.
Example:
.. code-block:: python
>>> order_by = OrderBy('name')
>>> order_by.opposite
'-name'
:rtype: `.OrderBy` object
"""
return OrderBy(self[1:]) if self.is_descending else OrderBy('-' + self)
@property
def is_descending(self):
"""
Return `True` if this object induces *descending* ordering
:rtype: `bool`
"""
return self.startswith('-')
@property
def is_ascending(self):
"""
Return `True` if this object induces *ascending* ordering.
:returns: `bool`
"""
return not self.is_descending
@python_2_unicode_compatible
class OrderByTuple(tuple):
    """Stores ordering (as `.OrderBy` objects). The
`~django_tables2.tables.Table.order_by` property is always converted
to an `.OrderByTuple` object.
This class is essentially just a `tuple` with some useful extras.
Example:
.. code-block:: python
>>> x = OrderByTuple(('name', '-age'))
>>> x['age']
'-age'
>>> x['age'].is_descending
True
>>> x['age'].opposite
'age'
"""
def __new__(cls, iterable):
transformed = []
for item in iterable:
if not isinstance(item, OrderBy):
item = OrderBy(item)
transformed.append(item)
return super(OrderByTuple, cls).__new__(cls, transformed)
def __unicode__(self):
return ','.join(self)
def __contains__(self, name):
"""
Determine if a column has an influence on ordering.
Example:
.. code-block:: python
            >>> x = OrderByTuple(('name', ))
>>> 'name' in x
True
>>> '-name' in x
True
:param name: The name of a column. (optionally prefixed)
:returns: `bool`
"""
name = OrderBy(name).bare
for order_by in self:
if order_by.bare == name:
return True
return False
def __getitem__(self, index):
"""
Allows an `.OrderBy` object to be extracted via named or integer
based indexing.
        When using name-based indexing, it's fine to use a prefixed name.
.. code-block:: python
>>> x = OrderByTuple(('name', '-age'))
>>> x[0]
'name'
>>> x['age']
'-age'
>>> x['-age']
'-age'
:rtype: `.OrderBy` object
"""
if isinstance(index, six.string_types):
for order_by in self:
if order_by == index or order_by.bare == index:
return order_by
raise KeyError
return super(OrderByTuple, self).__getitem__(index)
@property
def key(self):
accessors = []
reversing = []
for order_by in self:
accessors.append(Accessor(order_by.bare))
reversing.append(order_by.is_descending)
@total_ordering
class Comparator(object):
def __init__(self, obj):
self.obj = obj
def __eq__(self, other):
for accessor in accessors:
a = accessor.resolve(self.obj, quiet=True)
b = accessor.resolve(other.obj, quiet=True)
if not a == b:
return False
return True
def __lt__(self, other):
for accessor, reverse in six.moves.zip(accessors, reversing):
a = accessor.resolve(self.obj, quiet=True)
b = accessor.resolve(other.obj, quiet=True)
if a == b:
continue
if reverse:
a, b = b, a
# The rest of this should be refactored out into a util
# function 'compare' that handles different types.
try:
return a < b
except TypeError:
# If the truth values differ, it's a good way to
# determine ordering.
if bool(a) is not bool(b):
return bool(a) < bool(b)
# Handle comparing different types, by falling back to
# the string and id of the type. This at least groups
# different types together.
a_type = type(a)
b_type = type(b)
return (repr(a_type), id(a_type)) < (repr(b_type), id(b_type))
return False
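        # Comparator is returned for use as a sort key, e.g. data.sort(key=order_by.key)
        # (illustrative), so rows are compared according to the accessors above.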
return Comparator
@property
def cmp(self):
"""
Return a function for use with `list.sort` that implements this
object's ordering. This is used to sort non-`.QuerySet` based
:term:`table data`.
:rtype: function
"""
warnings.warn('`cmp` is deprecated, use `key` instead.',
DeprecationWarning)
# pylint: disable=C0103
def _cmp(a, b):
for accessor, reverse in instructions:
x = accessor.resolve(a)
y = accessor.resolve(b)
try:
res = cmp(x, y)
except TypeError:
res = cmp((repr(type(x)), id(type(x)), x),
(repr(type(y)), id(type(y)), y))
if res != 0:
return -res if reverse else res
return 0
instructions = []
for order_by in self:
if order_by.startswith('-'):
instructions.append((Accessor(order_by[1:]), True))
else:
instructions.append((Accessor(order_by), False))
return _cmp
def get(self, key, fallback):
"""
Identical to __getitem__, but supports fallback value.
"""
try:
return self[key]
except (KeyError, IndexError):
return fallback
@property
def opposite(self):
"""
Return version with each `.OrderBy` prefix toggled.
Example:
.. code-block:: python
>>> order_by = OrderByTuple(('name', '-age'))
>>> order_by.opposite
('-name', 'age')
"""
return type(self)((o.opposite for o in self))
class Accessor(str):
"""
A string describing a path from one object to another via attribute/index
accesses. For convenience, the class has an alias `.A` to allow for more concise code.
Relations are separated by a ``.`` character.
"""
SEPARATOR = '.'
def resolve(self, context, safe=True, quiet=False):
"""
Return an object described by the accessor by traversing the attributes
of *context*.
Example:
.. code-block:: python
>>> x = Accessor('__len__')
>>> x.resolve('brad')
4
>>> x = Accessor('0.upper')
>>> x.resolve('brad')
'B'
:type context: `object`
:param context: The root/first object to traverse.
:type safe: `bool`
:param safe: Don't call anything with ``alters_data = True``
:type quiet: bool
:param quiet: Smother all exceptions and instead return `None`
:returns: target object
:raises: anything ``getattr(a, "b")`` raises, e.g. `TypeError`,
`AttributeError`, `KeyError`, `ValueError` (unless *quiet* ==
`True`)
`~.Accessor.resolve` attempts lookups in the following order:
- dictionary (e.g. ``obj[related]``)
- attribute (e.g. ``obj.related``)
- list-index lookup (e.g. ``obj[int(related)]``)
Callable objects are called, and their result is used, before
proceeding with the resolving.
"""
try:
current = context
for bit in self.bits:
try: # dictionary lookup
current = current[bit]
except (TypeError, AttributeError, KeyError):
try: # attribute lookup
current = getattr(current, bit)
except (TypeError, AttributeError):
try: # list-index lookup
current = current[int(bit)]
except (IndexError, # list index out of range
ValueError, # invalid literal for int()
KeyError, # dict without `int(bit)` key
TypeError, # unsubscriptable object
):
raise ValueError('Failed lookup for key [%s] in %r'
', when resolving the accessor %s'
% (bit, current, self))
if callable(current):
if safe and getattr(current, 'alters_data', False):
raise ValueError('refusing to call %s() because `.alters_data = True`'
% repr(current))
current = current()
# important that we break in None case, or a relationship
# spanning across a null-key will raise an exception in the
# next iteration, instead of defaulting.
if current is None:
break
return current
except:
if not quiet:
raise
@property
def bits(self):
if self == '':
return ()
return self.split(self.SEPARATOR)
A = Accessor # alias
class AttributeDict(dict):
"""
A wrapper around `dict` that knows how to render itself as HTML
style tag attributes.
The returned string is marked safe, so it can be used safely in a template.
See `.as_html` for a usage example.
"""
def as_html(self):
"""
Render to HTML tag attributes.
Example:
.. code-block:: python
>>> from django_tables2.utils import AttributeDict
>>> attrs = AttributeDict({'class': 'mytable', 'id': 'someid'})
>>> attrs.as_html()
'class="mytable" id="someid"'
:rtype: `~django.utils.safestring.SafeUnicode` object
"""
return mark_safe(' '.join(['%s="%s"' % (k, escape(v))
for k, v in six.iteritems(self)]))
class Attrs(dict):
"""
Backwards compatibility, deprecated.
"""
def __init__(self, *args, **kwargs):
super(Attrs, self).__init__(*args, **kwargs)
warnings.warn("Attrs class is deprecated, use dict instead.",
DeprecationWarning)
def segment(sequence, aliases):
"""
Translates a flat sequence of items into a set of prefixed aliases.
This allows the value set by `.QuerySet.order_by` to be translated into
a list of columns that would have the same result. These are called
"order by aliases" which are optionally prefixed column names.
e.g.
>>> list(segment(("a", "-b", "c"),
... {"x": ("a"),
... "y": ("b", "-c"),
... "z": ("-b", "c")}))
[("x", "-y"), ("x", "z")]
"""
if not (sequence or aliases):
return
for alias, parts in aliases.items():
variants = {
# alias: order by tuple
alias: OrderByTuple(parts),
OrderBy(alias).opposite: OrderByTuple(parts).opposite,
}
for valias, vparts in variants.items():
if list(sequence[:len(vparts)]) == list(vparts):
tail_aliases = dict(aliases)
del tail_aliases[alias]
tail_sequence = sequence[len(vparts):]
if tail_sequence:
for tail in segment(tail_sequence, tail_aliases):
yield tuple(chain([valias], tail))
else:
continue
else:
yield tuple([valias])
class cached_property(object): # pylint: disable=C0103
"""
    Decorator that converts a method with a single
    self argument into a property cached on the instance.
Taken directly from Django 1.4.
"""
def __init__(self, func):
from functools import wraps
wraps(func)(self)
self.func = func
def __get__(self, instance, cls):
res = instance.__dict__[self.func.__name__] = self.func(instance)
return res
funcs = (name for name in ('getfullargspec', 'getargspec')
if hasattr(inspect, name))
getargspec = getattr(inspect, next(funcs))
del funcs
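# getargspec resolves to inspect.getfullargspec on Python 3 and falls back to
# inspect.getargspec on interpreters that lack it.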
def build_request(uri='/'):
"""
Return a fresh HTTP GET / request.
    This is essentially a heavily cut-down version of Django 1.3's
`~django.test.client.RequestFactory`.
"""
path, _, querystring = uri.partition('?')
return WSGIRequest({
'CONTENT_TYPE': 'text/html; charset=utf-8',
'PATH_INFO': path,
'QUERY_STRING': querystring,
'REMOTE_ADDR': '127.0.0.1',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'SERVER_NAME': 'testserver',
'SERVER_PORT': '80',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': FakePayload(b''),
'wsgi.errors': six.StringIO(),
'wsgi.multiprocess': True,
'wsgi.multithread': False,
'wsgi.run_once': False,
})
def total_ordering(cls):
"""Class decorator that fills in missing ordering methods"""
convert = {
'__lt__': [('__gt__', lambda self, other: not (self < other or self == other)),
('__le__', lambda self, other: self < other or self == other),
('__ge__', lambda self, other: not self < other)],
'__le__': [('__ge__', lambda self, other: not self <= other or self == other),
('__lt__', lambda self, other: self <= other and not self == other),
('__gt__', lambda self, other: not self <= other)],
'__gt__': [('__lt__', lambda self, other: not (self > other or self == other)),
('__ge__', lambda self, other: self > other or self == other),
('__le__', lambda self, other: not self > other)],
'__ge__': [('__le__', lambda self, other: (not self >= other) or self == other),
('__gt__', lambda self, other: self >= other and not self == other),
('__lt__', lambda self, other: not self >= other)]
}
roots = set(dir(cls)) & set(convert)
if not roots:
raise ValueError('must define at least one ordering operation: < > <= >=')
root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__
for opname, opfunc in convert[root]:
if opname not in roots:
opfunc.__name__ = str(opname) # Py2 requires non-unicode, Py3 requires unicode.
opfunc.__doc__ = getattr(int, opname).__doc__
setattr(cls, opname, opfunc)
return cls
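# Usage sketch (illustrative): define __eq__ plus at least one of __lt__/__le__/__gt__/__ge__
# on a class and decorate it with @total_ordering to synthesize the remaining comparisons.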
| {
"content_hash": "0903336c75438c4f519e949756ff768f",
"timestamp": "",
"source": "github",
"line_count": 552,
"max_line_length": 94,
"avg_line_length": 33.28260869565217,
"alnum_prop": 0.5127367733507512,
"repo_name": "clan-wot/rewards",
"id": "2c2d9be9cb620082bb49421e56cbd1c273506340",
"size": "18388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/django_tables2/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3161"
},
{
"name": "HTML",
"bytes": "10574"
},
{
"name": "JavaScript",
"bytes": "16269"
},
{
"name": "Makefile",
"bytes": "680"
},
{
"name": "Python",
"bytes": "155134"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import datetime
import six
import tensorflow as tf
import tflearn
import numpy as np
import multiprocessing as mp
from multiprocessing.managers import BaseManager
from helpers import *
import utils
import dataset_utils as d_utils
import models_dict_utils
FLAGS = tf.flags.FLAGS
class DynamicsModel(object):
def __init__(self, model_id, timesteps=1, dropout=1.0, output_dropout=1.0, load_checkpoint=False, use_sess=False):
#print('Loading RNN dynamics model...')
# if timesteps:
# # if provided as an argument, overwrite n_timesteps from the model
# n_timesteps = timesteps
self._tfgraph = tf.Graph()
self._sess = None
#if use_sess:
# # use session
# self._sess = tf.Session(graph=self.tfgraph)
with self._tfgraph.as_default():
#tf.reset_default_graph()
self.timesteps = timesteps
self.model_dict = models_dict_utils.load_model_dict(model_id)
if self.model_dict["architecture"] == 'default':
self.net, self.hidden_1, self.hidden_2 = self._build_regression_lstm_net(n_timesteps=timesteps,
n_inputdim=self.model_dict["n_inputdim"],
n_hidden=self.model_dict["n_hidden"],
n_outputdim=self.model_dict["n_outputdim"],
dropout=dropout,
output_dropout=output_dropout)
elif self.model_dict["architecture"] == 'simple':
self.net, self.hidden_1, self.hidden_2 = self._build_regression_lstm_net2(n_timesteps=timesteps,
n_inputdim=self.model_dict["n_inputdim"],
n_hidden=self.model_dict["n_hidden"],
n_outputdim=self.model_dict["n_outputdim"],
dropout=dropout,
output_dropout=output_dropout)
elif self.model_dict["architecture"] == 'gru':
self.net, self.hidden_1, self.hidden_2 = self._build_regression_lstm_net_gru(n_timesteps=timesteps,
n_inputdim=self.model_dict["n_inputdim"],
n_hidden=self.model_dict["n_hidden"],
n_outputdim=self.model_dict["n_outputdim"],
dropout=dropout,
output_dropout=output_dropout)
elif self.model_dict["architecture"] == 'grusimple':
self.net, self.hidden_1, self.hidden_2 = self._build_regression_gru_net2(n_timesteps=timesteps,
n_inputdim=self.model_dict["n_inputdim"],
n_hidden=self.model_dict["n_hidden"],
n_outputdim=self.model_dict["n_outputdim"],
dropout=dropout,
output_dropout=output_dropout)
else:
assert(False)
tensorboard_dir = '../tensorboard_logs/' + model_id + '/'
checkpoint_dir = '../checkpoints/' + model_id + '/'
checkpoint_path = checkpoint_dir + '_/'
#print("Directory path for tensorboard summaries: {}".format(tensorboard_dir))
#print("Checkpoint directory path: {}".format(checkpoint_dir))
utils.check_if_path_exists_or_create(tensorboard_dir)
utils.check_if_path_exists_or_create(checkpoint_dir)
self._model = tflearn.DNN(self.net, tensorboard_verbose=0, tensorboard_dir=tensorboard_dir, \
checkpoint_path=None, max_checkpoints=3)
if load_checkpoint:
checkpoint = tf.train.latest_checkpoint(checkpoint_dir) # can be none of no checkpoint exists
#print ("Checkpoint filename: " + checkpoint)
if checkpoint:
self.model.load(checkpoint, weights_only=True, verbose=True)
#print('Checkpoint loaded.')
else:
pass
#print('No checkpoint found. ')
#print('Model loaded.')
    def _build_regression_lstm_net(self, n_timesteps=1, n_inputdim=None, n_hidden=None,
                                   n_outputdim=None, dropout=1.0, output_dropout=1.0):
        # output_dropout is accepted to match the call in __init__ but is not applied in this variant.
net = tflearn.input_data([None, n_timesteps, n_inputdim],dtype=tf.float32, name='input_data')
output_mask = tflearn.input_data([None, n_timesteps, n_outputdim], dtype=tf.float32, name='output_mask')
net, hidden_states_1 = tflearn.lstm(net, n_hidden, weights_init='xavier', return_seq=True, return_state=True, dropout=dropout, name="lstm_1")
net, hidden_states_2 = tflearn.lstm(net, n_outputdim, weights_init='xavier', activation='sigmoid', return_seq=True, return_state=True, dropout=dropout, name="lstm_2")
net = tf.stack(net, axis=1)
net = net * output_mask
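        # multiplying by output_mask zeroes the network outputs at positions excluded by the mask before the mean-square loss is computed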
net = tflearn.regression(net, optimizer='adam', learning_rate=0.0005,
loss='mean_square') # mean square works; binary crossentropy does not work for some reason
return net, hidden_states_1, hidden_states_2
    def _build_regression_lstm_net2(self, n_timesteps=1, n_inputdim=None, n_hidden=None,
                                    n_outputdim=None, dropout=1.0, output_dropout=1.0):
        # output_dropout is accepted to match the call in __init__ but is not applied in this variant.
# don't have 2 lstms, just have a shared output layer
# this alternative doesn't seem to work as well
net = tflearn.input_data([None, n_timesteps, n_inputdim],dtype=tf.float32, name='input_data')
output_mask = tflearn.input_data([None, n_timesteps, n_outputdim], dtype=tf.float32, name='output_mask')
net, hidden_states_1 = tflearn.lstm(net, n_hidden, weights_init='xavier', return_seq=True, return_state=True, dropout=dropout, name="lstm_1")
net = [tflearn.fully_connected(net[i], n_outputdim, activation='sigmoid', weights_init='xavier', scope='output_shared', reuse=(i>0)) for i in six.moves.range(n_timesteps)]
net = tf.stack(net, axis=1)
net = net * output_mask
net = tflearn.regression(net, optimizer='adam', learning_rate=0.0005,
loss='mean_square') # mean square works
return net, hidden_states_1, None
    def _build_regression_lstm_net_gru(self, n_timesteps=1, n_inputdim=None, n_hidden=None,
                                       n_outputdim=None, dropout=1.0, output_dropout=1.0):
        # output_dropout is accepted for interface compatibility; not applied here.
net = tflearn.input_data([None, n_timesteps, n_inputdim],dtype=tf.float32, name='input_data')
output_mask = tflearn.input_data([None, n_timesteps, n_outputdim], dtype=tf.float32, name='output_mask')
net, hidden_states_1 = tflearn.gru(net, n_hidden, weights_init='xavier', return_seq=True, return_state=True, dropout=dropout, name="gru_1")
net, hidden_states_2 = tflearn.gru(net, n_outputdim, weights_init='xavier', activation='sigmoid', return_seq=True, return_state=True, dropout=dropout, name="gru_2")
net = tf.stack(net, axis=1)
net = net * output_mask
net = tflearn.regression(net, optimizer='adam', learning_rate=0.0005,
loss='mean_square') # mean square works
return net, hidden_states_1, hidden_states_2
def _build_regression_gru_net2(self, n_timesteps=1, n_inputdim=None, n_hidden=None,
n_outputdim=None, dropout=1.0, output_dropout=1.0):
# don't have 2 lstms, just have a shared output layer
# this alternative doesn't seem to work as well
net = tflearn.input_data([None, n_timesteps, n_inputdim],dtype=tf.float32, name='input_data')
output_mask = tflearn.input_data([None, n_timesteps, n_outputdim], dtype=tf.float32, name='output_mask')
net, hidden_states_1 = tflearn.gru(net, n_hidden, weights_init='xavier', return_seq=True, return_state=True, dropout=dropout, name="gru_1")
# only add dropout to the outputs
net = tflearn.dropout(net, output_dropout)
net = [tflearn.fully_connected(net[i], n_outputdim, activation='sigmoid', weights_init='xavier', scope='output_shared', reuse=(i>0)) for i in six.moves.range(n_timesteps)]
net = tf.stack(net, axis=1)
net = net * output_mask
net = tflearn.regression(net, optimizer='adam', learning_rate=0.0005,
loss='mean_square') # mean square works
return net, hidden_states_1, None
def load(self, s):
with self._tfgraph.as_default():
#tf.reset_default_graph()
self._model.load(s)
def save(self, s):
with self._tfgraph.as_default():
#tf.reset_default_graph()
self._model.save(s)
def train(self, train_data, n_epoch=1, callbacks=[], shuffle=None, load_checkpoint=True, validation_set=0.1, batch_size=None):
"""
:param train_data: tuple (input_data, output_mask, output_data)
:param n_epoch: number of epochs to train for
:param load_checkpoint: whether to train from checkpoint or from scratch
:return:
"""
with self._tfgraph.as_default():
#tf.reset_default_graph()
input_data, output_mask, output_data = train_data
date_time_string = datetime.datetime.now().strftime("%m-%d-%Y_%H-%M-%S")
run_id = "{}".format(date_time_string)
self._model.fit([input_data, output_mask], output_data, n_epoch=n_epoch, validation_set=validation_set, run_id=run_id, callbacks=callbacks, shuffle=shuffle, batch_size=batch_size)
def predict(self, input_data):
"""
:param input_data: of shape (n_samples, n_timesteps, n_inputdim).
:return:
"""
with self._tfgraph.as_default():
n_samples, n_timesteps, n_inputdim = input_data.shape
assert(n_inputdim == self.model_dict["n_inputdim"]), "input dimension of data doesn't match the model."
n_outputdim = self.model_dict["n_outputdim"]
output_mask = np.ones((n_samples, n_timesteps, n_outputdim))
if n_timesteps < self.timesteps: # pad inputs and mask
padded_input = np.zeros((n_samples, self.timesteps, n_inputdim))
padded_input[:, :n_timesteps, :] = input_data[:, :, :]
input_data = padded_input
padded_mask = np.zeros((n_samples, self.timesteps, n_outputdim))
padded_mask[:,:n_timesteps,:] = output_mask[:,:,:]
output_mask = padded_mask
elif n_timesteps > self.timesteps: # truncate inputs and mask
input_data = input_data[:, :self.timesteps, :]
output_mask = output_mask[:, :self.timesteps, :]
#tf.reset_default_graph()
return self._model.predict([input_data, output_mask])
def get_timesteps(self):
return self.timesteps
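# --- Hedged usage sketch (not part of the original file) ---
# A minimal example of driving this class; the model_id and shapes below are
# illustrative assumptions, not values taken from this repository:
#
#   model = DynamicsModel(model_id='test_model', timesteps=10, load_checkpoint=False)
#   input_data = np.zeros((1, 10, model.model_dict["n_inputdim"]))
#   preds = model.predict(input_data)   # roughly shape (1, 10, n_outputdim)
#   model.save('../checkpoints/test_model/manual.ckpt')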
class DMCManager(BaseManager):
'''
    Allows creating DynamicsModel objects in a separate process.
'''
pass
DMCManager.register('DynamicsModel', DynamicsModel)
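# --- Hedged usage sketch (not part of the original file) ---
# Registering DynamicsModel on a BaseManager lets each model (and its TensorFlow
# graph) live in a separate process. A typical call pattern, with illustrative
# arguments:
#
#   manager = DMCManager()
#   manager.start()
#   proxy = manager.DynamicsModel(model_id='test_model', timesteps=10)
#   preds = proxy.predict(input_data)   # executes in the manager's process
#   manager.shutdown()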
class RnnStudentSim(object):
'''
A model-based simulator for a student. Maintains its own internal hidden state.
    Currently the model can be shared because only the history matters.
'''
def __init__(self, model):
self.model = model
self.seq_max_len = model.get_timesteps()
self.sequence = [] # will store up to seq_max_len
pass
def sample_observations(self):
"""
Returns list of probabilities
"""
# special case when self.sequence is empty
if not self.sequence:
return None
else:
# turns the list of input vectors, into a numpy matrix of shape (1, n_timesteps, 2*n_concepts)
# We need the first dimension since the network expects a batch.
rnn_input_sequence = np.expand_dims(np.array(self.sequence), axis=0)
pred = self.model.predict(rnn_input_sequence)
t = len(self.sequence)
prob_success_action = pred[0][t-1]
# observation is a probability
return prob_success_action
def advance_simulator(self, action, observation):
'''
Given next action and observation, advance the internal hidden state of the simulator.
'''
input = d_utils.convert_to_rnn_input(action, observation)
if len(self.sequence) == self.seq_max_len:
self.sequence = self.sequence[1:] + [input]
else:
self.sequence.append(input)
def copy(self):
'''
Make a copy of the current simulator.
'''
sim_copy = RnnStudentSim(self.model)
sim_copy.sequence = self.sequence[:] # deep copy
return sim_copy
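# --- Hedged usage sketch (not part of the original file) ---
# Typical interaction loop; `action` and `observation` are whatever
# d_utils.convert_to_rnn_input expects (assumed here to be a concept action and
# a 0/1 outcome):
#
#   sim = RnnStudentSim(model)
#   sim.sample_observations()            # None until some history exists
#   sim.advance_simulator(action, observation)
#   probs = sim.sample_observations()    # per-concept success probabilities
#   branch = sim.copy()                  # fork the history, e.g. for tree search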
class RnnStudentSimMemEnsemble(object):
'''
A model-based simulator for a student. Maintains its own internal hidden state.
    Currently the model can be shared because only the history matters.
Uses an ensemble of memoized models.
'''
def __init__(self, n_concepts, mem_arrays_list):
self.n_concepts = n_concepts
self.mem_arrays_list = mem_arrays_list
self.seq_max_len = len(mem_arrays_list[0])-1
        # store the current state
self.step = 0
self.history_ix = 0
def sample_observations(self):
"""
Returns next probabilities
"""
# special case when self.sequence is empty
if self.step == 0:
return None
else:
pred_list = []
for mem_arrays in self.mem_arrays_list:
pred_list.append(mem_arrays[self.step][self.history_ix,:])
return np.mean(pred_list,axis=0)
def advance_simulator(self, action, observation):
'''
Given next action and observation, advance the internal hidden state of the simulator.
action is StudentAction
observation is 0 or 1
'''
self.step += 1
next_branch = action_ob_encode(self.n_concepts, action.concept, observation)
self.history_ix = history_ix_append(self.n_concepts, self.history_ix, next_branch)
def copy(self):
'''
Make a copy of the current simulator.
'''
sim_copy = RnnStudentSimMemEnsemble(self.n_concepts, self.mem_arrays_list)
sim_copy.step = self.step
sim_copy.history_ix = self.history_ix
return sim_copy
class RnnStudentSimEnsemble(object):
'''
A model-based simulator for a student.
It's an ensemble of many models and averages their predictions.
'''
def __init__(self, model_list):
self.model_list = model_list
self.seq_max_len = model_list[0].get_timesteps()
self.sequence = [] # will store up to seq_max_len
pass
def sample_observations(self):
"""
Returns list of probabilities
"""
# special case when self.sequence is empty
if not self.sequence:
return None
else:
# turns the list of input vectors, into a numpy matrix of shape (1, n_timesteps, 2*n_concepts)
# We need the first dimension since the network expects a batch.
rnn_input_sequence = np.expand_dims(np.array(self.sequence), axis=0)
t = len(self.sequence)
# average the predictions
pred_list = []
for curr_model in self.model_list:
pred_list.append(curr_model.predict(rnn_input_sequence)[0][t-1])
prob_success_action = np.mean(pred_list,axis=0)
#six.print_('prob success action shape {}'.format(prob_success_action.shape))
# observation is a probability
return prob_success_action
def advance_simulator(self, action, observation):
'''
Given next action and observation, advance the internal hidden state of the simulator.
'''
input = d_utils.convert_to_rnn_input(action, observation)
if len(self.sequence) == self.seq_max_len:
self.sequence = self.sequence[1:] + [input]
else:
self.sequence.append(input)
def copy(self):
'''
Make a copy of the current simulator.
'''
sim_copy = RnnStudentSimEnsemble(self.model_list) #list of models is shared
sim_copy.sequence = self.sequence[:] # deep copy
return sim_copy
| {
"content_hash": "c6df673a739230120a18a2afd5f07d00",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 191,
"avg_line_length": 48.51780821917808,
"alnum_prop": 0.5538426788638545,
"repo_name": "lisa-1010/smart-tutor",
"id": "442b74ef37f39c34952def42c68c4cec3dde76c3",
"size": "18195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/dynamics_model_class.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3061986"
},
{
"name": "Python",
"bytes": "199954"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('../src/'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'VideoDigitalWatermarking'
copyright = '2017, pira'
author = 'pira'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'VideoDigitalWatermarking v1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'VideoDigitalWatermarkingdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'VideoDigitalWatermarking.tex', 'VideoDigitalWatermarking Documentation',
'pira', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'videodigitalwatermarking', 'VideoDigitalWatermarking Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'VideoDigitalWatermarking', 'VideoDigitalWatermarking Documentation',
author, 'VideoDigitalWatermarking', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| {
"content_hash": "c5b266295ef6cf501bbac2532ec850d2",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 90,
"avg_line_length": 28.981308411214954,
"alnum_prop": 0.6919273352681931,
"repo_name": "piraaa/VideoDigitalWatermarking",
"id": "4dec7a3648e4f5394473e9f3a6aba1d18fe3ece3",
"size": "10003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21728"
}
],
"symlink_target": ""
} |
import sys
n = int(input().strip())
sockColors = [int(color) for color in input().strip().split(' ')]
d = dict()
for color in sockColors:
if color not in d:
d[color] = 0
d[color] += 1
count = 0
for key, colorCount in d.items():
count += colorCount // 2
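# Worked example (not part of the original submission): for the sample input
#   9
#   10 20 20 10 10 30 50 10 20
# the counts are {10: 4, 20: 3, 30: 1, 50: 1}, so the answer is
# 4//2 + 3//2 + 1//2 + 1//2 = 2 + 1 + 0 + 0 = 3.
# An equivalent formulation using collections.Counter:
#   from collections import Counter
#   print(sum(c // 2 for c in Counter(sockColors).values()))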
print(count) | {
"content_hash": "6d4c6e5a47a73dbc6848c8a2a190dced",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 65,
"avg_line_length": 17.125,
"alnum_prop": 0.635036496350365,
"repo_name": "shobhitmishra/CodingProblems",
"id": "89aed15274465d52bce949ccec78ac7f572ed52d",
"size": "274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "HackerRank/SockMerchant.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "854"
},
{
"name": "Makefile",
"bytes": "31844"
},
{
"name": "Python",
"bytes": "437556"
}
],
"symlink_target": ""
} |
import cv2
import io
import base64
import numpy as np
import pandas as pd
from subprocess import Popen, PIPE
class VideoAnalysis(object):
"""
TODO:
- Define common interfaces on similar functions
- Define what format video will come in as
- Probably want a preprocessed dataframe with image, time, (and maybe features)
"""
def __init__(self):
"""
Default constructor
"""
self.features = pd.DataFrame()
@staticmethod
def detect_cut(video_file_path, time_pd_series):
"""
Detect where the scene boundaries ("cuts") are in the video.
Uses FFMPEG utility.
Args:
video_file_path: Path to video file
time_pd_series: pandas series with timestamps for every frame
Returns:
A dataframe of with each frame labeled as whether it is a
scene boundary.
"""
## Create output array, initialize with all zeros
time_df = pd.DataFrame(time_pd_series)
time_df.columns = ["time"]
out_df = time_df.copy()
out_df['is_scene_transition'] = 0
## Use the bash script to call ffprobe, a utility for detecting scene changes
p = Popen(["bash", "ffprobe_script.bash", video_file_path], stdout=PIPE, stderr=PIPE)
output, err = p.communicate()
# Create a dataframe of scene change times
#import pdb; pdb.set_trace()
scene_trans_df = pd.DataFrame(output.split()[2:])
## Check that scene transitions occur
if not scene_trans_df.empty:
scene_trans_df.columns = ["time"]
scene_trans_df.time = scene_trans_df.time.apply(lambda x: float(x))
for scene_time in scene_trans_df.time:
closest_pt = out_df.ix[(time_df.time - scene_time).abs().argsort()[:1]]
index = int(closest_pt.index[0])
out_df['is_scene_transition'][index] = 1
return out_df
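    ## --- Hedged sketch (not part of the original file) ---
    ## The contents of ffprobe_script.bash are not shown here. A typical scene-
    ## change probe that emits frame times like the ones parsed above might be:
    ##   ffprobe -show_frames -of compact=p=0 -f lavfi \
    ##       "movie=$1,select=gt(scene\,0.4)" 2>/dev/null
    ## followed by extracting the pkt_pts_time values; the 0.4 threshold and the
    ## exact output format are assumptions, not taken from this repository.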
@staticmethod
def detect_shake(video):
'''
Shake detection
'''
pass
@staticmethod
def detect_blur(video):
'''
Detect blur
'''
pass
@staticmethod
def optical_flow(video):
'''
Optical flow - useful for preprocessing
Should this go here? Or should this be in preprocessing???
'''
pass
@staticmethod
def synchrony(video):
'''
Audio/Visual Synchrony
'''
pass
@staticmethod
def find_faces(video):
'''
Find faces in the images
'''
pass
| {
"content_hash": "f2aa31f52346aac5b96f32719d09378d",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 93,
"avg_line_length": 26.938144329896907,
"alnum_prop": 0.572139303482587,
"repo_name": "pdxcycling/carv.io",
"id": "dfbe7b37bded89f37eb6d09373ef479e33938af4",
"size": "2613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "video_analysis/code/video_analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "79613"
}
],
"symlink_target": ""
} |
from qrl.core.Transaction_subtypes import TX_SUBTYPE_STAKE, TX_SUBTYPE_TX, TX_SUBTYPE_COINBASE
from collections import OrderedDict, namedtuple
from pyqrllib.pyqrllib import getHashChainSeed, bin2hstr
from qrl.core import config, logger
from qrl.core.ChainBuffer import ChainBuffer
from qrl.core.Transaction import Transaction
from qrl.core.GenesisBlock import GenesisBlock
from qrl.core.wallet import Wallet
from qrl.core.block import Block
from qrl.core.helper import json_bytestream, json_print
from qrl.crypto.misc import sha256, merkle_tx_hash
import bz2
from io import StringIO
from time import time
from math import log, ceil
import heapq
import os, copy
import simplejson as json
from collections import defaultdict
from decimal import Decimal
BlockFrame = namedtuple('BlockFrame', 'position size') # FIXME: Remove/Refactor. This is temporary
class Chain:
def __init__(self, state):
self.state = state
self.wallet = Wallet()
self.chain_dat_filename = os.path.join(config.user.data_path, config.dev.mnemonic_filename)
# FIXME: should self.mining_address be self.staking_address
self.mining_address = self.wallet.address_bundle[0].xmss.get_address().encode()
self.ping_list = [] # FIXME: This has nothing to do with chain
self.block_framedata = dict()
self.transaction_pool = []
self.txhash_timestamp = []
self.m_blockchain = []
self.blockheight_map = []
self.block_chain_buffer = None # Initialized by node.py
self.prev_txpool = [None] * 1000 # TODO: use python dequeue
self.pending_tx_pool = []
self.pending_tx_pool_hash = []
self.duplicate_tx_pool = OrderedDict()
self.stake_list = []
self.stake_commit = []
self.stake_reveal_one = []
self.stake_ban_list = []
self.stake_ban_block = {}
self.stake_validator_latency = defaultdict(dict)
def add_tx_to_duplicate_pool(self, duplicate_txn):
if len(self.duplicate_tx_pool) >= config.dev.transaction_pool_size:
self.duplicate_tx_pool.popitem(last=False)
self.duplicate_tx_pool[duplicate_txn.get_message_hash()] = duplicate_txn
def validate_reboot(self, mhash, nonce):
# FIXME: Reboot validation in the chain? This is node related
reboot_data = ['2920c8ec34f04f59b7df4284a4b41ca8cbec82ccdde331dd2d64cc89156af653', 0]
try:
# FIXME: Accessing DB directly
reboot_data_db = self.state.db.get('reboot_data')
reboot_data = reboot_data_db
except:
pass
if reboot_data[1] >= nonce: # already used
msg = 'nonce in db ' + str(reboot_data[1])
msg += '\nnonce provided ' + str(nonce)
return None, msg
reboot_data[1] = nonce
output = mhash
for i in range(0, reboot_data[1]):
output = sha256(output)
if output != reboot_data[0]:
msg = 'expected hash ' + str(reboot_data[0])
msg += '\nhash found ' + str(output)
msg += '\nnonce provided ' + str(nonce) + "\n"
return None, msg
# reboot_data[1] += 1
# self.state.db.put('reboot_data', reboot_data)
return True, 'Success'
def generate_reboot_hash(self, key, nonce=None, blocknumber=0):
# FIXME: Reboot validation in the chain? This is node related
reboot_data = ['2920c8ec34f04f59b7df4284a4b41ca8cbec82ccdde331dd2d64cc89156af653', 0]
try:
# FIXME: Accessing DB directly
reboot_data = self.state.db.get('reboot_data')
except:
pass
if nonce:
if reboot_data[1] > nonce:
return None, 'Nonce must be greater than or equals to ' + str(reboot_data[1]) + '\r\n'
reboot_data[1] = int(nonce)
output = sha256(key)
for i in range(0, 40000 - reboot_data[1]):
output = sha256(output)
status, error = self.validate_reboot(output, reboot_data[1])
if not status:
return None, error
return json.dumps(
{'hash': output, 'nonce': reboot_data[1], 'blocknumber': int(blocknumber)}), "Reboot Initiated\r\n"
def get_sv(self, terminator):
for s in self.state.stake_list_get():
if terminator in s[1]:
return s[0]
return None
def reveal_to_terminator(self, reveal, blocknumber, add_loop=0):
tmp = sha256(reveal)
epoch = blocknumber // config.dev.blocks_per_epoch
for _ in range(blocknumber - (epoch * config.dev.blocks_per_epoch) + add_loop):
tmp = sha256(tmp)
return tmp
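    # --- Hedged sketch (not part of the original file) ---
    # reveal_to_terminator just walks the hash chain forward, so a reveal made
    # inside an epoch maps back to the terminator committed at epoch start.
    # select_winners relies on this to recover the revealing staker:
    #
    #   terminator = chain.reveal_to_terminator(reveal, blocknumber, add_loop=1)
    #   staker = chain.get_sv(terminator)   # None if no staker matches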
def select_hashchain(self, stake_address=None, hashchain=None, blocknumber=None):
if not hashchain:
for s in self.block_chain_buffer.stake_list_get(blocknumber):
if s[0] == stake_address:
hashchain = s[1]
break
if not hashchain:
return
return hashchain
def select_winners(self, reveals, topN=1, blocknumber=None, block=None, seed=None):
# FIXME: This is POS related
winners = None
if not seed:
logger.info('Exception raised due to Seed is None')
raise Exception
if blocknumber:
winners = heapq.nsmallest(topN, reveals, key=lambda reveal: self.score(
stake_address=self.get_sv(self.reveal_to_terminator(reveal, blocknumber, add_loop=1)),
reveal_one=reveal,
balance=self.block_chain_buffer.get_st_balance(
self.get_sv(self.reveal_to_terminator(reveal, blocknumber, add_loop=1)), blocknumber),
seed=seed)) # blocknumber+1 as we have one extra hash for the reveal
return winners
winners = heapq.nsmallest(topN, reveals, key=lambda reveal: reveal[4]) # reveal[4] is score
winners_dict = {}
for winner in winners:
winners_dict[winner[3]] = winner # winner[3] is reveal_one
return winners_dict
@staticmethod
def score(stake_address, reveal_one, balance=0, seed=None, verbose=False):
if not seed:
logger.info('Exception Raised due to seed none in score fn')
raise Exception
if not balance:
logger.info(' balance 0 so score none ')
logger.info(' stake_address %s', stake_address)
return None
reveal_one_number = int(bin2hstr(reveal_one), 16)
score = (Decimal(config.dev.N) - (Decimal(reveal_one_number | seed).log10() / Decimal(2).log10())) / Decimal(
balance)
if verbose:
logger.info('=' * 10)
logger.info('Score - %s', score)
logger.info('reveal_one - %s', reveal_one_number)
logger.info('seed - %s', seed)
logger.info('balance - %s', balance)
return score
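    # --- Hedged worked example (not part of the original file) ---
    # With illustrative values (config.dev.N assumed to be 128 here):
    #   reveal_one_number | seed == 2**100  ->  log2(...) == 100
    #   balance == 40
    #   score == (128 - 100) / 40 == 0.7
    # select_winners picks the smallest scores (heapq.nsmallest), so a larger
    # stake balance yields a lower, i.e. better, score.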
def update_pending_tx_pool(self, tx, peer):
if len(self.pending_tx_pool) >= config.dev.transaction_pool_size:
del self.pending_tx_pool[0]
del self.pending_tx_pool_hash[0]
self.pending_tx_pool.append([tx, peer])
self.pending_tx_pool_hash.append(tx.txhash)
def get_stake_validators_hash(self):
sv_hash = StringIO()
stake_validators_list = self.state.stake_validators_list
for staker in stake_validators_list.sv_list:
balance = self.state.balance(staker)
sv_hash.write(staker + str(balance))
sv_hash = sha256(sv_hash.getvalue())
return sv_hash
# create a block from a list of supplied tx_hashes, check state to ensure validity..
def create_stake_block(self, reveal_hash, last_block_number):
t_pool2 = copy.deepcopy(self.transaction_pool)
del self.transaction_pool[:]
curr_epoch = (last_block_number + 1) // config.dev.blocks_per_epoch
# recreate the transaction pool as in the tx_hash_list, ordered by txhash..
tx_nonce = defaultdict(int)
total_txn = len(t_pool2)
txnum = 0
while txnum < total_txn:
tx = t_pool2[txnum]
if self.block_chain_buffer.pubhashExists(tx.txfrom, tx.pubhash, last_block_number + 1):
del t_pool2[txnum]
total_txn -= 1
continue
if tx.subtype == TX_SUBTYPE_STAKE:
epoch_blocknum = last_block_number + 1 - (curr_epoch * config.dev.blocks_per_epoch)
# skip 1st st txn without tx.first_hash in case its beyond allowed epoch blocknumber
if (not tx.first_hash) and epoch_blocknum >= config.dev.stake_before_x_blocks:
logger.warning('Skipping st as blocknumber beyond stake limit , CreateBlock()')
logger.warning('Expected ST txn before epoch_blocknumber : %s', config.dev.stake_before_x_blocks)
logger.warning('Found ST txn in epoch_blocknumber : %s', epoch_blocknum)
del t_pool2[txnum]
total_txn -= 1
continue
if tx.epoch != curr_epoch:
logger.warning('Skipping st as epoch mismatch, CreateBlock()')
logger.warning('Expected st epoch : %s', curr_epoch)
logger.warning('Found st epoch : %s', tx.epoch)
del t_pool2[txnum]
total_txn -= 1
continue
balance = 0
next_sv_list = self.block_chain_buffer.next_stake_list_get(last_block_number + 1)
if not (tx.txfrom not in next_sv_list or last_block_number == 0):
if tx.first_hash:
del t_pool2[txnum]
total_txn -= 1
continue
self.add_tx_to_pool(tx)
tx_nonce[tx.txfrom] += 1
tx.nonce = self.block_chain_buffer.get_stxn_state(last_block_number + 1, tx.txfrom)[0] + tx_nonce[tx.txfrom]
txnum += 1
# create the block..
block_obj = self.m_create_block(reveal_hash, last_block_number)
# reset the pool back
self.transaction_pool = copy.deepcopy(t_pool2)
return block_obj
# return a sorted list of txhashes from transaction_pool, sorted by timestamp from block n
# (actually from start of transaction_pool) to time, then ordered by txhash.
def sorted_tx_pool(self, timestamp=None):
if timestamp is None:
timestamp = time()
pool = copy.deepcopy(self.transaction_pool)
trimmed_pool = []
end_time = timestamp
for tx in pool:
if self.txhash_timestamp[self.txhash_timestamp.index(tx.txhash) + 1] <= end_time:
trimmed_pool.append(tx.txhash)
trimmed_pool.sort()
if not trimmed_pool:
return False
return trimmed_pool
def is_stake_banned(self, stake_address):
if stake_address in self.stake_ban_list:
epoch_diff = (self.height() / config.dev.blocks_per_epoch) - (
self.stake_ban_block[stake_address] / config.dev.blocks_per_epoch)
if self.height() - self.stake_ban_block[stake_address] > 10 or epoch_diff > 0:
logger.info('Stake removed from ban list')
del self.stake_ban_block[stake_address]
self.stake_ban_list.remove(stake_address)
return False
return True
return False
def pos_block_pool(self, n=1.5):
"""
create a snapshot of the transaction pool to account for network traversal time (probably less than 300ms, but let's give a window of 1.5 seconds).
:param n:
:return: list of merkle root hashes of the tx pool over last 1.5 seconds
"""
timestamp = time()
start_time = timestamp - n
x = self.sorted_tx_pool(start_time)
y = self.sorted_tx_pool(timestamp)
if not y: # if pool is empty -> return sha256 null
return [sha256('')], [[]]
        elif x == y:  # if the pool isn't empty but there is no difference then return the only merkle hash possible..
return [merkle_tx_hash(y)], [y]
else: # there is a difference in contents of pool over last 1.5 seconds..
merkle_hashes = []
txhashes = []
if not x:
merkle_hashes.append(sha256(''))
x = []
txhashes.append(x)
else:
merkle_hashes.append(merkle_tx_hash(x))
txhashes.append(x)
tmp_txhashes = x
for tx in reversed(self.transaction_pool):
if tx.txhash in y and tx.txhash not in x:
tmp_txhashes.append(tx.txhash)
tmp_txhashes.sort()
merkle_hashes.append(merkle_tx_hash(tmp_txhashes))
txhashes.append(tmp_txhashes)
return merkle_hashes, txhashes
@staticmethod
def pos_block_selector(seed, n):
"""
create the PRF selector sequence based upon a seed and number
of stakers in list (temporary..there are better ways to do this
with bigger seed value, but it works)
:param seed:
:param n:
:return:
"""
prf = getHashChainSeed(seed, 1, 20000)
# FIXME: Check with cyyber the purpose of this
prf_range = []
n_bits = int(ceil(log(n, 2)))
for z in prf:
x = ord(z) >> 8 - n_bits
if x < n:
prf_range.append(x)
return prf_range
def pos_block_selector_n(self, seed, n, i):
"""
return the POS staker list position for given seed at index, i
:param seed:
:param n:
:param i:
:return:
"""
l = self.pos_block_selector(seed, n)
return l[i]
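    # --- Hedged usage sketch (not part of the original file) ---
    # Every node that knows the shared seed derives the same pseudo-random
    # ordering of staker indices; values below are illustrative only:
    #
    #   order = chain.pos_block_selector(shared_seed, len(stake_list))
    #   first = chain.pos_block_selector_n(shared_seed, len(stake_list), 0)
    #   assert first == order[0]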
def search(self, txcontains, islong=1):
for tx in self.transaction_pool:
if tx.txhash == txcontains or tx.txfrom == txcontains or tx.txto == txcontains:
logger.info('%s found in transaction pool..', txcontains)
if islong == 1: json_print(tx)
for block in self.m_blockchain:
for protobuf_tx in block.transactions:
tx = Transaction.from_pbdata(protobuf_tx)
if tx.txhash == txcontains or tx.txfrom == txcontains or tx.txto == txcontains:
logger.info('%s found in block %s', txcontains, str(block.blockheader.blocknumber))
if islong == 0: logger.info(('<tx:txhash> ' + tx.txhash))
if islong == 1: json_print(tx)
return
def update_last_tx(self, block):
if len(block.transactions) == 0:
return
last_txn = []
try:
# FIXME: Accessing DB directly
last_txn = self.state.db.get('last_txn')
except:
pass
for protobuf_txn in block.transactions[-20:]:
txn = Transaction.from_pbdata(protobuf_txn)
if txn.subtype == TX_SUBTYPE_TX:
last_txn.insert(0,
[txn.to_json(), block.blockheader.blocknumber, block.blockheader.timestamp])
del last_txn[20:]
# FIXME: Accessing DB directly
self.state.db.put('last_txn', last_txn)
def update_wallet_tx_metadata(self, addr, new_txhash):
try:
# FIXME: Accessing DB directly
txhash = self.state.db.get('txn_' + str(addr))
except Exception:
txhash = []
txhash.append(bin2hstr(new_txhash))
# FIXME: Accessing DB directly
self.state.db.put('txn_' + str(addr), txhash)
def update_txn_count(self, txto, txfrom):
last_count = self.state.get_txn_count(txto)
# FIXME: Accessing DB directly
self.state.db.put('txn_count_' + str(txto), last_count + 1)
last_count = self.state.get_txn_count(txfrom)
# FIXME: Accessing DB directly
self.state.db.put('txn_count_' + str(txfrom), last_count + 1)
def update_tx_metadata(self, block):
if len(block.transactions) == 0:
return
for protobuf_txn in block.transactions:
txn = Transaction.from_pbdata(protobuf_txn)
if txn.subtype in (TX_SUBTYPE_TX, TX_SUBTYPE_COINBASE):
# FIXME: Accessing DB directly
self.state.db.put(bin2hstr(txn.txhash),
[txn.to_json(),
block.blockheader.blocknumber,
block.blockheader.timestamp])
if txn.subtype == TX_SUBTYPE_TX:
self.update_wallet_tx_metadata(txn.txfrom, txn.txhash)
self.update_wallet_tx_metadata(txn.txto, txn.txhash)
self.update_txn_count(txn.txto, txn.txfrom)
def load_chain_by_epoch(self, epoch):
chains = self.f_read_chain(epoch)
self.m_blockchain.append(chains[0])
self.state.read_genesis(self.m_get_block(0))
self.block_chain_buffer = ChainBuffer(self)
for block in chains[1:]:
self.add_block_mainchain(block, validate=False)
return self.m_blockchain
def add_block_mainchain(self, block, validate=True):
return self.block_chain_buffer.add_block_mainchain(chain=self,
block=block,
validate=validate)
def m_read_chain(self):
if not self.m_blockchain:
self.m_load_chain()
return self.m_blockchain
def m_get_block(self, n):
if len(self.m_blockchain) == 0:
return []
beginning_blocknum = self.m_blockchain[0].blockheader.blocknumber
diff = n - beginning_blocknum
if diff < 0:
return self.load_from_file(n)
if diff < len(self.m_blockchain):
return self.m_blockchain[diff]
return []
def m_get_last_block(self):
if len(self.m_blockchain) == 0:
return False
return self.m_blockchain[-1]
def m_create_block(self, reveal_hash, last_block_number=-1):
myBlock = Block()
myBlock.create(self, reveal_hash, last_block_number)
slave_xmss = self.block_chain_buffer.get_slave_xmss(last_block_number + 1)
if not slave_xmss:
return
self.wallet.save_slave(slave_xmss)
return myBlock
def m_add_block(self, block_obj):
if len(self.m_blockchain) == 0:
self.m_read_chain()
if block_obj.validate_block(chain=self) is True:
if self.state.add_block(self, block_obj) is True:
self.m_blockchain.append(block_obj)
self.remove_tx_in_block_from_pool(block_obj)
else:
logger.info('last block failed state/stake checks, removed from chain')
self.state.validate_tx_pool(self)
return False
else:
logger.info('m_add_block failed - block failed validation.')
return False
self.m_f_sync_chain()
return True
def m_remove_last_block(self):
if not self.m_blockchain:
self.m_read_chain()
self.m_blockchain.pop()
def m_blockheight(self):
# return len(self.m_read_chain()) - 1
return self.height()
def height(self):
if len(self.m_blockchain):
return self.m_blockchain[-1].blockheader.blocknumber
return -1
def m_info_block(self, n):
if n > self.m_blockheight():
logger.info('No such block exists yet..')
return False
b = self.m_get_block(n)
logger.info(('Block: ', b, str(b.blockheader.blocknumber)))
logger.info(('Blocksize, ', str(len(json_bytestream(b)))))
logger.info(('Number of transactions: ', str(len(b.transactions))))
logger.info(('Validates: ', b.validate_block(self)))
def m_f_sync_chain(self):
if (self.m_blockchain[-1].blockheader.blocknumber + 1) % config.dev.disk_writes_after_x_blocks == 0:
self.f_write_m_blockchain()
return
def m_verify_chain(self, verbose=0):
for block in self.m_read_chain()[1:]:
if not block.validate_block(self):
return False
return True
# validate and update stake+state for newly appended block.
# can be streamlined to reduce repetition in the added components..
# finish next epoch code..
def add_tx_to_pool(self, tx_class_obj):
self.transaction_pool.append(tx_class_obj)
self.txhash_timestamp.append(tx_class_obj.txhash)
self.txhash_timestamp.append(time())
def remove_tx_from_pool(self, tx_class_obj):
self.transaction_pool.remove(tx_class_obj)
self.txhash_timestamp.pop(self.txhash_timestamp.index(tx_class_obj.txhash) + 1)
self.txhash_timestamp.remove(tx_class_obj.txhash)
def show_tx_pool(self):
return self.transaction_pool
def remove_tx_in_block_from_pool(self, block_obj):
for protobuf_tx in block_obj.transactions:
tx = Transaction.from_pbdata(protobuf_tx)
for txn in self.transaction_pool:
if tx.txhash == txn.txhash:
self.remove_tx_from_pool(txn)
def flush_tx_pool(self):
del self.transaction_pool[:]
def validate_tx_pool(self): # invalid transactions are auto removed from pool..
for tr in self.transaction_pool:
if tr.validate_tx() is False:
self.remove_tx_from_pool(tr)
logger.info(('invalid tx: ', tr, 'removed from pool'))
return True
    ############## BLOCKCHAIN PERSISTENCE
@staticmethod
def get_chaindatafile(epoch):
baseDir = os.path.join(config.user.data_path, config.dev.chain_file_directory)
config.create_path(baseDir)
return os.path.join(baseDir, 'chain.da' + str(epoch))
def m_load_chain(self):
del self.m_blockchain[:]
self.state.zero_all_addresses()
self.load_chain_by_epoch(0)
if len(self.m_blockchain) < config.dev.blocks_per_chain_file:
return self.m_blockchain
epoch = 1
while os.path.isfile(self.get_chaindatafile(epoch)):
del self.m_blockchain[:-1]
chains = self.f_read_chain(epoch)
for block in chains:
self.add_block_mainchain(block, validate=False)
epoch += 1
self.wallet.save_wallet()
return self.m_blockchain
def f_write_m_blockchain(self):
blocknumber = self.m_blockchain[-1].blockheader.blocknumber
file_epoch = int(blocknumber // config.dev.blocks_per_chain_file)
writeable = self.m_blockchain[-config.dev.disk_writes_after_x_blocks:]
logger.info('Appending data to chain')
with open(self.get_chaindatafile(file_epoch), 'ab') as myfile:
for block in writeable:
jsonBlock = bytes(json_bytestream(block), 'utf-8')
compressedBlock = bz2.compress(jsonBlock, config.dev.compression_level)
pos = myfile.tell()
blockSize = len(compressedBlock)
self.block_framedata[block.blockheader.blocknumber] = BlockFrame(pos, blockSize)
myfile.write(compressedBlock)
myfile.write(config.dev.binary_file_delimiter)
del self.m_blockchain[:-1]
def update_block_metadata(self, block_number, block_position, block_size):
        # FIXME: This is not scalable, but it will be fine for Oct2017 while we replace this with protobuf
self.block_framedata[block_number] = [block_position, block_size]
def get_block_metadata(self, block_number):
        # FIXME: This is not scalable, but it will be fine for Oct2017 while we replace this with protobuf
return self.block_framedata[block_number]
def load_from_file(self, blocknum):
epoch = int(blocknum // config.dev.blocks_per_chain_file)
block_offset, block_size = self.get_block_metadata(blocknum)
with open(self.get_chaindatafile(epoch), 'rb') as f:
f.seek(block_offset)
jsonBlock = bz2.decompress(f.read(block_size))
block = Block.from_json(jsonBlock)
return block
def f_read_chain(self, epoch):
delimiter = config.dev.binary_file_delimiter
block_list = []
if not os.path.isfile(self.get_chaindatafile(epoch)):
if epoch != 0:
return []
logger.info('Creating new chain file')
genesis_block = GenesisBlock().set_chain(self)
block_list.append(genesis_block)
return block_list
try:
with open(self.get_chaindatafile(epoch), 'rb') as myfile:
jsonBlock = bytearray()
tmp = bytearray()
count = 0
offset = 0
while True:
chars = myfile.read(config.dev.chain_read_buffer_size)
for char in chars:
offset += 1
if count > 0 and char != delimiter[count]:
count = 0
jsonBlock += tmp
tmp = bytearray()
if char == delimiter[count]:
tmp.append(delimiter[count])
count += 1
if count < len(delimiter):
continue
tmp = bytearray()
count = 0
pos = offset - len(delimiter) - len(jsonBlock)
jsonBlock = bz2.decompress(jsonBlock)
block = Block.from_json(jsonBlock)
self.update_block_metadata(block.blockheader.blocknumber, pos, len(jsonBlock))
block_list.append(block)
jsonBlock = bytearray()
continue
jsonBlock.append(char)
if len(chars) < config.dev.chain_read_buffer_size:
break
except Exception as e:
logger.error('IO error %s', e)
return []
return block_list
| {
"content_hash": "7a96efb4d1da70d75c94569a6f6e50c7",
"timestamp": "",
"source": "github",
"line_count": 693,
"max_line_length": 155,
"avg_line_length": 38.38239538239538,
"alnum_prop": 0.5723899394714087,
"repo_name": "elliottdehn/QRL",
"id": "f19b38d6b57296a08d1562db7f88507a1e711615",
"size": "26752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qrl/core/chain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2276"
},
{
"name": "HTML",
"bytes": "20501"
},
{
"name": "JavaScript",
"bytes": "22142"
},
{
"name": "Python",
"bytes": "431741"
},
{
"name": "Shell",
"bytes": "1096"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: iosxr_command
version_added: "2.1"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Run commands on remote devices running Cisco IOS XR
description:
- Sends arbitrary commands to an IOS XR node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(iosxr_config) to configure iosxr devices.
extends_documentation_fragment: iosxr
notes:
- This module does not support netconf connection
- Tested against IOS XR 6.1.2
options:
commands:
description:
- List of commands to send to the remote iosxr device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of retries, the task fails.
See examples.
aliases: ['waitfor']
version_added: "2.2"
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
default: all
choices: ['any', 'all']
version_added: "2.2"
retries:
description:
- Specifies the number of retries a command should by tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
default: 1
"""
EXAMPLES = """
tasks:
- name: run show version on remote devices
iosxr_command:
commands: show version
- name: run show version and check to see if output contains iosxr
iosxr_command:
commands: show version
wait_for: result[0] contains IOS-XR
- name: run multiple commands on remote nodes
iosxr_command:
commands:
- show version
- show interfaces
- { command: example command that prompts, prompt: expected prompt, answer: yes}
- name: run multiple commands and evaluate the output
iosxr_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains IOS-XR
- result[1] contains Loopback0
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.network.common.utils import to_lines
from ansible.module_utils.network.iosxr.iosxr import run_commands, iosxr_argument_spec
from ansible.module_utils.network.iosxr.iosxr import command_spec
def parse_commands(module, warnings):
commands = module.params['commands']
for item in list(commands):
try:
command = item['command']
except Exception:
command = item
if module.check_mode and not command.startswith('show'):
warnings.append(
'Only show commands are supported when using check mode, not '
'executing %s' % command
)
commands.remove(item)
return commands
def main():
argument_spec = dict(
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
argument_spec.update(iosxr_argument_spec)
argument_spec.update(command_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
result = {'changed': False, 'warnings': warnings}
commands = parse_commands(module, warnings)
wait_for = module.params['wait_for'] or list()
try:
conditionals = [Conditional(c) for c in wait_for]
except AttributeError as exc:
module.fail_json(msg=to_text(exc))
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result.update({
'stdout': responses,
'stdout_lines': list(to_lines(responses)),
})
module.exit_json(**result)
if __name__ == '__main__':
main()
| {
"content_hash": "130338dc2bfd18769782ce7d492feb01",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 88,
"avg_line_length": 32.03414634146341,
"alnum_prop": 0.6520481193848028,
"repo_name": "SergeyCherepanov/ansible",
"id": "e3d971a76a5e71046523966f4c011a12a11466e5",
"size": "6710",
"binary": false,
"copies": "38",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/network/iosxr/iosxr_command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
} |
#The MIT License (MIT)
#Copyright (c) 2014 Microsoft Corporation
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
"""PyCosmos Exceptions in the Azure Cosmos database service.
"""
import azure.cosmos.http_constants as http_constants
class CosmosError(Exception):
"""Base class for all Azure Cosmos errors.
"""
class HTTPFailure(CosmosError):
"""Raised when a HTTP request to the Azure Cosmos has failed.
"""
def __init__(self, status_code, message='', headers=None):
"""
:param int status_code:
:param str message:
"""
if headers is None:
headers = {}
self.status_code = status_code
self.headers = headers
self.sub_status = None
self._http_error_message = message
if http_constants.HttpHeaders.SubStatus in self.headers:
self.sub_status = int(self.headers[http_constants.HttpHeaders.SubStatus])
CosmosError.__init__(self,
'Status code: %d Sub-status: %d\n%s' % (self.status_code, self.sub_status, message))
else:
CosmosError.__init__(self,
'Status code: %d\n%s' % (self.status_code, message))
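# --- Hedged usage sketch (not part of the original file) ---
# Callers typically branch on status_code; the client call and the retry header
# below are illustrative assumptions, not taken from this file:
#
#   try:
#       client.ReadItem(document_link)
#   except HTTPFailure as e:
#       if e.status_code == 404:
#           pass                          # document does not exist
#       elif e.status_code == 429:        # throttled; back off and retry
#           wait_ms = e.headers.get('x-ms-retry-after-ms', 0)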
class JSONParseFailure(CosmosError):
"""Raised when fails to parse JSON message.
"""
class UnexpectedDataType(CosmosError):
"""Raised when unexpected data type is provided as parameter.
""" | {
"content_hash": "47f45ecc0f72fd2b2600d484c009f051",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 117,
"avg_line_length": 39.016129032258064,
"alnum_prop": 0.6903679206283588,
"repo_name": "Azure/azure-documentdb-python",
"id": "584eecd05482e25d32e079a6483174cd40b9084f",
"size": "2421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure/cosmos/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "663705"
}
],
"symlink_target": ""
} |
"""gcloud datastore emulator start command."""
from googlecloudsdk.api_lib.emulators import datastore_util
from googlecloudsdk.api_lib.emulators import util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
class Start(base.Command):
"""Start a local datastore emulator.
This command starts a local datastore emulator.
"""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': """\
To start a local datastore emulator, run:
$ {command} --data-dir DATA-DIR
""",
}
@staticmethod
def Args(parser):
parser.add_argument(
'--host-port',
required=False,
type=arg_parsers.HostPort.Parse,
help='The host:port to which the emulator should be bound.')
parser.add_argument(
'--store-on-disk',
required=False,
type=bool,
default=True,
help='Whether data should be persisted to disk.')
parser.add_argument(
'--consistency',
required=False,
type=float,
default=0.9,
help='Fraction of job application attempts that should succeed.')
def Run(self, args):
if not args.host_port:
args.host_port = arg_parsers.HostPort.Parse(datastore_util.GetHostPort())
args.host_port.host = args.host_port.host or 'localhost'
datastore_util.PrepareGCDDataDir(args.data_dir)
datastore_process = datastore_util.StartGCDEmulator(args)
datastore_util.WriteGCDEnvYaml(args)
util.PrefixOutput(datastore_process, 'datastore')
| {
"content_hash": "6f4e13b83f4fe04f8c38e16922788600",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 79,
"avg_line_length": 29.903846153846153,
"alnum_prop": 0.6655948553054662,
"repo_name": "flgiordano/netcash",
"id": "568a13d8e0ff7bff616f6afe80f232fb3e6f8f95",
"size": "2150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "+/google-cloud-sdk/lib/surface/emulators/datastore/start.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "622"
},
{
"name": "HTML",
"bytes": "33831"
},
{
"name": "JavaScript",
"bytes": "13859"
},
{
"name": "Shell",
"bytes": "2716"
}
],
"symlink_target": ""
} |
"""Various chroot-related exception classes"""
from __future__ import unicode_literals
import os
class ChrootError(Exception):
"""Exception that is raised when there is an error when trying to set up a chroot."""
def __init__(self, message, errno=None):
self.message = message
self.args = (message,)
if errno is not None:
self.errno = errno
self.strerror = os.strerror(errno)
def __str__(self):
error_messages = [self.message]
if getattr(self, 'strerror', False):
error_messages.append(self.strerror)
return ': '.join(error_messages)
class ChrootMountError(ChrootError):
"""Exception that is raised when there is an error trying to set up the bind mounts for a chroot."""
pass
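# --- Hedged usage sketch (not part of the original file) ---
# The optional errno enriches the rendered message with the OS error string;
# the path below is illustrative only:
#
#   try:
#       os.mkdir('/srv/chroot')
#   except OSError as e:
#       raise ChrootError("cannot create chroot dir '/srv/chroot'", e.errno)
#
# str() of the raised error then reads roughly:
#   "cannot create chroot dir '/srv/chroot': Permission denied"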
| {
"content_hash": "a5a71cfa8e87a31c53be94bb05b50a9f",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 104,
"avg_line_length": 27.275862068965516,
"alnum_prop": 0.638432364096081,
"repo_name": "radhermit/pychroot",
"id": "548f2b31461712d04b3601f249b35aa50233d388",
"size": "791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pychroot/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "65064"
}
],
"symlink_target": ""
} |
import os
import sys
import urlparse
import requests
try:
from requests.packages import urllib3
urllib3.disable_warnings()
except ImportError:
pass
AUTH_USERNAME = 'getsentry-bot'
AUTH_TOKEN = os.environ['GITHUB_AUTH_TOKEN']
AUTH = (AUTH_USERNAME, AUTH_TOKEN)
TAG = os.environ.get('TRAVIS_TAG') or \
os.environ.get('APPVEYOR_REPO_TAG_NAME') or os.environ.get('BUILD_TAG')
NAME = 'symsynd'
REPO = 'getsentry/symsynd'
if sys.platform.startswith('win'):
EXT = '.exe'
else:
EXT = ''
def log(message, *args):
if args:
message = message % args
print >> sys.stderr, message
def api_request(method, path, **kwargs):
url = urlparse.urljoin('https://api.github.com/', path.lstrip('/'))
# default travis python does not have SNI
return requests.request(method, url, auth=AUTH, verify=False, **kwargs)
def find_wheels():
dist = os.path.join('dist')
for filename in os.listdir(dist):
if filename.endswith('.whl'):
yield os.path.join(dist, filename)
def get_target_executable_name():
    # NOTE: TARGET is not defined anywhere in this script, so calling this
    # function would raise a NameError; it appears to be unused leftover code.
    bits = TARGET.split('-')
    platform = bits[2].title()
    arch = bits[0]
    return 'sentry-cli-%s-%s%s' % (platform, arch, EXT)
def ensure_release():
resp = api_request('GET', 'repos/%s/releases' % REPO)
resp.raise_for_status()
for release in resp.json():
if release['tag_name'] == TAG:
log('Found already existing release %s' % release['id'])
return release
resp = api_request('POST', 'repos/%s/releases' % REPO, json={
'tag_name': TAG,
'name': '%s %s' % (NAME, TAG),
'draft': True,
})
resp.raise_for_status()
release = resp.json()
log('Created new release %s' % release['id'])
return release
def upload_asset(release, path, asset_info):
asset_name = os.path.basename(path)
for asset in asset_info:
if asset['name'] == asset_name:
log('Already have release asset %s. Skipping' % asset_name)
return
upload_url = release['upload_url'].split('{')[0]
with open(path, 'rb') as f:
log('Creating new release asset %s.' % asset_name)
resp = api_request('POST', upload_url,
params={'name': asset_name},
headers={'Content-Type': 'application/octet-stream'},
data=f)
resp.raise_for_status()
def upload_assets(release, wheels):
resp = api_request('GET', release['assets_url'])
resp.raise_for_status()
asset_info = resp.json()
for wheel in wheels:
upload_asset(release, wheel, asset_info)
def main():
if not TAG:
return log('No tag specified. Doing nothing.')
wheels = list(find_wheels())
if not wheels:
return log('Could not locate wheels. Doing nothing.')
release = ensure_release()
upload_assets(release, wheels)
if __name__ == '__main__':
main()
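# Minimal sketch of the environment this script expects when run in CI
# (all values below are placeholders):
#
#   export GITHUB_AUTH_TOKEN=<token for the getsentry-bot account>
#   export TRAVIS_TAG=1.2.3            # or APPVEYOR_REPO_TAG_NAME / BUILD_TAG
#   python .ci/upload-release.py       # uploads dist/*.whl to the GitHub release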
| {
"content_hash": "775da096f938fab6c4d474c1bf44f079",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 80,
"avg_line_length": 27.12962962962963,
"alnum_prop": 0.6013651877133106,
"repo_name": "getsentry/symsynd",
"id": "b1ebf29cf283a82ca85d0505e43fe537d8929ad8",
"size": "2952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".ci/upload-release.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "7771"
},
{
"name": "C++",
"bytes": "226798"
},
{
"name": "CMake",
"bytes": "382"
},
{
"name": "Makefile",
"bytes": "1065"
},
{
"name": "Python",
"bytes": "66662"
},
{
"name": "Rust",
"bytes": "16553"
},
{
"name": "Shell",
"bytes": "4521"
}
],
"symlink_target": ""
} |
from plugin.srv_graph.pretty_printer import ARCADIAXMLPrinter
from plugin.srv_graph.graph_element import ComponentFactory
from plugin.srv_graph.graph_element import ComponentFactoryFacade
from plugin.srv_graph.graph_builder import GraphBuilder
from plugin.api.responses import ARCADIACompResponse
from mock import MagicMock
class ARCADIAClientMock(object):
def __init__(self, *args, **kwargs):
self._service_graph_tree = None
self._service_graph_printed = None
def create_comp(self, _instance):
pass
def config_comp(self, _instance):
if _instance._node_instance['name'] == 'mysql':
#print "!!!!!!!!!!!!!!!!!!!!!"
#print _instance._node._node.properties['component_jar_path']
_instance._node_instance.runtime_properties['nid'] = 'graph_node_mysql_id'
_instance._node_instance.runtime_properties['cnid'] = 'mysql_id'
_instance._node_instance.runtime_properties['cepcid'] = 'mysqltcp_cepcid'
_instance._node_instance.runtime_properties['ecepid'] = 'mysqltcp'
if _instance._node_instance['name'] == 'wordpress':
_instance._node_instance.runtime_properties['nid'] = 'graph_node_wordpress_id'
_instance._node_instance.runtime_properties['cnid'] = 'wordpress_id'
def create_srv_graph(self, _instance):
pass
def config_srv_graph(self, _instance):
_instance._node_instance.runtime_properties['sgid'] = 'wordpress_mysql_service_graph_id'
_instance._node_instance.runtime_properties['sgname'] = 'SimpleWordPressServiceGraph'
_instance._node_instance.runtime_properties['sgdesc'] = 'SGDescription'
def create_policy(self, _instance):
pass
def config_policy(self, _instance):
_instance._node_instance.runtime_properties['rpid'] = 'RPID'
_instance._node_instance.runtime_properties['rpname'] = 'RPName'
def preconfig_src_relationship(self, _instance):
_instance._relationship_instance['runtime_properties'] = {'nid': 'NID'}
def generate_service_graph(self, _service_graph):
factory = ComponentFactory()
graph_builder = GraphBuilder(_comp_factory = factory)
self._service_graph_tree = graph_builder.build(_service_graph)
def install_service_graph(self):
self._service_graph_printed = self._service_graph_tree.print_element(ARCADIAXMLPrinter())
class ARCADIARestAPIClientMock(object):
def __init__(self, *args, **kwargs):
self.fail_request = False
def get_component_info(self, cnid):
#simulating results from a failure call
if self.fail_request:
return {'rc' : 1, 'message' : 'failed to request server, wrong params'}
#simulating results from a success call
mock_response = MagicMock(spec=ARCADIACompResponse)
        if cnid == 'graph_node_mysql_id':
            # no extra runtime properties are mocked for the mysql component
            pass
elif cnid == 'graph_node_wordpress_id':
mock_response.cepcid = 'mysqltcp_cepcid'
mock_response.ecepid = 'mysqltcp'
return {'rc' : 0, 'message' : 'SUCCESS', 'response' : mock_response}
def register_service_graph(self, service_tree):
#simulating results from a failure call
if self.fail_request:
return {'rc' : 1, 'message' : 'failed to request server, wrong params'}
        #simulating results from a success call
return {'rc' : 0, 'message' : 'SUCCESS'} | {
"content_hash": "c4eb8089aa985d1583914eb6a7193a73",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 91,
"avg_line_length": 34.395604395604394,
"alnum_prop": 0.7207667731629392,
"repo_name": "SINTEF-9012/cloudify-arcadia-plugin",
"id": "de01129cc752d5fd235efaeffe78342e9b554871",
"size": "3130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugin/tests/mocks/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63097"
}
],
"symlink_target": ""
} |
"""
Created on Mon Sep 4 20:25:49 2017
@author: Kirby Urner
"""
"""
primes.py -- Oregon Curriculum Network (OCN)
Feb 1, 2001 changed global var primes to _primes, added relative primes test
Dec 17, 2000 appended probable prime generating methods, plus invmod
Dec 16, 2000 revised to use pow(), removed methods not in text, added sieve()
Dec 12, 2000 small improvements to erastosthenes()
Dec 10, 2000 added Euler test
Oct 3, 2000 modified fermat test
Jun 5, 2000 rewrite of erastosthenes method
May 19, 2000 added more documentation
May 18, 2000 substituted Euclid's algorithm for gcd
Apr 7, 2000 changed name of union function to 'cover' for consistency
Apr 6, 2000 added union, intersection -- changed code of lcm, gcd
Apr 5, 2000 added euler, base, expo functions (renamed old euler to phi)
Mar 31, 2000 added sieve, renaming/modifying divtrial
Mar 30, 2000 added LCM, GCD, euler, fermat
Mar 28, 2000 added lcm
Feb 18, 2000 improved efficiency of isprime(), made getfactors recursive
Apr 28, 2013 changed LCM to not use reduce
Sep 4, 2017 convert to Python 3.6, move to Github
"""
import time, random, operator
from functools import reduce
_primes = [2] # global list of primes
def iseven(n):
"""Return true if n is even."""
return n%2==0
def isodd(n):
"""Return true if n is odd."""
return not iseven(n)
def get2max(maxnb):
"""Return list of primes up to maxnb."""
nbprimes = 0
if maxnb < 2: return []
# start with highest prime so far
i = _primes[-1]
# and add...
i = i + 1 + isodd(i)*1 # next odd number
if i <= maxnb: # if more prime checking needed...
while i<=maxnb:
if divtrial(i): _primes.append(i) # append to list if verdict true
i=i+2 # next odd number
nbprimes = len(_primes)
else:
for i in _primes: # return primes =< maxnb, even if more on file
if i<=maxnb: nbprimes = nbprimes + 1
else: break # quit testing once maxnb exceeded
return _primes[:nbprimes]
def get2nb(nbprimes):
"""Return list of primes with nbprimes members."""
if nbprimes>len(_primes):
# start with highest prime so far
i = _primes[-1]
# and add...
i = i + 1 + isodd(i)*1
while len(_primes)<nbprimes:
if divtrial(i): _primes.append(i)
i=i+2
return _primes[:nbprimes]
def isprime(n):
"""
Divide by primes until n proves composite or prime.
Brute force algorithm, will wimp out for humongous n
return 0 if n is divisible
return 1 if n is prime
"""
rtnval = 1
if n == 2: return 1
if n < 2 or iseven(n): return 0
maxnb = n ** 0.5 # 2nd root of n
# if n < largest prime on file, check for n in list
if n <= _primes[-1]: rtnval = (n in _primes)
# if primes up to root(n) on file, run divtrial (a prime test)
elif maxnb <= _primes[-1]: rtnval = divtrial(n)
else:
rtnval = divtrial(n) # check divisibility by primes so far
if rtnval==1: # now, if still tentatively prime...
# start with highest prime so far
i = _primes[-1]
# and add...
i = i + 1 + isodd(i)*1 # next odd number
while i <= maxnb:
if divtrial(i): # test of primehood
_primes.append(i) # append to list if prime
if not n%i: # if n divisible by latest prime
rtnval = 0 # then we're done
break
i=i+2 # next odd number
return rtnval
def iscomposite(n):
"""
Return true if n is composite.
Uses isprime"""
return not isprime(n)
def divtrial(n):
"""
Trial by division check whether a number is prime."""
verdict = 1 # default is "yes, add to list"
cutoff = n**0.5 # 2nd root of n
for i in _primes:
if not n%i: # if no remainder
verdict = 0 # then we _don't_ want to add
break
if i >= cutoff: # stop trying to divide by
break # lower primes when p**2 > n
return verdict
def erastosthenes(n):
"""
Suggestions from Ka-Ping Yee, John Posner and Tim Peters"""
    sieve = [0, 0, 1] + [1, 0] * (n//2)       # [0 0 1 1 0 1 0...]
prime = 3 # initial odd prime
while prime**2 <= n:
for i in range(prime**2, n+1, prime*2):
sieve[i] = 0 # step through sieve by prime*2
prime += 1 + sieve[prime+1:].index(1) # get next prime
# filter includes corresponding integers where sieve = 1
    return list(filter(lambda i, sieve=sieve: sieve[i], range(n+1)))
def sieve(n):
"""
In-place sieving of odd numbers, adapted from code
by Mike Fletcher
"""
    candidates = list(range(3, n+1, 2))       # start with odds
    for p in candidates:
        if p:                                 # skip zeros
            if p*p > n: break                 # done
            for q in range(p*p, n+1, 2*p):    # sieving
                candidates[(q-3)//2] = 0
    return [2] + [c for c in candidates if c]     # [2] + remaining nonzeros
def base(n,b):
"""
Accepts n in base 10, returns list corresponding to n base b."""
output = []
while n>=1:
n,r = divmod(n,b) # returns quotient, remainder
output.append(r)
output.reverse()
return output
def fermat(n,b=2):
"""Test for primality based on Fermat's Little Theorem.
returns 0 (condition false) if n is composite, -1 if
base is not relatively prime
"""
if gcd(n,b)>1: return -1
else: return pow(b,n-1,n)==1
def jacobi(a,n):
"""Return jacobi number.
source: http://www.utm.edu/research/primes/glossary/JacobiSymbol.html"""
j = 1
while not a == 0:
while iseven(a):
            a = a//2
if (n%8 == 3 or n%8 == 5): j = -j
x=a; a=n; n=x # exchange places
if (a%4 == 3 and n%4 == 3): j = -j
a = a%n
if n == 1: return j
else: return 0
def euler(n,b=2):
"""Euler probable prime if (b**(n-1)/2)%n = jacobi(a,n).
(stronger than simple fermat test)"""
term = pow(b,(n-1)/2.0,n)
jac = jacobi(b,n)
if jac == -1: return term == n-1
else: return term == jac
def getfactors(n):
"""Return list containing prime factors of a number."""
if isprime(n) or n==1: return [n]
else:
for i in _primes:
if not n%i: # if goes evenly
n = n//i
return [i] + getfactors(n)
def gcd(a,b):
"""Return greatest common divisor using Euclid's Algorithm."""
while b:
a, b = b, a % b
return a
def lcm(a,b):
"""
Return lowest common multiple."""
    return (a*b)//gcd(a,b)
def GCD(terms):
"Return gcd of a list of numbers."
return reduce(lambda a,b: gcd(a,b), terms)
def LCM(terms):
"Return lcm of a list of numbers."
result = 1
for t in terms:
result = lcm(result, t)
return result
def phi(n):
"""Return number of integers < n relatively prime to n."""
product = n
used = []
for i in getfactors(n):
if i not in used: # use only unique prime factors
used.append(i)
product = product * (1 - 1.0/i)
return int(product)
def relprimes(n,b=1):
"""
List the remainders after dividing n by each
n-relative prime * some relative prime b
"""
relprimes = []
for i in range(1,n):
if gcd(i,n)==1: relprimes.append(i)
print(" n-rp's: %s" % (relprimes))
    relprimes = list(map(operator.mul, [b]*len(relprimes), relprimes))
    newremainders = list(map(operator.mod, relprimes, [n]*len(relprimes)))
print("b * n-rp's mod n: %s" % newremainders)
def testeuler(a,n):
"""Test Euler's Theorem"""
if gcd(a,n)>1:
print("(a,n) not relative primes")
else:
print("Result: %s" % pow(a,phi(n),n))
def goldbach(n):
"""Return pair of primes such that p1 + p2 = n."""
rtnval = []
_primes = get2max(n)
if isodd(n) and n >= 5:
rtnval = [3] # 3 is a term
n = n-3 # 3 + goldbach(lower even)
if n==2: rtnval.append(2) # catch 5
else:
if n<=3: rtnval = [0,0] # quit if n too small
for i in range(len(_primes)):
# quit if we've found the answer
if len(rtnval) >= 2: break
# work back from highest prime < n
testprime = _primes[-(i+1)]
for j in _primes:
# j works from start of list
if j + testprime == n:
rtnval.append(j)
rtnval.append(testprime) # answer!
break
if j + testprime > n:
break # ready for next testprime
return rtnval
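# Hand-traced examples for the routine above (a sketch, not a test suite):
#   goldbach(10) -> [3, 7]    since 3 + 7 == 10
#   goldbach(8)  -> [3, 5]    the search works back from the largest prime < n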
"""
The functions below have to do with encryption, and RSA in
particular, which uses large probable _primes. More discussion
at the Oregon Curriculum Network website is at
http://www.inetarena.com/~pdx4d/ocn/clubhouse.html
"""
def bighex(n):
hexdigits = list('0123456789ABCDEF')
hexstring = random.choice(hexdigits[1:])
for i in range(n):
hexstring += random.choice(hexdigits)
return eval('0x'+hexstring)
def bigdec(n):
decdigits = list('0123456789')
decstring = random.choice(decdigits[1:])
for i in range(n):
decstring += random.choice(decdigits)
return decstring
def bigppr(digits=100):
"""
Randomly generate a probable prime with a given
number of decimal digits
"""
start = time.clock()
print("Working...")
candidate = int(bigdec(digits)) # or use bighex
if candidate & 1==0:
candidate += 1
prob = 0
while True:
prob=pptest(candidate)
if prob>0: break
else: candidate += 2
print("Percent chance of being prime: %r" % (prob*100))
print("Elapsed time: %s seconds" % (time.clock()-start))
return candidate
def pptest(n):
"""
Simple implementation of Miller-Rabin test for
determining probable primehood.
"""
bases = [random.randrange(2,50000) for x in range(90)]
# if any of the primes is a factor, we're done
if n<=1: return 0
for b in bases:
if n%b==0: return 0
tests,s = 0, 0
m = n-1
# turning (n-1) into (2**s) * m
while not m&1: # while m is even
m >>= 1
s += 1
for b in bases:
tests += 1
isprob = algP(m,s,b,n)
if not isprob: break
if isprob: return (1-(1./(4**tests)))
else: return 0
def algP(m,s,b,n):
"""
based on Algorithm P in Donald Knuth's 'Art of
Computer Programming' v.2 pg. 395
"""
result = 0
y = pow(b,m,n)
for j in range(s):
if (y==1 and j==0) or (y==n-1):
result = 1
break
y = pow(y,2,n)
return result
def invmod(a,b):
"""
Return modular inverse using a version Euclid's Algorithm
Code by Andrew Kuchling in Python Journal:
http://www.pythonjournal.com/volume1/issue1/art-algorithms/
-- in turn also based on Knuth, vol 2.
"""
a1, a2, a3 = 1, 0, a
b1, b2, b3 = 0, 1, b
while b3 != 0:
        # The following division must be integer (floor) division.
        q = a3 // b3
t = a1 - b1*q, a2 - b2*q, a3 - b3*q
a1, a2, a3 = b1, b2, b3
b1, b2, b3 = t
while a2<0: a2 = a2 + a
return a2
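# A toy RSA-style round trip assembled from the helpers above. This is only
# an illustrative sketch (tiny key size, fixed public exponent, hypothetical
# function name) -- not a key-generation recipe. Note the argument order:
# tracing the code above, invmod(m, x) returns the inverse of x modulo m.
def _toy_rsa_round_trip():
    p, q = bigppr(5), bigppr(5)            # two small probable primes
    n, phi_n = p * q, (p - 1) * (q - 1)
    e = 65537                              # common public exponent
    if gcd(e, phi_n) != 1:                 # a real generator would retry here
        return None
    d = invmod(phi_n, e)                   # private exponent: e**-1 mod phi(n)
    assert (d * e) % phi_n == 1
    message = 42                           # toy plaintext, must be < n
    cipher = pow(message, e, n)            # encrypt
    return pow(cipher, d, n) == message    # decrypt and compare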
| {
"content_hash": "dcc6c3a59ed5596439718e22abc53d68",
"timestamp": "",
"source": "github",
"line_count": 397,
"max_line_length": 79,
"avg_line_length": 28.34005037783375,
"alnum_prop": 0.5720380410630166,
"repo_name": "4dsolutions/Python5",
"id": "9ff676d169a0b0e7a4ccfdea15d72818323725ff",
"size": "11298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "primes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2969"
},
{
"name": "Jupyter Notebook",
"bytes": "2264451"
},
{
"name": "Python",
"bytes": "157873"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from rapidsms.models import Contact, Connection
@python_2_unicode_compatible
class Message(models.Model):
INCOMING = "I"
OUTGOING = "O"
DIRECTION_CHOICES = (
(INCOMING, "Incoming"),
(OUTGOING, "Outgoing"),
)
contact = models.ForeignKey(Contact, blank=True, null=True)
connection = models.ForeignKey(Connection, blank=True, null=True)
direction = models.CharField(max_length=1, choices=DIRECTION_CHOICES)
date = models.DateTimeField()
text = models.TextField()
class Meta:
app_label = 'messagelog'
def save(self, *args, **kwargs):
"""
Verifies that one (not both) of the contact or connection fields
have been populated (raising ValidationError if not), and saves
the object as usual.
"""
if self.contact is None and self.connection is None:
raise ValidationError("A valid (not null) contact or connection "
"(but not both) must be provided to save the object.")
elif self.connection and self.contact and \
(self.contact != self.connection.contact):
raise ValidationError("The connection and contact you tried to "
"save did not match! You need to pick one or the other.")
if self.connection and self.connection.contact is not None:
# set the contact here as well, even if they didn't
# do it explicitly. If the contact's number changes
# we still might want to know who it originally came
# in from.
self.contact = self.connection.contact
super(Message, self).save(*args, **kwargs)
@property
def who(self):
"""Returns the Contact or Connection linked to this object."""
return self.contact or self.connection
def __str__(self):
# crop the text (to avoid exploding the admin)
text = self.text if len(self.text) < 60 else "%s..." % self.text[0:57]
direction = "to" if self.direction == self.INCOMING else "from"
return "%s (%s %s)" % (text, direction, self.who)
| {
"content_hash": "329f75fe1ae80ac2de3f4452e20800dc",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 78,
"avg_line_length": 38.847457627118644,
"alnum_prop": 0.6391797556719022,
"repo_name": "catalpainternational/rapidsms",
"id": "1a35b0c5d04c8693bb0287ad2ca52b554f547915",
"size": "2343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rapidsms/contrib/messagelog/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8143"
},
{
"name": "HTML",
"bytes": "27797"
},
{
"name": "JavaScript",
"bytes": "7020"
},
{
"name": "Python",
"bytes": "305443"
},
{
"name": "Shell",
"bytes": "149"
}
],
"symlink_target": ""
} |
update_template = '''#!/usr/bin/env bash
# The MIT License (MIT)
#
# Copyright (c) 2016 Philippe Proulx <eepp.ca>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Run this file from your shell to update the {name} project
# within this virtual environment.
# Make sure we're in the right current working directory
cd {src_path}
# Uninstall {name}
{uninstall_lines}
# Update the code
git clean -xdf
git fetch origin
git checkout origin/{gitref}
if [ $? -ne 0 ]; then
echo 'Error: Cannot checkout origin/{gitref}'
exit 1
fi
# Configure {name}
../../conf-{name}.bash
# Build {name}
../../build-{name}.bash
../../install-{name}.bash
'''
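# Minimal sketch of how the template might be rendered; every value below is
# a hypothetical placeholder.
#
#   script = update_template.format(
#       name='lttng-tools',
#       src_path='/path/to/venv/src/lttng-tools',
#       uninstall_lines='make uninstall',
#       gitref='master',
#   )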
| {
"content_hash": "e9ff8a9eea92f7736317f11d30e21f07",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 79,
"avg_line_length": 33.18,
"alnum_prop": 0.7426160337552743,
"repo_name": "eepp/vlttng",
"id": "ae41f501f292cf7434852a6a13090718a9156ca5",
"size": "2790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vlttng/update_template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "86872"
}
],
"symlink_target": ""
} |
'''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p, constants as _cs, arrays
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_ARB_geometry_shader4'
def _f( function ):
return _p.createFunction( function,_p.GL,'GL_ARB_geometry_shader4',False)
_p.unpack_constants( """GL_LINES_ADJACENCY_ARB 0xA
GL_LINE_STRIP_ADJACENCY_ARB 0xB
GL_TRIANGLES_ADJACENCY_ARB 0xC
GL_TRIANGLE_STRIP_ADJACENCY_ARB 0xD
GL_PROGRAM_POINT_SIZE_ARB 0x8642
GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_ARB 0x8C29
GL_FRAMEBUFFER_ATTACHMENT_LAYERED_ARB 0x8DA7
GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_ARB 0x8DA8
GL_FRAMEBUFFER_INCOMPLETE_LAYER_COUNT_ARB 0x8DA9
GL_GEOMETRY_SHADER_ARB 0x8DD9
GL_GEOMETRY_VERTICES_OUT_ARB 0x8DDA
GL_GEOMETRY_INPUT_TYPE_ARB 0x8DDB
GL_GEOMETRY_OUTPUT_TYPE_ARB 0x8DDC
GL_MAX_GEOMETRY_VARYING_COMPONENTS_ARB 0x8DDD
GL_MAX_VERTEX_VARYING_COMPONENTS_ARB 0x8DDE
GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_ARB 0x8DDF
GL_MAX_GEOMETRY_OUTPUT_VERTICES_ARB 0x8DE0
GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_ARB 0x8DE1""", globals())
glget.addGLGetConstant( GL_PROGRAM_POINT_SIZE_ARB, (1,) )
glget.addGLGetConstant( GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_ARB, (1,) )
glget.addGLGetConstant( GL_MAX_GEOMETRY_VARYING_COMPONENTS_ARB, (1,) )
glget.addGLGetConstant( GL_MAX_VERTEX_VARYING_COMPONENTS_ARB, (1,) )
glget.addGLGetConstant( GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_ARB, (1,) )
glget.addGLGetConstant( GL_MAX_GEOMETRY_OUTPUT_VERTICES_ARB, (1,) )
glget.addGLGetConstant( GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_ARB, (1,) )
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLint)
def glProgramParameteriARB( program,pname,value ):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLuint,_cs.GLint)
def glFramebufferTextureARB( target,attachment,texture,level ):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLuint,_cs.GLint,_cs.GLint)
def glFramebufferTextureLayerARB( target,attachment,texture,level,layer ):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLuint,_cs.GLint,_cs.GLenum)
def glFramebufferTextureFaceARB( target,attachment,texture,level,face ):pass
def glInitGeometryShader4ARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( EXTENSION_NAME )
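# Illustrative sketch (assumes a current GL context; 'program' stands in for a
# shader-program id created elsewhere):
#
#   if glInitGeometryShader4ARB():
#       glProgramParameteriARB(program, GL_GEOMETRY_VERTICES_OUT_ARB, 3)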
| {
"content_hash": "18c5b4e20fa20cb38718762f8d9d1f42",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 78,
"avg_line_length": 45.62,
"alnum_prop": 0.7834283209118807,
"repo_name": "frederica07/Dragon_Programming_Process",
"id": "28968012f36d90283e79cffc4361acecfe07c24f",
"size": "2281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyOpenGL-3.0.2/OpenGL/raw/GL/ARB/geometry_shader4.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1548"
},
{
"name": "Python",
"bytes": "2558317"
}
],
"symlink_target": ""
} |
import logging
import os
import subprocess
import sys
import thread
from . import server
from options import parser
from pkg_resources import resource_filename
from utils import container, target, project
gdb_command_file = '.gdb_commands'
gdb_commands = '''\
target remote %s:3333
file %s
load
'''
def addOptions(parser):
'''
This is a required function that's called from main when this module is imported
'''
pass
def execCommand(args, following_args):
current_target = target.get_current_target()
if not current_target:
print >>sys.stderr, 'Set a target and build your project before debugging'
sys.exit(1)
generate_gdb_commands(current_target)
if server.get_server_status() == server.server_stopped:
print 'Kubos GDB server not running...\nStarting GDB Server...'
server.start_server()
gdb_file_path = os.path.join(os.getcwd(), gdb_command_file)
if current_target.startswith('stm32') or current_target.startswith('na'):
command = ['arm-none-eabi-gdb', '-x', gdb_file_path]
elif current_target.startswith('msp430'):
command = ['msp430-gdb', '-x', gdb_file_path]
container.debug(command)
def generate_gdb_commands(current_target):
proj_name = project.get_project_name()
exe_path = os.path.join(os.getcwd(), 'build', current_target, 'source', proj_name)
commands = gdb_commands % (get_host_ip(), exe_path)
if not os.path.isfile(exe_path):
        print >>sys.stderr, 'Error, the binary %s does not exist. Run `kubos build` to build your project before debugging' % exe_path
sys.exit(1)
with open(gdb_command_file, 'w') as gdb_file:
gdb_file.write(commands)
def get_host_ip():
if sys.platform.startswith('linux'):
return 'localhost'
if sys.platform.startswith('darwin'):
kubos_dir = get_kubos_dir()
machine_name = os.getenv('DOCKER_MACHINE_NAME')
script_path = os.path.join(kubos_dir, 'utils', 'getip.sh')
try:
ip = subprocess.check_output(['/bin/bash', script_path])
return ip.strip()
except subprocess.CalledProcessError as e:
print >>sys.stderr, 'There was an error getting your docker-machine configuration. You might need to re-run the `docker-machine env` command'
sys.exit(1)
def get_kubos_dir():
kubos_dir = resource_filename(__name__, '')
return kubos_dir
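# For a hypothetical 'stm32f407-disco-gcc' target and a project named 'demo',
# generate_gdb_commands() would write roughly the following to .gdb_commands:
#
#   target remote <host-ip>:3333
#   file <cwd>/build/stm32f407-disco-gcc/source/demo
#   load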
| {
"content_hash": "2571fcaa599b1a5ce481e3251a6f6354",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 153,
"avg_line_length": 33.06849315068493,
"alnum_prop": 0.6628003314001657,
"repo_name": "kyleparrott/kubos-sdk",
"id": "fd25c93c0be3d3e7484f0a8abe1420a068140e08",
"size": "3012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubos/debug.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "91637"
},
{
"name": "Shell",
"bytes": "1124"
},
{
"name": "Tcl",
"bytes": "1687"
}
],
"symlink_target": ""
} |
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._queues_operations import build_create_or_update_authorization_rule_request, build_create_or_update_request, build_delete_authorization_rule_request, build_delete_request, build_get_authorization_rule_request, build_get_request, build_list_authorization_rules_request, build_list_by_namespace_request, build_list_keys_request, build_regenerate_keys_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class QueuesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.servicebus.v2022_01_01_preview.aio.ServiceBusManagementClient`'s
:attr:`queues` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_authorization_rules(
self,
resource_group_name: str,
namespace_name: str,
queue_name: str,
**kwargs: Any
) -> AsyncIterable[_models.SBAuthorizationRuleListResult]:
"""Gets all authorization rules for a queue.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param queue_name: The queue name.
:type queue_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SBAuthorizationRuleListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.servicebus.v2022_01_01_preview.models.SBAuthorizationRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-01-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.SBAuthorizationRuleListResult]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_authorization_rules_request(
resource_group_name=resource_group_name,
namespace_name=namespace_name,
queue_name=queue_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_authorization_rules.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_authorization_rules_request(
resource_group_name=resource_group_name,
namespace_name=namespace_name,
queue_name=queue_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SBAuthorizationRuleListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_authorization_rules.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules"} # type: ignore
@distributed_trace_async
async def create_or_update_authorization_rule(
self,
resource_group_name: str,
namespace_name: str,
queue_name: str,
authorization_rule_name: str,
parameters: _models.SBAuthorizationRule,
**kwargs: Any
) -> _models.SBAuthorizationRule:
"""Creates an authorization rule for a queue.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param queue_name: The queue name.
:type queue_name: str
:param authorization_rule_name: The authorization rule name.
:type authorization_rule_name: str
:param parameters: The shared access authorization rule.
:type parameters: ~azure.mgmt.servicebus.v2022_01_01_preview.models.SBAuthorizationRule
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SBAuthorizationRule, or the result of cls(response)
:rtype: ~azure.mgmt.servicebus.v2022_01_01_preview.models.SBAuthorizationRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-01-01-preview")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.SBAuthorizationRule]
_json = self._serialize.body(parameters, 'SBAuthorizationRule')
request = build_create_or_update_authorization_rule_request(
resource_group_name=resource_group_name,
namespace_name=namespace_name,
queue_name=queue_name,
authorization_rule_name=authorization_rule_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.create_or_update_authorization_rule.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SBAuthorizationRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_authorization_rule.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}"} # type: ignore
@distributed_trace_async
async def delete_authorization_rule( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
namespace_name: str,
queue_name: str,
authorization_rule_name: str,
**kwargs: Any
) -> None:
"""Deletes a queue authorization rule.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param queue_name: The queue name.
:type queue_name: str
:param authorization_rule_name: The authorization rule name.
:type authorization_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-01-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
request = build_delete_authorization_rule_request(
resource_group_name=resource_group_name,
namespace_name=namespace_name,
queue_name=queue_name,
authorization_rule_name=authorization_rule_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete_authorization_rule.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_authorization_rule.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}"} # type: ignore
@distributed_trace_async
async def get_authorization_rule(
self,
resource_group_name: str,
namespace_name: str,
queue_name: str,
authorization_rule_name: str,
**kwargs: Any
) -> _models.SBAuthorizationRule:
"""Gets an authorization rule for a queue by rule name.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param queue_name: The queue name.
:type queue_name: str
:param authorization_rule_name: The authorization rule name.
:type authorization_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SBAuthorizationRule, or the result of cls(response)
:rtype: ~azure.mgmt.servicebus.v2022_01_01_preview.models.SBAuthorizationRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-01-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.SBAuthorizationRule]
request = build_get_authorization_rule_request(
resource_group_name=resource_group_name,
namespace_name=namespace_name,
queue_name=queue_name,
authorization_rule_name=authorization_rule_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_authorization_rule.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SBAuthorizationRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_authorization_rule.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}"} # type: ignore
@distributed_trace_async
async def list_keys(
self,
resource_group_name: str,
namespace_name: str,
queue_name: str,
authorization_rule_name: str,
**kwargs: Any
) -> _models.AccessKeys:
"""Primary and secondary connection strings to the queue.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param queue_name: The queue name.
:type queue_name: str
:param authorization_rule_name: The authorization rule name.
:type authorization_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AccessKeys, or the result of cls(response)
:rtype: ~azure.mgmt.servicebus.v2022_01_01_preview.models.AccessKeys
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-01-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.AccessKeys]
request = build_list_keys_request(
resource_group_name=resource_group_name,
namespace_name=namespace_name,
queue_name=queue_name,
authorization_rule_name=authorization_rule_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_keys.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AccessKeys', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_keys.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}/ListKeys"} # type: ignore
@distributed_trace_async
async def regenerate_keys(
self,
resource_group_name: str,
namespace_name: str,
queue_name: str,
authorization_rule_name: str,
parameters: _models.RegenerateAccessKeyParameters,
**kwargs: Any
) -> _models.AccessKeys:
"""Regenerates the primary or secondary connection strings to the queue.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param queue_name: The queue name.
:type queue_name: str
:param authorization_rule_name: The authorization rule name.
:type authorization_rule_name: str
:param parameters: Parameters supplied to regenerate the authorization rule.
:type parameters:
~azure.mgmt.servicebus.v2022_01_01_preview.models.RegenerateAccessKeyParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AccessKeys, or the result of cls(response)
:rtype: ~azure.mgmt.servicebus.v2022_01_01_preview.models.AccessKeys
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-01-01-preview")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.AccessKeys]
_json = self._serialize.body(parameters, 'RegenerateAccessKeyParameters')
request = build_regenerate_keys_request(
resource_group_name=resource_group_name,
namespace_name=namespace_name,
queue_name=queue_name,
authorization_rule_name=authorization_rule_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.regenerate_keys.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AccessKeys', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
regenerate_keys.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}/regenerateKeys"} # type: ignore
@distributed_trace
def list_by_namespace(
self,
resource_group_name: str,
namespace_name: str,
skip: Optional[int] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable[_models.SBQueueListResult]:
"""Gets the queues within a namespace.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param skip: Skip is only used if a previous operation returned a partial result. If a previous
response contains a nextLink element, the value of the nextLink element will include a skip
parameter that specifies a starting point to use for subsequent calls. Default value is None.
:type skip: int
:param top: May be used to limit the number of results to the most recent N usageDetails.
Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SBQueueListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.servicebus.v2022_01_01_preview.models.SBQueueListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-01-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.SBQueueListResult]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_namespace_request(
resource_group_name=resource_group_name,
namespace_name=namespace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
skip=skip,
top=top,
template_url=self.list_by_namespace.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_by_namespace_request(
resource_group_name=resource_group_name,
namespace_name=namespace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
skip=skip,
top=top,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SBQueueListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_namespace.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues"} # type: ignore
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
namespace_name: str,
queue_name: str,
parameters: _models.SBQueue,
**kwargs: Any
) -> _models.SBQueue:
"""Creates or updates a Service Bus queue. This operation is idempotent.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param queue_name: The queue name.
:type queue_name: str
:param parameters: Parameters supplied to create or update a queue resource.
:type parameters: ~azure.mgmt.servicebus.v2022_01_01_preview.models.SBQueue
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SBQueue, or the result of cls(response)
:rtype: ~azure.mgmt.servicebus.v2022_01_01_preview.models.SBQueue
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-01-01-preview")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.SBQueue]
_json = self._serialize.body(parameters, 'SBQueue')
request = build_create_or_update_request(
resource_group_name=resource_group_name,
namespace_name=namespace_name,
queue_name=queue_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SBQueue', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}"} # type: ignore
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
namespace_name: str,
queue_name: str,
**kwargs: Any
) -> None:
"""Deletes a queue from the specified namespace in a resource group.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param queue_name: The queue name.
:type queue_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-01-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
namespace_name=namespace_name,
queue_name=queue_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}"} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
namespace_name: str,
queue_name: str,
**kwargs: Any
) -> _models.SBQueue:
"""Returns a description for the specified queue.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param queue_name: The queue name.
:type queue_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SBQueue, or the result of cls(response)
:rtype: ~azure.mgmt.servicebus.v2022_01_01_preview.models.SBQueue
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-01-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.SBQueue]
request = build_get_request(
resource_group_name=resource_group_name,
namespace_name=namespace_name,
queue_name=queue_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SBQueue', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}"} # type: ignore
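    # Illustrative call sketch (resource names are placeholders; assumes the
    # azure-identity package is available alongside this client):
    #
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.servicebus.v2022_01_01_preview.aio import ServiceBusManagementClient
    #
    #   async def print_queue_names():
    #       async with DefaultAzureCredential() as credential:
    #           async with ServiceBusManagementClient(credential, "<subscription-id>") as client:
    #               async for queue in client.queues.list_by_namespace("<resource-group>", "<namespace>"):
    #                   print(queue.name)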
| {
"content_hash": "e92408e56c6fbf8567d388c5268cc03d",
"timestamp": "",
"source": "github",
"line_count": 814,
"max_line_length": 375,
"avg_line_length": 45.09213759213759,
"alnum_prop": 0.6363165781228716,
"repo_name": "Azure/azure-sdk-for-python",
"id": "7f7e1f04ab7a24202c7e173c4e76250042c4b5fc",
"size": "37205",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/servicebus/azure-mgmt-servicebus/azure/mgmt/servicebus/v2022_01_01_preview/aio/operations/_queues_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('visit', '0086_auto_20150829_2157'),
]
operations = [
migrations.RemoveField(
model_name='issueprek',
name='visit_num',
),
migrations.RemoveField(
model_name='issueprimary',
name='visit_num',
),
]
| {
"content_hash": "04138541d1e4727223650a8e81fbc86a",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 45,
"avg_line_length": 21.142857142857142,
"alnum_prop": 0.5630630630630631,
"repo_name": "koebbe/homeworks",
"id": "0ee07f07ff5824f2b26edeaacdfc0d7d812fbbbd",
"size": "468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "visit/migrations/0087_auto_20150829_2219.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "44210"
},
{
"name": "HTML",
"bytes": "69003"
},
{
"name": "JavaScript",
"bytes": "124572"
},
{
"name": "Python",
"bytes": "223075"
}
],
"symlink_target": ""
} |
import os.path
import StringIO
import lxml.etree as ET
import dhtmlparser
from marcxml_parser import MARCXMLRecord
# Variables ===================================================================
XML_TEMPLATE = """<root>
<collection xmlns="http://www.loc.gov/MARC21/slim"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.loc.gov/MARC21/slim \
http://www.loc.gov/standards/marcxml/schema/MARC21slim.xsd">
$CONTENT
</collection>
</root>
"""
# Functions & objects =========================================================
def _oai_to_xml(marc_oai): # TODO: move this to MARC XML parser?
"""
Convert OAI to MARC XML.
Args:
marc_oai (str): String with either OAI or MARC XML.
Returns:
str: String with MARC XML.
"""
record = MARCXMLRecord(marc_oai)
record.oai_marc = False
return record.to_XML()
def _add_namespace(marc_xml):
"""
Add proper XML namespace to the `marc_xml` record.
Args:
marc_xml (str): String representation of the XML record.
Returns:
str: XML with namespace.
"""
dom = marc_xml
if isinstance(dom, basestring):
dom = dhtmlparser.parseString(marc_xml)
root = dom.find("root")
if root:
root[0].params = {}
for record in dom.find("record"):
record.params = {}
collections = dom.find("collection")
if not collections:
record = dom.find("record")[0]
return XML_TEMPLATE.replace("$CONTENT", str(record))
for col in collections:
col.params["xmlns"] = "http://www.loc.gov/MARC21/slim"
col.params["xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance"
col.params["xsi:schemaLocation"] = "http://www.loc.gov/MARC21/slim " + \
"http://www.loc.gov/standards/marcxml/schema/MARC21slim.xsd"
return str(dom)
def _read_content_or_path(content_or_path):
"""
    If `content_or_path` contains ``\\n``, return it. Otherwise assume that it
    is a path and read the file at that path.
Args:
content_or_path (str): Content or path to the file.
Returns:
str: Content.
Raises:
        IOError: when the file is not found.
"""
if "\n" in content_or_path.strip():
return content_or_path
if not os.path.exists(content_or_path):
        raise IOError("File '%s' doesn't exist!" % content_or_path)
with open(content_or_path) as f:
return f.read()
def _read_marcxml(xml):
"""
Read MARC XML or OAI file, convert, add namespace and return XML in
required format with all necessities.
Args:
xml (str): Filename or XML string. Don't use ``\\n`` in case of
filename.
Returns:
obj: Required XML parsed with ``lxml.etree``.
"""
# read file, if `xml` is valid file path
marc_xml = _read_content_or_path(xml)
# process input file - convert it from possible OAI to MARC XML and add
# required XML namespaces
marc_xml = _oai_to_xml(marc_xml)
marc_xml = _add_namespace(marc_xml)
file_obj = StringIO.StringIO(marc_xml)
return ET.parse(file_obj)
def _read_template(template):
"""
Read XSLT template.
Args:
template (str): Filename or XML string. Don't use ``\\n`` in case of
filename.
Returns:
obj: Required XML parsed with ``lxml.etree``.
"""
template = _read_content_or_path(template)
file_obj = StringIO.StringIO(template)
return ET.parse(file_obj)
def xslt_transformation(xml, template):
"""
Transform `xml` using XSLT `template`.
Args:
xml (str): Filename or XML string. Don't use ``\\n`` in case of
filename.
template (str): Filename or XML string. Don't use ``\\n`` in case of
filename.
Returns:
str: Transformed `xml` as string.
"""
transformer = ET.XSLT(
_read_template(template)
)
newdom = transformer(
_read_marcxml(xml)
)
return ET.tostring(newdom, pretty_print=True, encoding="utf-8")
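# Usage example (editor's sketch) =============================================
# Minimal command-line driver for the public entry point above: the caller
# supplies two paths (a MARC XML/OAI record and an XSLT template) and the
# transformed XML is written to stdout. Both paths are assumptions of the
# example, not files shipped with this module.
if __name__ == '__main__':
    import sys
    sys.stdout.write(xslt_transformation(sys.argv[1], sys.argv[2]))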
| {
"content_hash": "3595aa26c7d10f014b7e66c985ec7cfc",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 80,
"avg_line_length": 25.596273291925467,
"alnum_prop": 0.5877214268381461,
"repo_name": "edeposit/marcxml2mods",
"id": "bebf009e969c163bed502cf25638cb3f003b17d7",
"size": "4286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/marcxml2mods/xslt_transformer.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47569"
},
{
"name": "Shell",
"bytes": "72"
},
{
"name": "XSLT",
"bytes": "531721"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django import forms
from django.utils.translation import ugettext_lazy as _
from allauth.account.forms import BaseSignupForm
from allauth.account.utils import (user_username, user_email,
user_field)
from .models import SocialAccount
from .adapter import get_adapter
from . import app_settings
from . import signals
class SignupForm(BaseSignupForm):
def __init__(self, *args, **kwargs):
self.sociallogin = kwargs.pop('sociallogin')
user = self.sociallogin.user
# TODO: Should become more generic, not listing
# a few fixed properties.
initial = {'email': user_email(user) or '',
'username': user_username(user) or '',
'first_name': user_field(user, 'first_name') or '',
'last_name': user_field(user, 'last_name') or ''}
kwargs.update({
'initial': initial,
'email_required': kwargs.get('email_required',
app_settings.EMAIL_REQUIRED)})
super(SignupForm, self).__init__(*args, **kwargs)
def save(self, request):
adapter = get_adapter(request)
user = adapter.save_user(request, self.sociallogin, form=self)
self.custom_signup(request, user)
return user
def raise_duplicate_email_error(self):
raise forms.ValidationError(
_("An account already exists with this e-mail address."
" Please sign in to that account first, then connect"
" your %s account.")
% self.sociallogin.account.get_provider().name)
class DisconnectForm(forms.Form):
account = forms.ModelChoiceField(queryset=SocialAccount.objects.none(),
widget=forms.RadioSelect,
required=True)
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request')
self.accounts = SocialAccount.objects.filter(user=self.request.user)
super(DisconnectForm, self).__init__(*args, **kwargs)
self.fields['account'].queryset = self.accounts
def clean(self):
cleaned_data = super(DisconnectForm, self).clean()
account = cleaned_data.get('account')
if account:
get_adapter(self.request).validate_disconnect(
account,
self.accounts)
return cleaned_data
def save(self):
account = self.cleaned_data['account']
account.delete()
signals.social_account_removed.send(sender=SocialAccount,
request=self.request,
socialaccount=account)
| {
"content_hash": "6ce9098cc7d917138d9f13401298cd2c",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 76,
"avg_line_length": 38.263888888888886,
"alnum_prop": 0.585480943738657,
"repo_name": "jwhitlock/django-allauth",
"id": "c79387cf1cdb4800b3fefc7f17a12296a4d543a6",
"size": "2755",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "allauth/socialaccount/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "42100"
},
{
"name": "JavaScript",
"bytes": "3967"
},
{
"name": "Makefile",
"bytes": "295"
},
{
"name": "Python",
"bytes": "710875"
}
],
"symlink_target": ""
} |
import time
import pytest
from flexmock import flexmock
from pyp2rpm.package_data import *
class TestPackageData(object):
@pytest.mark.parametrize(('s', 'expected'), [
('Spam.', 'Spam'),
('Spam', 'Spam'),
])
def test_summary_with_dot(self, s, expected):
pd = PackageData('spam', 'spam', 'python-spam', 'spam')
pd.summary = s
assert pd.summary == expected
@pytest.mark.parametrize('name', [
'summary', 'description', ])
def test_set_none_value(self, name):
pd = PackageData('spam', 'spam', 'python-spam', 'spam')
setattr(pd, name, None)
actual = getattr(pd, name)
assert actual == 'TODO:'
def test_get_nonexistent_attribute(self):
pd = PackageData('spam', 'spam', 'python-spam', 'spam')
assert pd.eggs == 'TODO:'
@pytest.mark.parametrize(('n', 'expected'), [
('py-spam', 'py_spam'),
('py_spam', 'py_spam'),
('spam', 'spam'),
])
def test_underscored_name(self, n, expected):
pd = PackageData('spam', n, 'python-spam', 'spam')
assert pd.underscored_name == expected
| {
"content_hash": "15d228433c76c235cb1b4426185a5564",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 63,
"avg_line_length": 30.263157894736842,
"alnum_prop": 0.5669565217391305,
"repo_name": "joequant/pyp2rpm",
"id": "683d88562f10b98cb7cbf31dc392a5aded90f1ba",
"size": "1150",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_package_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91419"
}
],
"symlink_target": ""
} |
from datetime import date
class Prescription(object):
def __init__(self, dispense_date=None, days_supply=30):
self.dispense_date = dispense_date or date.today()
self.days_supply = days_supply
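# A minimal usage sketch (editor's addition): the dispense date defaults to
# today and the supply to 30 days; both can be overridden explicitly.
if __name__ == '__main__':
    default_rx = Prescription()
    custom_rx = Prescription(dispense_date=date(2015, 1, 15), days_supply=90)
    print("%s / %s days" % (default_rx.dispense_date, default_rx.days_supply))
    print("%s / %s days" % (custom_rx.dispense_date, custom_rx.days_supply))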
| {
"content_hash": "ffa02e3cfbd12271d6b14b4c07d4969c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 59,
"avg_line_length": 31.142857142857142,
"alnum_prop": 0.6743119266055045,
"repo_name": "emilybache/KataMedicineClash",
"id": "a528a94422446e1897210380cd5e1dbec8106ba3",
"size": "219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/prescription.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "3407"
},
{
"name": "Java",
"bytes": "1234"
},
{
"name": "JavaScript",
"bytes": "841"
},
{
"name": "Makefile",
"bytes": "114"
},
{
"name": "Python",
"bytes": "26568"
},
{
"name": "Ruby",
"bytes": "15509"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function, unicode_literals
import copy
import itertools
import os
import sys
import attr
import plette.models.base
import plette.pipfiles
import tomlkit
from vistir.compat import FileNotFoundError, Path
from ..environment import MYPY_RUNNING
from ..exceptions import RequirementError
from ..utils import is_editable, is_vcs, merge_items
from .project import ProjectFile
from .requirements import Requirement
from .utils import get_url_name, optional_instance_of, tomlkit_value_to_python
if MYPY_RUNNING:
from typing import Union, Any, Dict, Iterable, Mapping, List, Text
package_type = Dict[Text, Dict[Text, Union[List[Text], Text]]]
source_type = Dict[Text, Union[Text, bool]]
sources_type = Iterable[source_type]
meta_type = Dict[Text, Union[int, Dict[Text, Text], sources_type]]
lockfile_type = Dict[Text, Union[package_type, meta_type]]
is_pipfile = optional_instance_of(plette.pipfiles.Pipfile)
is_path = optional_instance_of(Path)
is_projectfile = optional_instance_of(ProjectFile)
def reorder_source_keys(data):
# type: (tomlkit.toml_document.TOMLDocument) -> tomlkit.toml_document.TOMLDocument
sources = [] # type: sources_type
for source_key in ["source", "sources"]:
sources.extend(data.get(source_key, tomlkit.aot()).value)
new_source_aot = tomlkit.aot()
for entry in sources:
table = tomlkit.table() # type: tomlkit.items.Table
source_entry = PipfileLoader.populate_source(entry.copy())
for key in ["name", "url", "verify_ssl"]:
table.update({key: source_entry[key]})
new_source_aot.append(table)
data["source"] = new_source_aot
if data.get("sources", None):
del data["sources"]
return data
class PipfileLoader(plette.pipfiles.Pipfile):
@classmethod
def validate(cls, data):
# type: (tomlkit.toml_document.TOMLDocument) -> None
for key, klass in plette.pipfiles.PIPFILE_SECTIONS.items():
if key not in data or key == "sources":
continue
try:
klass.validate(data[key])
except Exception:
pass
@classmethod
def ensure_package_sections(cls, data):
# type: (tomlkit.toml_document.TOMLDocument[Text, Any]) -> tomlkit.toml_document.TOMLDocument[Text, Any]
"""
Ensure that all pipfile package sections are present in the given toml document
:param :class:`~tomlkit.toml_document.TOMLDocument` data: The toml document to
ensure package sections are present on
:return: The updated toml document, ensuring ``packages`` and ``dev-packages``
sections are present
:rtype: :class:`~tomlkit.toml_document.TOMLDocument`
"""
package_keys = (
k for k in plette.pipfiles.PIPFILE_SECTIONS.keys() if k.endswith("packages")
)
for key in package_keys:
if key not in data:
data.update({key: tomlkit.table()})
return data
@classmethod
def populate_source(cls, source):
"""Derive missing values of source from the existing fields."""
        # Only the URL parameter is mandatory; let the KeyError be thrown.
if "name" not in source:
source["name"] = get_url_name(source["url"])
if "verify_ssl" not in source:
source["verify_ssl"] = "https://" in source["url"]
if not isinstance(source["verify_ssl"], bool):
source["verify_ssl"] = str(source["verify_ssl"]).lower() == "true"
return source
@classmethod
def load(cls, f, encoding=None):
# type: (Any, Text) -> PipfileLoader
content = f.read()
if encoding is not None:
content = content.decode(encoding)
_data = tomlkit.loads(content)
should_reload = "source" not in _data
_data = reorder_source_keys(_data)
if should_reload:
if "sources" in _data:
content = tomlkit.dumps(_data)
else:
# HACK: There is no good way to prepend a section to an existing
# TOML document, but there's no good way to copy non-structural
# content from one TOML document to another either. Modify the
# TOML content directly, and load the new in-memory document.
sep = "" if content.startswith("\n") else "\n"
content = plette.pipfiles.DEFAULT_SOURCE_TOML + sep + content
data = tomlkit.loads(content)
data = cls.ensure_package_sections(data)
instance = cls(data)
instance._data = dict(instance._data)
return instance
def __contains__(self, key):
# type: (Text) -> bool
if key not in self._data:
package_keys = self._data.get("packages", {}).keys()
dev_package_keys = self._data.get("dev-packages", {}).keys()
return any(key in pkg_list for pkg_list in (package_keys, dev_package_keys))
return True
def __getattribute__(self, key):
# type: (Text) -> Any
if key == "source":
return self._data[key]
return super(PipfileLoader, self).__getattribute__(key)
@attr.s(slots=True)
class Pipfile(object):
path = attr.ib(validator=is_path, type=Path)
projectfile = attr.ib(validator=is_projectfile, type=ProjectFile)
_pipfile = attr.ib(type=PipfileLoader)
_pyproject = attr.ib(
default=attr.Factory(tomlkit.document), type=tomlkit.toml_document.TOMLDocument
)
build_system = attr.ib(default=attr.Factory(dict), type=dict)
_requirements = attr.ib(default=attr.Factory(list), type=list)
_dev_requirements = attr.ib(default=attr.Factory(list), type=list)
@path.default
def _get_path(self):
# type: () -> Path
return Path(os.curdir).absolute()
@projectfile.default
def _get_projectfile(self):
# type: () -> ProjectFile
return self.load_projectfile(os.curdir, create=False)
@_pipfile.default
def _get_pipfile(self):
# type: () -> Union[plette.pipfiles.Pipfile, PipfileLoader]
return self.projectfile.model
@property
def root(self):
return self.path.parent
@property
def extended_keys(self):
return [
k
for k in itertools.product(
("packages", "dev-packages"), ("", "vcs", "editable")
)
]
@property
def pipfile(self):
# type: () -> Union[PipfileLoader, plette.pipfiles.Pipfile]
return self._pipfile
def get_deps(self, dev=False, only=True):
# type: (bool, bool) -> Dict[Text, Dict[Text, Union[List[Text], Text]]]
deps = {} # type: Dict[Text, Dict[Text, Union[List[Text], Text]]]
if dev:
deps.update(dict(self.pipfile._data.get("dev-packages", {})))
if only:
return deps
return tomlkit_value_to_python(
merge_items([deps, dict(self.pipfile._data.get("packages", {}))])
)
def get(self, k):
# type: (Text) -> Any
return self.__getitem__(k)
def __contains__(self, k):
# type: (Text) -> bool
check_pipfile = k in self.extended_keys or self.pipfile.__contains__(k)
if check_pipfile:
return True
return False
def __getitem__(self, k, *args, **kwargs):
# type: ignore
retval = None
pipfile = self._pipfile
section = None
pkg_type = None
try:
retval = pipfile[k]
except KeyError:
if "-" in k:
section, _, pkg_type = k.rpartition("-")
vals = getattr(pipfile.get(section, {}), "_data", {})
vals = tomlkit_value_to_python(vals)
if pkg_type == "vcs":
retval = {k: v for k, v in vals.items() if is_vcs(v)}
elif pkg_type == "editable":
retval = {k: v for k, v in vals.items() if is_editable(v)}
if retval is None:
raise
else:
retval = getattr(retval, "_data", retval)
return retval
def __getattr__(self, k, *args, **kwargs):
# type: ignore
retval = None
pipfile = super(Pipfile, self).__getattribute__("_pipfile")
try:
retval = super(Pipfile, self).__getattribute__(k)
except AttributeError:
retval = getattr(pipfile, k, None)
if retval is not None:
return retval
return super(Pipfile, self).__getattribute__(k, *args, **kwargs)
@property
def requires_python(self):
# type: () -> bool
return getattr(
self._pipfile.requires,
"python_version",
getattr(self._pipfile.requires, "python_full_version", None),
)
@property
def allow_prereleases(self):
# type: () -> bool
return self._pipfile.get("pipenv", {}).get("allow_prereleases", False)
@classmethod
def read_projectfile(cls, path):
# type: (Text) -> ProjectFile
"""Read the specified project file and provide an interface for writing/updating.
:param Text path: Path to the target file.
:return: A project file with the model and location for interaction
:rtype: :class:`~requirementslib.models.project.ProjectFile`
"""
pf = ProjectFile.read(path, PipfileLoader, invalid_ok=True)
return pf
@classmethod
def load_projectfile(cls, path, create=False):
# type: (Text, bool) -> ProjectFile
"""
Given a path, load or create the necessary pipfile.
:param Text path: Path to the project root or pipfile
:param bool create: Whether to create the pipfile if not found, defaults to True
:raises OSError: Thrown if the project root directory doesn't exist
:raises FileNotFoundError: Thrown if the pipfile doesn't exist and ``create=False``
:return: A project file instance for the supplied project
:rtype: :class:`~requirementslib.models.project.ProjectFile`
"""
if not path:
raise RuntimeError("Must pass a path to classmethod 'Pipfile.load'")
if not isinstance(path, Path):
path = Path(path).absolute()
pipfile_path = path if path.is_file() else path.joinpath("Pipfile")
project_path = pipfile_path.parent
if not project_path.exists():
raise FileNotFoundError("%s is not a valid project path!" % path)
elif not pipfile_path.exists() or not pipfile_path.is_file():
if not create:
raise RequirementError("%s is not a valid Pipfile" % pipfile_path)
return cls.read_projectfile(pipfile_path.as_posix())
@classmethod
def load(cls, path, create=False):
# type: (Text, bool) -> Pipfile
"""
Given a path, load or create the necessary pipfile.
:param Text path: Path to the project root or pipfile
:param bool create: Whether to create the pipfile if not found, defaults to True
:raises OSError: Thrown if the project root directory doesn't exist
:raises FileNotFoundError: Thrown if the pipfile doesn't exist and ``create=False``
:return: A pipfile instance pointing at the supplied project
:rtype:: class:`~requirementslib.models.pipfile.Pipfile`
"""
projectfile = cls.load_projectfile(path, create=create)
pipfile = projectfile.model
creation_args = {
"projectfile": projectfile,
"pipfile": pipfile,
"path": Path(projectfile.location),
}
return cls(**creation_args)
def write(self):
# type: () -> None
self.projectfile.model = copy.deepcopy(self._pipfile)
self.projectfile.write()
@property
def dev_packages(self):
# type: () -> List[Requirement]
return self.dev_requirements
@property
def packages(self):
# type: () -> List[Requirement]
return self.requirements
@property
def dev_requirements(self):
# type: () -> List[Requirement]
if not self._dev_requirements:
packages = tomlkit_value_to_python(self.pipfile.get("dev-packages", {}))
self._dev_requirements = [
Requirement.from_pipfile(k, v)
for k, v in packages.items()
if v is not None
]
return self._dev_requirements
@property
def requirements(self):
# type: () -> List[Requirement]
if not self._requirements:
packages = tomlkit_value_to_python(self.pipfile.get("packages", {}))
self._requirements = [
Requirement.from_pipfile(k, v)
for k, v in packages.items()
if v is not None
]
return self._requirements
def _read_pyproject(self):
# type: () -> None
pyproject = self.path.parent.joinpath("pyproject.toml")
if pyproject.exists():
self._pyproject = tomlkit.loads(pyproject.read_text())
build_system = self._pyproject.get("build-system", None)
            if build_system and not build_system.get("build-backend"):
build_system["build-backend"] = "setuptools.build_meta:__legacy__"
elif not build_system or not build_system.get("requires"):
build_system = {
"requires": ["setuptools>=40.8", "wheel"],
"build-backend": "setuptools.build_meta:__legacy__",
}
self.build_system = build_system
@property
def build_requires(self):
# type: () -> List[Text]
if not self.build_system:
self._read_pyproject()
return self.build_system.get("requires", [])
@property
def build_backend(self):
# type: () -> Text
pyproject = self.path.parent.joinpath("pyproject.toml")
if not self.build_system:
self._read_pyproject()
return self.build_system.get("build-backend", None)
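# A hedged usage sketch (editor's addition): load a Pipfile from a project
# directory (the path below is a placeholder) and walk its requirements.
# With ``create=False`` a missing Pipfile raises RequirementError, and
# ``Requirement.as_line()`` is assumed to render a pip-style requirement line.
if __name__ == "__main__":
    _pipfile = Pipfile.load("/path/to/project", create=False)
    for _req in _pipfile.requirements + _pipfile.dev_requirements:
        print(_req.as_line())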
| {
"content_hash": "1f5f99be796e29d91764d89ad25f8090",
"timestamp": "",
"source": "github",
"line_count": 382,
"max_line_length": 112,
"avg_line_length": 37.15706806282723,
"alnum_prop": 0.5935606594335635,
"repo_name": "kennethreitz/pipenv",
"id": "9c0aea4ea35ea59a85067152e2ec11f8cc6f116b",
"size": "14219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipenv/vendor/requirementslib/models/pipfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "202"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "2588085"
},
{
"name": "Roff",
"bytes": "40754"
}
],
"symlink_target": ""
} |
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import render_to_response, redirect, HttpResponse, Http404
from django.template import RequestContext
from django.contrib.auth import authenticate, logout, login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.contrib.humanize.templatetags.humanize import intcomma
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from patient.models import PatientInformation, Guardian, MedicalHistory, PastMedicalHistory, PresentMedicalHistory, \
FamilyMedicalHistory, MenstrualHistory, ObstetricHistory, GynaecologicalHistory, ImmunizationHistory, \
Routinecheckup, LaboratoryTest, UltrasoundScanning, AdditionalPatientInformation, Prescription, PreviousSurgery
FREQUENCY = ['AL', 'US', 'OF', 'SO', 'SE', 'RA']
def check_additional_patient_information(patient):
probs = []
try:
info = AdditionalPatientInformation.objects.get(patient=patient)
if info.cigarettes in FREQUENCY:
probs.append("Cigarettes - %s" % info.get_cigarettes_display())
if info.alcohol in FREQUENCY:
probs.append("Alcohol - %s" % info.get_alcohol_display())
if info.other_harmful_substances in FREQUENCY:
probs.append("Other harmful subtances - %s" % info.get_other_harmful_substances_display())
except:
pass
return probs
def check_family_medical_history(patient):
probs = []
try:
fmh = FamilyMedicalHistory.objects.get(patient=patient)
probs_text = []
if fmh.tuberculosis:
probs_text.append('tuberculosis')
if fmh.heart_disease:
probs_text.append('heart disease')
if fmh.chronical_renal_disease:
probs_text.append('chronical renal disease')
if fmh.epilepsy:
probs_text.append('epilepsy')
if fmh.diabetes_melitus:
probs_text.append('diabetes melitus')
if fmh.sexually_transmitted_infection:
probs_text.append('sexually transmitted infection')
if fmh.hepatitis:
probs_text.append('hepatitis')
if fmh.malaria:
probs_text.append('malaria')
if fmh.sickle_cell_trait:
probs_text.append('sickle cell trait')
if fmh.rhesus_d_antibodies:
probs_text.append('rhesus d antibodies')
if fmh.HIV_status_if_known:
probs_text.append('HIV')
if fmh.kidney_disease:
probs_text.append('kidney disease')
if fmh.liver_problems:
probs_text.append('liver problems')
if fmh.hypertension:
probs_text.append('hypertension')
if fmh.urinary_tract_surgeries:
probs_text.append('urinary tract surgeries')
if fmh.seizures:
probs_text.append('seizures')
if fmh.pelvic_backinjuries:
probs_text.append('pelvic back injuries')
if fmh.haemorrhage:
probs_text.append('haemorrhage')
if fmh.others:
probs_text.append(fmh.others)
if probs_text:
probs.append('Family medical history - ' + ', '.join(probs_text))
except:
pass
return probs
def check_past_medical_history(patient):
probs = []
try:
pmh = PastMedicalHistory.objects.get(patient=patient)
probs_text = []
if pmh.tuberculosis:
probs_text.append('tuberculosis')
if pmh.heart_disease:
probs_text.append('heart disease')
if pmh.chronical_renal_disease:
probs_text.append('chronical renal disease')
if pmh.epilepsy:
probs_text.append('epilepsy')
if pmh.diabetes_melitus:
probs_text.append('diabetes melitus')
if pmh.sexually_transmitted_infection:
probs_text.append('sexually transmitted infection')
if pmh.hepatitis:
probs_text.append('hepatitis')
if pmh.malaria:
probs_text.append('malaria')
if pmh.sickle_cell_trait:
probs_text.append('sickle cell trait')
if pmh.rhesus_d_antibodies:
probs_text.append('rhesus d antibodies')
if pmh.HIV_status_if_known:
probs_text.append('HIV')
if pmh.kidney_disease:
probs_text.append('kidney disease')
if pmh.liver_problems:
probs_text.append('liver problems')
if pmh.hypertension:
probs_text.append('hypertension')
if pmh.urinary_tract_surgeries:
probs_text.append('urinary tract surgeries')
if pmh.seizures:
probs_text.append('seizures')
if pmh.pelvic_backinjuries:
probs_text.append('pelvic back injuries')
if pmh.haemorrhage:
probs_text.append('haemorrhage')
if pmh.others:
probs_text.append(pmh.others)
if probs_text:
probs.append('Past medical history - ' + ', '.join(probs_text))
except:
pass
return probs
def check_obstetric_history(patient):
probs = []
try:
obh = ObstetricHistory.objects.get(patient=patient)
if obh.check_if_you_have_been_miscarriages:
probs.append('miscarriages')
except:
pass
return probs
def check_present_medical_history(patient):
probs = []
try:
pmh = PresentMedicalHistory.objects.get(patient=patient)
probs_text = []
if pmh.tuberculosis:
probs_text.append('tuberculosis')
if pmh.heart_disease:
probs_text.append('heart disease')
if pmh.chronical_renal_disease:
probs_text.append('chronical renal disease')
if pmh.epilepsy:
probs_text.append('epilepsy')
if pmh.diabetes_melitus:
probs_text.append('diabetes melitus')
if pmh.sexually_transmitted_infection:
probs_text.append('sexually transmitted infection')
if pmh.hepatitis:
probs_text.append('hepatitis')
if pmh.malaria:
probs_text.append('malaria')
if pmh.sickle_cell_trait:
probs_text.append('sickle cell trait')
if pmh.rhesus_d_antibodies:
probs_text.append('rhesus d antibodies')
if pmh.HIV_status_if_known:
probs_text.append('HIV')
if pmh.kidney_disease:
probs_text.append('kidney disease')
if pmh.liver_problems:
probs_text.append('liver problems')
if pmh.hypertension:
probs_text.append('hypertension')
if pmh.urinary_tract_surgeries:
probs_text.append('urinary tract surgeries')
if pmh.seizures:
probs_text.append('seizures')
if pmh.pelvic_backinjuries:
probs_text.append('pelvic back injuries')
if pmh.haemorrhage:
probs_text.append('haemorrhage')
if pmh.others:
probs_text.append(pmh.others)
if probs_text:
probs.append('Present medical history - ' + ', '.join(probs_text))
except:
pass
return probs
def check_gynaecological_history(patient):
probs = []
try:
gyh = GynaecologicalHistory.objects.get(patient=patient)
if gyh.result_pap_smear == 'AB':
probs.append('Gynaecological history - Abnormal')
except:
pass
return probs
def check_previous_surgery(patient):
probs = []
try:
prs = PreviousSurgery.objects.get(patient=patient)
probs_text = []
if prs.fibrocystic_breasts:
probs_text.append('fibrocystic breasts')
if prs.ovarian_cysts:
probs_text.append('ovarian cysts')
if prs.endometriosis:
probs_text.append('endometriosis')
if prs.uterine_fibroids:
probs_text.append('uterine fibroids')
if prs.others_please_state:
probs_text.append(prs.others_please_state)
if probs_text:
probs.append('Previous surgery - ' + ', '.join(probs_text))
except:
pass
return probs
def check_laboratary_test(patient):
probs = []
try:
labs = LaboratoryTest.objects.filter(patient=patient)
for lab in labs:
if lab.urinalysis == 'AB' and not 'Urinalysis > 0.3g/24h' in probs:
probs.append('Urinalysis > 0.3g/24h')
if lab.hemoglobin == 'A' and not 'Haemoglobin 9-10' in probs:
probs.append('Haemoglobin 9-10')
elif lab.hemoglobin == 'B' and not 'Haemoglobin 7-8 g/dl' in probs:
probs.append('Haemoglobin 7-8 g/dl')
elif lab.hemoglobin == 'C' and not 'Haemoglobin <7 g/dl' in probs:
probs.append('Haemoglobin <7 g/dl')
except:
pass
return probs
@login_required
def notification(request):
problems = []
patients = PatientInformation.objects.all()
for patient in patients:
probs1 = check_additional_patient_information(patient)
probs2 = check_family_medical_history(patient)
probs3 = check_past_medical_history(patient)
probs4 = check_obstetric_history(patient)
probs5 = check_present_medical_history(patient)
probs6 = check_gynaecological_history(patient)
probs7 = check_previous_surgery(patient)
probs8 = check_laboratary_test(patient)
if probs1 or probs2 or probs3 or probs4 or probs5 or probs6 or probs7 or probs8:
all_probs = probs1 + probs2 + probs3 + probs4 + probs5 + probs6 + probs7 + probs8
problems.append([patient, all_probs])
context = {
'problems': problems,
'selected_page': 'notification',
}
return render_to_response('notification/notification.html', context, RequestContext(request))
| {
"content_hash": "96406fa4c44e4dd6386d20302826c9ec",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 117,
"avg_line_length": 38.29007633587786,
"alnum_prop": 0.6275917065390749,
"repo_name": "aazhbd/medical_info01",
"id": "a4e4c89dec9653a8e8bd95fb9f76d2aa099fa560",
"size": "10057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notification/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "54905"
},
{
"name": "HTML",
"bytes": "139815"
},
{
"name": "JavaScript",
"bytes": "1241861"
},
{
"name": "Python",
"bytes": "1336885"
},
{
"name": "Shell",
"bytes": "156"
}
],
"symlink_target": ""
} |
import unittest
from pyloc.db import Results
from pyloc.util import loc
class Tests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_python(self):
t1_expected = {'comment': 2, 'code': 2, 'total': 5, 'docstring': 2, 'empty': 1}
t2_expected = {'comment': 1, 'code': 3, 'total': 5, 'docstring': 1, 'empty': 1}
r = Results()
loc(['data/test1.py'], r)
self.assert_(r.counts_by_type('Python') == t1_expected)
r = Results()
loc(['data/test2.py'], r)
self.assert_(r.counts_by_type('Python') == t2_expected)
def test_python_dir(self):
expected = { 'comment' : 3,
'code' : 5,
'total' : 10,
'docstring' : 3,
'empty' : 2 }
r = Results()
loc(['data'], r)
self.assert_(r.counts_by_type('Python') == expected)
def suite():
return unittest.TestLoader().loadTestsFromTestCase(Tests)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "5283a009bd7b6ff1a512b4e07520aab2",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 87,
"avg_line_length": 26.875,
"alnum_prop": 0.5106976744186047,
"repo_name": "abingham/pyloc",
"id": "a59fd03f298f2c856a351192d121d8a93a08c15d",
"size": "1075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unittests/pyloc_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33596"
}
],
"symlink_target": ""
} |
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='mmtr',
version='0.2.0',
description='RabbitMQ based simple task runner',
long_description=readme + '\n\n' + history,
author='Trenton Broughton',
author_email='[email protected]',
url='https://github.com/trenton42/mmtr',
packages=[
'mmtr',
],
package_dir={'mmtr': 'mmtr'},
include_package_data=True,
install_requires=[
'pika',
'wrapt'
],
license="BSD",
zip_safe=False,
keywords='mmtr',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
test_suite='tests',
)
| {
"content_hash": "64b591c8a1d78684ef7f7587c07d4758",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 66,
"avg_line_length": 25.897959183673468,
"alnum_prop": 0.5902285263987391,
"repo_name": "trenton42/mmtr",
"id": "424fb6993e2a5ee66d7e180b01b5be55038f8c91",
"size": "1316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "16931"
},
{
"name": "Shell",
"bytes": "6466"
}
],
"symlink_target": ""
} |
'''
Created on 11.03.15
@author = mharder
'''
from termcolor import colored
import syslog
def get_log_color_and_background(level):
log_color = 'green'
log_background = None
if level == syslog.LOG_CRIT:
log_color = 'white'
log_background = 'on_red'
elif level == syslog.LOG_ERR:
log_color = 'red'
elif level == syslog.LOG_WARNING:
log_color = 'yellow'
elif level == syslog.LOG_NOTICE:
log_color = 'green'
elif level == syslog.LOG_INFO:
log_color = 'green'
elif level == syslog.LOG_DEBUG:
log_color = 'blue'
return log_color, log_background
def tail_format(fields, colorful=False):
return formatter(fields, " ", colorful)
def dump_format(fields, colorful=False):
return formatter(fields, ";", colorful)
def formatter(fields, separator, colorful):
def format(entry):
timestamp = entry.timestamp.to('local').format("YYYY-MM-DD HH:mm:ss.SS")
        msg = timestamp + separator + separator.join(map(lambda f: u"'{val}'"\
.format(val=entry.message_dict.get(f, "")), fields))
if colorful:
log_color, log_background = get_log_color_and_background(entry.level)
return colored(msg, log_color, log_background)
return msg
return format
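# A self-contained usage sketch (editor's addition). The formatters expect
# Graylog message entries; the stub below only mimics the three attributes the
# formatter touches (an arrow-like ``timestamp``, ``message_dict`` and
# ``level``), so it is illustrative rather than part of bonfire.
if __name__ == '__main__':
    class _StubTimestamp(object):
        def to(self, timezone):
            return self
        def format(self, fmt):
            return "2015-03-11 12:00:00.00"
    class _StubEntry(object):
        timestamp = _StubTimestamp()
        message_dict = {'source': 'web-1', 'message': 'disk almost full'}
        level = syslog.LOG_WARNING
    line_formatter = tail_format(['source', 'message'], colorful=True)
    print(line_formatter(_StubEntry()))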
| {
"content_hash": "bf32e757076efb071328959b6fcc5ad8",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 81,
"avg_line_length": 28.977777777777778,
"alnum_prop": 0.6234662576687117,
"repo_name": "blue-yonder/bonfire",
"id": "f345592d19a3e4c7a45e1c8f7455df07f41d4ccc",
"size": "1304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bonfire/formats.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "61975"
}
],
"symlink_target": ""
} |
__program__ = 'PseudoBusy'
__version__ = '1.0.0'
__description__ = 'Terminal vomit'
__author__ = 'Brandon Dreager'
__author_email__ ='[email protected]'
__copyright__ = 'Copyright (c) 2016 Brandon Dreager'
__license__ = 'MIT'
__website__ = 'https://github.com/Regaerd/PseudoBusy'
import os
from printer import Printer, Random
from argparse import ArgumentParser
class PseudoBusy():
MAX_PATIENCE = 6
MIN_FILE_LENGTH = 1
MAX_FILE_LENGTH = 5000
MAX_FILE_SIZE = 100000000 # 100 MB
MAX_CHARS_PER_LINE = 1000 # avoids heavily compressed files
def __init__(self, args=None):
self.args = args
self.rand = Random()
self.printer = Printer(shift_in_chance=25)
self.target_dir = os.path.expanduser('~/')
self.hide_target = True
self.running = False
def start(self):
print (self.printer.CLEAR)
self.running = True
self.printer.write('Generating file list... ', speed=1)
# TODO whitelist and blacklist
self.files = [os.path.join(path, filename) for path, dirs, files in os.walk(self.target_dir) for filename in files]
self.original_num_files = len(self.files)
self.printer.write('Found {} files'.format(self.original_num_files))
if self.args.reject_first:
self.printer.write('\nChecking all files... ', speed=1)
for i in xrange(self.original_num_files - 1, -1, -1):
self.pick_file(i)
self.printer.write('Rejected {} files'.format(self.original_num_files - len(self.files)))
self.run()
def run(self):
while self.running:
try:
self.printer.reset()
file, num_lines, size = self.pick_file()
if self.args.typing_speed: self.printer.override_speed = self.args.typing_speed
self.log(1, "Reading: "+file.replace(self.target_dir, '') if self.hide_target else file)
self.log(2, "\nBytes:{}, Lines:{}, Rejects:{}\n".format(size,num_lines,self.original_num_files - len(self.files)))
self.print_file(file)
except KeyboardInterrupt:
self.running = False
def print_file(self, infile):
try:
with open(infile) as file:
lines = file.readlines()
patience = self.MAX_PATIENCE
for line in lines:
line = line.decode('ascii') # hides garbage
if line.strip():
patience = self.MAX_PATIENCE
else:
patience -= 1
if patience <= 0:
break;
if not self.rand.int(0, 10): # type a random string as a 'mistake'
num = self.rand.int(10, 25)
self.printer.write(self.rand.string(num))
self.printer.backspace_delete(num)
self.printer.write(line)
except (Exception) as err:
self.log(3, str(err)+'\n')
def pick_file(self, index=None):
file = num_lines = size = None
while not file:
try:
                file = self.files[index] if index is not None else self.rand.choice(self.files)
size = os.path.getsize(file)
if size >= self.MAX_FILE_SIZE: raise Exception('File too large')
if not os.access(file, os.R_OK): raise Exception('No read access')
with open(file, 'r') as ins:
ins.readline().decode('ascii') # for catching junk we don't care to see
num_lines = sum(1 for _ in ins) + 1
if size / num_lines >= self.MAX_CHARS_PER_LINE: raise Exception('Too many characters per line')
if num_lines <= self.MIN_FILE_LENGTH: raise Exception('Too few lines') # for empty and single line files
if num_lines >= self.MAX_FILE_LENGTH: raise Exception('Too many lines') # for massive files (probably not code)
except (Exception) as err:
self.log(3, str(err)+'\n')
self.files.remove(file) # we don't need to see rejects again
file = None
                if index is not None: break
return (file, num_lines, size)
def log(self, level, string):
if (level <= self.args.verbose): self.printer.write(string)
def init_args():
parser = ArgumentParser(prog=__program__, description=__description__)
parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
parser.add_argument('-v', '--verbose-level', type=int, default=1, choices=range(4),
help='set verbose output level (default: %(default)s)', dest='verbose')
parser.add_argument('-s', '--typing-speed-override', type=float, default=0,
help='overrides typing speed', dest='typing_speed')
parser.add_argument('-r', '--reject-first', action='store_true', default=False,
help='finds all rejects before starting', dest='reject_first')
return parser.parse_args()
args = init_args()
def main():
try:
PseudoBusy(args=args).start()
except: pass
finally: print (Printer.CLEAR + Printer.RESET)
if __name__ == '__main__': main()
| {
"content_hash": "d3e429d71b98fbd955496f69de7b65f3",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 130,
"avg_line_length": 41.39370078740158,
"alnum_prop": 0.5683850104622408,
"repo_name": "Regaerd/PseudoBusy",
"id": "714a8fa7c4a36fd2723b4e25e503e0c5a6a50acb",
"size": "5304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pseudobusy.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13549"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import json, logging, os, pprint
from django.conf import settings
from django.contrib.auth import authenticate
from django.http import HttpResponse
from iip_processing_app.models import Status
log = logging.getLogger(__name__)
class UserGrabber(object):
""" Grabs user object from shib or local-dev.
Helper for views.view_processing() """
def __init__( self ):
self.LEGIT_VIEWER_USER = unicode( os.environ['IIP_PRC__LEGIT_QUEUE_VIEWER_USER'] )
self.LEGIT_VIEWER_PASSWORD = unicode( os.environ['IIP_PRC__LEGIT_QUEUE_VIEWER_PASSWORD'] )
def get_user( self, meta_dct ):
""" Returns user object.
Called by views.view_processing() """
shib_checker = ShibChecker()
if shib_checker.validate_user( meta_dct ):
log.debug( 'validated via shib' )
user = self.grab_good_user()
        elif meta_dct['SERVER_NAME'] == '127.0.0.1' and settings.DEBUG is True:
log.debug( 'validated via localdev' )
user = self.grab_good_user()
else:
log.debug( 'not validated' )
user = None
return user
def grab_good_user( self ):
""" Grabs generic authenticated user.
Called by get_user() """
user = authenticate( username=self.LEGIT_VIEWER_USER, password=self.LEGIT_VIEWER_PASSWORD )
log.debug( 'user authenticated' )
return user
## end class UserGrabber()
class ShibChecker( object ):
""" Checks shib for valid eppn or group.
Helper for views.view_processing() """
def __init__( self ):
self.LEGIT_VIEWER_GROUPER_GROUPS = json.loads( os.environ['IIP_PRC__LEGIT_QUEUE_VIEWER_GROUPS_JSON'] )
self.LEGIT_VIEWER_EPPNS = json.loads( os.environ['IIP_PRC__LEGIT_QUEUE_VIEWER_EPPNS_JSON'] )
def validate_user( self, meta_dct ):
""" Checks shib info.
Called by UserGrabber.get_user() """
return_val = False
shib_dct = self.grab_shib_info( meta_dct )
if shib_dct:
if self.check_group( shib_dct['member_of'] ):
return_val = True
elif self.check_eppn( shib_dct['eppn'] ):
return_val = True
log.debug( 'return_val, `{}`'.format(return_val) )
return return_val
def grab_shib_info( self, meta_dct ):
""" Grabs shib values from http-header.
Called by: validate_user() """
shib_dct = None
if 'Shibboleth-eppn' in meta_dct:
shib_dct = self.grab_shib_from_meta( meta_dct )
log.debug( 'shib_dct, ```{}```'.format(pprint.pformat(shib_dct)) )
return shib_dct
def check_group( self, user_memberships ):
""" Checks user's grouper groups.
Called by validate_user() """
return_val = False
for group in self.LEGIT_VIEWER_GROUPER_GROUPS:
if group in user_memberships:
return_val = True
break
log.debug( 'check_group() return_val, `{}`'.format(return_val) )
return return_val
def check_eppn( self, eppn ):
""" Checks user's eppn.
Called by validate_user() """
return_val = False
if eppn in self.LEGIT_VIEWER_EPPNS:
return_val = True
log.debug( 'check_eppn() return_val, `{}`'.format(return_val) )
return return_val
def grab_shib_from_meta( self, meta_dct ):
""" Extracts shib values from http-header.
Called by grab_shib_info() """
shib_dct = {
# 'brown_status': meta_dct.get( 'Shibboleth-brownStatus', '' ), # eg. 'active'
# 'brown_type': meta_dct.get( 'Shibboleth-brownType', '' ), # eg. 'Staff'
# 'department': meta_dct.get( 'Shibboleth-department', '' ),
# 'edu_person_primary_affiliation': meta_dct.get( 'Shibboleth-eduPersonPrimaryAffiliation', '' ), # eg. 'staff'
# 'email': meta_dct.get( 'Shibboleth-mail', '' ).lower(),
'eppn': meta_dct.get( 'Shibboleth-eppn', '' ),
# 'id_net': meta_dct.get( 'Shibboleth-brownNetId', '' ),
# 'id_short': meta_dct.get( 'Shibboleth-brownShortId', '' ),
'member_of': sorted( meta_dct.get('Shibboleth-isMemberOf', '').split(';') ), # only dct element that's not a unicode string
# 'name_first': meta_dct.get( 'Shibboleth-givenName', '' ),
# 'name_last': meta_dct.get( 'Shibboleth-sn', '' ),
# 'patron_barcode': meta_dct.get( 'Shibboleth-brownBarCode', '' ),
# 'phone': meta_dct.get( 'Shibboleth-phone', 'unavailable' ), # valid?
# 'title': meta_dct.get( 'Shibboleth-title', '' ),
}
return shib_dct
## end class ShibChecker()
class ProcessStatusRecorder( object ):
""" Contains functions for recording processed-status.
Helper for views.view_processing() """
def __init__( self ):
""" Settings. """
pass
def check_for_data( self, request_body ):
""" Returns any multiple-enqueue data and any single-update data.
Called by views.update_processing_status() """
data_dct = self.grab_data_dct( request_body )
to_process_dct = self.grab_to_process_dct( data_dct )
single_update_dct = self.grab_single_update_dct( data_dct )
return ( to_process_dct, single_update_dct )
def grab_data_dct( self, request_body ):
""" Grabs dct info from request.body.
Called by check_for_data() """
try:
data_dct = json.loads( request_body )
except:
data_dct = {}
log.debug( 'data_dct, ```{}```'.format(pprint.pformat(data_dct)) )
return data_dct
def grab_to_process_dct( self, data_dct ):
""" Grabs possible enqueue-these data.
Called by check_for_data() """
try:
to_process_dct = data_dct['to_process_dct']
except:
to_process_dct = {}
log.debug( 'to_process_dct, ```{}```'.format(pprint.pformat(to_process_dct)) )
return to_process_dct
def grab_single_update_dct( self, data_dct ):
""" Grabs possible single-item data.
Called by check_for_data() """
try:
single_update_dct = {
'inscription_id': data_dct['inscription_id'],
'status_summary': data_dct['status_summary'],
'status_detail': data_dct['status_detail'],
}
except:
single_update_dct = {}
log.debug( 'single_update_dct, ```{}```'.format(pprint.pformat(single_update_dct)) )
return single_update_dct
def handle_enqueues( self, to_process_dct ):
""" Adds enqueu info to processing-status db.
Called by views.update_processing_status() """
for inscription_id in to_process_dct.get( 'files_removed', [] ):
self.update_processing_status( inscription_id=inscription_id, new_status_summary='queued for deletion', new_status_detail='' )
for inscription_id in to_process_dct.get( 'files_updated', [] ):
self.update_processing_status( inscription_id=inscription_id, new_status_summary='queued for update', new_status_detail='' )
resp = HttpResponse( '200 / OK' )
return resp
def update_processing_status( self, inscription_id, new_status_summary, new_status_detail ):
""" Updates tracker that entry is queued for deletion.
Called by handle_enqueues(), and by handle_single_update() """
try:
process_status = Status.objects.get( inscription_id=inscription_id )
except Exception:
log.debug( 'creating new Status instance' )
process_status = Status( inscription_id=inscription_id )
process_status.status_summary = new_status_summary
process_status.status_detail = new_status_detail
process_status.save()
return
def handle_single_update( self, single_update_dct ):
""" Updates single entry processed status.
Called by views.update_processing_status() """
( inscription_id, new_status_summary, new_status_detail ) = (
single_update_dct['inscription_id'], single_update_dct['status_summary'], single_update_dct['status_detail'] )
self.update_processing_status(
inscription_id=inscription_id, new_status_summary=new_status_summary, new_status_detail=new_status_detail )
resp = HttpResponse( '200 / OK' )
return resp
## end class ProcessStatusRecorder()
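# Usage sketch (editor's addition): shows the JSON payload shape that
# views.update_processing_status() is expected to hand to the recorder. The
# payload is hypothetical, running it needs a configured Django environment
# (the module-level imports require it), and it stops before the database
# writes done by handle_enqueues() / handle_single_update().
if __name__ == '__main__':
    recorder = ProcessStatusRecorder()
    body = json.dumps( {'inscription_id': 'abc0001', 'status_summary': 'processed', 'status_detail': ''} )
    ( to_process, single_update ) = recorder.check_for_data( body )
    print( 'to_process, ```{}```; single_update, ```{}```'.format(to_process, single_update) )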
| {
"content_hash": "98525db4a0c94c418f1fd0afbf9359ec",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 138,
"avg_line_length": 42.38423645320197,
"alnum_prop": 0.5890283589028359,
"repo_name": "birkin/iip_processing_project",
"id": "7d9b19bee217d5e1dc3d941caf9517ed60bed56d",
"size": "8629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iip_processing_app/lib/process_viewer_helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3510"
},
{
"name": "Python",
"bytes": "79612"
}
],
"symlink_target": ""
} |
"""
Pelix basic HTTP service test module.
:author: Thomas Calmant
"""
from pelix.framework import FrameworkFactory
from tests.http.gen_cert import make_certs
from tests.http.test_basic import install_bundle, install_ipopo
import logging
import os
import shutil
import tempfile
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
# Python 3
import http.client as httplib
except (ImportError, AttributeError):
# Python 2 or IronPython
import httplib
# HTTP service constants
import pelix.http as http
# Check if we can run the tests
try:
from ssl import SSLContext, create_default_context
except ImportError:
raise unittest.SkipTest("SSLContext not supported")
# ------------------------------------------------------------------------------
__version__ = (1, 0, 0)
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 8043
PASSWORD = "test_password"
TMP_DIR = tempfile.mkdtemp(prefix="ipopo-tests-https")
# ------------------------------------------------------------------------------
def get_file(name):
"""
Returns the path to the given certificate file
:param name: File name
:return: Full path to the file
"""
if name and not os.path.exists(name):
name = os.path.join(TMP_DIR, name)
return name
def instantiate_server(ipopo_svc, cert_file, key_file, password=None,
address=DEFAULT_HOST, port=DEFAULT_PORT):
"""
Instantiates a basic server component
"""
cert_file = get_file(cert_file)
key_file = get_file(key_file)
return ipopo_svc.instantiate(http.FACTORY_HTTP_BASIC,
"test-https-service",
{http.HTTP_SERVICE_ADDRESS: address,
http.HTTP_SERVICE_PORT: port,
http.HTTPS_CERT_FILE: cert_file,
http.HTTPS_KEY_FILE: key_file,
http.HTTPS_KEY_PASSWORD: password})
def kill_server(ipopo_svc):
"""
Kills the basic server component
"""
ipopo_svc.kill("test-https-service")
def get_https_page(host=DEFAULT_HOST, port=DEFAULT_PORT,
uri="/", method="GET", headers=None, content=None,
only_code=True):
"""
Retrieves the result of an HTTP request
:param host: Server host name
:param port: Server port
:param uri: Request URI
:param method: Request HTTP method (GET, POST, ...)
:param headers: Request headers
:param content: POST request content
:param only_code: If True, only the code is returned
:return: A (code, content) tuple
"""
# Setup the certificate authority
ctx = create_default_context()
ctx.load_verify_locations(get_file("ca.crt"), get_file("ca.key"))
# Don't check the host name, as it depends on the test machine
ctx.check_hostname = False
conn = httplib.HTTPSConnection(host, port, context=ctx)
conn.connect()
conn.request(method, uri, content, headers or {})
result = conn.getresponse()
data = result.read()
conn.close()
if only_code:
return result.status
return result.status, data
# ------------------------------------------------------------------------------
class BasicHTTPSTest(unittest.TestCase):
"""
Tests of the basic HTTPS service
"""
@classmethod
def setUpClass(cls):
"""
Setup the certificates
"""
make_certs(TMP_DIR, PASSWORD)
@classmethod
def tearDownClass(cls):
"""
Clears the certificates
"""
shutil.rmtree(TMP_DIR)
def setUp(self):
"""
Sets up the test environment
"""
# Start a framework
self.framework = FrameworkFactory.get_framework()
self.framework.start()
# Install iPOPO
self.ipopo = install_ipopo(self.framework)
# Install HTTP service
install_bundle(self.framework, "pelix.http.basic")
# Install test bundle
self.servlets = install_bundle(self.framework,
"tests.http.servlets_bundle")
def tearDown(self):
"""
Cleans up the test environment
"""
# Stop the framework
FrameworkFactory.delete_framework()
self.framework = None
def testSimpleCertificate(self):
"""
Tests the use of a certificate without password
"""
instantiate_server(
self.ipopo, cert_file="server.crt", key_file="server.key")
self.assertEqual(get_https_page(only_code=True), 404,
"Received something other than a 404")
def testPasswordCertificate(self):
"""
Tests the use of a certificate with a password
"""
instantiate_server(
self.ipopo, cert_file="server_enc.crt",
key_file="server_enc.key", password=PASSWORD)
self.assertEqual(get_https_page(only_code=True), 404,
"Received something other than a 404")
# ------------------------------------------------------------------------------
if __name__ == "__main__":
# Set logging level
logging.basicConfig(level=logging.DEBUG)
unittest.main()
| {
"content_hash": "e85cc83885fe460ac9fb26608fb094e3",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 80,
"avg_line_length": 27.153846153846153,
"alnum_prop": 0.5709159584513692,
"repo_name": "ahmadshahwan/ipopo",
"id": "fa981f39681d841ff11c7640e5c9ef2e18dfd64c",
"size": "5349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/http/test_basic_ssl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1557057"
},
{
"name": "Shell",
"bytes": "2803"
}
],
"symlink_target": ""
} |
import platform
if platform.python_version() < '2.7':
unittest = __import__('unittest2')
else:
import unittest
import mse
from mse.base import Column, Index, IndexColumn
from mse.cli import Cli, CliSQLParseException
from docopt import docopt, DocoptExit
class TestCli(unittest.TestCase):
def test_fail_no_param(self):
self.assertRaises(DocoptExit, docopt, mse.cli.__doc__)
def test_fail_simple_selection(self):
self.assertRaises(DocoptExit, docopt, mse.cli.__doc__, ['dummy'])
self.assertRaises(DocoptExit, docopt, mse.cli.__doc__, ['file'])
def test_dummy(self):
args = docopt(mse.cli.__doc__, ['dummy', '-c', 'id INT', '-i', 'PRIMARY KEY (id)'])
parsed = Cli(args)
self.assertEqual("dummy", parsed.table.name)
self.assertEqual(Column('id', 'INT'), parsed.table.columns.get('id'))
self.assertEqual(Index('primary', [IndexColumn('id')], is_primary=True), parsed.table.indexes.get('primary'))
def test_dummy_fail_bad_definition(self):
args1 = docopt(mse.cli.__doc__, ['dummy', '-c', 'id'])
self.assertRaises(CliSQLParseException, Cli, args1)
args2 = docopt(mse.cli.__doc__, ['dummy', '-c', 'id INT', '-i', 'id'])
self.assertRaises(CliSQLParseException, Cli, args2)
| {
"content_hash": "b3add02919326d644776b6630f4e2071",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 117,
"avg_line_length": 35,
"alnum_prop": 0.6416988416988417,
"repo_name": "frail/mysql-size-estimator",
"id": "bad866c86239057073caa23977e15a93936edae8",
"size": "1295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55159"
}
],
"symlink_target": ""
} |
"""Provides mock objects for powl.action."""
class MockAction(object):
"""
Provides a mock object for powl.action.Action.
"""
def __init__(self):
self._do_called = False
self._do_string = ""
self._do_date = ""
def do_called_with(self, string, date):
return (self._do_called and
self._do_string == string and
self._do_date == date)
# powl.action.Action methods.
def do(self, string, date):
self._do_called = True
self._do_string = string
self._do_date = date
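# A minimal usage sketch (editor's addition) of the typical test pattern: hand
# the mock to the code under test, then assert that ``do`` was invoked with the
# expected arguments (the strings below are placeholders).
if __name__ == "__main__":
    mock = MockAction()
    mock.do("some action string", "2015-03-11")
    assert mock.do_called_with("some action string", "2015-03-11")
    assert not mock.do_called_with("another string", "2015-03-11")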
| {
"content_hash": "309b605c3ac6d42f175f6a83d34792da",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 50,
"avg_line_length": 25.17391304347826,
"alnum_prop": 0.5457685664939551,
"repo_name": "adammansfield/Powl",
"id": "bbf2dd475e10108e822e858ae668b8cd676cc441",
"size": "579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/mock/action.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "103726"
},
{
"name": "Shell",
"bytes": "488"
}
],
"symlink_target": ""
} |
"""
Zip splitter.
Status: can read most important headers
Authors: Christophe Gisquet and Victor Stinner
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, ParserError,
Bit, Bits, Enum,
TimeDateMSDOS32, SubFile,
UInt8, UInt16, UInt32, UInt64,
String, PascalString16,
RawBytes)
from hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal
from hachoir_core.error import HACHOIR_ERRORS
from hachoir_core.tools import makeUnicode
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_parser.common.deflate import Deflate
MAX_FILESIZE = 1000 * 1024 * 1024
COMPRESSION_DEFLATE = 8
COMPRESSION_METHOD = {
0: u"no compression",
1: u"Shrunk",
2: u"Reduced (factor 1)",
3: u"Reduced (factor 2)",
4: u"Reduced (factor 3)",
5: u"Reduced (factor 4)",
6: u"Imploded",
7: u"Tokenizing",
8: u"Deflate",
9: u"Deflate64",
10: u"PKWARE Imploding",
11: u"Reserved by PKWARE",
12: u"File is compressed using BZIP2 algorithm",
13: u"Reserved by PKWARE",
14: u"LZMA (EFS)",
15: u"Reserved by PKWARE",
16: u"Reserved by PKWARE",
17: u"Reserved by PKWARE",
18: u"File is compressed using IBM TERSE (new)",
19: u"IBM LZ77 z Architecture (PFS)",
98: u"PPMd version I, Rev 1",
}
def ZipRevision(field):
return "%u.%u" % divmod(field.value, 10)
class ZipVersion(FieldSet):
static_size = 16
HOST_OS = {
0: u"FAT file system (DOS, OS/2, NT)",
1: u"Amiga",
2: u"VMS (VAX or Alpha AXP)",
3: u"Unix",
4: u"VM/CMS",
5: u"Atari",
6: u"HPFS file system (OS/2, NT 3.x)",
7: u"Macintosh",
8: u"Z-System",
9: u"CP/M",
10: u"TOPS-20",
11: u"NTFS file system (NT)",
12: u"SMS/QDOS",
13: u"Acorn RISC OS",
14: u"VFAT file system (Win95, NT)",
15: u"MVS",
16: u"BeOS (BeBox or PowerMac)",
17: u"Tandem",
}
def createFields(self):
yield textHandler(UInt8(self, "zip_version", "ZIP version"), ZipRevision)
yield Enum(UInt8(self, "host_os", "ZIP Host OS"), self.HOST_OS)
class ZipGeneralFlags(FieldSet):
static_size = 16
def createFields(self):
# Need the compression info from the parent, and that is the byte following
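        # (the 16-bit compression method field immediately follows this 16-bit
        # flags field, hence the +16 bit offset below)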
method = self.stream.readBits(self.absolute_address+16, 16, LITTLE_ENDIAN)
yield Bit(self, "is_encrypted", "File is encrypted?")
if method == 6:
yield Bit(self, "use_8k_sliding", "Use 8K sliding dictionary (instead of 4K)")
yield Bit(self, "use_3shannon", "Use a 3 Shannon-Fano tree (instead of 2 Shannon-Fano)")
elif method in (8, 9):
NAME = {
0: "Normal compression",
1: "Maximum compression",
2: "Fast compression",
3: "Super Fast compression"
}
yield Enum(Bits(self, "method", 2), NAME)
elif method == 14: #LZMA
yield Bit(self, "lzma_eos", "LZMA stream is ended with a EndOfStream marker")
yield Bit(self, "unused[]")
else:
yield Bits(self, "compression_info", 2)
yield Bit(self, "has_descriptor",
"Compressed data followed by descriptor?")
yield Bit(self, "enhanced_deflate", "Reserved for use with method 8")
yield Bit(self, "is_patched", "File is compressed with patched data?")
yield Bit(self, "strong_encrypt", "Strong encryption (version >= 50)")
yield Bits(self, "unused[]", 4, "Unused")
yield Bit(self, "uses_unicode", "Filename and comments are in UTF-8")
yield Bit(self, "incomplete", "Reserved by PKWARE for enhanced compression.")
yield Bit(self, "encrypted_central_dir", "Selected data values in the Local Header are masked")
yield Bits(self, "unused[]", 2, "Unused")
class ExtraField(FieldSet):
EXTRA_FIELD_ID = {
0x0007: "AV Info",
0x0009: "OS/2 extended attributes (also Info-ZIP)",
0x000a: "PKWARE Win95/WinNT FileTimes", # undocumented!
0x000c: "PKWARE VAX/VMS (also Info-ZIP)",
0x000d: "PKWARE Unix",
0x000f: "Patch Descriptor",
0x07c8: "Info-ZIP Macintosh (old, J. Lee)",
0x2605: "ZipIt Macintosh (first version)",
0x2705: "ZipIt Macintosh v 1.3.5 and newer (w/o full filename)",
0x334d: "Info-ZIP Macintosh (new, D. Haase Mac3 field)",
0x4341: "Acorn/SparkFS (David Pilling)",
0x4453: "Windows NT security descriptor (binary ACL)",
0x4704: "VM/CMS",
0x470f: "MVS",
0x4b46: "FWKCS MD5 (third party, see below)",
0x4c41: "OS/2 access control list (text ACL)",
0x4d49: "Info-ZIP VMS (VAX or Alpha)",
0x5356: "AOS/VS (binary ACL)",
0x5455: "extended timestamp",
0x5855: "Info-ZIP Unix (original; also OS/2, NT, etc.)",
0x6542: "BeOS (BeBox, PowerMac, etc.)",
0x756e: "ASi Unix",
0x7855: "Info-ZIP Unix (new)",
0xfb4a: "SMS/QDOS",
}
def createFields(self):
yield Enum(UInt16(self, "field_id", "Extra field ID"),
self.EXTRA_FIELD_ID)
size = UInt16(self, "field_data_size", "Extra field data size")
yield size
if size.value > 0:
yield RawBytes(self, "field_data", size.value, "Unknown field data")
class ExtraFields(FieldSet):
def createFields(self):
while self.current_size < self.size:
yield ExtraField(self, "extra[]")
def ZipStartCommonFields(self):
yield ZipVersion(self, "version_needed", "Version needed")
yield ZipGeneralFlags(self, "flags", "General purpose flag")
yield Enum(UInt16(self, "compression", "Compression method"),
COMPRESSION_METHOD)
yield TimeDateMSDOS32(self, "last_mod", "Last modification file time")
yield textHandler(UInt32(self, "crc32", "CRC-32"), hexadecimal)
yield UInt32(self, "compressed_size", "Compressed size")
yield UInt32(self, "uncompressed_size", "Uncompressed size")
yield UInt16(self, "filename_length", "Filename length")
yield UInt16(self, "extra_length", "Extra fields length")
def zipGetCharset(self):
if self["flags/uses_unicode"].value:
return "UTF-8"
else:
return "ISO-8859-15"
class ZipCentralDirectory(FieldSet):
HEADER = 0x02014b50
def createFields(self):
yield ZipVersion(self, "version_made_by", "Version made by")
for field in ZipStartCommonFields(self):
yield field
# Check unicode status
charset = zipGetCharset(self)
yield UInt16(self, "comment_length", "Comment length")
yield UInt16(self, "disk_number_start", "Disk number start")
yield UInt16(self, "internal_attr", "Internal file attributes")
yield UInt32(self, "external_attr", "External file attributes")
yield UInt32(self, "offset_header", "Relative offset of local header")
yield String(self, "filename", self["filename_length"].value,
"Filename", charset=charset)
if 0 < self["extra_length"].value:
yield ExtraFields(self, "extra", size=self["extra_length"].value*8,
description="Extra fields")
if 0 < self["comment_length"].value:
yield String(self, "comment", self["comment_length"].value,
"Comment", charset=charset)
def createDescription(self):
return "Central directory: %s" % self["filename"].display
class Zip64EndCentralDirectory(FieldSet):
HEADER = 0x06064b50
def createFields(self):
yield UInt64(self, "zip64_end_size",
"Size of zip64 end of central directory record")
yield ZipVersion(self, "version_made_by", "Version made by")
yield ZipVersion(self, "version_needed", "Version needed to extract")
yield UInt32(self, "number_disk", "Number of this disk")
yield UInt32(self, "number_disk2",
"Number of the disk with the start of the central directory")
yield UInt64(self, "number_entries",
"Total number of entries in the central directory on this disk")
yield UInt64(self, "number_entries2",
"Total number of entries in the central directory")
yield UInt64(self, "size", "Size of the central directory")
yield UInt64(self, "offset", "Offset of start of central directory")
if 0 < self["zip64_end_size"].value:
yield RawBytes(self, "data_sector", self["zip64_end_size"].value,
"zip64 extensible data sector")
class ZipEndCentralDirectory(FieldSet):
HEADER = 0x06054b50
def createFields(self):
yield UInt16(self, "number_disk", "Number of this disk")
yield UInt16(self, "number_disk2", "Number in the central dir")
yield UInt16(self, "total_number_disk",
"Total number of entries in this disk")
yield UInt16(self, "total_number_disk2",
"Total number of entries in the central dir")
yield UInt32(self, "size", "Size of the central directory")
yield UInt32(self, "offset", "Offset of start of central directory")
yield PascalString16(self, "comment", "ZIP comment")
class ZipDataDescriptor(FieldSet):
HEADER_STRING = "\x50\x4B\x07\x08"
HEADER = 0x08074B50
static_size = 96
def createFields(self):
yield textHandler(UInt32(self, "file_crc32",
"Checksum (CRC32)"), hexadecimal)
yield filesizeHandler(UInt32(self, "file_compressed_size",
"Compressed size (bytes)"))
yield filesizeHandler(UInt32(self, "file_uncompressed_size",
"Uncompressed size (bytes)"))
class FileEntry(FieldSet):
HEADER = 0x04034B50
filename = None
def data(self, size):
compression = self["compression"].value
if compression == 0:
return SubFile(self, "data", size, filename=self.filename)
compressed = SubFile(self, "compressed_data", size, filename=self.filename)
if compression == COMPRESSION_DEFLATE:
return Deflate(compressed)
else:
return compressed
def resync(self):
# Non-seekable output, search the next data descriptor
size = self.stream.searchBytesLength(ZipDataDescriptor.HEADER_STRING, False,
self.absolute_address+self.current_size)
if size <= 0:
raise ParserError("Couldn't resync to %s" %
ZipDataDescriptor.HEADER_STRING)
yield self.data(size)
yield textHandler(UInt32(self, "header[]", "Header"), hexadecimal)
data_desc = ZipDataDescriptor(self, "data_desc", "Data descriptor")
#self.info("Resynced!")
yield data_desc
# The above could be checked anytime, but we prefer trying parsing
# than aborting
if self["crc32"].value == 0 and \
data_desc["file_compressed_size"].value != size:
raise ParserError("Bad resync: position=>%i but data_desc=>%i" %
(size, data_desc["file_compressed_size"].value))
def createFields(self):
for field in ZipStartCommonFields(self):
yield field
length = self["filename_length"].value
if length:
filename = String(self, "filename", length, "Filename",
charset=zipGetCharset(self))
yield filename
self.filename = filename.value
if self["extra_length"].value:
yield ExtraFields(self, "extra", size=self["extra_length"].value*8,
description="Extra fields")
size = self["compressed_size"].value
if size > 0:
yield self.data(size)
elif self["flags/incomplete"].value:
for field in self.resync():
yield field
if self["flags/has_descriptor"].value and self['crc32'].value == 0:
yield ZipDataDescriptor(self, "data_desc", "Data descriptor")
def createDescription(self):
return "File entry: %s (%s)" % \
(self["filename"].value, self["compressed_size"].display)
def validate(self):
if self["compression"].value not in COMPRESSION_METHOD:
return "Unknown compression method (%u)" % self["compression"].value
return ""
class ZipSignature(FieldSet):
HEADER = 0x05054B50
def createFields(self):
yield PascalString16(self, "signature", "Signature")
class Zip64EndCentralDirectoryLocator(FieldSet):
HEADER = 0x07064b50
def createFields(self):
yield UInt32(self, "disk_number", \
"Number of the disk with the start of the zip64 end of central directory")
yield UInt64(self, "relative_offset", \
"Relative offset of the zip64 end of central directory record")
yield UInt32(self, "disk_total_number", "Total number of disks")
class ZipFile(Parser):
endian = LITTLE_ENDIAN
MIME_TYPES = {
# Default ZIP archive
u"application/zip": "zip",
u"application/x-zip": "zip",
# Java archive (JAR)
u"application/x-jar": "jar",
u"application/java-archive": "jar",
# OpenOffice 1.0
u"application/vnd.sun.xml.calc": "sxc",
u"application/vnd.sun.xml.draw": "sxd",
u"application/vnd.sun.xml.impress": "sxi",
u"application/vnd.sun.xml.writer": "sxw",
u"application/vnd.sun.xml.math": "sxm",
# OpenOffice 1.0 (template)
u"application/vnd.sun.xml.calc.template": "stc",
u"application/vnd.sun.xml.draw.template": "std",
u"application/vnd.sun.xml.impress.template": "sti",
u"application/vnd.sun.xml.writer.template": "stw",
u"application/vnd.sun.xml.writer.global": "sxg",
# OpenDocument
u"application/vnd.oasis.opendocument.chart": "odc",
u"application/vnd.oasis.opendocument.image": "odi",
u"application/vnd.oasis.opendocument.database": "odb",
u"application/vnd.oasis.opendocument.formula": "odf",
u"application/vnd.oasis.opendocument.graphics": "odg",
u"application/vnd.oasis.opendocument.presentation": "odp",
u"application/vnd.oasis.opendocument.spreadsheet": "ods",
u"application/vnd.oasis.opendocument.text": "odt",
u"application/vnd.oasis.opendocument.text-master": "odm",
# OpenDocument (template)
u"application/vnd.oasis.opendocument.graphics-template": "otg",
u"application/vnd.oasis.opendocument.presentation-template": "otp",
u"application/vnd.oasis.opendocument.spreadsheet-template": "ots",
u"application/vnd.oasis.opendocument.text-template": "ott",
}
PARSER_TAGS = {
"id": "zip",
"category": "archive",
"file_ext": tuple(MIME_TYPES.itervalues()),
"mime": tuple(MIME_TYPES.iterkeys()),
"magic": (("PK\3\4", 0),),
"subfile": "skip",
"min_size": (4 + 26)*8, # header + file entry
"description": "ZIP archive"
}
def validate(self):
if self["header[0]"].value != FileEntry.HEADER:
return "Invalid magic"
try:
file0 = self["file[0]"]
except HACHOIR_ERRORS, err:
return "Unable to get file #0"
err = file0.validate()
if err:
return "File #0: %s" % err
return True
def createFields(self):
# File data
self.signature = None
self.central_directory = []
while not self.eof:
header = textHandler(UInt32(self, "header[]", "Header"), hexadecimal)
yield header
header = header.value
if header == FileEntry.HEADER:
yield FileEntry(self, "file[]")
elif header == ZipDataDescriptor.HEADER:
yield ZipDataDescriptor(self, "spanning[]")
elif header == 0x30304b50:
yield ZipDataDescriptor(self, "temporary_spanning[]")
elif header == ZipCentralDirectory.HEADER:
yield ZipCentralDirectory(self, "central_directory[]")
elif header == ZipEndCentralDirectory.HEADER:
yield ZipEndCentralDirectory(self, "end_central_directory", "End of central directory")
elif header == Zip64EndCentralDirectory.HEADER:
yield Zip64EndCentralDirectory(self, "end64_central_directory", "ZIP64 end of central directory")
elif header == ZipSignature.HEADER:
yield ZipSignature(self, "signature", "Signature")
elif header == Zip64EndCentralDirectoryLocator.HEADER:
yield Zip64EndCentralDirectoryLocator(self, "end_locator", "ZIP64 Enf of central directory locator")
else:
raise ParserError("Error, unknown ZIP header (0x%08X)." % header)
def createMimeType(self):
if self["file[0]/filename"].value == "mimetype":
return makeUnicode(self["file[0]/data"].value)
else:
return u"application/zip"
def createFilenameSuffix(self):
if self["file[0]/filename"].value == "mimetype":
mime = self["file[0]/compressed_data"].value
if mime in self.MIME_TYPES:
return "." + self.MIME_TYPES[mime]
return ".zip"
def createContentSize(self):
start = 0
end = MAX_FILESIZE * 8
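        # "PK\5\6" is the End Of Central Directory signature; searchBytes works
        # in bit offsets, and the fixed part of that record is 22 bytes, hence
        # the 22*8 below (this assumes the archive comment is empty).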
end = self.stream.searchBytes("PK\5\6", start, end)
if end is not None:
return end + 22*8
return None
| {
"content_hash": "9c5086b59802c1aaf4585c883e7955bf",
"timestamp": "",
"source": "github",
"line_count": 433,
"max_line_length": 116,
"avg_line_length": 41.006928406466514,
"alnum_prop": 0.6037395809867088,
"repo_name": "Yukinoshita47/Yuki-Chan-The-Auto-Pentest",
"id": "8271ac93cece32562c15a62111e9d9be9d298868",
"size": "17756",
"binary": false,
"copies": "71",
"ref": "refs/heads/master",
"path": "Module/metagoofil/hachoir_parser/archive/zip.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "36211"
},
{
"name": "JavaScript",
"bytes": "3038"
},
{
"name": "Makefile",
"bytes": "1360"
},
{
"name": "Perl",
"bytes": "108876"
},
{
"name": "Python",
"bytes": "3034585"
},
{
"name": "Roff",
"bytes": "6738"
},
{
"name": "Ruby",
"bytes": "2693582"
},
{
"name": "Shell",
"bytes": "53755"
},
{
"name": "XSLT",
"bytes": "5475"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.utils.translation import ugettext as _
from django.utils.timezone import now
from . import forms, models
@admin.register(models.Post)
class PostAdmin(admin.ModelAdmin):
list_display = (
'__str__',
'status',
'approved',
'created_at',
'approved_at',
'created_by',
)
form = forms.PostForm
actions = (
'approve',
)
def approve(self, request, queryset):
rows = (
queryset.filter(
status=models.Post.FINISHED,
approved=False
)
.update(
approved_at=now(),
approved=True
)
)
        self.message_user(request, _('{0} posts approved').format(rows))
approve.short_description = _('Approve selected posts')
def save_model(self, request, obj, *args):
if not obj.pk:
obj.created_by = request.user
obj.save()
| {
"content_hash": "1ca3d5607d997dc3078459c4dac9c926",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 72,
"avg_line_length": 23.069767441860463,
"alnum_prop": 0.5403225806451613,
"repo_name": "dvl/pyclub",
"id": "b6736b9c711e4ff6856f381520289946f2930ef5",
"size": "1017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyclub/content/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4395"
},
{
"name": "HTML",
"bytes": "8543"
},
{
"name": "JavaScript",
"bytes": "1292"
},
{
"name": "Makefile",
"bytes": "154"
},
{
"name": "Python",
"bytes": "22163"
}
],
"symlink_target": ""
} |
'''
Created on 4 juin 2013
@author: Aristote Diasonama
'''
import logging
from shop.handlers.base_handler import BaseHandler
class VerificationHandler(BaseHandler):
def get(self):
signup_token = self.request.str_GET["signup_token"]
verification_type = self.request.str_GET['type']
user_id = self.request.str_GET['user_id']
# it should be something more concise like
# self.auth.get_user_by_token(user_id, signup_token
# unfortunately the auth interface does not (yet) allow to manipulate
# signup tokens concisely
user = self.user_model.get_by_auth_token(int(user_id), signup_token,
subject="signup"
)[0]
if not user:
logging.info('Could not find any user with id "%s" signup token'
'"%s"', int(user_id), signup_token)
self.abort(404)
# store user data in the session
self.auth.set_session(self.auth.store.user_to_dict(user),
remember=True)
if verification_type == 'v':
# remove signup token, we don't want users to come back with an old link
self.user_model.delete_signup_token(user.get_id(), signup_token)
if not user.verified:
user.verified = True
user.put()
self.display_message('User email address has been verified.')
return
elif verification_type == 'p':
# supply user to the page
params = {
'user': user,
'token': signup_token
}
self.render_template('resetpassword.html', params)
else:
logging.info('verification type not supported')
self.abort(404)
| {
"content_hash": "df6f2595cd5cfa3ae58c86ea3d7292e8",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 84,
"avg_line_length": 31.24590163934426,
"alnum_prop": 0.5351521511017838,
"repo_name": "EventBuck/EventBuck",
"id": "b51a0cd6dfe626e9e631d9042852011cce9eb83c",
"size": "1906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shop/handlers/profile/verification_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "72386"
},
{
"name": "JavaScript",
"bytes": "178307"
},
{
"name": "Python",
"bytes": "302393"
}
],
"symlink_target": ""
} |
import logging
logger = logging.getLogger('dmrg_helpers')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
| {
"content_hash": "20a1f53d5a2f140f351e90ab6554125b",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 42,
"avg_line_length": 27.5,
"alnum_prop": 0.8,
"repo_name": "iglpdc/dmrg_helpers",
"id": "4aafc7a049822b2e7197e3605eb9a9fc54da3869",
"size": "165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dmrg_helpers/core/dmrg_logging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "95364"
}
],
"symlink_target": ""
} |
import os
import google_api
import google_api.lib_google
import google_api.lib_google2
import google_api.lib_google3
import dropbox_api
import dropbox_api.lib_dropbox
import local_api
import local_api.lib_local
import config
import simple_httpserver
import meta_puller
import traceback
import time
from config import name_local_to_remote;
from config import name_remote_to_local;
# I need the service object to do actual upload/download
SERVERS = []
WRITE_FAIL_MODE = 0 # 0 normal, 1: no update no commit and no error recovery, only hold, 2: partial update without commit,
# we don't do anything relating to CREATE fail, we don't have time to do it, just use the write to illustrate it
SERVERS.append( { \
'id':0, \
'live':1, \
'name' : 'Google', \
'server_object' : google_api.lib_google.create_service_object(0), \
'get_all_file_names' : google_api.lib_google.get_all_file_names, \
'download_file' : google_api.lib_google.download_file, \
'delete_file' : google_api.lib_google.delete_file, \
'upload_file' : google_api.lib_google.upload_file,\
'copy_file' : google_api.lib_google.copy_file, \
})
SERVERS = []
SERVERS.append( { \
'id':0, \
'live':1, \
'name' : 'Local', \
'server_object' : local_api.lib_local.create_service_object('wbl'), \
'get_all_file_names' : local_api.lib_local.get_all_file_names, \
'download_file' : local_api.lib_local.download_file, \
'delete_file' : local_api.lib_local.delete_file, \
'upload_file' : local_api.lib_local.upload_file
})
SERVERS.append( { \
'id':1, \
'live':1, \
'name' : 'Local', \
'server_object' : local_api.lib_local.create_service_object('wbl2'), \
'get_all_file_names' : local_api.lib_local.get_all_file_names, \
'download_file' : local_api.lib_local.download_file, \
'delete_file' : local_api.lib_local.delete_file, \
'upload_file' : local_api.lib_local.upload_file
})
SERVERS.append( { \
'id':2, \
'live':1, \
'name' : 'Local', \
'server_object' : local_api.lib_local.create_service_object('wbl3'), \
'get_all_file_names' : local_api.lib_local.get_all_file_names, \
'download_file' : local_api.lib_local.download_file, \
'delete_file' : local_api.lib_local.delete_file, \
'upload_file' : local_api.lib_local.upload_file
})
'''
SERVERS.append( { \
'id':0, \
'live':1, \
'name' : 'Local', \
'server_object' : local_api.lib_local.create_service_object('wbl'), \
'get_all_file_names' : local_api.lib_local.get_all_file_names, \
'download_file' : local_api.lib_local.download_file, \
'delete_file' : local_api.lib_local.delete_file, \
'upload_file' : local_api.lib_local.upload_file
})
SERVERS.append( { \
'id':1, \
'live':1, \
'name' : 'Local', \
'server_object' : local_api.lib_local.create_service_object('wbl2'), \
'get_all_file_names' : local_api.lib_local.get_all_file_names, \
'download_file' : local_api.lib_local.download_file, \
'delete_file' : local_api.lib_local.delete_file, \
'upload_file' : local_api.lib_local.upload_file
})
SERVERS.append( { \
'id':2, \
'live':1, \
'name' : 'Local', \
'server_object' : local_api.lib_local.create_service_object('wbl3'), \
'get_all_file_names' : local_api.lib_local.get_all_file_names, \
'download_file' : local_api.lib_local.download_file, \
'delete_file' : local_api.lib_local.delete_file, \
'upload_file' : local_api.lib_local.upload_file
}) '''
'''
SERVERS.append( { \
'id':1, \
'name' : 'Google', \
'server_object' : google_api.lib_google.create_service_object(), \
'get_all_file_names' : google_api.lib_google.get_all_file_names, \
'download_file' : google_api.lib_google.download_file, \
'delete_file' : google_api.lib_google.delete_file, \
'upload_file' : google_api.lib_google.upload_file
})
SERVERS.append( { \
'id':2, \
'name' : 'DropBox1', \
'server_object' : dropbox_api.lib_dropbox.create_service_object(), \
'get_all_file_names' : dropbox_api.lib_dropbox.get_all_file_names, \
'download_file' : dropbox_api.lib_dropbox.download_file, \
'delete_file' : dropbox_api.lib_dropbox.delete_file, \
'upload_file' : dropbox_api.lib_dropbox.upload_file
})''';
meta_puller.init_config()
meta_puller.pull_meta()
CHUNK_SIZE = config.FILE_CHUNK_SIZE
# return [(file_name,file_size), (file_name, file_size)]
# get the following structure
# { .dirs = {dir_name: SIMILAR structure } }, .files = {file_name:size } }
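# Illustrative (made-up) example of the structure described above:
#   {'dirs': {'photos': {'dirs': {}, 'files': {'cat.jpg': '2048'}}},
#    'files': {'notes.txt': '512'}}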
def cache_list_all_files():
param = {'folder_name':['/']} # the server doesn't care about the folder name, clients care
ret = {'dirs':{}, 'files':{}}
#print 'Python cache_list_all_files BEFORE'
#for file_name_index in range(4):
#print file_name_index,param
try:
#print 'get_all_files begin FFFFAKE'
buf = simple_httpserver.handle_get_all_files(param)
except Exception as e:
#print 'Except ', e
traceback.print_exc()
#print 'Python cache_list_all_files ', buf
buf = buf.split(':')
try:
assert(buf[0] == '0')
len_files = int(buf[1])
for i in range(len_files):
file_name,size,is_foler = buf[2 + i*3:5 + i*3] # is_folder is deprecated
file_name = name_remote_to_local(file_name); file_name = file_name.split('/');
if len(file_name[0]) == 0:
file_name = file_name[1:]
len_path = len(file_name)
cur_map = ret
for path in file_name[0:-1]:
if not cur_map['dirs'].has_key(path):
cur_map['dirs'][path] = {'dirs':{},'files':{}}
cur_map = cur_map['dirs'][path]
assert cur_map['files'].has_key(file_name[-1]) == False
cur_map['files'][file_name[-1]] = size
except Exception as e:
print 'Python Exception', e
#print 'Python cache_list_all_files ret', ret
return ret
def raw_cache_list_all_files():
param = {'folder_name':['/']} # the server doesn't care about the folder name, clients care
ret = {'dirs':{}, 'files':{}}
buf = simple_httpserver.handle_get_all_files(param)
buf = buf.split(':')
assert(buf[0] == '0')
len_files = int(buf[1])
file_names = []
for i in range(len_files):
file_name,size,is_foler = buf[2 + i*3:5 + i*3] # is_folder is deprecated
file_names.append([file_name,size])
return file_names
# create the directory structure under the ROOT directory
CLIENT_ROOT_DIR = '/tmp/hehehe/'
def cache_get_chunks_info(file_name):
param = {}
file_name = name_local_to_remote(file_name)
param['file_name'] = [file_name]
param['request_chunk_index'] = ['0']
buf = simple_httpserver.handle_meta_file_info(param)
buf = buf.split(':')
assert buf[0] == '0',buf
file_name = buf[1]
file_size = buf[2]
is_folder = buf[3]
len_chunks = int(buf[4])
index = 5
ret = {}
ret['file_size'] = 0
#print buf
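    # Per-chunk layout inferred from the parsing below (illustrative):
    #   chunk_file_name : chunk_index : chunk_id : <2*num_servers> : (server_id : chunk_size) repeated per server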
while len_chunks > 0:
chunk_file_name,chunk_index,chunk_id, len_servers = buf[index:index+4]
chunk_id = int(chunk_id)
chunk_index = int(chunk_index)
len_servers = int(len_servers) / 2
index = index + 4
ret[chunk_index] = []
while len_servers > 0:
ret[chunk_index].append(int(buf[index]))
ret['file_size'] += int(buf[index+1])
index += 2
len_servers -= 1
len_chunks -= 1
return ret
def cache_create_file(file_name):
global SERVERS
file_name = name_local_to_remote(file_name)
buf = simple_httpserver.handle_create_file({'file_name':[file_name]})
buf = buf.split(':')
#print buf
if buf[0] != '0':
        raise Exception('File: ' + file_name + ' could not be created! ' + buf[1])
trans_id = int(buf[1])
num_server = int(buf[2])
file_name = buf[2 + num_server + 1]
servers = [int(buf[i]) for i in range(3,2 + num_server + 1)]
# do the upload
file_name = name_local_to_remote(file_name)
target_file_name = '0_' + file_name + '.trans' + str(trans_id)
# do the upload
for server in servers:
if server < 0:
continue
s = SERVERS[server]
s['upload_file'](s['server_object'], 'fake_new_file_1k', target_file_name)
# confirm the transaction
simple_httpserver.handle_commit_trans({'id':[trans_id]})
try:
os.remove(CLIENT_ROOT_DIR + file_name)
except Exception as e:
pass
def cache_del_file(file_name):
file_name = name_local_to_remote(file_name)
simple_httpserver.handle_del_file({'file_name':[file_name]})
def get_how_many_chunks_involved(file_name,start,size,is_read, chunk_info):
file_name = name_local_to_remote(file_name);
NUM = config.FILE_CHUNK_SIZE
first_chunk = start / NUM
last_chunk = (start + size - 1) / NUM
chunk_number = len(chunk_info.keys()) - 1
if is_read:
if first_chunk >= chunk_number:
return []
if last_chunk >= chunk_number:
last_chunk = chunk_number - 1
return range(first_chunk, last_chunk + 1, 1)
else: # write
if first_chunk >= chunk_number + 1:
return []
return range(first_chunk,last_chunk + 1, 1)
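# Worked example of the chunk arithmetic above (illustrative, assuming
# FILE_CHUNK_SIZE is 1024): an 8-byte access starting at offset 1020 gives
#   first_chunk = 1020 / 1024 = 0,  last_chunk = (1020 + 8 - 1) / 1024 = 1
# so chunks 0 and 1 are involved (this matches the write done in __main__).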
# for test only
def cache_read_file(file_name, start, size):
file_name = name_local_to_remote(file_name);
chunk_info = cache_get_chunks_info(file_name)
chunk_ids = get_how_many_chunks_involved(file_name, start, size, True, chunk_info)
if len(chunk_ids) == 0:
        raise Exception('Read error, chunk info wrong!')
str_chunk_ids = [str(i) for i in chunk_ids]
buf = simple_httpserver.handle_read_file({'file_name':[file_name], 'chunk_ids':[','.join(str_chunk_ids)]})
#print buf
assert(buf[0] == '0')
tmp = buf.split(':')
trans_id = int(tmp[1])
len_server = int(tmp[2])
servers = [int(i) for i in tmp[3:3+len(chunk_ids)]]
byte_file = ''
for chunk_index in range(len(chunk_ids)):
# download
target_file = str(chunk_ids[chunk_index]) + '_' + file_name + '.trans' + str(trans_id)
local_file = str(chunk_ids[chunk_index]) + '_' + file_name
s = SERVERS[servers[chunk_index]]
s['download_file'](s['server_object'], target_file, '/tmp/' + local_file)
f = open('/tmp/' + local_file,'r')
        # Read the chunk header: a 4-byte version field followed by the size field
version = f.read(4)
size = int(f.read(config.HEADER_LENGTH-4))
# stop reading when less than config.FILE_CHUNK_SIZE
mm = f.read(size)
byte_file += mm
f.close()
if size < config.FILE_CHUNK_SIZE or version[0] == '1':
break
simple_httpserver.handle_commit_trans({'id':[trans_id]})
s = start % config.FILE_CHUNK_SIZE
return ''.join(list(byte_file))
# test only
def cache_write_file(file_name, start, to_write):
size = len(to_write)
file_name = name_local_to_remote(file_name)
chunk_info = cache_get_chunks_info(file_name)
chunk_ids = get_how_many_chunks_involved(file_name, start, size, False, chunk_info)
if len(chunk_ids) == 0:
        raise Exception('Write error, chunk info wrong! Start: ' + str(start) + ' Size: ' + str(size))
# a tmp buf to do the update
buf_to_write = ' ' * (len(chunk_ids) * config.FILE_CHUNK_SIZE)
buf_to_write = list(buf_to_write)
# first and last chunk should be considered
if start % config.FILE_CHUNK_SIZE != 0 and start / config.FILE_CHUNK_SIZE < len(chunk_info):
buf_to_write[0:config.FILE_CHUNK_SIZE] = cache_read_file(file_name, start, 1)
if (start + size) % config.FILE_CHUNK_SIZE != 0 and (start + size) / config.FILE_CHUNK_SIZE < len(chunk_info):
buf_to_write[-config.FILE_CHUNK_SIZE:] = cache_read_file(file_name, (start + size - 1) / config.FILE_CHUNK_SIZE, 1)
s = start % config.FILE_CHUNK_SIZE
e = s + size
iii = 0
while s < e:
buf_to_write[s] = to_write[iii]
s += 1
iii += 1
str_chunk_ids = [str(i) for i in chunk_ids]
str_chunk_sizes = ','.join([str(config.FILE_CHUNK_SIZE)] * len(chunk_ids))
#print str_chunk_sizes
buf = simple_httpserver.handle_write_file({'file_name':[file_name], 'chunk_ids':[','.join(str_chunk_ids)], 'chunk_size':[str_chunk_sizes]})
#print buf
assert(buf[0] == '0')
trans_id = int(buf.split(':')[1])
len_server = int(buf.split(':')[2])
servers = buf.split(':')[3:3+len_server]
NUM_PER_SERVER = len_server / len(chunk_ids)
for index in range(len(chunk_ids)):
chunk_id = chunk_ids[index]
target_server = servers[NUM_PER_SERVER*index:(index+1)*NUM_PER_SERVER]
chunk_content = str(index) * config.FILE_CHUNK_SIZE
for server in target_server:
server = int(server)
s = SERVERS[server]
f = open('/tmp/hehe','w')
f.write(''.join(buf_to_write[index*config.FILE_CHUNK_SIZE:(index+1)*config.FILE_CHUNK_SIZE]))
f.close()
target_file_name = str(chunk_ids[index]) + '_' + file_name + '.trans' + str(trans_id)
#print target_file_name,"HEHEHE"
s['upload_file'](s['server_object'], '/tmp/hehe', target_file_name)
buf = simple_httpserver.handle_commit_trans({'id':[trans_id]})
assert(buf[0] == '0')
import pickle
WRITE_LOG_FILE = 'local_write_log'
def redo_logs():
global WRITE_FAIL_MODE
WRITE_FAIL_MODE = 0
logs = pickle.load(open(WRITE_LOG_FILE,'r'))
#logs.append([file_name,to_write,chunk_ids,[],0])
for index in range(len(logs)):
m = logs[index]
file_name,to_write,chunk_ids,tmp,status = m
if status != 1:
cache_write_file_algined(file_name,to_write,chunk_ids,False)
logs[index] = [file_name,'',chunk_ids,[],1]
f = open(WRITE_LOG_FILE,'w')
pickle.dump(logs,f)
f.close()
# the write log is something like:
# [ [file_name,to_write,chunk_ids,chunk_ids_wroted,status], ... ]
# simplified version of cache_write_file, with specified chunk_id
def cache_write_file_algined(file_name,to_write,chunk_ids,real_file_length, log_it = True):
log_it = config.SAVE_FAKE_LOG
global WRITE_FAIL_MODE
assert len(to_write) > 0
# save the transactions now
if os.path.exists(WRITE_LOG_FILE) == False:
f = open(WRITE_LOG_FILE,'w')
pickle.dump([],f)
f.close()
if log_it:
logs = pickle.load(open(WRITE_LOG_FILE,'r'))
logs.append([file_name,to_write,chunk_ids,[],0])
f = open(WRITE_LOG_FILE,'w')
pickle.dump(logs,f)
f.close()
# whether to write only parts
opt_chunk_id = -1
opt_chunk_length = -1
if(real_file_length % config.FILE_CHUNK_SIZE != 0):
opt_chunk_id = real_file_length // config.FILE_CHUNK_SIZE
opt_chunk_length = real_file_length % config.FILE_CHUNK_SIZE
size = len(to_write)
file_name = name_local_to_remote(file_name)
chunk_info = cache_get_chunks_info(file_name)
# chunk_ids = get_how_many_chunks_involved(file_name, start, size, False, chunk_info)
if len(chunk_ids) == 0:
        raise Exception('Write error, chunk info wrong! Size: ' + str(size))
# a tmp buf to do the update
buf_to_write = ' ' * (len(chunk_ids) * config.FILE_CHUNK_SIZE)
buf_to_write = list(buf_to_write)
s = 0
e = s + size
iii = 0
while s < e:
buf_to_write[s] = to_write[iii]
s += 1
iii += 1
sizes = [config.FILE_CHUNK_SIZE] * (1 + ((size-1) // config.FILE_CHUNK_SIZE))
if size % config.FILE_CHUNK_SIZE != 0:
sizes[-1] = size % config.FILE_CHUNK_SIZE
str_chunk_ids = [str(i) for i in chunk_ids]
str_chunk_sizes = ','.join([str(config.FILE_CHUNK_SIZE)] * len(chunk_ids))
#print str_chunk_sizes
a = time.time()
buf = simple_httpserver.handle_write_file({'file_name':[file_name], 'chunk_ids':[','.join(str_chunk_ids)], 'chunk_size':[str_chunk_sizes]})
#print 'handle_write costs ', time.time() - a
#print buf
assert(buf[0] == '0')
trans_id = int(buf.split(':')[1])
len_server = int(buf.split(':')[2])
servers = buf.split(':')[3:3+len_server]
NUM_PER_SERVER = len_server / len(chunk_ids)
for index in range(len(chunk_ids)):
chunk_id = chunk_ids[index]
target_server = servers[NUM_PER_SERVER*index:(index+1)*NUM_PER_SERVER]
chunk_content = str(index) * config.FILE_CHUNK_SIZE
version = '0001'
size = '%012d'%(sizes[index])
count = 0
for server in target_server:
if WRITE_FAIL_MODE == 1:
continue
if WRITE_FAIL_MODE == 2 and count >= 1:
continue
count = count + 1
server = int(server)
s = SERVERS[server]
f = open('/tmp/hehe','w')
f.write(version)
if chunk_id == opt_chunk_id:
size = '%012d'%(opt_chunk_length)
f.write(size)
f.write(''.join(buf_to_write[index*config.FILE_CHUNK_SIZE:(index*config.FILE_CHUNK_SIZE + opt_chunk_length)]))
else:
f.write(size)
f.write(''.join(buf_to_write[index*config.FILE_CHUNK_SIZE:(index+1)*config.FILE_CHUNK_SIZE]))
f.close()
target_file_name = str(chunk_ids[index]) + '_' + file_name + '.trans' + str(trans_id)
#print target_file_name,"HEHEHE"
a = time.time()
s['upload_file'](s['server_object'], '/tmp/hehe', target_file_name)
#print 'upload one chunk costs ', time.time() - a
a = time.time()
if WRITE_FAIL_MODE == 0:
buf = simple_httpserver.handle_commit_trans({'id':[trans_id]})
#print 'the commit cost ', time.time() - a
assert(buf[0] == '0')
if log_it:
logs = pickle.load(open(WRITE_LOG_FILE,'r'))
#logs.append([file_name,to_write,chunk_ids,[],0])
logs[-1][4] = 1
logs[-1][1] = ''
f = open(WRITE_LOG_FILE,'w')
pickle.dump(logs,f)
f.close()
#### All the read/write functions should be only called by the Cache, clients use the api to write the file
# in local directory, the Cache will update the file and do some synchronization periodly
if __name__ == "__main__":
print cache_list_all_files()
cache_create_file('tutu2.txt')
cache_write_file('tutu2.txt',1020,'Z'*8)
#write_file('tutu2.txt', 1025, 10, 'B')
#write_file('tutu2.txt', 1026, 10, 'L')
| {
"content_hash": "d0f2aecc29d01a0c07e92cc1c072950f",
"timestamp": "",
"source": "github",
"line_count": 484,
"max_line_length": 140,
"avg_line_length": 35.73347107438016,
"alnum_prop": 0.6366001734605378,
"repo_name": "baolinw/cloud",
"id": "2390f442ed02c13ba23adb8baef3824deda2609d",
"size": "17444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/simple_client_for_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "38920"
},
{
"name": "Makefile",
"bytes": "652"
},
{
"name": "Python",
"bytes": "110342"
}
],
"symlink_target": ""
} |
import RPi.GPIO as GPIO
import time
#GPIO Mode (BOARD / BCM)
GPIO.setmode(GPIO.BCM)
#set GPIO Pins
GPIO_TRIGGER = 17
GPIO_ECHO = 13
#set GPIO direction (IN / OUT)
GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(GPIO_ECHO, GPIO.IN)
def main ():
try:
while True:
dist = distance()
print ("Measured Distance = %.1f cm" % dist)
time.sleep(0.5)
# Reset by pressing CTRL + C
except KeyboardInterrupt:
print("Measurement stopped by User")
GPIO.cleanup()
def distance():
# set Trigger to HIGH
GPIO.output(GPIO_TRIGGER, True)
# set Trigger after 0.01ms to LOW
time.sleep(0.00001)
GPIO.output(GPIO_TRIGGER, False)
StartTime = time.time()
StopTime = time.time()
# save StartTime
while GPIO.input(GPIO_ECHO) == 0:
StartTime = time.time()
# save time of arrival
while GPIO.input(GPIO_ECHO) == 1:
StopTime = time.time()
# time difference between start and arrival
TimeElapsed = StopTime - StartTime
# multiply with the sonic speed (34300 cm/s)
# and divide by 2, because there and back
distance = (TimeElapsed * 34300) / 2
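    # e.g. (illustrative) a round trip of ~0.00058 s gives (0.00058 * 34300) / 2 ~= 10 cm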
return distance
if __name__ == '__main__':
main()
| {
"content_hash": "223bce59af9e6c90349a8dc37450398c",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 56,
"avg_line_length": 22.654545454545456,
"alnum_prop": 0.6123595505617978,
"repo_name": "JohnOmernik/pimeup",
"id": "2f8b7421ded099b4768c160d12e093d2da0fc33b",
"size": "1276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "animatronics/distance.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "125"
},
{
"name": "HTML",
"bytes": "1766"
},
{
"name": "JavaScript",
"bytes": "1148"
},
{
"name": "Python",
"bytes": "263679"
},
{
"name": "Shell",
"bytes": "22782"
}
],
"symlink_target": ""
} |
import fallout_login as login
import fallout_boot as boot
import fallout_locked as locked
import fallout_hack as hack
import fallout_selection as select
import sys
hard = False
if len(sys.argv) == 2 and sys.argv[1].lower() == 'hard':
hard = True
if boot.beginBoot(hard):
pwd = hack.beginLogin()
if pwd != None:
login.beginLogin(hard, 'ADMIN', pwd)
print select.beginSelection()
else:
locked.beginLocked()
print 'Login failed'
| {
"content_hash": "823050b4baaaae3460fd892798d3f91f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 56,
"avg_line_length": 23.85,
"alnum_prop": 0.6729559748427673,
"repo_name": "J77D/fallout-terminal",
"id": "584607c0f2139e84fad6d899eeb5dd95b88b6ce4",
"size": "500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fallout.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18601"
}
],
"symlink_target": ""
} |
"""
Automatically scrape paintings by specified artists from wikiart.
Jesse Mu
"""
import requests
from bs4 import BeautifulSoup
import os
from collections import defaultdict
from fake_useragent import UserAgent # Hehe
# Global constants
ARTIST_URL = 'http://wikiart.org/en/{artist}/mode/all-paintings/{page}'
IMG_URL = 'http://uploads4.wikiart.org/images/{artist}/{painting}/jpg'
ARTISTS = ['pablo-picasso']
FAKE_HEADERS = {'User-Agent': UserAgent().random}
IMG_DIR = '../wikiart/{artist}'
BASE_DIR = IMG_DIR.format(artist='')
def total_pages(soup):
"""
Given an artist's parsed BeautifulSoup page, determine how many
pages there are (there's a predictable pattern).
"""
pager_items = soup.find('div', {'class': 'pager-items'})
pager_links = pager_items.find_all('a')
for pager, next_pager in zip(pager_links, pager_links[1:]):
# There's always a "next" link before the last page
if next_pager.text == 'Next':
return int(pager.text)
# If here, we haven't found a last page
canonical = soup.find('link', {'rel': 'canonical'})['href']
raise ValueError("Couldn't find last page for {}".format(canonical))
def raise_if_bad(request_obj, url='undef'):
"""Throw a helpful error message when a request fails."""
try:
request_obj.raise_for_status()
except requests.exceptions.HTTPError as e:
print "wikiart-scraper.py: Error trying to retrieve {}".format(
request_obj.url
)
raise e
def clean_painting_url(painting_url):
"""
Clean the painting url by removing the size specification.
Might be other things later.
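    Illustrative (assumed) example:
    '.../pablo-picasso/guernica.jpg!PinterestSmall.jpg' -> '.../pablo-picasso/guernica.jpg'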
"""
splitted = painting_url.split('!')
assert len(splitted) == 2, 'url {} has more than one !'.format(
painting_url
)
return splitted[0]
def save_painting(link, directory):
"""
Actually request the url and save the painting into the directory.
"""
r_img = requests.get(link, stream=True)
raise_if_bad(r_img, link)
# Get name by splitting on slash and getting the last element
img_name = link.split('/')[-1]
# Unicode error screwing this up
# print u"Saving img %s in directory %s" % (unicode(img_name), directory + '/')
with open(directory + '/' + img_name, 'wb') as fout:
fout.write(r_img.content)
def scrape_paintings(soup, directory=BASE_DIR):
"""
Scrape the given artist page and save images into the specified
directory.
ADDITIONALLY, return a list of names of paintings scraped.
"""
# Make artist directory if it doesn't exist
if not os.path.exists(directory):
os.makedirs(directory)
# Class "Paintings" contains all links
soup_paintings = soup.find('div', {'class': 'Painting'})
# Just get all links - they should all be paintings
soup_imgs = [s['src'] for s in soup_paintings.find_all('img')]
# But double check just to make sure
cleaned_links = [clean_painting_url(s) for s in soup_imgs
if 'jpg' in s and 'uploads' in s]
for link in cleaned_links:
save_painting(link, directory)
return cleaned_links
def main():
# Create the img directory if it doesn't exist.
if not os.getcwd().endswith('util'):
print "You ought to run this script from the util directory for " + \
"accurate save locations (see IMG_DIR)"
return
if not os.path.exists(BASE_DIR):
os.makedirs(BASE_DIR)
artist_paintings = defaultdict(list)
for artist in ARTISTS:
r_artist = requests.get(
ARTIST_URL.format(artist=artist, page=1), headers=FAKE_HEADERS
)
raise_if_bad(r_artist, url=r_artist.url)
soup = BeautifulSoup(r_artist.text, 'lxml') # Default to lmxl parser
for i in xrange(1, total_pages(soup)):
r_artist_page = requests.get(
ARTIST_URL.format(artist=artist, page=i),
headers=FAKE_HEADERS
)
raise_if_bad(r_artist_page, url=r_artist_page.url)
soup_page = BeautifulSoup(r_artist_page.text, 'lxml')
# Download the paintings!
paintings = scrape_paintings(
soup_page,
directory=IMG_DIR.format(artist=artist)
)
# Add paintings to the dictionary
artist_paintings[artist].extend(paintings)
if __name__ == '__main__':
main()
| {
"content_hash": "7fe1a054d0df89ef6eb63392293fe732",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 83,
"avg_line_length": 32.63970588235294,
"alnum_prop": 0.6273935571074566,
"repo_name": "jayelm/neural-art",
"id": "471cbd8dd4a148e545fab8c74f2791fa14343f10",
"size": "4439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/wikiart-scraper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22277"
},
{
"name": "Shell",
"bytes": "160"
}
],
"symlink_target": ""
} |
from recipe_engine import post_process
PYTHON_VERSION_COMPATIBILITY = 'PY2+3'
DEPS = [
'git',
'recipe_engine/assertions',
'recipe_engine/properties',
]
def RunSteps(api):
numbers = api.git.number(
commitrefs=api.properties.get('commitrefs'),
test_values=api.properties.get('test_values'),
)
expected_numbers = api.properties['expected_numbers']
api.assertions.assertSequenceEqual(numbers, expected_numbers)
def GenTests(api):
yield api.test(
'basic',
api.properties(expected_numbers=['3000']),
api.post_check(post_process.StatusSuccess),
api.post_process(post_process.DropExpectation),
)
yield api.test(
'commitrefs',
api.properties(
commitrefs=['rev-1', 'rev-2'],
expected_numbers=['3000', '3001'],
),
api.post_check(post_process.StatusSuccess),
api.post_process(post_process.DropExpectation),
)
yield api.test(
'basic-with-test-values',
api.properties(
test_values=[42],
expected_numbers=['42'],
),
api.post_check(post_process.StatusSuccess),
api.post_process(post_process.DropExpectation),
)
yield api.test(
'commitrefs-with-test-values',
api.properties(
test_values=[42, 13],
commitrefs=['rev-1', 'rev-2'],
expected_numbers=['42', '13'],
),
api.post_check(post_process.StatusSuccess),
api.post_process(post_process.DropExpectation),
)
| {
"content_hash": "d3c4f9cf10f2e57c7327f36b7981c47b",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 63,
"avg_line_length": 25.517241379310345,
"alnum_prop": 0.6297297297297297,
"repo_name": "CoherentLabs/depot_tools",
"id": "409ba78100e8284e6244ff894cc0abf4202f9447",
"size": "1643",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "recipes/recipe_modules/git/tests/number.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "27896"
},
{
"name": "PowerShell",
"bytes": "5337"
},
{
"name": "Python",
"bytes": "2549026"
},
{
"name": "Roff",
"bytes": "5283"
},
{
"name": "Shell",
"bytes": "64165"
}
],
"symlink_target": ""
} |
__author__ = 'VinnieJohns'
import re
def test_phones_on_home_page(app):
contact_from_home_page = app.contact.get_contacts_list()[0]
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
assert contact_from_home_page.all_phones_from_homepage == merge_phones_like_on_home_page(contact_from_edit_page)
def test_phones_on_contact_view_page(app):
contact_from_view_page = app.contact.get_contact_from_view_page(0)
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
assert contact_from_view_page.home_tel == contact_from_edit_page.home_tel
assert contact_from_view_page.mobile_tel == contact_from_edit_page.mobile_tel
assert contact_from_view_page.work_tel == contact_from_edit_page.work_tel
assert contact_from_view_page.secondary_home_phone == contact_from_edit_page.secondary_home_phone
def clear(s):
return re.sub("[() -]", "", s)
def merge_phones_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contact.home_tel, contact.mobile_tel, contact.work_tel, contact.secondary_home_phone])))) | {
"content_hash": "f4506b3711c8c7166ff7d8e64c652b50",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 129,
"avg_line_length": 44.75,
"alnum_prop": 0.6624102154828412,
"repo_name": "VinnieJohns/barancev_python_training",
"id": "979dc063f1a0ff58880432a00d95a2d0e3634191",
"size": "1253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_phones.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "38038"
}
],
"symlink_target": ""
} |
from sklearn2sql_heroku.tests.regression import generic as reg_gen
reg_gen.test_model("AdaBoostRegressor" , "california_housing" , "mysql")
| {
"content_hash": "47a502171bdae8ca12f27413fecf54f4",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 72,
"avg_line_length": 35.5,
"alnum_prop": 0.7816901408450704,
"repo_name": "antoinecarme/sklearn2sql_heroku",
"id": "1ed0dbb6d9d7df41dcb867a1994bef1d60b55841",
"size": "142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/regression/california_housing/ws_california_housing_AdaBoostRegressor_mysql_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "507043"
},
{
"name": "Procfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "1021137"
},
{
"name": "R",
"bytes": "2521"
}
],
"symlink_target": ""
} |
import json
from django.http import (
HttpResponseNotAllowed, HttpResponseBadRequest, HttpResponse,
QueryDict
)
from issues.models import Issue
def get_issues(request, **kwargs):
if request.method != 'GET':
        return HttpResponseNotAllowed(['GET'])
issues = [issue.serialize() for issue in Issue.objects.all()]
return HttpResponse(
json.dumps(issues),
content_type="application/json"
)
def change_issue(request, issue_id):
if request.method != 'PUT':
        return HttpResponseNotAllowed(['PUT'])
try:
issue = Issue.objects.get(pk=issue_id)
except Issue.DoesNotExist:
return HttpResponseBadRequest()
params = QueryDict(request.body)
Issue.objects.update_positions(
issue,
int(params['status']),
int(params['position'])
)
return HttpResponse(
json.dumps(issue.serialize()),
content_type="application/json"
)
| {
"content_hash": "d1bbd84bcae67e458f0bd9207e049f3c",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 65,
"avg_line_length": 24.57894736842105,
"alnum_prop": 0.6541755888650964,
"repo_name": "zadarians/qa_dashboard",
"id": "c5a06aef515b333b7c6e0e1a9bf22794830ae168",
"size": "934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa_dashboard/issues/api.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "25000"
},
{
"name": "Shell",
"bytes": "6466"
}
],
"symlink_target": ""
} |
"""
API endpoints for Registration
"""
class Registration_API_Endpoints:
"Class for registration endpoints"
def registration_url(self,suffix=''):
"""Append API end point to base URL"""
return self.base_url+'/register/'+suffix
def register_car(self,url_params,json,headers):
"register car "
url = self.registration_url('car?')+url_params
json_response = self.post(url,params=url_params,json=json,headers=headers)
return {
'url':url,
'response':json_response['json_response']
}
def get_registered_cars(self,headers):
"gets registered cars"
url = self.registration_url('')
json_response = self.get(url,headers=headers)
return {
'url':url,
'response':json_response['json_response']
}
def delete_registered_car(self,headers):
"deletes registered car"
url = self.registration_url('car/delete/')
json_response = self.delete(url,headers)
return {
'url':url,
'response':json_response['json_response']
}
| {
"content_hash": "8679df86ee9674d24bf19e3d216299cb",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 76,
"avg_line_length": 23.925,
"alnum_prop": 0.6980146290491118,
"repo_name": "qxf2/qxf2-page-object-model",
"id": "b9a7d033cce387a99123603c1d9164e610ee80ee",
"size": "958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "endpoints/Registration_API_Endpoints.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "4299"
},
{
"name": "Python",
"bytes": "331188"
}
],
"symlink_target": ""
} |
from sklearn.base import TransformerMixin, BaseEstimator
class FitFixer(BaseEstimator, TransformerMixin):
def __init__(self, cls):
self.cl = cls
def fit(self, X, y=None):
self.cl.fit(X)
return self
def transform(self, X):
return self.cl.transform(X)
| {
"content_hash": "6161677b1bb64178b26c2b3d27332e34",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 56,
"avg_line_length": 24.75,
"alnum_prop": 0.6397306397306397,
"repo_name": "ryadzenine/featkit",
"id": "ed7e627307ae2b77f7a598f20a5b19e61c5d618d",
"size": "314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "featkit/fit_fixer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14063"
}
],
"symlink_target": ""
} |
"""
Data input with Pandas for Data Bootcamp course.
Course materials
* http://databootcamp.nyuecon.com/
* https://github.com/NYUDataBootcamp/Materials
Written by Dave Backus, August 2015
Created with Python 3.4
Edited by Chase Coleman and Spencer Lyon, October 2016
Created with Python 3.5
"""
#%%
"""
Read csv file from internet (and why we like csv's)
The result is a data frame: like a sheet with row and column labels
"""
import pandas as pd
# read file from url
url1 = 'https://raw.githubusercontent.com/NYUDataBootcamp'
url2 = '/Materials/master/Data/test.csv'
url = url1 + url2
df = pd.read_csv(url)
# if the internet is down
#df_fromdict = pd.DataFrame({'name': ['Dave', 'Chase', 'Spencer'],
# 'x1': [1, 4, 5], 'x2': [2, 3, 6], 'x3': [3.5, 4.3, 7.8]})
#%%
# windows users only
# folder = "\C:\Users" # WILL NOT WORK
folder = "\\C:\\Users" # Good
folder = "/C:/Users" # Good
# NEED TO CHANGE TO YOUR OWN PATH HERE
folder = "/Users/sglyon/Teaching/NYUDataBootcamp/Materials/Data/"
csv_file = folder + "test.csv"
df = pd.read_csv(csv_file)
#%%
"""
Examples
"""
import pandas as pd
# Penn World Table
url = 'http://www.rug.nl/research/ggdc/data/pwt/v81/pwt81.xlsx'
pwt = pd.read_excel(url, sheetname='Data')
#%%
# World Economic Outlook
url1 = 'https://www.imf.org/external/pubs/ft/weo/'
url2 = '2015/02/weodata/WEOOct2015all.xls'
weo = pd.read_csv(url1+url2,
sep='\t', # \t = tab
thousands=',', # kill commas
na_values=['n/a', '--']) # missing values
#%%
# PISA test scores
url = 'http://dx.doi.org/10.1787/888932937035'
pisa = pd.read_excel(url,
skiprows=18, # skip the first 18 rows
skipfooter=7, # skip the last 7
parse_cols=[0,1,9,13], # select columns of interest
index_col=0, # set the index as the first column
header=[0,1] # set the variable names
)
#%%
pisa = pisa.dropna() # drop blank lines
pisa.columns = ['Math', 'Reading', 'Science'] # simplify variable names
pisa['Math'].plot(kind='barh', figsize=(5, 12))
#%%
# UN population data
url1 = 'http://esa.un.org/unpd/wpp/DVD/Files/'
url2 = '1_Indicators%20(Standard)/EXCEL_FILES/1_Population/'
url3 = 'WPP2015_POP_F07_1_POPULATION_BY_AGE_BOTH_SEXES.XLS'
url = url1 + url2 + url3
cols = [2, 4, 5] + list(range(6,28))
est = pd.read_excel(url, sheetname=0, skiprows=16, parse_cols=cols)
#%%
# income by colleage major
url1 = 'https://raw.githubusercontent.com/fivethirtyeight/data/master/'
url2 = 'college-majors/recent-grads.csv'
url = url1 + url2
df538 = pd.read_csv(url)
df538 = df538.set_index("Major")
df538["Median"].plot(kind="barh", figsize=(5, 12))
#%%
# IMDb movies and parts
# WARNING: this file is approx 200 MB -- this might take a while
url = 'http://pages.stern.nyu.edu/~dbackus/Data/cast.csv'
cast = pd.read_csv(url, encoding='utf-8')
#%% first 2016 presdidential debate
# NOTE: data came from here:
# https://www.kaggle.com/mrisdal/2016-us-presidential-debates
url1 = "https://raw.githubusercontent.com/NYUDataBootcamp/Materials/"
url2 = "master/Data/pres_debate_2016.csv"
url= url1 + url2
debate = pd.read_csv(url)
# who spoke more
trump = debate[debate["Speaker"] == "Trump"]
clinton = debate[debate["Speaker"] == "Clinton"]
print("Length of Trumps's talking ", len(trump["Text"].sum()))
print("Length of Clinton's talking ", len(clinton["Text"].sum()))
#%%
"""
APIs
"""
from pandas_datareader import data # Package to access FRED
import datetime as dt # package to handle dates
start = dt.datetime(2010, 1, 1) # start date
codes = ['GDPC1', 'PCECC96'] # real GDP, real consumption
fred = data.DataReader(codes, 'fred', start)
fred = fred/1000 # convert billions to trillions
fred.plot()
#%%
# World Bank
from pandas_datareader import wb # World Bank api
var = ['NY.GDP.PCAP.PP.KD'] # GDP per capita
iso = ['USA', 'FRA', 'JPN', 'CHN', 'IND', 'BRA', 'MEX'] # country codes
year = 2013
wbdf = wb.download(indicator=var, country=iso, start=year, end=year)
#%%
wbdf = wbdf.reset_index(level='year', drop=True)
wbdf.plot(kind='barh')
#%%
# Fama-French equity returns
from pandas_datareader import data # Package to access FF
ff = data.DataReader('F-F_Research_Data_factors', 'famafrench')[0]
ff.columns = ['xsm', 'smb', 'hml', 'rf'] # rename variables
#%%
"""
Review
"""
data = {'EG.ELC.ACCS.ZS': [53.2, 47.3, 85.4, 22.1], # access to elec (%)
'IT.CEL.SETS.P2': [153.8, 95.0, 130.6, 74.8], # cell contracts per 100
'IT.NET.USER.P2': [11.5, 12.9, 41.0, 13.5], # internet access (%)
'Country': ['Botswana', 'Namibia', 'South Africa', 'Zambia']}
af = pd.DataFrame(data)
| {
"content_hash": "4bc1b9fed45b09f95a585612d0b7ccc3",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 79,
"avg_line_length": 29.174698795180724,
"alnum_prop": 0.6279165806318397,
"repo_name": "NYUDataBootcamp/Materials",
"id": "32aa7aee4f24dfe5d002ba43a7d1d9a03e4b4ba3",
"size": "4843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Code/Python/bootcamp_pandas-input.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "5415004"
},
{
"name": "Python",
"bytes": "83482"
},
{
"name": "TeX",
"bytes": "97236"
}
],
"symlink_target": ""
} |
from django.utils.translation import gettext_lazy as _
from rest_framework import exceptions, status
class TokenError(Exception):
pass
class TokenBackendError(Exception):
pass
class DetailDictMixin:
def __init__(self, detail=None, code=None):
"""
Builds a detail dictionary for the error to give more information to API
users.
"""
detail_dict = {'detail': self.default_detail, 'code': self.default_code}
if isinstance(detail, dict):
detail_dict.update(detail)
elif detail is not None:
detail_dict['detail'] = detail
if code is not None:
detail_dict['code'] = code
super().__init__(detail_dict)
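# Illustrative (not from the original source): raising InvalidToken() with the
# defaults defined below produces a response body like
#   {"detail": "Token is invalid or expired", "code": "token_not_valid"}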
class AuthenticationFailed(DetailDictMixin, exceptions.AuthenticationFailed):
pass
class InvalidToken(AuthenticationFailed):
status_code = status.HTTP_401_UNAUTHORIZED
default_detail = _('Token is invalid or expired')
default_code = 'token_not_valid'
| {
"content_hash": "3f0b944ed3fd660fa7445eb27e26f905",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 80,
"avg_line_length": 25.487179487179485,
"alnum_prop": 0.6599597585513078,
"repo_name": "davesque/django-rest-framework-simplejwt",
"id": "530d3a8e052bef78e20ba962cd2b1604c3b2b44e",
"size": "994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest_framework_simplejwt/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1311"
},
{
"name": "Python",
"bytes": "122235"
},
{
"name": "Shell",
"bytes": "599"
}
],
"symlink_target": ""
} |
import os
import tempfile
from django import VERSION as DJANGO_VERSION
from testapp.helpers import get_middleware
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ")0-t%mc5y1^fn8e7i**^^v166@5iu(&-2%9#kxud0&4ap#k!_k"
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django_prometheus",
"testapp",
)
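# django-prometheus documents that PrometheusBeforeMiddleware should come
# first and PrometheusAfterMiddleware last so request metrics cover every
# other middleware; get_middleware() (a test helper) is assumed to place the
# project defaults between the two.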
MIDDLEWARE = get_middleware(
"django_prometheus.middleware.PrometheusBeforeMiddleware",
"django_prometheus.middleware.PrometheusAfterMiddleware",
)
ROOT_URLCONF = "testapp.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "testapp.wsgi.application"
DATABASES = {
"default": {
"ENGINE": "django_prometheus.db.backends.sqlite3",
"NAME": "db.sqlite3",
},
# Comment this to not test django_prometheus.db.backends.postgres.
"postgresql": {
"ENGINE": "django_prometheus.db.backends.postgresql",
"NAME": "postgres",
"USER": "postgres",
"PASSWORD": "",
"HOST": "localhost",
"PORT": "5432",
},
# Comment this to not test django_prometheus.db.backends.postgis.
"postgis": {
"ENGINE": "django_prometheus.db.backends.postgis",
"NAME": "postgis",
"USER": "postgres",
"PASSWORD": "",
"HOST": "localhost",
"PORT": "5432",
},
# Comment this to not test django_prometheus.db.backends.mysql.
"mysql": {
"ENGINE": "django_prometheus.db.backends.mysql",
"NAME": "django_prometheus_1",
"USER": "root",
"PASSWORD": "",
"HOST": "127.0.0.1",
"PORT": "3306",
},
# The following databases are used by test_db.py only
"test_db_1": {
"ENGINE": "django_prometheus.db.backends.sqlite3",
"NAME": "test_db_1.sqlite3",
},
"test_db_2": {
"ENGINE": "django_prometheus.db.backends.sqlite3",
"NAME": "test_db_2.sqlite3",
},
}
# Caches
_tmp_cache_dir = tempfile.mkdtemp()
CACHES = {
"default": {
"BACKEND": "django_prometheus.cache.backends.memcached.MemcachedCache",
"LOCATION": "localhost:11211",
},
"memcached.MemcachedCache": {
"BACKEND": "django_prometheus.cache.backends.memcached.MemcachedCache",
"LOCATION": "localhost:11211",
},
"filebased": {
"BACKEND": "django_prometheus.cache.backends.filebased.FileBasedCache",
"LOCATION": os.path.join(_tmp_cache_dir, "django_cache"),
},
"locmem": {
"BACKEND": "django_prometheus.cache.backends.locmem.LocMemCache",
"LOCATION": os.path.join(_tmp_cache_dir, "locmem_cache"),
},
"redis": {
"BACKEND": "django_prometheus.cache.backends.redis.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/1",
},
# Fake redis config emulated stopped service
"stopped_redis": {
"BACKEND": "django_prometheus.cache.backends.redis.RedisCache",
"LOCATION": "redis://127.0.0.1:6666/1",
},
"stopped_redis_ignore_exception": {
"BACKEND": "django_prometheus.cache.backends.redis.RedisCache",
"LOCATION": "redis://127.0.0.1:6666/1",
"OPTIONS": {"IGNORE_EXCEPTIONS": True},
},
}
if DJANGO_VERSION >= (3, 2):
CACHES["memcached.PyLibMCCache"] = {
"BACKEND": "django_prometheus.cache.backends.memcached.PyLibMCCache",
"LOCATION": "localhost:11211",
}
CACHES["memcached.PyMemcacheCache"] = {
"BACKEND": "django_prometheus.cache.backends.memcached.PyMemcacheCache",
"LOCATION": "localhost:11211",
}
# Internationalization
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
STATIC_URL = "/static/"
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"handlers": {"console": {"class": "logging.StreamHandler"}},
"root": {"handlers": ["console"], "level": "INFO"},
"loggers": {"django": {"handlers": ["console"], "level": "INFO"}},
}
| {
"content_hash": "5306f001df527cd6170001e82db2b6f0",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 80,
"avg_line_length": 29.150943396226417,
"alnum_prop": 0.6058252427184466,
"repo_name": "korfuri/django-prometheus",
"id": "fde7729fb02794c894d81a446d60052e36bd59eb",
"size": "4635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_prometheus/tests/end2end/testapp/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5116"
},
{
"name": "Python",
"bytes": "81845"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
if __name__ == "__main__":
sys.path.append(os.path.dirname(__file__))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_proj.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "cd15192d3d7974b9400b021f6ab4a30d",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 73,
"avg_line_length": 29.25,
"alnum_prop": 0.7150997150997151,
"repo_name": "luzfcb/django_documentos",
"id": "f9ea198351cda129972a59b7d7351744e2e75c6e",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "601599"
},
{
"name": "HTML",
"bytes": "1991856"
},
{
"name": "Java",
"bytes": "1558738"
},
{
"name": "JavaScript",
"bytes": "3404566"
},
{
"name": "Limbo",
"bytes": "55124"
},
{
"name": "Makefile",
"bytes": "1435"
},
{
"name": "PHP",
"bytes": "2841"
},
{
"name": "Python",
"bytes": "278658"
},
{
"name": "Ruby",
"bytes": "851"
},
{
"name": "Shell",
"bytes": "2017"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "supervise_backend.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| {
"content_hash": "39eb2502dab6474bf5503a66e3680f83",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 81,
"avg_line_length": 37.76190476190476,
"alnum_prop": 0.6242118537200504,
"repo_name": "kaqfa/supervise_backend",
"id": "e254aa05450c6b3b23382aaa9c4f43923451b3e7",
"size": "815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "67773"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
version = None
exec(open('aiodockerpy/version.py').read())
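# aiodockerpy/version.py is expected to define a module-level ``version``
# string; exec() binds it here for the setup() call below.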
with open('./requirements.txt') as reqs_txt:
requirements = list(iter(reqs_txt))
with open('./requirements-test.txt') as test_reqs_txt:
test_requirements = [line for line in test_reqs_txt]
setup(
name="aiodockerpy",
version=version,
description="aiohttp port of docker-py.",
url='https://github.com/tenforce/docker-py-aiohttp',
packages=find_packages(exclude=["tests.*", "tests"]),
install_requires=requirements,
tests_require=test_requirements,
zip_safe=False,
test_suite='tests',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
],
maintainer='Cecile Tonglet',
maintainer_email='[email protected]',
)
| {
"content_hash": "d6171567e8d3b32293f46c0bade47c98",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 61,
"avg_line_length": 31.025,
"alnum_prop": 0.6526994359387591,
"repo_name": "tenforce/docker-py-aiohttp",
"id": "0df5ef92a5efe58cbd5f4c599d329d0a053e7d30",
"size": "1265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18187"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.testbase import TestBase
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-testbase
# USAGE
python email_events_list.py
Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = TestBase(
credential=DefaultAzureCredential(),
subscription_id="subscription-id",
)
response = client.email_events.list(
resource_group_name="contoso-rg",
test_base_account_name="contoso-testBaseAccount",
)
for item in response:
print(item)
# x-ms-original-file: specification/testbase/resource-manager/Microsoft.TestBase/preview/2022-04-01-preview/examples/EmailEventsList.json
if __name__ == "__main__":
main()
| {
"content_hash": "a5b8c981aa7ef630766eca72964f3406",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 137,
"avg_line_length": 32,
"alnum_prop": 0.7196691176470589,
"repo_name": "Azure/azure-sdk-for-python",
"id": "82863b982841ffd4bc770ec02eb903b3e3b8cac8",
"size": "1556",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/testbase/azure-mgmt-testbase/generated_samples/email_events_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from eventlet import timeout
import mock
from oslo_config import cfg
import requests
from mistral.actions import std_actions
from mistral.db.v2 import api as db_api
from mistral.db.v2.sqlalchemy import models
from mistral.engine import policies
from mistral import exceptions as exc
from mistral.lang import parser as spec_parser
from mistral.services import workbooks as wb_service
from mistral.services import workflows as wf_service
from mistral.tests.unit.engine import base
from mistral.workflow import states
from mistral_lib.actions import types
# Use the set_default method to set the value; otherwise, in certain test
# cases the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')
WORKBOOK = """
---
version: '2.0'
name: wb
workflows:
wf1:
type: direct
tasks:
task1:
action: std.echo output="Hi!"
wait-before: 2
wait-after: 5
timeout: 7
retry:
count: 5
delay: 10
break-on: <% $.my_val = 10 %>
"""
WB_WITH_DEFAULTS = """
---
version: '2.0'
name: wb
workflows:
wf1:
type: direct
task-defaults:
wait-before: 2
retry:
count: 2
delay: 1
tasks:
task1:
action: std.echo output="Hi!"
wait-before: 3
wait-after: 5
timeout: 7
"""
WAIT_BEFORE_WB = """
---
version: '2.0'
name: wb
workflows:
wf1:
type: direct
tasks:
task1:
action: std.echo output="Hi!"
wait-before: %d
"""
WAIT_BEFORE_FROM_VAR = """
---
version: '2.0'
name: wb
workflows:
wf1:
type: direct
input:
- wait_before
tasks:
task1:
action: std.echo output="Hi!"
wait-before: <% $.wait_before %>
"""
WAIT_AFTER_WB = """
---
version: '2.0'
name: wb
workflows:
wf1:
type: direct
tasks:
task1:
action: std.echo output="Hi!"
wait-after: %d
"""
WAIT_AFTER_FROM_VAR = """
---
version: '2.0'
name: wb
workflows:
wf1:
type: direct
input:
- wait_after
tasks:
task1:
action: std.echo output="Hi!"
wait-after: <% $.wait_after %>
"""
RETRY_WB = """
---
version: '2.0'
name: wb
workflows:
wf1:
type: direct
tasks:
task1:
action: std.http url="http://some_non-existing_host"
retry:
count: %(count)d
delay: %(delay)d
"""
RETRY_WB_FROM_VAR = """
---
version: '2.0'
name: wb
workflows:
wf1:
type: direct
input:
- count
- delay
tasks:
task1:
action: std.http url="http://some_non-existing_host"
retry:
count: <% $.count %>
delay: <% $.delay %>
"""
TIMEOUT_WB = """
---
version: '2.0'
name: wb
workflows:
wf1:
type: direct
tasks:
task1:
action: std.async_noop
timeout: %d
on-error:
- task2
task2:
action: std.echo output="Hi!"
timeout: 3
"""
TIMEOUT_WB2 = """
---
version: '2.0'
name: wb
workflows:
wf1:
type: direct
tasks:
task1:
action: std.async_noop
timeout: 1
"""
TIMEOUT_FROM_VAR = """
---
version: '2.0'
name: wb
workflows:
wf1:
type: direct
input:
- timeout
tasks:
task1:
action: std.async_noop
timeout: <% $.timeout %>
"""
PAUSE_BEFORE_WB = """
---
version: '2.0'
name: wb
workflows:
wf1:
type: direct
tasks:
task1:
action: std.echo output="Hi!"
pause-before: True
on-success:
- task2
task2:
action: std.echo output="Bye!"
"""
PAUSE_BEFORE_DELAY_WB = """
---
version: '2.0'
name: wb
workflows:
wf1:
type: direct
tasks:
task1:
action: std.echo output="Hi!"
wait-before: 1
pause-before: true
on-success:
- task2
task2:
action: std.echo output="Bye!"
"""
CONCURRENCY_WB = """
---
version: '2.0'
name: wb
workflows:
wf1:
type: direct
tasks:
task1:
action: std.echo output="Hi!"
concurrency: %d
"""
CONCURRENCY_WB_FROM_VAR = """
---
version: '2.0'
name: wb
workflows:
wf1:
type: direct
input:
- concurrency
tasks:
task1:
action: std.echo output="Hi!"
concurrency: <% $.concurrency %>
"""
class PoliciesTest(base.EngineTestCase):
def setUp(self):
super(PoliciesTest, self).setUp()
self.wb_spec = spec_parser.get_workbook_spec_from_yaml(WORKBOOK)
self.wf_spec = self.wb_spec.get_workflows()['wf1']
self.task_spec = self.wf_spec.get_tasks()['task1']
def test_build_policies(self):
arr = policies.build_policies(
self.task_spec.get_policies(),
self.wf_spec
)
self.assertEqual(4, len(arr))
p = self._assert_single_item(arr, delay=2)
self.assertIsInstance(p, policies.WaitBeforePolicy)
p = self._assert_single_item(arr, delay=5)
self.assertIsInstance(p, policies.WaitAfterPolicy)
p = self._assert_single_item(arr, delay=10)
self.assertIsInstance(p, policies.RetryPolicy)
self.assertEqual(5, p.count)
self.assertEqual('<% $.my_val = 10 %>', p._break_on_clause)
p = self._assert_single_item(arr, delay=7)
self.assertIsInstance(p, policies.TimeoutPolicy)
def test_task_policy_class(self):
policy = policies.base.TaskPolicy()
policy._schema = {
"properties": {
"delay": {"type": "integer"}
}
}
wf_ex = models.WorkflowExecution(
id='1-2-3-4',
context={},
input={}
)
task_ex = models.TaskExecution(in_context={'int_var': 5})
task_ex.workflow_execution = wf_ex
policy.delay = "<% $.int_var %>"
# Validation is ok.
policy.before_task_start(task_ex, None)
policy.delay = "some_string"
# Validation is failing now.
exception = self.assertRaises(
exc.InvalidModelException,
policy.before_task_start,
task_ex,
None
)
self.assertIn("Invalid data type in TaskPolicy", str(exception))
def test_build_policies_with_workflow_defaults(self):
wb_spec = spec_parser.get_workbook_spec_from_yaml(WB_WITH_DEFAULTS)
wf_spec = wb_spec.get_workflows()['wf1']
task_spec = wf_spec.get_tasks()['task1']
arr = policies.build_policies(task_spec.get_policies(), wf_spec)
self.assertEqual(4, len(arr))
p = self._assert_single_item(arr, delay=3)
self.assertIsInstance(p, policies.WaitBeforePolicy)
p = self._assert_single_item(arr, delay=5)
self.assertIsInstance(p, policies.WaitAfterPolicy)
p = self._assert_single_item(arr, delay=1)
self.assertIsInstance(p, policies.RetryPolicy)
self.assertEqual(2, p.count)
p = self._assert_single_item(arr, delay=7)
self.assertIsInstance(p, policies.TimeoutPolicy)
def test_wait_before_policy(self):
wb_service.create_workbook_v2(WAIT_BEFORE_WB % 1)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1')
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(states.RUNNING_DELAYED, task_ex.state)
self.assertDictEqual(
{'wait_before_policy': {'skip': True}},
task_ex.runtime_context
)
self.await_workflow_success(wf_ex.id)
def test_wait_before_policy_zero_seconds(self):
wb_service.create_workbook_v2(WAIT_BEFORE_WB % 0)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1')
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(states.RUNNING, task_ex.state)
self.await_workflow_success(wf_ex.id)
def test_wait_before_policy_negative_number(self):
self.assertRaises(
exc.InvalidModelException,
wb_service.create_workbook_v2,
WAIT_BEFORE_WB % -1
)
def test_wait_before_policy_from_var(self):
wb_service.create_workbook_v2(WAIT_BEFORE_FROM_VAR)
# Start workflow.
wf_ex = self.engine.start_workflow(
'wb.wf1',
wf_input={'wait_before': 1}
)
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(states.RUNNING_DELAYED, task_ex.state)
self.await_workflow_success(wf_ex.id)
def test_wait_before_policy_from_var_zero_seconds(self):
wb_service.create_workbook_v2(WAIT_BEFORE_FROM_VAR)
# Start workflow.
wf_ex = self.engine.start_workflow(
'wb.wf1',
wf_input={'wait_before': 0}
)
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
# If wait_before is 0 start the task immediately without delay.
self.assertEqual(states.RUNNING, task_ex.state)
self.await_workflow_success(wf_ex.id)
def test_wait_before_policy_from_var_negative_number(self):
wb_service.create_workbook_v2(WAIT_BEFORE_FROM_VAR)
# Start workflow.
wf_ex = self.engine.start_workflow(
'wb.wf1',
wf_input={'wait_before': -1}
)
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
# If wait_before value is less than 0 the task should fail with
# InvalidModelException.
self.assertEqual(states.ERROR, task_ex.state)
self.await_workflow_error(wf_ex.id)
def test_wait_before_policy_two_tasks(self):
wf_text = """---
version: '2.0'
wf:
tasks:
a:
wait-before: 2
on-success: b
b:
action: std.noop
"""
wf_service.create_workflows(wf_text)
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
self.assertEqual(2, len(task_execs))
self._assert_multiple_items(task_execs, 2, state=states.SUCCESS)
def test_wait_after_policy(self):
wb_service.create_workbook_v2(WAIT_AFTER_WB % 2)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1')
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(states.RUNNING, task_ex.state)
self.assertDictEqual({}, task_ex.runtime_context)
self.await_task_delayed(task_ex.id, delay=0.5)
self.await_task_success(task_ex.id)
def test_wait_after_policy_zero_seconds(self):
wb_service.create_workbook_v2(WAIT_AFTER_WB % 0)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1')
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(states.RUNNING, task_ex.state)
self.assertDictEqual({}, task_ex.runtime_context)
try:
self.await_task_delayed(task_ex.id, delay=0.5)
except AssertionError:
# There was no delay as expected.
pass
else:
self.fail("Shouldn't happen")
self.await_task_success(task_ex.id)
def test_wait_after_policy_negative_number(self):
self.assertRaises(
exc.InvalidModelException,
wb_service.create_workbook_v2,
WAIT_AFTER_WB % -1
)
def test_wait_after_policy_from_var(self):
wb_service.create_workbook_v2(WAIT_AFTER_FROM_VAR)
# Start workflow.
wf_ex = self.engine.start_workflow(
'wb.wf1',
wf_input={'wait_after': 2}
)
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(states.RUNNING, task_ex.state)
self.assertDictEqual({}, task_ex.runtime_context)
self.await_task_delayed(task_ex.id, delay=0.5)
self.await_task_success(task_ex.id)
def test_wait_after_policy_from_var_zero_seconds(self):
wb_service.create_workbook_v2(WAIT_AFTER_FROM_VAR)
# Start workflow.
wf_ex = self.engine.start_workflow(
'wb.wf1',
wf_input={'wait_after': 0}
)
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(states.RUNNING, task_ex.state)
self.assertDictEqual({}, task_ex.runtime_context)
try:
self.await_task_delayed(task_ex.id, delay=0.5)
except AssertionError:
# There was no delay as expected.
pass
else:
self.fail("Shouldn't happen")
self.await_task_success(task_ex.id)
def test_wait_after_policy_from_var_negative_number(self):
wb_service.create_workbook_v2(WAIT_AFTER_FROM_VAR)
# Start workflow.
wf_ex = self.engine.start_workflow(
'wb.wf1',
wf_input={'wait_after': -1}
)
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
# If wait_after value is less than 0 the task should fail with
# InvalidModelException.
self.assertEqual(states.ERROR, task_ex.state)
self.await_workflow_error(wf_ex.id)
self.assertDictEqual({}, task_ex.runtime_context)
@mock.patch.object(
requests,
'request',
mock.MagicMock(side_effect=Exception())
)
def test_retry_policy(self):
wb_service.create_workbook_v2(RETRY_WB % {'count': 3, 'delay': 1})
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1')
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(states.RUNNING, task_ex.state)
self.assertDictEqual({}, task_ex.runtime_context)
self.await_task_delayed(task_ex.id, delay=0.5)
self.await_task_error(task_ex.id)
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(
3,
task_ex.runtime_context["retry_task_policy"]["retry_no"]
)
@mock.patch.object(
requests,
'request',
mock.MagicMock(side_effect=Exception())
)
def test_retry_policy_zero_count(self):
wb_service.create_workbook_v2(RETRY_WB % {'count': 0, 'delay': 1})
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1')
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(states.RUNNING, task_ex.state)
self.assertDictEqual({}, task_ex.runtime_context)
try:
self.await_task_delayed(task_ex.id, delay=0.5)
except AssertionError:
# There were no scheduled tasks as expected.
pass
else:
self.fail("Shouldn't happen")
self.await_task_error(task_ex.id)
self.await_workflow_error(wf_ex.id)
self.assertNotIn("retry_task_policy", task_ex.runtime_context)
@mock.patch.object(
requests,
'request',
mock.MagicMock(side_effect=Exception())
)
def test_retry_policy_negative_numbers(self):
# Negative delay is not accepted.
self.assertRaises(
exc.InvalidModelException,
wb_service.create_workbook_v2,
RETRY_WB % {'count': 1, 'delay': -1}
)
# Negative count is not accepted.
self.assertRaises(
exc.InvalidModelException,
wb_service.create_workbook_v2,
RETRY_WB % {'count': -1, 'delay': 1}
)
@mock.patch.object(
requests,
'request',
mock.MagicMock(side_effect=Exception())
)
def test_retry_policy_from_var(self):
wb_service.create_workbook_v2(RETRY_WB_FROM_VAR)
# Start workflow.
wf_ex = self.engine.start_workflow(
'wb.wf1',
wf_input={'count': 3, 'delay': 1}
)
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(states.RUNNING, task_ex.state)
self.assertDictEqual({}, task_ex.runtime_context)
self.await_task_delayed(task_ex.id, delay=0.5)
self.await_task_error(task_ex.id)
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(
3,
task_ex.runtime_context["retry_task_policy"]["retry_no"]
)
@mock.patch.object(
requests,
'request',
mock.MagicMock(side_effect=Exception())
)
def test_retry_policy_from_var_zero_iterations(self):
wb_service.create_workbook_v2(RETRY_WB_FROM_VAR)
# Start workflow.
wf_ex = self.engine.start_workflow(
'wb.wf1',
wf_input={'count': 0, 'delay': 1}
)
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(states.RUNNING, task_ex.state)
self.assertDictEqual({}, task_ex.runtime_context)
try:
self.await_task_delayed(task_ex.id, delay=0.5)
except AssertionError:
# There were no scheduled tasks as expected.
pass
else:
self.fail("Shouldn't happen")
self.await_task_error(task_ex.id)
self.await_workflow_error(wf_ex.id)
self.assertNotIn("retry_task_policy", task_ex.runtime_context)
@mock.patch.object(
requests,
'request',
mock.MagicMock(side_effect=Exception())
)
def test_retry_policy_from_var_negative_numbers(self):
wb_service.create_workbook_v2(RETRY_WB_FROM_VAR)
# Start workflow with negative count.
wf_ex = self.engine.start_workflow(
'wb.wf1',
wf_input={'count': -1, 'delay': 1}
)
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(states.ERROR, task_ex.state)
self.assertDictEqual({}, task_ex.runtime_context)
self.await_workflow_error(wf_ex.id)
# Start workflow with negative delay.
wf_ex = self.engine.start_workflow(
'wb.wf1',
wf_input={'count': 1, 'delay': -1}
)
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(states.ERROR, task_ex.state)
self.assertDictEqual({}, task_ex.runtime_context)
self.await_workflow_error(wf_ex.id)
def test_retry_policy_never_happen(self):
retry_wb = """---
version: '2.0'
name: wb
workflows:
wf1:
tasks:
task1:
action: std.echo output="hello"
retry:
count: 3
delay: 1
"""
wb_service.create_workbook_v2(retry_wb)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1')
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.await_task_success(task_ex.id)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(
{},
task_ex.runtime_context["retry_task_policy"]
)
def test_retry_policy_break_on(self):
retry_wb = """---
version: '2.0'
name: wb
workflows:
wf1:
input:
- var: 4
tasks:
task1:
action: std.fail
retry:
count: 3
delay: 1
break-on: <% $.var >= 3 %>
"""
wb_service.create_workbook_v2(retry_wb)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1')
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.await_task_error(task_ex.id)
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(
{},
task_ex.runtime_context["retry_task_policy"]
)
def test_retry_policy_break_on_not_happened(self):
retry_wb = """---
version: '2.0'
name: wb
workflows:
wf1:
input:
- var: 2
tasks:
task1:
action: std.fail
retry:
count: 3
delay: 1
break-on: <% $.var >= 3 %>
"""
wb_service.create_workbook_v2(retry_wb)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1')
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.await_task_error(task_ex.id)
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(
3,
task_ex.runtime_context['retry_task_policy']['retry_no']
)
@mock.patch.object(
std_actions.EchoAction, 'run', mock.Mock(side_effect=[1, 2, 3, 4])
)
def test_retry_continue_on(self):
retry_wb = """---
version: '2.0'
name: wb
workflows:
wf1:
tasks:
task1:
action: std.echo output="mocked result"
retry:
count: 4
delay: 1
continue-on: <% task(task1).result < 3 %>
"""
wb_service.create_workbook_v2(retry_wb)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1')
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.await_task_success(task_ex.id)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(
2,
task_ex.runtime_context['retry_task_policy']['retry_no']
)
def test_retry_continue_on_not_happened(self):
retry_wb = """---
version: '2.0'
name: wb
workflows:
wf1:
tasks:
task1:
action: std.echo output=4
retry:
count: 4
delay: 1
continue-on: <% task(task1).result <= 3 %>
"""
wb_service.create_workbook_v2(retry_wb)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1')
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.await_task_success(task_ex.id)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(
{},
task_ex.runtime_context['retry_task_policy']
)
def test_retry_policy_one_line(self):
retry_wb = """---
version: '2.0'
name: wb
workflows:
wf1:
type: direct
tasks:
task1:
action: std.fail
retry: count=3 delay=1
"""
wb_service.create_workbook_v2(retry_wb)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1')
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.await_task_error(task_ex.id)
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(
3,
task_ex.runtime_context['retry_task_policy']['retry_no']
)
def test_retry_policy_subworkflow_force_fail(self):
retry_wb = """---
version: '2.0'
name: wb
workflows:
main:
tasks:
task1:
workflow: work
retry:
count: 3
delay: 1
work:
tasks:
do:
action: std.fail
on-error:
- fail
"""
wb_service.create_workbook_v2(retry_wb)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.main')
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.await_task_error(task_ex.id)
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(
3,
task_ex.runtime_context['retry_task_policy']['retry_no']
)
@mock.patch.object(
std_actions.EchoAction,
'run',
mock.Mock(side_effect=[exc.ActionException(), "mocked result"])
)
def test_retry_policy_succeed_after_failure(self):
retry_wb = """---
version: '2.0'
name: wb
workflows:
wf1:
output:
result: <% task(task1).result %>
tasks:
task1:
action: std.echo output="mocked result"
retry:
count: 3
delay: 1
"""
wb_service.create_workbook_v2(retry_wb)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1')
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.await_task_success(task_ex.id)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
wf_output = wf_ex.output
task_ex = wf_ex.task_executions[0]
self.assertDictEqual(
{'retry_no': 1},
task_ex.runtime_context['retry_task_policy']
)
self.assertDictEqual({'result': 'mocked result'}, wf_output)
@mock.patch.object(
std_actions.EchoAction,
'run',
mock.MagicMock(side_effect=[exc.ActionException(), 'value'])
)
def test_retry_policy_succeed_after_failure_with_publish(self):
retry_wf = """---
version: '2.0'
wf1:
output:
result: <% task(task2).result %>
tasks:
task1:
action: std.noop
publish:
key: value
on-success:
- task2
task2:
action: std.echo output=<% $.key %>
retry:
count: 3
delay: 1
"""
wf_service.create_workflows(retry_wf)
wf_ex = self.engine.start_workflow('wf1')
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
wf_output = wf_ex.output
task_execs = wf_ex.task_executions
retry_task = self._assert_single_item(task_execs, name='task2')
self.assertDictEqual(
{'retry_no': 1},
retry_task.runtime_context['retry_task_policy']
)
self.assertDictEqual({'result': 'value'}, wf_output)
def test_timeout_policy(self):
wb_service.create_workbook_v2(TIMEOUT_WB % 2)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1')
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(states.RUNNING, task_ex.state)
self.await_task_error(task_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
self._assert_single_item(task_execs, name='task1')
self.await_workflow_success(wf_ex.id)
def test_timeout_policy_zero_seconds(self):
wb = """---
version: '2.0'
name: wb
workflows:
wf1:
type: direct
tasks:
task1:
action: std.echo output="Hi!"
timeout: 0
"""
wb_service.create_workbook_v2(wb)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1')
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(states.RUNNING, task_ex.state)
self.await_task_success(task_ex.id)
self.await_workflow_success(wf_ex.id)
def test_timeout_policy_negative_number(self):
# Negative timeout is not accepted.
self.assertRaises(
exc.InvalidModelException,
wb_service.create_workbook_v2,
TIMEOUT_WB % -1
)
def test_timeout_policy_success_after_timeout(self):
wb_service.create_workbook_v2(TIMEOUT_WB2)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1')
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(states.RUNNING, task_ex.state)
self.await_task_error(task_ex.id)
self.await_workflow_error(wf_ex.id)
# Wait until timeout exceeds.
self._sleep(1)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
# Make sure that engine did not create extra tasks.
self.assertEqual(1, len(task_execs))
def test_timeout_policy_from_var(self):
wb_service.create_workbook_v2(TIMEOUT_FROM_VAR)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1', wf_input={'timeout': 1})
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(states.RUNNING, task_ex.state)
self.await_task_error(task_ex.id)
self.await_workflow_error(wf_ex.id)
def test_timeout_policy_from_var_zero_seconds(self):
wb = """---
version: '2.0'
name: wb
workflows:
wf1:
type: direct
input:
- timeout
tasks:
task1:
action: std.echo output="Hi!"
timeout: <% $.timeout %>
"""
wb_service.create_workbook_v2(wb)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1', wf_input={'timeout': 0})
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(states.RUNNING, task_ex.state)
self.await_task_success(task_ex.id)
self.await_workflow_success(wf_ex.id)
def test_timeout_policy_from_var_negative_number(self):
wb_service.create_workbook_v2(TIMEOUT_FROM_VAR)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1', wf_input={'timeout': -1})
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(states.ERROR, task_ex.state)
self.await_workflow_error(wf_ex.id)
def test_action_timeout(self):
wf_text = """---
version: '2.0'
wf1:
tasks:
task1:
action: std.sleep seconds=10
timeout: 2
"""
wf_service.create_workflows(wf_text)
wf_ex = self.engine.start_workflow('wf1')
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
action_ex = task_ex.action_executions[0]
with timeout.Timeout(8):
self.await_workflow_error(wf_ex.id)
self.await_task_error(task_ex.id)
self.await_action_error(action_ex.id)
def test_pause_before_policy(self):
wb_service.create_workbook_v2(PAUSE_BEFORE_WB)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1')
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
task_ex = self._assert_single_item(task_execs, name='task1')
self.assertEqual(states.IDLE, task_ex.state)
self.await_workflow_paused(wf_ex.id)
self._sleep(1)
self.engine.resume_workflow(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
self._assert_single_item(task_execs, name='task1')
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
task_ex = self._assert_single_item(task_execs, name='task1')
next_task_ex = self._assert_single_item(task_execs, name='task2')
self.assertEqual(states.SUCCESS, task_ex.state)
self.assertEqual(states.SUCCESS, next_task_ex.state)
def test_pause_before_with_delay_policy(self):
wb_service.create_workbook_v2(PAUSE_BEFORE_DELAY_WB)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1')
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
task_ex = self._assert_single_item(task_execs, name='task1')
self.assertEqual(states.IDLE, task_ex.state)
# Verify wf paused by pause-before
self.await_workflow_paused(wf_ex.id)
# Allow wait-before to expire
self._sleep(2)
wf_ex = db_api.get_workflow_execution(wf_ex.id)
# Verify wf still paused (wait-before didn't reactivate)
self.await_workflow_paused(wf_ex.id)
task_ex = db_api.get_task_execution(task_ex.id)
self.assertEqual(states.IDLE, task_ex.state)
self.engine.resume_workflow(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
self._assert_single_item(task_execs, name='task1')
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
task_ex = self._assert_single_item(task_execs, name='task1')
next_task_ex = self._assert_single_item(task_execs, name='task2')
self.assertEqual(states.SUCCESS, task_ex.state)
self.assertEqual(states.SUCCESS, next_task_ex.state)
def test_concurrency_is_in_runtime_context(self):
wb_service.create_workbook_v2(CONCURRENCY_WB % 4)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1')
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
task_ex = self._assert_single_item(task_execs, name='task1')
self.assertEqual(states.SUCCESS, task_ex.state)
self.assertEqual(4, task_ex.runtime_context['concurrency'])
def test_concurrency_is_in_runtime_context_zero_value(self):
wb_service.create_workbook_v2(CONCURRENCY_WB % 0)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1')
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
task_ex = self._assert_single_item(task_execs, name='task1')
self.assertEqual(states.SUCCESS, task_ex.state)
self.assertNotIn('concurrency', task_ex.runtime_context)
def test_concurrency_is_in_runtime_context_negative_number(self):
# Negative concurrency value is not accepted.
self.assertRaises(
exc.InvalidModelException,
wb_service.create_workbook_v2,
CONCURRENCY_WB % -1
)
def test_concurrency_is_in_runtime_context_from_var(self):
wb_service.create_workbook_v2(CONCURRENCY_WB_FROM_VAR)
# Start workflow.
wf_ex = self.engine.start_workflow(
'wb.wf1',
wf_input={'concurrency': 4}
)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
task_ex = self._assert_single_item(task_execs, name='task1')
self.assertEqual(4, task_ex.runtime_context['concurrency'])
def test_concurrency_is_in_runtime_context_from_var_zero_value(self):
wb_service.create_workbook_v2(CONCURRENCY_WB_FROM_VAR)
# Start workflow.
wf_ex = self.engine.start_workflow(
'wb.wf1',
wf_input={'concurrency': 0}
)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
task_ex = self._assert_single_item(task_execs, name='task1')
self.assertNotIn('concurrency', task_ex.runtime_context)
def test_concurrency_is_in_runtime_context_from_var_negative_number(self):
wb_service.create_workbook_v2(CONCURRENCY_WB_FROM_VAR)
# Start workflow.
wf_ex = self.engine.start_workflow(
'wb.wf1',
wf_input={'concurrency': -1}
)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
self.assertEqual(states.ERROR, task_ex.state)
self.await_workflow_error(wf_ex.id)
def test_wrong_policy_prop_type(self):
wb = """---
version: "2.0"
name: wb
workflows:
wf1:
type: direct
input:
- wait_before
tasks:
task1:
action: std.echo output="Hi!"
wait-before: <% $.wait_before %>
"""
wb_service.create_workbook_v2(wb)
# Start workflow.
wf_ex = self.engine.start_workflow(
'wb.wf1',
wf_input={'wait_before': '1'}
)
self.assertIn(
'Invalid data type in WaitBeforePolicy',
wf_ex.state_info
)
self.assertEqual(states.ERROR, wf_ex.state)
def test_delayed_task_and_correct_finish_workflow(self):
wf_delayed_state = """---
version: "2.0"
wf:
type: direct
tasks:
task1:
action: std.noop
wait-before: 1
task2:
action: std.noop
"""
wf_service.create_workflows(wf_delayed_state)
# Start workflow.
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(2, len(wf_ex.task_executions))
@mock.patch('mistral.actions.std_actions.EchoAction.run')
def test_retry_policy_break_on_with_dict(self, run_method):
run_method.return_value = types.Result(error={'key-1': 15})
wf_retry_break_on_with_dictionary = """---
version: '2.0'
name: wb
workflows:
wf1:
tasks:
fail_task:
action: std.echo output='mock'
retry:
count: 3
delay: 1
break-on: <% task().result['key-1'] = 15 %>
"""
wb_service.create_workbook_v2(wf_retry_break_on_with_dictionary)
# Start workflow.
wf_ex = self.engine.start_workflow('wb.wf1')
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
fail_task_ex = wf_ex.task_executions[0]
self.assertEqual(states.ERROR, fail_task_ex.state)
self.assertEqual(
{},
fail_task_ex.runtime_context["retry_task_policy"]
)
| {
"content_hash": "42c92060716b26271fc60c54703e6b60",
"timestamp": "",
"source": "github",
"line_count": 1730,
"max_line_length": 78,
"avg_line_length": 26.069364161849713,
"alnum_prop": 0.5584922394678492,
"repo_name": "StackStorm/mistral",
"id": "c1135251207c5655e4f0da80e21320dc8460aabe",
"size": "45705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mistral/tests/unit/engine/test_policies.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1494"
},
{
"name": "Mako",
"bytes": "951"
},
{
"name": "Python",
"bytes": "2249335"
},
{
"name": "Shell",
"bytes": "31326"
}
],
"symlink_target": ""
} |
from app.models.foo import Foo
from app.models.account import Account
__all__ = ['Foo', 'Account']
| {
"content_hash": "6b9e4e4832c47dbfa4679625aad92184",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 38,
"avg_line_length": 25,
"alnum_prop": 0.71,
"repo_name": "alexcc4/flask_restful_backend",
"id": "09fdafce168a99742d1da38589939a87e0205a34",
"size": "148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "25826"
},
{
"name": "Shell",
"bytes": "1228"
}
],
"symlink_target": ""
} |
"""
@name: Modules/Core/Mqtt/_test/test_mqtt.py
@author: D. Brian Kimmel
@contact: [email protected]
@copyright: (c) 2017-2019 by D. Brian Kimmel
@license: MIT License
@note: Created on Apr 26, 2017
@summary: Test
Passed all 11 tests - DBK - 2019-08-15
"""
__updated__ = '2020-01-05'
# Import system type stuff
from twisted.trial import unittest
from ruamel.yaml import YAML
# Import PyMh files
from _test.testing_mixin import SetupPyHouseObj
from Modules.Core.Mqtt import mqtt
from Modules.Core.Mqtt.mqtt import Api as mqttApi, MqttInformation
from Modules.Core.Utilities import json_tools
from Modules.Computer.computer import ComputerInformation
from Modules.House.Lighting.controllers import ControllerInformation
from Modules.House.Schedule.schedule import ScheduleLightingInformation
from Modules.Core.Utilities.debug_tools import FormatBytes, PrettyFormatAny
DICT = {'one': 1, "Two": 'tew'}
LIGHTING_MSG = {
'Active': True,
'BaudRate': 19200,
'ByteSize': 8,
'Comment': 'Mobile Version',
'DateTime': '2019-01-27 17:23:57.988185',
'DevCat': 288,
'DeviceFamily': 'Insteon',
'DeviceSubType': 'Controller',
'DeviceType': 'Lighting',
'DsrDtr': False,
'EngineVersion': 0,
'FirmwareVersion': 0,
'GroupList': None,
'GroupNumber': 0,
'Address': '00.00.00',
'InterfaceType': 'Serial',
'Key': 3,
'LastUpdate': '2019-01-11 16:38:20.788302',
'LasuUsed': None,
'Links': {},
'Name': 'PLM_3',
'Node': None,
'Parity': 'N',
'Port': '/dev/ttyUSB0',
'ProductKey': 0,
'Ret': None,
'RoomCoords': {'X_Easting': 0.0, 'Y_Northing': 0.0, 'Z_Height': 0.0},
'RoomName': 'Mobile',
'RoomUUID': 'c894ef92-b1e5-11e6-8a14-74da3859e09a',
'RtsCts': False,
'Sender': 'Laptop-3',
'StopBits': 1.0,
'Timeout': 1.0,
'UUID': 'c1490758-092e-3333-bffa-b827eb189eb4',
'XonXoff': False
}
DATE_TIME = "2017-03-11 10:45:02.464763"
SENDER = "Laptop-3"
MSG = "{ \
'Active': True, \
'Comment': '', \
'ConnectionAddr_IPv4': [], \
'ConnectionAddr_IPv6': [\
['::1'], \
['fe80::72b7:3dcc:f8c8:41ba%eth0'], \
['fe80::83cd:6fcd:6c62:638d%wlan0']\
], \
'ControllerCount': 1, \
'ControllerTypes': ['Insteon'], \
'DateTime': '2019-01-27 14:07:50.633413', \
'Key': 2, \
'LastUpdate': '2019-01-27 12:18:28.041302', \
'Name': 'pi-01-pp', \
'NodeId': None, \
'NodeInterfaces': None, \
'NodeRole': 0, \
'Sender': 'pi-01-pp', \
'UUID': 'd8ec093e-e4a8-11e6-b6ac-74da3859e09a' \
}"
TEST_YAML = """\
Mqtt:
Brokers:
- Name: Test Broker 1
Comment: Primary
Class: Local
Host:
Name: mqtt-ct
Port: 1883
Access:
UserName: pyhouse
Password: pyhouse
Will:
Topic: LWT Topic
Message: Going offline
Qos: 1
Retain: false
"""
class SetupMixin(object):
def setUp(self):
self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj()
self.m_api = mqttApi(self.m_pyhouse_obj, self)
l_yaml = YAML()
self.m_test_config = l_yaml.load(TEST_YAML)
self.m_local_config = mqtt.LocalConfig(self.m_pyhouse_obj)
def jsonPair(self, p_json, p_key):
""" Extract key, value from json
"""
l_json = json_tools.decode_json_unicode(p_json)
try:
l_val = l_json[p_key]
except (KeyError, ValueError) as e_err:
l_val = 'ERRor on JsonPair for key "{}" {} {}'.format(p_key, e_err, l_json)
print(l_val)
return l_val
class A0(unittest.TestCase):
def test_00_Print(self):
print('Id: test_mqtt')
_w = FormatBytes('123')
_x = PrettyFormatAny.form('_test', 'title') # so it is defined when printing is cleaned up.
class A1_Setup(SetupMixin, unittest.TestCase):
"""Test that we have set up properly for the rest of the testing classes.
"""
def setUp(self):
SetupMixin.setUp(self)
def test_01_Build(self):
""" The basic read info as set up
"""
# print(PrettyFormatAny.form(self.m_pyhouse_obj, 'A1-01-A - PyHouse'))
# print(PrettyFormatAny.form(self.m_pyhouse_obj.Core, 'A1-01-B - Computer'))
# print(PrettyFormatAny.form(self.m_pyhouse_obj.Core.Mqtt, 'A1-01-C - Mqtt'))
self.assertIsInstance(self.m_pyhouse_obj.Core.Mqtt, MqttInformation)
class C1_YamlRead(SetupMixin, unittest.TestCase):
""" Read the YAML config files.
"""
def setUp(self):
SetupMixin.setUp(self)
def test_01_Build(self):
""" The basic read info as set up
"""
# print(PrettyFormatAny.form(self.m_pyhouse_obj, 'C1-01-A - PyHouse'))
# print(PrettyFormatAny.form(self.m_pyhouse_obj.Computer, 'C1-01-A - Computer'))
# print(PrettyFormatAny.form(self.m_pyhouse_obj.Core.Mqtt, 'C1-01-B - Mqtt'))
self.assertIsInstance(self.m_pyhouse_obj.Computer, ComputerInformation)
self.assertIsInstance(self.m_pyhouse_obj.Core.Mqtt, MqttInformation)
def test_02_Broker0(self):
""" Read the rooms.yaml config file
"""
l_yaml = self.m_test_config
# print('C1-02-A - Yaml: ', l_yaml)
l_mqtt = l_yaml['Mqtt']
# print('C1-02-B - Mqtt: ', l_mqtt)
l_brokers = l_mqtt['Brokers']
# print('C1-02-C - Brokers: ', l_brokers)
l_broker = l_brokers[0]
# print('C1-02-D - Broker: ', l_broker)
l_brk = self.m_local_config._extract_one_broker(l_broker, None)
print(PrettyFormatAny.form(l_brk, 'C1-02- L - Broker'))
print(PrettyFormatAny.form(l_brk.Access, 'C1-02-M - Access'))
print(PrettyFormatAny.form(l_brk.Host, 'C1-02-N - Host'))
print(PrettyFormatAny.form(l_brk.Will, 'C1-02-O - Will'))
self.assertEqual(l_brk.Name, 'Test Broker 1')
self.assertEqual(l_brk.Comment, 'Primary')
class F1_Form(SetupMixin, unittest.TestCase):
def setUp(self):
SetupMixin.setUp(self)
self.m_pyhouse_obj.Core.Mqtt.Prefix = "pyhouse/test_house/"
def test_01_Topic(self):
""" Test topic.
"""
_l_topic = mqtt._make_topic(self.m_pyhouse_obj, 'Test')
self.assertEqual(_l_topic, "pyhouse/test_house/Test")
def test_02_Topic(self):
_l_topic = mqtt._make_topic(self.m_pyhouse_obj, 'abc/def/ghi')
# print('B1-02-A - {} {}'.format(FormatBytes(l_topic), l_topic))
def test_03_Msg(self):
_l_msg = mqtt._make_message(self.m_pyhouse_obj, self.m_pyhouse_obj.House)
# print('B1-03-A - {}; {}'.format(FormatBytes(l_msg)[:300], l_msg))
def test_04_Msg(self):
_l_msg = mqtt._make_message(self.m_pyhouse_obj, DICT)
# print('B1-04-A - {}; {}'.format(FormatBytes(l_msg)[:30], l_msg))
def test_05_Message(self):
""" No payload (not too useful)
"""
l_message = mqtt._make_message(self.m_pyhouse_obj)
# print(PrettyFormatAny.form(l_message, 'B1-05-A - Bare Message', 80))
self.assertEqual(self.jsonPair(l_message, 'Sender'), self.m_pyhouse_obj.Computer.Name)
self.assertSubstring('DateTime', l_message)
def test_06_MessageObj(self):
""" Add an object.
"""
l_data = ScheduleLightingInformation()
l_data.Name = 'Mqtt Controller Object'
l_data.RoomName = 'Living Room'
l_data.Comment = 'The formal Living Room.'
l_message = mqtt._make_message(self.m_pyhouse_obj, l_data)
# print(PrettyFormatAny.form(l_message, 'C2-03-A - Message', 80))
self.assertEqual(self.jsonPair(l_message, 'Sender'), self.m_pyhouse_obj.Computer.Name)
self.assertSubstring('DateTime', l_message)
self.assertEqual(self.jsonPair(l_message, 'Name'), l_data.Name)
def test_07_MessageObj(self):
""" Add an object.
"""
l_data = ControllerInformation()
l_data.Name = 'Mqtt Schedule Object'
l_data.LightName = 'Test Light'
l_data.RoomName = 'Living Room'
l_data.Comment = 'The formal Living Room.'
l_message = mqtt._make_message(self.m_pyhouse_obj, l_data)
# print(PrettyFormatAny.form(l_message, 'C2-04-A - Message', 80))
self.assertEqual(self.jsonPair(l_message, 'Sender'), self.m_pyhouse_obj.Computer.Name)
self.assertSubstring('DateTime', l_message)
self.assertEqual(self.jsonPair(l_message, 'Name'), l_data.Name)
# ## END DBK
| {
"content_hash": "f7d749c10aa89b4c3dec8fc5ec4c8c13",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 100,
"avg_line_length": 34.07114624505929,
"alnum_prop": 0.5964037122969837,
"repo_name": "DBrianKimmel/PyHouse",
"id": "15df49c4f22710be4cdcc545cb4cb81c0fa53e37",
"size": "8620",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "Project/src/Modules/Core/Mqtt/_test/test_mqtt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "114778"
},
{
"name": "HTML",
"bytes": "15398"
},
{
"name": "JavaScript",
"bytes": "220171"
},
{
"name": "Python",
"bytes": "1491784"
},
{
"name": "Shell",
"bytes": "2131"
}
],
"symlink_target": ""
} |
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix, csc_matrix
from scipy.sparse.linalg import spsolve
from compmech.logger import *
def remove_null_cols(*args, **kwargs):
"""Remove null rows and cols of a symmetric, square sparse matrix.
Parameters
----------
args : list of sparse matrices
The first matrix in this list will be used to extract the columns
        to be removed from all the other matrices. Use :class:`csr_matrix` to
        obtain better performance.
Returns
-------
    out : list of sparse matrices and used columns
        A list with the reduced matrices in the same order as ``args``, plus
        an array with the indices of the kept (non-null) columns appended as
        the last element.
"""
silent = kwargs.get('silent', False)
args = list(args)
log('Removing null columns...', level=3, silent=silent)
num_cols = args[0].shape[1]
if isinstance(args[0], csr_matrix):
m = args[0]
else:
m = csr_matrix(args[0])
rows, cols = m.nonzero()
used_cols = np.unique(cols)
for i, arg in enumerate(args):
if isinstance(arg, csr_matrix):
m = arg
else:
m = csr_matrix(arg)
m = m[used_cols, :]
#NOTE below, converting to csc_matrix seems to require more time than
# the "slow" column slicing for csr_matrix
m = m[:, used_cols]
args[i] = m
args.append(used_cols)
log('{} columns removed'.format(num_cols - used_cols.shape[0]),
level=4, silent=silent)
log('finished!', level=3, silent=silent)
return args
def solve(a, b, **kwargs):
"""Wrapper for spsolve removing null columns
    The null columns of matrix ``a`` are removed and the reduced linear system
    of equations is solved. The entries of the solution ``x`` corresponding to
    the removed columns are set to zero.
Parameters
----------
a : ndarray or sparse matrix
A square matrix that will be converted to CSR form in the solution.
b : scipy sparse matrix
The matrix or vector representing the right hand side of the equation.
kwargs : keyword arguments, optional
Other arguments directly passed to :func:`spsolve`.
Returns
-------
x : ndarray or sparse matrix
The solution of the sparse linear equation.
If ``b`` is a vector, then ``x`` is a vector of size ``a.shape[1]``.
If ``b`` is a sparse matrix, then ``x`` is a matrix of size
``(a.shape[1], b.shape[1])``.
"""
a, used_cols = remove_null_cols(a)
px = spsolve(a, b[used_cols], **kwargs)
x = np.zeros(b.shape[0], dtype=b.dtype)
x[used_cols] = px
return x
def make_symmetric(m):
"""Returns a new coo_matrix which is symmetric
    Convenient function to populate a sparse matrix symmetrically. Only the
    upper triangle of matrix ``m`` has to be defined.
The rows, cols and values are evaluated such that where ``rows > cols``
the values will be ignored and recreated from the region where ``cols >
rows``, in order to obtain a symmetric matrix.
Parameters
----------
m : array or sparse matrix
A square matrix with the upper triangle defined.
Returns
-------
m_sym : coo_matrix
The symmetric sparse matrix.
"""
if m.shape[0] != m.shape[1]:
raise ValueError('m must be a square matrix')
if not isinstance(m, coo_matrix):
m = coo_matrix(m)
r, c, v = m.row, m.col, m.data
triu = c >= r
r = r[triu]
c = c[triu]
v = v[triu]
pos = r.shape[0]
r = np.concatenate((r, r*0))
c = np.concatenate((c, c*0))
v = np.concatenate((v, v*0))
triu_no_diag = np.where(c > r)[0]
r[triu_no_diag + pos] = c[triu_no_diag]
c[triu_no_diag + pos] = r[triu_no_diag]
v[triu_no_diag + pos] = v[triu_no_diag]
return coo_matrix((v, (r, c)), shape=m.shape, dtype=m.dtype)
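# Example sketch (not part of the original module): build a 2x2 symmetric
# matrix from its upper triangle only and check the result:
#
#     m_upper = coo_matrix(([1.0, 2.0, 3.0], ([0, 0, 1], [0, 1, 1])), shape=(2, 2))
#     m_sym = make_symmetric(m_upper)
#     assert is_symmetric(m_sym)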
def make_skew_symmetric(m):
"""Returns a new coo_matrix which is skew-symmetric
    Convenient function to populate a sparse matrix skew-symmetrically, where
    the off-diagonal elements below the diagonal have the opposite sign of the
    corresponding terms above the diagonal. Only the upper triangle of matrix
    ``m`` has to be defined.
    The rows, cols and values are evaluated such that entries with ``rows >
    cols`` are ignored and recreated, with inverted sign, from the region
    where ``cols > rows``, in order to obtain a skew-symmetric matrix.
Parameters
----------
m : array or sparse matrix
A square matrix with the upper triangle defined.
Returns
-------
    m_skew : coo_matrix
        The skew-symmetric sparse matrix.
"""
if m.shape[0] != m.shape[1]:
raise ValueError('m must be a square matrix')
if not isinstance(m, coo_matrix):
m = coo_matrix(m)
r, c, v = m.row, m.col, m.data
triu = c >= r
r = r[triu]
c = c[triu]
v = v[triu]
pos = r.shape[0]
r = np.concatenate((r, r*0))
c = np.concatenate((c, c*0))
v = np.concatenate((v, v*0))
triu_no_diag = np.where(c > r)[0]
r[triu_no_diag + pos] = c[triu_no_diag]
c[triu_no_diag + pos] = r[triu_no_diag]
v[triu_no_diag + pos] = -v[triu_no_diag]
return coo_matrix((v, (r, c)), shape=m.shape, dtype=m.dtype)
def is_symmetric(m):
"""Check if a sparse matrix is symmetric
Parameters
----------
m : array or sparse matrix
A square matrix.
Returns
-------
check : bool
The check result.
"""
if m.shape[0] != m.shape[1]:
raise ValueError('m must be a square matrix')
if not isinstance(m, coo_matrix):
m = coo_matrix(m)
r, c, v = m.row, m.col, m.data
tril_no_diag = r > c
triu_no_diag = c > r
if triu_no_diag.sum() != tril_no_diag.sum():
return False
rl = r[tril_no_diag]
cl = c[tril_no_diag]
vl = v[tril_no_diag]
ru = r[triu_no_diag]
cu = c[triu_no_diag]
vu = v[triu_no_diag]
sortl = np.lexsort((cl, rl))
sortu = np.lexsort((ru, cu))
vl = vl[sortl]
vu = vu[sortu]
check = np.allclose(vl, vu)
return check
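# Illustrative usage sketch (not part of the original module): a (made up)
# upper triangle is expanded with make_skew_symmetric and the two expansions
# are distinguished with is_symmetric.
if __name__ == '__main__':
    upper = coo_matrix(np.array([[0., 5.],
                                 [0., 0.]]))
    skew = make_skew_symmetric(upper)
    assert np.allclose(skew.toarray(), [[0., 5.],
                                        [-5., 0.]])
    assert is_symmetric(make_symmetric(upper))
    assert not is_symmetric(skew)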
| {
"content_hash": "8757d1b23564963cd7d8578c489c2766",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 78,
"avg_line_length": 28.12785388127854,
"alnum_prop": 0.5988636363636364,
"repo_name": "saullocastro/compmech",
"id": "d06cbcd1e400fa751af066ce12191d51c5f0d08b",
"size": "6160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compmech/sparse.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1630"
},
{
"name": "C",
"bytes": "5738443"
},
{
"name": "FORTRAN",
"bytes": "4813098"
},
{
"name": "Mathematica",
"bytes": "5892830"
},
{
"name": "Python",
"bytes": "4933295"
},
{
"name": "Shell",
"bytes": "1909"
}
],
"symlink_target": ""
} |
import requests
import pprint
import sys
from getpass import getpass
try:
from urllib.parse import urlparse
from urllib.parse import urljoin
except ImportError:
from urlparse import urlparse
from urlparse import urljoin
python_major_version = sys.version_info[0]
python_minor_version = sys.version_info[1]
API_BASE = "https://api.waas.barracudanetworks.com/v2/waasapi/"
proxies = { 'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080', }
proxies = ''
def waas_api_login(email, password):
res = requests.post(urljoin(API_BASE, 'api_login/'), data=dict(email=email, password=password), proxies=proxies)
#res = requests.post(urljoin(API_BASE, 'api_login/'), data=dict(email=email, password=password))
res.raise_for_status()
response_json = res.json()
return response_json['key']
def waas_api_get(token, path):
res = requests.get(urljoin(API_BASE, path), headers={"Content-Type": "application/json", 'auth-api': token}, proxies=proxies)
res.raise_for_status()
return res.json()
def waas_api_post(token, path, mydata):
res = requests.post(urljoin(API_BASE, path), headers={"Content-Type": "application/json", "Accept": "application/json",'auth-api': token}, data=mydata, proxies=proxies)
print(res.json())
res.raise_for_status()
return res.json()
if __name__ == '__main__':
if len(sys.argv) >= 4:
email = sys.argv[1]
password = sys.argv[2]
application_name = sys.argv[3]
else:
if python_major_version == 2:
email = raw_input("Enter user email:")
elif python_major_version == 3:
email = input("Enter user email:")
else:
assert("You are not using Python version 2 nor 3, so this script cannot continue.");
password = getpass("Enter user password:")
if python_major_version == 2:
application_name = raw_input("Enter application name:")
elif python_major_version == 3:
application_name = input("Enter application name:")
else:
assert("You are not using Python version 2 nor 3, so this script cannot continue.");
token = waas_api_login(email, password)
url_allow_deny_list = []
url_allow_deny_list.append('{ "enabled": true, "name": "Exchange-AnonCookie-CVE-2021-26855", "deny_response": "Response Page", "response_page": "default", "action": "Deny and Log", "url_match": "/*", "follow_up_action_time": 1, "host_match": "*", "allow_deny_rule": "string", "redirect_url": "string", "extended_match": "(Header Cookie rco X-AnonResource-Backend=.*\\\\/.*~.*)", "follow_up_action": "None", "priority": 1}')
url_allow_deny_list.append('{ "enabled": true, "name": "Exchange-ResourceCookie-CVE-2021-26855", "deny_response": "Response Page", "response_page": "default", "action": "Deny and Log", "url_match": "/*", "follow_up_action_time": 1, "host_match": "*", "allow_deny_rule": "string", "redirect_url": "string", "extended_match": "(Header Cookie rco X-BEResource=.*\\\\/.*~.*)", "follow_up_action": "None", "priority": 2}')
url_allow_deny_list.append('{ "enabled": true, "name": "themes-CVE-2021-26855", "deny_response": "Response Page", "response_page": "default", "action": "Deny and Log", "url_match": "/owa/auth/Current/themes/resources/*", "follow_up_action_time": 1, "host_match": "*", "allow_deny_rule": "string", "redirect_url": "string", "extended_match": "(Method eq POST) && (Header User-Agent rco \\".*(DuckDuckBot|facebookexternalhit|Baiduspider|Bingbot|Googlebot|Konqueror|Yahoo|YandexBot|antSword).*\\")", "follow_up_action": "None", "priority": 1}')
url_allow_deny_list.append('{ "enabled": true, "name": "ecp-CVE-2021-26855", "deny_response": "Response Page", "response_page": "default", "action": "Deny and Log", "url_match": "/ecp/", "follow_up_action_time": 1, "host_match": "*", "allow_deny_rule": "string", "redirect_url": "string", "extended_match": "(Method eq POST) && (Header User-Agent rco \\".*(ExchangeServicesClient|python-requests).*\\")", "follow_up_action": "None", "priority": 1}')
url_allow_deny_list.append('{ "enabled": true, "name": "aspnetclient-CVE-2021-26855", "deny_response": "Response Page", "response_page": "default", "action": "Deny and Log", "url_match": "/aspnet_client/", "follow_up_action_time": 1, "host_match": "*", "allow_deny_rule": "string", "redirect_url": "string", "extended_match": "(Method eq POST) && (Header User-Agent rco \\".*(antSword|Googlebot|Baiduspider).*\\")", "follow_up_action": "None", "priority": 1}')
url_allow_deny_list.append('{ "enabled": true, "name": "owa-CVE-2021-26855", "deny_response": "Response Page", "response_page": "default", "action": "Deny and Log", "url_match": "/owa/", "follow_up_action_time": 1, "host_match": "*", "allow_deny_rule": "string", "redirect_url": "string", "extended_match": "(Method eq POST) && (Header User-Agent rco \\".*(antSword|Googlebot|Baiduspider).*\\")", "follow_up_action": "None", "priority": 1}')
url_allow_deny_list.append('{ "enabled": true, "name": "owaauth-CVE-2021-26855", "deny_response": "Response Page", "response_page": "default", "action": "Deny and Log", "url_match": "/owa/auth/Current/", "follow_up_action_time": 1, "host_match": "*", "allow_deny_rule": "string", "redirect_url": "string", "extended_match": "(Method eq POST)", "follow_up_action": "None", "priority": 1}')url_allow_deny_list.append('{ "enabled": true, "name": "ecpdefault-CVE-2021-26855", "deny_response": "Response Page", "response_page": "default", "action": "Deny and Log", "url_match": "/ecp/default.flt", "follow_up_action_time": 1, "host_match": "*", "allow_deny_rule": "string", "redirect_url": "string", "extended_match": "(Method eq POST)", "follow_up_action": "None", "priority": 1}')
url_allow_deny_list.append('{ "enabled": true, "name": "ecpcss-CVE-2021-26855", "deny_response": "Response Page", "response_page": "default", "action": "Deny and Log", "url_match": "/ecp/main.css", "follow_up_action_time": 1, "ho st_match": "*", "allow_deny_rule": "string", "redirect_url": "string", "extended_match": "(Method eq POST)", "follow_up_action": "None", "priority": 1}')
# apply url_allow_deny for all applications
#
apps = waas_api_get(token, 'applications')
for app in apps['results']:
if(app['name'] == application_name):
print("Application: {} {}".format(app['name'],app['id']))
for url_allow_deny in url_allow_deny_list:
try:
waas_api_post(token, 'applications/' + str(app['id']) + '/allow_deny/urls/', url_allow_deny)
except requests.exceptions.RequestException as e:
print("If you get an error about a Unique Set, it may mean you already ran this script so check your application in the GUI.")
raise SystemExit(e)
| {
"content_hash": "f9b2d2f05ed828adae91075ae0ad62bd",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 777,
"avg_line_length": 84.51190476190476,
"alnum_prop": 0.6195238766023383,
"repo_name": "barracudanetworks/waf-automation",
"id": "e7c1ad800ee36cadd9cad34c3f87e1540c049740",
"size": "7099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "waf-as-a-service-api/wafaas_mitigate_CVE_2021_26855.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HCL",
"bytes": "7766"
},
{
"name": "HTML",
"bytes": "540"
},
{
"name": "Pascal",
"bytes": "514"
},
{
"name": "Perl",
"bytes": "10764"
},
{
"name": "PowerShell",
"bytes": "116613"
},
{
"name": "Puppet",
"bytes": "23489"
},
{
"name": "Python",
"bytes": "65746"
},
{
"name": "Ruby",
"bytes": "70753"
},
{
"name": "Shell",
"bytes": "6811"
}
],
"symlink_target": ""
} |
"""Nova base exception handling.
Includes decorator for re-raising Nova-type exceptions.
SHOULD include dedicated exception logging.
"""
import functools
import sys
from oslo.config import cfg
import webob.exc
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import safe_utils
LOG = logging.getLogger(__name__)
exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='Make exception message format errors fatal'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
class ConvertedException(webob.exc.WSGIHTTPException):
def __init__(self, code=0, title="", explanation=""):
self.code = code
self.title = title
self.explanation = explanation
super(ConvertedException, self).__init__()
def _cleanse_dict(original):
"""Strip all admin_password, new_pass, rescue_pass keys from a dict."""
    return dict((k, v) for k, v in original.iteritems() if "_pass" not in k)
def wrap_exception(notifier=None, get_notifier=None):
"""This decorator wraps a method to catch any exceptions that may
get thrown. It logs the exception as well as optionally sending
it to the notification system.
"""
def inner(f):
def wrapped(self, context, *args, **kw):
# Don't store self or context in the payload, it now seems to
# contain confidential information.
try:
return f(self, context, *args, **kw)
except Exception as e:
with excutils.save_and_reraise_exception():
if notifier or get_notifier:
payload = dict(exception=e)
call_dict = safe_utils.getcallargs(f, context,
*args, **kw)
cleansed = _cleanse_dict(call_dict)
payload.update({'args': cleansed})
# If f has multiple decorators, they must use
# functools.wraps to ensure the name is
# propagated.
event_type = f.__name__
(notifier or get_notifier()).error(context,
event_type,
payload)
return functools.wraps(f)(wrapped)
return inner
class NovaException(Exception):
"""Base Nova Exception
To correctly use this class, inherit from it and define
a 'msg_fmt' property. That msg_fmt will get printf'd
with the keyword arguments provided to the constructor.
"""
msg_fmt = _("An unknown exception occurred.")
code = 500
headers = {}
safe = False
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
if not message:
try:
message = self.msg_fmt % kwargs
except Exception:
exc_info = sys.exc_info()
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'))
for name, value in kwargs.iteritems():
LOG.error("%s: %s" % (name, value))
if CONF.fatal_exception_format_errors:
raise exc_info[0], exc_info[1], exc_info[2]
else:
# at least get the core message out if something happened
message = self.msg_fmt
super(NovaException, self).__init__(message)
def format_message(self):
# NOTE(mrodden): use the first argument to the python Exception object
# which should be our full NovaException message, (see __init__)
return self.args[0]
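# Illustrative sketch (not part of the original module): how a msg_fmt
# subclass is typically defined and raised. The class name and the id value
# below are hypothetical, introduced only to demonstrate format_message().
if __name__ == '__main__':
    class ExampleResourceNotFound(NovaException):
        msg_fmt = _("Example resource %(resource_id)s could not be found.")
        code = 404

    try:
        raise ExampleResourceNotFound(resource_id='abc-123')
    except ExampleResourceNotFound as exc:
        # msg_fmt is interpolated with the constructor kwargs in __init__
        print(exc.format_message())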
class EncryptionFailure(NovaException):
msg_fmt = _("Failed to encrypt text: %(reason)s")
class DecryptionFailure(NovaException):
msg_fmt = _("Failed to decrypt text: %(reason)s")
class VirtualInterfaceCreateException(NovaException):
msg_fmt = _("Virtual Interface creation failed")
class VirtualInterfaceMacAddressException(NovaException):
msg_fmt = _("Creation of virtual interface with "
"unique mac address failed")
class GlanceConnectionFailed(NovaException):
msg_fmt = _("Connection to glance host %(host)s:%(port)s failed: "
"%(reason)s")
class CinderConnectionFailed(NovaException):
msg_fmt = _("Connection to cinder host failed: %(reason)s")
class Forbidden(NovaException):
ec2_code = 'AuthFailure'
msg_fmt = _("Not authorized.")
code = 403
class AdminRequired(Forbidden):
msg_fmt = _("User does not have admin privileges")
class PolicyNotAuthorized(Forbidden):
msg_fmt = _("Policy doesn't allow %(action)s to be performed.")
class ImageNotActive(NovaException):
# NOTE(jruzicka): IncorrectState is used for volumes only in EC2,
# but it still seems like the most appropriate option.
ec2_code = 'IncorrectState'
msg_fmt = _("Image %(image_id)s is not active.")
class ImageNotAuthorized(NovaException):
msg_fmt = _("Not authorized for image %(image_id)s.")
class Invalid(NovaException):
msg_fmt = _("Unacceptable parameters.")
code = 400
class InvalidBDM(Invalid):
msg_fmt = _("Block Device Mapping is Invalid.")
class InvalidBDMSnapshot(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"failed to get snapshot %(id)s.")
class InvalidBDMVolume(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"failed to get volume %(id)s.")
class InvalidBDMImage(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"failed to get image %(id)s.")
class InvalidBDMBootSequence(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"Boot sequence for the instance "
"and image/block device mapping "
"combination is not valid.")
class InvalidBDMLocalsLimit(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"You specified more local devices than the "
"limit allows")
class InvalidBDMEphemeralSize(InvalidBDM):
msg_fmt = _("Ephemeral disks requested are larger than "
"the instance type allows.")
class InvalidBDMSwapSize(InvalidBDM):
msg_fmt = _("Swap drive requested is larger than instance type allows.")
class InvalidBDMFormat(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"%(details)s")
class InvalidBDMForLegacy(InvalidBDM):
msg_fmt = _("Block Device Mapping cannot "
"be converted to legacy format. ")
class InvalidAttribute(Invalid):
msg_fmt = _("Attribute not supported: %(attr)s")
class ValidationError(Invalid):
msg_fmt = "%(detail)s"
class VolumeUnattached(Invalid):
ec2_code = 'IncorrectState'
msg_fmt = _("Volume %(volume_id)s is not attached to anything")
class VolumeNotCreated(NovaException):
msg_fmt = _("Volume %(volume_id)s did not finish being created"
" even after we waited %(seconds)s seconds or %(attempts)s"
" attempts.")
class InvalidKeypair(Invalid):
ec2_code = 'InvalidKeyPair.Format'
msg_fmt = _("Keypair data is invalid: %(reason)s")
class InvalidRequest(Invalid):
msg_fmt = _("The request is invalid.")
class InvalidInput(Invalid):
msg_fmt = _("Invalid input received: %(reason)s")
class InvalidVolume(Invalid):
ec2_code = 'UnsupportedOperation'
msg_fmt = _("Invalid volume: %(reason)s")
class InvalidVolumeAccessMode(Invalid):
msg_fmt = _("Invalid volume access mode") + ": %(access_mode)s"
class InvalidMetadata(Invalid):
msg_fmt = _("Invalid metadata: %(reason)s")
class InvalidMetadataSize(Invalid):
msg_fmt = _("Invalid metadata size: %(reason)s")
class InvalidPortRange(Invalid):
ec2_code = 'InvalidParameterValue'
msg_fmt = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s")
class InvalidIpProtocol(Invalid):
msg_fmt = _("Invalid IP protocol %(protocol)s.")
class InvalidContentType(Invalid):
msg_fmt = _("Invalid content type %(content_type)s.")
class InvalidCidr(Invalid):
msg_fmt = _("Invalid cidr %(cidr)s.")
class InvalidUnicodeParameter(Invalid):
msg_fmt = _("Invalid Parameter: "
"Unicode is not supported by the current database.")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
ec2_code = 'InvalidParameterValue'
msg_fmt = _("%(err)s")
class InvalidAggregateAction(Invalid):
msg_fmt = _("Cannot perform action '%(action)s' on aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidGroup(Invalid):
msg_fmt = _("Group not valid. Reason: %(reason)s")
class InvalidSortKey(Invalid):
msg_fmt = _("Sort key supplied was not valid.")
class InstanceInvalidState(Invalid):
msg_fmt = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot "
"%(method)s while the instance is in this state.")
class InstanceNotRunning(Invalid):
msg_fmt = _("Instance %(instance_id)s is not running.")
class InstanceNotInRescueMode(Invalid):
msg_fmt = _("Instance %(instance_id)s is not in rescue mode")
class InstanceNotRescuable(Invalid):
msg_fmt = _("Instance %(instance_id)s cannot be rescued: %(reason)s")
class InstanceNotReady(Invalid):
msg_fmt = _("Instance %(instance_id)s is not ready")
class InstanceSuspendFailure(Invalid):
msg_fmt = _("Failed to suspend instance: %(reason)s")
class InstanceResumeFailure(Invalid):
msg_fmt = _("Failed to resume instance: %(reason)s")
class InstancePowerOnFailure(Invalid):
msg_fmt = _("Failed to power on instance: %(reason)s")
class InstancePowerOffFailure(Invalid):
msg_fmt = _("Failed to power off instance: %(reason)s")
class InstanceRebootFailure(Invalid):
msg_fmt = _("Failed to reboot instance: %(reason)s")
class InstanceTerminationFailure(Invalid):
msg_fmt = _("Failed to terminate instance: %(reason)s")
class InstanceDeployFailure(Invalid):
msg_fmt = _("Failed to deploy instance: %(reason)s")
class MultiplePortsNotApplicable(Invalid):
msg_fmt = _("Failed to launch instances: %(reason)s")
class ServiceUnavailable(Invalid):
msg_fmt = _("Service is unavailable at this time.")
class ComputeResourcesUnavailable(ServiceUnavailable):
msg_fmt = _("Insufficient compute resources: %(reason)s.")
class HypervisorUnavailable(NovaException):
msg_fmt = _("Connection to the hypervisor is broken on host: %(host)s")
class ComputeServiceUnavailable(ServiceUnavailable):
msg_fmt = _("Compute service of %(host)s is unavailable at this time.")
class ComputeServiceInUse(NovaException):
msg_fmt = _("Compute service of %(host)s is still in use.")
class UnableToMigrateToSelf(Invalid):
msg_fmt = _("Unable to migrate instance (%(instance_id)s) "
"to current host (%(host)s).")
class InvalidHypervisorType(Invalid):
msg_fmt = _("The supplied hypervisor type of is invalid.")
class DestinationHypervisorTooOld(Invalid):
msg_fmt = _("The instance requires a newer hypervisor version than "
"has been provided.")
class DestinationDiskExists(Invalid):
msg_fmt = _("The supplied disk path (%(path)s) already exists, "
"it is expected not to exist.")
class InvalidDevicePath(Invalid):
msg_fmt = _("The supplied device path (%(path)s) is invalid.")
class DevicePathInUse(Invalid):
msg_fmt = _("The supplied device path (%(path)s) is in use.")
code = 409
class DeviceIsBusy(Invalid):
msg_fmt = _("The supplied device (%(device)s) is busy.")
class InvalidCPUInfo(Invalid):
msg_fmt = _("Unacceptable CPU info: %(reason)s")
class InvalidIpAddressError(Invalid):
msg_fmt = _("%(address)s is not a valid IP v4/6 address.")
class InvalidVLANTag(Invalid):
msg_fmt = _("VLAN tag is not appropriate for the port group "
"%(bridge)s. Expected VLAN tag is %(tag)s, "
"but the one associated with the port group is %(pgroup)s.")
class InvalidVLANPortGroup(Invalid):
msg_fmt = _("vSwitch which contains the port group %(bridge)s is "
"not associated with the desired physical adapter. "
"Expected vSwitch is %(expected)s, but the one associated "
"is %(actual)s.")
class InvalidDiskFormat(Invalid):
msg_fmt = _("Disk format %(disk_format)s is not acceptable")
class InvalidDiskInfo(Invalid):
msg_fmt = _("Disk info file is invalid: %(reason)s")
class DiskInfoReadWriteFail(Invalid):
msg_fmt = _("Failed to read or write disk info file: %(reason)s")
class ImageUnacceptable(Invalid):
msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s")
class InstanceUnacceptable(Invalid):
msg_fmt = _("Instance %(instance_id)s is unacceptable: %(reason)s")
class InvalidEc2Id(Invalid):
msg_fmt = _("Ec2 id %(ec2_id)s is unacceptable.")
class InvalidUUID(Invalid):
msg_fmt = _("Expected a uuid but received %(uuid)s.")
class InvalidID(Invalid):
msg_fmt = _("Invalid ID received %(id)s.")
class ConstraintNotMet(NovaException):
msg_fmt = _("Constraint not met.")
code = 412
class NotFound(NovaException):
msg_fmt = _("Resource could not be found.")
code = 404
class AgentBuildNotFound(NotFound):
msg_fmt = _("No agent-build associated with id %(id)s.")
class AgentBuildExists(NovaException):
msg_fmt = _("Agent-build with hypervisor %(hypervisor)s os %(os)s "
"architecture %(architecture)s exists.")
class VolumeNotFound(NotFound):
ec2_code = 'InvalidVolumeID.NotFound'
msg_fmt = _("Volume %(volume_id)s could not be found.")
class VolumeBDMNotFound(NotFound):
msg_fmt = _("No volume Block Device Mapping with id %(volume_id)s.")
class SnapshotNotFound(NotFound):
ec2_code = 'InvalidSnapshotID.NotFound'
msg_fmt = _("Snapshot %(snapshot_id)s could not be found.")
class DiskNotFound(NotFound):
msg_fmt = _("No disk at %(location)s")
class VolumeDriverNotFound(NotFound):
msg_fmt = _("Could not find a handler for %(driver_type)s volume.")
class InvalidImageRef(Invalid):
msg_fmt = _("Invalid image href %(image_href)s.")
class AutoDiskConfigDisabledByImage(Invalid):
msg_fmt = _("Requested image %(image)s "
"has automatic disk resize disabled.")
class ImageNotFound(NotFound):
msg_fmt = _("Image %(image_id)s could not be found.")
class PreserveEphemeralNotSupported(Invalid):
msg_fmt = _("The current driver does not support "
"preserving ephemeral partitions.")
# NOTE(jruzicka): ImageNotFound is not a valid EC2 error code.
class ImageNotFoundEC2(ImageNotFound):
msg_fmt = _("Image %(image_id)s could not be found. The nova EC2 API "
"assigns image ids dynamically when they are listed for the "
"first time. Have you listed image ids since adding this "
"image?")
class ProjectNotFound(NotFound):
msg_fmt = _("Project %(project_id)s could not be found.")
class StorageRepositoryNotFound(NotFound):
msg_fmt = _("Cannot find SR to read/write VDI.")
class NetworkDuplicated(Invalid):
msg_fmt = _("Network %(network_id)s is duplicated.")
class NetworkInUse(NovaException):
msg_fmt = _("Network %(network_id)s is still in use.")
class NetworkNotCreated(NovaException):
msg_fmt = _("%(req)s is required to create a network.")
class NetworkNotFound(NotFound):
msg_fmt = _("Network %(network_id)s could not be found.")
class PortNotFound(NotFound):
msg_fmt = _("Port id %(port_id)s could not be found.")
class NetworkNotFoundForBridge(NetworkNotFound):
msg_fmt = _("Network could not be found for bridge %(bridge)s")
class NetworkNotFoundForUUID(NetworkNotFound):
msg_fmt = _("Network could not be found for uuid %(uuid)s")
class NetworkNotFoundForCidr(NetworkNotFound):
msg_fmt = _("Network could not be found with cidr %(cidr)s.")
class NetworkNotFoundForInstance(NetworkNotFound):
msg_fmt = _("Network could not be found for instance %(instance_id)s.")
class NoNetworksFound(NotFound):
msg_fmt = _("No networks defined.")
class NoMoreNetworks(NovaException):
msg_fmt = _("No more available networks.")
class NetworkNotFoundForProject(NotFound):
msg_fmt = _("Either network uuid %(network_uuid)s is not present or "
"is not assigned to the project %(project_id)s.")
class NetworkAmbiguous(Invalid):
msg_fmt = _("More than one possible network found. Specify "
"network ID(s) to select which one(s) to connect to,")
class NetworkRequiresSubnet(Invalid):
msg_fmt = _("Network %(network_uuid)s requires a subnet in order to boot"
" instances on.")
class ExternalNetworkAttachForbidden(Forbidden):
msg_fmt = _("It is not allowed to create an interface on "
"external network %(network_uuid)s")
class DatastoreNotFound(NotFound):
msg_fmt = _("Could not find the datastore reference(s) which the VM uses.")
class PortInUse(Invalid):
msg_fmt = _("Port %(port_id)s is still in use.")
class PortRequiresFixedIP(Invalid):
msg_fmt = _("Port %(port_id)s requires a FixedIP in order to be used.")
class PortNotUsable(Invalid):
msg_fmt = _("Port %(port_id)s not usable for instance %(instance)s.")
class PortNotFree(Invalid):
msg_fmt = _("No free port available for instance %(instance)s.")
class FixedIpExists(NovaException):
msg_fmt = _("Fixed ip %(address)s already exists.")
class FixedIpNotFound(NotFound):
msg_fmt = _("No fixed IP associated with id %(id)s.")
class FixedIpNotFoundForAddress(FixedIpNotFound):
msg_fmt = _("Fixed ip not found for address %(address)s.")
class FixedIpNotFoundForInstance(FixedIpNotFound):
msg_fmt = _("Instance %(instance_uuid)s has zero fixed ips.")
class FixedIpNotFoundForNetworkHost(FixedIpNotFound):
msg_fmt = _("Network host %(host)s has zero fixed ips "
"in network %(network_id)s.")
class FixedIpNotFoundForSpecificInstance(FixedIpNotFound):
msg_fmt = _("Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'.")
class FixedIpNotFoundForNetwork(FixedIpNotFound):
msg_fmt = _("Fixed IP address (%(address)s) does not exist in "
"network (%(network_uuid)s).")
class FixedIpAlreadyInUse(NovaException):
msg_fmt = _("Fixed IP address %(address)s is already in use on instance "
"%(instance_uuid)s.")
class FixedIpAssociatedWithMultipleInstances(NovaException):
msg_fmt = _("More than one instance is associated with fixed ip address "
"'%(address)s'.")
class FixedIpInvalid(Invalid):
msg_fmt = _("Fixed IP address %(address)s is invalid.")
class NoMoreFixedIps(NovaException):
ec2_code = 'UnsupportedOperation'
msg_fmt = _("Zero fixed ips available.")
class NoFixedIpsDefined(NotFound):
msg_fmt = _("Zero fixed ips could be found.")
class FloatingIpExists(NovaException):
msg_fmt = _("Floating ip %(address)s already exists.")
class FloatingIpNotFound(NotFound):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Floating ip not found for id %(id)s.")
class FloatingIpDNSExists(Invalid):
msg_fmt = _("The DNS entry %(name)s already exists in domain %(domain)s.")
class FloatingIpNotFoundForAddress(FloatingIpNotFound):
msg_fmt = _("Floating ip not found for address %(address)s.")
class FloatingIpNotFoundForHost(FloatingIpNotFound):
msg_fmt = _("Floating ip not found for host %(host)s.")
class FloatingIpMultipleFoundForAddress(NovaException):
msg_fmt = _("Multiple floating ips are found for address %(address)s.")
class FloatingIpPoolNotFound(NotFound):
msg_fmt = _("Floating ip pool not found.")
safe = True
class NoMoreFloatingIps(FloatingIpNotFound):
msg_fmt = _("Zero floating ips available.")
safe = True
class FloatingIpAssociated(NovaException):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Floating ip %(address)s is associated.")
class FloatingIpNotAssociated(NovaException):
msg_fmt = _("Floating ip %(address)s is not associated.")
class NoFloatingIpsDefined(NotFound):
msg_fmt = _("Zero floating ips exist.")
class NoFloatingIpInterface(NotFound):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Interface %(interface)s not found.")
class CannotDisassociateAutoAssignedFloatingIP(NovaException):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Cannot disassociate auto assigned floating ip")
class KeypairNotFound(NotFound):
ec2_code = 'InvalidKeyPair.NotFound'
msg_fmt = _("Keypair %(name)s not found for user %(user_id)s")
class ServiceNotFound(NotFound):
msg_fmt = _("Service %(service_id)s could not be found.")
class ServiceBinaryExists(NovaException):
msg_fmt = _("Service with host %(host)s binary %(binary)s exists.")
class ServiceTopicExists(NovaException):
msg_fmt = _("Service with host %(host)s topic %(topic)s exists.")
class HostNotFound(NotFound):
msg_fmt = _("Host %(host)s could not be found.")
class ComputeHostNotFound(HostNotFound):
msg_fmt = _("Compute host %(host)s could not be found.")
class HostBinaryNotFound(NotFound):
msg_fmt = _("Could not find binary %(binary)s on host %(host)s.")
class InvalidReservationExpiration(Invalid):
msg_fmt = _("Invalid reservation expiration %(expire)s.")
class InvalidQuotaValue(Invalid):
msg_fmt = _("Change would make usage less than 0 for the following "
"resources: %(unders)s")
class QuotaNotFound(NotFound):
msg_fmt = _("Quota could not be found")
class QuotaExists(NovaException):
msg_fmt = _("Quota exists for project %(project_id)s, "
"resource %(resource)s")
class QuotaResourceUnknown(QuotaNotFound):
msg_fmt = _("Unknown quota resources %(unknown)s.")
class ProjectUserQuotaNotFound(QuotaNotFound):
msg_fmt = _("Quota for user %(user_id)s in project %(project_id)s "
"could not be found.")
class ProjectQuotaNotFound(QuotaNotFound):
msg_fmt = _("Quota for project %(project_id)s could not be found.")
class QuotaClassNotFound(QuotaNotFound):
msg_fmt = _("Quota class %(class_name)s could not be found.")
class QuotaUsageNotFound(QuotaNotFound):
msg_fmt = _("Quota usage for project %(project_id)s could not be found.")
class ReservationNotFound(QuotaNotFound):
msg_fmt = _("Quota reservation %(uuid)s could not be found.")
class OverQuota(NovaException):
msg_fmt = _("Quota exceeded for resources: %(overs)s")
class SecurityGroupNotFound(NotFound):
msg_fmt = _("Security group %(security_group_id)s not found.")
class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
msg_fmt = _("Security group %(security_group_id)s not found "
"for project %(project_id)s.")
class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
msg_fmt = _("Security group with rule %(rule_id)s not found.")
class SecurityGroupExists(Invalid):
ec2_code = 'InvalidGroup.Duplicate'
msg_fmt = _("Security group %(security_group_name)s already exists "
"for project %(project_id)s.")
class SecurityGroupExistsForInstance(Invalid):
msg_fmt = _("Security group %(security_group_id)s is already associated"
" with the instance %(instance_id)s")
class SecurityGroupNotExistsForInstance(Invalid):
msg_fmt = _("Security group %(security_group_id)s is not associated with"
" the instance %(instance_id)s")
class SecurityGroupDefaultRuleNotFound(Invalid):
msg_fmt = _("Security group default rule (%rule_id)s not found.")
class SecurityGroupCannotBeApplied(Invalid):
msg_fmt = _("Network requires port_security_enabled and subnet associated"
" in order to apply security groups.")
class SecurityGroupRuleExists(Invalid):
ec2_code = 'InvalidPermission.Duplicate'
msg_fmt = _("Rule already exists in group: %(rule)s")
class NoUniqueMatch(NovaException):
msg_fmt = _("No Unique Match Found.")
code = 409
class MigrationNotFound(NotFound):
msg_fmt = _("Migration %(migration_id)s could not be found.")
class MigrationNotFoundByStatus(MigrationNotFound):
msg_fmt = _("Migration not found for instance %(instance_id)s "
"with status %(status)s.")
class ConsolePoolNotFound(NotFound):
msg_fmt = _("Console pool %(pool_id)s could not be found.")
class ConsolePoolExists(NovaException):
msg_fmt = _("Console pool with host %(host)s, console_type "
"%(console_type)s and compute_host %(compute_host)s "
"already exists.")
class ConsolePoolNotFoundForHostType(NotFound):
msg_fmt = _("Console pool of type %(console_type)s "
"for compute host %(compute_host)s "
"on proxy host %(host)s not found.")
class ConsoleNotFound(NotFound):
msg_fmt = _("Console %(console_id)s could not be found.")
class ConsoleNotFoundForInstance(ConsoleNotFound):
msg_fmt = _("Console for instance %(instance_uuid)s could not be found.")
class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
msg_fmt = _("Console for instance %(instance_uuid)s "
"in pool %(pool_id)s could not be found.")
class ConsoleTypeInvalid(Invalid):
msg_fmt = _("Invalid console type %(console_type)s")
class ConsoleTypeUnavailable(Invalid):
msg_fmt = _("Unavailable console type %(console_type)s.")
class ConsolePortRangeExhausted(NovaException):
msg_fmt = _("The console port range %(min_port)d-%(max_port)d is "
"exhausted.")
class FlavorNotFound(NotFound):
msg_fmt = _("Flavor %(flavor_id)s could not be found.")
class FlavorNotFoundByName(FlavorNotFound):
msg_fmt = _("Flavor with name %(flavor_name)s could not be found.")
class FlavorAccessNotFound(NotFound):
msg_fmt = _("Flavor access not found for %(flavor_id)s / "
"%(project_id)s combination.")
class CellNotFound(NotFound):
msg_fmt = _("Cell %(cell_name)s doesn't exist.")
class CellExists(NovaException):
msg_fmt = _("Cell with name %(name)s already exists.")
class CellRoutingInconsistency(NovaException):
msg_fmt = _("Inconsistency in cell routing: %(reason)s")
class CellServiceAPIMethodNotFound(NotFound):
msg_fmt = _("Service API method not found: %(detail)s")
class CellTimeout(NotFound):
msg_fmt = _("Timeout waiting for response from cell")
class CellMaxHopCountReached(NovaException):
msg_fmt = _("Cell message has reached maximum hop count: %(hop_count)s")
class NoCellsAvailable(NovaException):
msg_fmt = _("No cells available matching scheduling criteria.")
class CellsUpdateUnsupported(NovaException):
msg_fmt = _("Cannot update cells configuration file.")
class InstanceUnknownCell(NotFound):
msg_fmt = _("Cell is not known for instance %(instance_uuid)s")
class SchedulerHostFilterNotFound(NotFound):
msg_fmt = _("Scheduler Host Filter %(filter_name)s could not be found.")
class FlavorExtraSpecsNotFound(NotFound):
msg_fmt = _("Flavor %(flavor_id)s has no extra specs with "
"key %(extra_specs_key)s.")
class ComputeHostMetricNotFound(NotFound):
msg_fmt = _("Metric %(name)s could not be found on the compute "
"host node %(host)s.%(node)s.")
class FileNotFound(NotFound):
msg_fmt = _("File %(file_path)s could not be found.")
class NoFilesFound(NotFound):
msg_fmt = _("Zero files could be found.")
class SwitchNotFoundForNetworkAdapter(NotFound):
msg_fmt = _("Virtual switch associated with the "
"network adapter %(adapter)s not found.")
class NetworkAdapterNotFound(NotFound):
msg_fmt = _("Network adapter %(adapter)s could not be found.")
class ClassNotFound(NotFound):
msg_fmt = _("Class %(class_name)s could not be found: %(exception)s")
class NotAllowed(NovaException):
msg_fmt = _("Action not allowed.")
class ImageRotationNotAllowed(NovaException):
msg_fmt = _("Rotation is not allowed for snapshots")
class RotationRequiredForBackup(NovaException):
msg_fmt = _("Rotation param is required for backup image_type")
class KeyPairExists(NovaException):
ec2_code = 'InvalidKeyPair.Duplicate'
msg_fmt = _("Key pair '%(key_name)s' already exists.")
class InstanceExists(NovaException):
msg_fmt = _("Instance %(name)s already exists.")
class FlavorExists(NovaException):
msg_fmt = _("Flavor with name %(name)s already exists.")
class FlavorIdExists(NovaException):
msg_fmt = _("Flavor with ID %(flavor_id)s already exists.")
class FlavorAccessExists(NovaException):
msg_fmt = _("Flavor access already exists for flavor %(flavor_id)s "
"and project %(project_id)s combination.")
class InvalidSharedStorage(NovaException):
msg_fmt = _("%(path)s is not on shared storage: %(reason)s")
class InvalidLocalStorage(NovaException):
msg_fmt = _("%(path)s is not on local storage: %(reason)s")
class StorageError(NovaException):
msg_fmt = _("Storage error: %(reason)s")
class MigrationError(NovaException):
msg_fmt = _("Migration error: %(reason)s")
class MigrationPreCheckError(MigrationError):
msg_fmt = _("Migration pre-check error: %(reason)s")
class MalformedRequestBody(NovaException):
msg_fmt = _("Malformed message body: %(reason)s")
# NOTE(johannes): NotFound should only be used when a 404 error is
# appropriate to be returned
class ConfigNotFound(NovaException):
msg_fmt = _("Could not find config at %(path)s")
class PasteAppNotFound(NovaException):
msg_fmt = _("Could not load paste app '%(name)s' from %(path)s")
class CannotResizeToSameFlavor(NovaException):
msg_fmt = _("When resizing, instances must change flavor!")
class ResizeError(NovaException):
msg_fmt = _("Resize error: %(reason)s")
class CannotResizeDisk(NovaException):
msg_fmt = _("Server disk was unable to be resized because: %(reason)s")
class FlavorMemoryTooSmall(NovaException):
msg_fmt = _("Flavor's memory is too small for requested image.")
class FlavorDiskTooSmall(NovaException):
msg_fmt = _("Flavor's disk is too small for requested image.")
class InsufficientFreeMemory(NovaException):
msg_fmt = _("Insufficient free memory on compute node to start %(uuid)s.")
class NoValidHost(NovaException):
msg_fmt = _("No valid host was found. %(reason)s")
class QuotaError(NovaException):
ec2_code = 'ResourceLimitExceeded'
msg_fmt = _("Quota exceeded: code=%(code)s")
code = 413
headers = {'Retry-After': 0}
safe = True
class TooManyInstances(QuotaError):
msg_fmt = _("Quota exceeded for %(overs)s: Requested %(req)s,"
" but already used %(used)d of %(allowed)d %(resource)s")
class FloatingIpLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of floating ips exceeded")
class FixedIpLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of fixed ips exceeded")
class MetadataLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of metadata items exceeds %(allowed)d")
class OnsetFileLimitExceeded(QuotaError):
msg_fmt = _("Personality file limit exceeded")
class OnsetFilePathLimitExceeded(QuotaError):
msg_fmt = _("Personality file path too long")
class OnsetFileContentLimitExceeded(QuotaError):
msg_fmt = _("Personality file content too long")
class KeypairLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of key pairs exceeded")
class SecurityGroupLimitExceeded(QuotaError):
ec2_code = 'SecurityGroupLimitExceeded'
msg_fmt = _("Maximum number of security groups or rules exceeded")
class PortLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of ports exceeded")
class AggregateError(NovaException):
msg_fmt = _("Aggregate %(aggregate_id)s: action '%(action)s' "
"caused an error: %(reason)s.")
class AggregateNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s could not be found.")
class AggregateNameExists(NovaException):
msg_fmt = _("Aggregate %(aggregate_name)s already exists.")
class AggregateHostNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s has no host %(host)s.")
class AggregateMetadataNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s has no metadata with "
"key %(metadata_key)s.")
class AggregateHostExists(NovaException):
msg_fmt = _("Aggregate %(aggregate_id)s already has host %(host)s.")
class FlavorCreateFailed(NovaException):
msg_fmt = _("Unable to create flavor")
class InstancePasswordSetFailed(NovaException):
msg_fmt = _("Failed to set admin password on %(instance)s "
"because %(reason)s")
safe = True
class DuplicateVlan(NovaException):
msg_fmt = _("Detected existing vlan with id %(vlan)d")
class CidrConflict(NovaException):
msg_fmt = _("There was a conflict when trying to complete your request.")
code = 409
class InstanceNotFound(NotFound):
ec2_code = 'InvalidInstanceID.NotFound'
msg_fmt = _("Instance %(instance_id)s could not be found.")
class InstanceInfoCacheNotFound(NotFound):
msg_fmt = _("Info cache for instance %(instance_uuid)s could not be "
"found.")
class NodeNotFound(NotFound):
msg_fmt = _("Node %(node_id)s could not be found.")
class NodeNotFoundByUUID(NotFound):
msg_fmt = _("Node with UUID %(node_uuid)s could not be found.")
class MarkerNotFound(NotFound):
msg_fmt = _("Marker %(marker)s could not be found.")
class InvalidInstanceIDMalformed(Invalid):
ec2_code = 'InvalidInstanceID.Malformed'
msg_fmt = _("Invalid id: %(val)s (expecting \"i-...\").")
class CouldNotFetchImage(NovaException):
msg_fmt = _("Could not fetch image %(image_id)s")
class CouldNotUploadImage(NovaException):
msg_fmt = _("Could not upload image %(image_id)s")
class TaskAlreadyRunning(NovaException):
msg_fmt = _("Task %(task_name)s is already running on host %(host)s")
class TaskNotRunning(NovaException):
msg_fmt = _("Task %(task_name)s is not running on host %(host)s")
class InstanceIsLocked(InstanceInvalidState):
msg_fmt = _("Instance %(instance_uuid)s is locked")
class ConfigDriveInvalidValue(Invalid):
msg_fmt = _("Invalid value for Config Drive option: %(option)s")
class ConfigDriveMountFailed(NovaException):
msg_fmt = _("Could not mount vfat config drive. %(operation)s failed. "
"Error: %(error)s")
class ConfigDriveUnknownFormat(NovaException):
msg_fmt = _("Unknown config drive format %(format)s. Select one of "
"iso9660 or vfat.")
class InterfaceAttachFailed(Invalid):
msg_fmt = _("Failed to attach network adapter device to %(instance)s")
class InterfaceDetachFailed(Invalid):
msg_fmt = _("Failed to detach network adapter device from %(instance)s")
class InstanceUserDataTooLarge(NovaException):
msg_fmt = _("User data too large. User data must be no larger than "
"%(maxsize)s bytes once base64 encoded. Your data is "
"%(length)d bytes")
class InstanceUserDataMalformed(NovaException):
msg_fmt = _("User data needs to be valid base 64.")
class UnexpectedTaskStateError(NovaException):
msg_fmt = _("Unexpected task state: expecting %(expected)s but "
"the actual state is %(actual)s")
class UnexpectedDeletingTaskStateError(UnexpectedTaskStateError):
pass
class InstanceActionNotFound(NovaException):
msg_fmt = _("Action for request_id %(request_id)s on instance"
" %(instance_uuid)s not found")
class InstanceActionEventNotFound(NovaException):
msg_fmt = _("Event %(event)s not found for action id %(action_id)s")
class UnexpectedVMStateError(NovaException):
msg_fmt = _("Unexpected VM state: expecting %(expected)s but "
"the actual state is %(actual)s")
class CryptoCAFileNotFound(FileNotFound):
msg_fmt = _("The CA file for %(project)s could not be found")
class CryptoCRLFileNotFound(FileNotFound):
msg_fmt = _("The CRL file for %(project)s could not be found")
class InstanceRecreateNotSupported(Invalid):
msg_fmt = _('Instance recreate is not supported.')
class ServiceGroupUnavailable(NovaException):
msg_fmt = _("The service from servicegroup driver %(driver)s is "
"temporarily unavailable.")
class DBNotAllowed(NovaException):
msg_fmt = _('%(binary)s attempted direct database access which is '
'not allowed by policy')
class UnsupportedVirtType(Invalid):
msg_fmt = _("Virtualization type '%(virt)s' is not supported by "
"this compute driver")
class UnsupportedHardware(Invalid):
msg_fmt = _("Requested hardware '%(model)s' is not supported by "
"the '%(virt)s' virt driver")
class Base64Exception(NovaException):
msg_fmt = _("Invalid Base 64 data for file %(path)s")
class BuildAbortException(NovaException):
msg_fmt = _("Build of instance %(instance_uuid)s aborted: %(reason)s")
class RescheduledException(NovaException):
msg_fmt = _("Build of instance %(instance_uuid)s was re-scheduled: "
"%(reason)s")
class ShadowTableExists(NovaException):
msg_fmt = _("Shadow table with name %(name)s already exists.")
class InstanceFaultRollback(NovaException):
def __init__(self, inner_exception=None):
message = _("Instance rollback performed due to: %s")
self.inner_exception = inner_exception
super(InstanceFaultRollback, self).__init__(message % inner_exception)
class UnsupportedObjectError(NovaException):
msg_fmt = _('Unsupported object type %(objtype)s')
class OrphanedObjectError(NovaException):
msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object')
class IncompatibleObjectVersion(NovaException):
msg_fmt = _('Version %(objver)s of %(objname)s is not supported')
class ObjectActionError(NovaException):
msg_fmt = _('Object action %(action)s failed because: %(reason)s')
class ObjectFieldInvalid(NovaException):
msg_fmt = _('Field %(field)s of %(objname)s is not an instance of Field')
class CoreAPIMissing(NovaException):
msg_fmt = _("Core API extensions are missing: %(missing_apis)s")
class AgentError(NovaException):
msg_fmt = _('Error during following call to agent: %(method)s')
class AgentTimeout(AgentError):
msg_fmt = _('Unable to contact guest agent. '
'The following call timed out: %(method)s')
class AgentNotImplemented(AgentError):
msg_fmt = _('Agent does not support the call: %(method)s')
class InstanceGroupNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s could not be found.")
class InstanceGroupIdExists(NovaException):
msg_fmt = _("Instance group %(group_uuid)s already exists.")
class InstanceGroupMetadataNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s has no metadata with "
"key %(metadata_key)s.")
class InstanceGroupMemberNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s has no member with "
"id %(instance_id)s.")
class InstanceGroupPolicyNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s has no policy %(policy)s.")
class PluginRetriesExceeded(NovaException):
msg_fmt = _("Number of retries to plugin (%(num_retries)d) exceeded.")
class ImageDownloadModuleError(NovaException):
msg_fmt = _("There was an error with the download module %(module)s. "
"%(reason)s")
class ImageDownloadModuleMetaDataError(ImageDownloadModuleError):
msg_fmt = _("The metadata for this location will not work with this "
"module %(module)s. %(reason)s.")
class ImageDownloadModuleNotImplementedError(ImageDownloadModuleError):
msg_fmt = _("The method %(method_name)s is not implemented.")
class ImageDownloadModuleConfigurationError(ImageDownloadModuleError):
msg_fmt = _("The module %(module)s is misconfigured: %(reason)s.")
class ResourceMonitorError(NovaException):
msg_fmt = _("Error when creating resource monitor: %(monitor)s")
class PciDeviceWrongAddressFormat(NovaException):
msg_fmt = _("The PCI address %(address)s has an incorrect format.")
class PciDeviceNotFoundById(NotFound):
msg_fmt = _("PCI device %(id)s not found")
class PciDeviceNotFound(NovaException):
msg_fmt = _("PCI Device %(node_id)s:%(address)s not found.")
class PciDeviceInvalidStatus(NovaException):
msg_fmt = _(
"PCI device %(compute_node_id)s:%(address)s is %(status)s "
"instead of %(hopestatus)s")
class PciDeviceInvalidOwner(NovaException):
msg_fmt = _(
"PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s "
"instead of %(hopeowner)s")
class PciDeviceRequestFailed(NovaException):
msg_fmt = _(
"PCI device request (%requests)s failed")
class PciDevicePoolEmpty(NovaException):
msg_fmt = _(
"Attempt to consume PCI device %(compute_node_id)s:%(address)s "
"from empty pool")
class PciInvalidAlias(NovaException):
msg_fmt = _("Invalid PCI alias definition: %(reason)s")
class PciRequestAliasNotDefined(NovaException):
msg_fmt = _("PCI alias %(alias)s is not defined")
class MissingParameter(NovaException):
ec2_code = 'MissingParameter'
msg_fmt = _("Not enough parameters: %(reason)s")
code = 400
class PciConfigInvalidWhitelist(Invalid):
msg_fmt = _("Invalid PCI devices Whitelist config %(reason)s")
class PciTrackerInvalidNodeId(NovaException):
msg_fmt = _("Cannot change %(node_id)s to %(new_node_id)s")
# Cannot be templated, msg needs to be constructed when raised.
class InternalError(NovaException):
ec2_code = 'InternalError'
msg_fmt = "%(err)s"
class PciDevicePrepareFailed(NovaException):
msg_fmt = _("Failed to prepare PCI device %(id)s for instance "
"%(instance_uuid)s: %(reason)s")
class PciDeviceDetachFailed(NovaException):
msg_fmt = _("Failed to detach PCI device %(dev)s: %(reason)s")
class PciDeviceUnsupportedHypervisor(NovaException):
msg_fmt = _("%(type)s hypervisor does not support PCI devices")
class KeyManagerError(NovaException):
msg_fmt = _("Key manager error: %(reason)s")
class VolumesNotRemoved(Invalid):
msg_fmt = _("Failed to remove volume(s): (%(reason)s)")
class InvalidVideoMode(Invalid):
msg_fmt = _("Provided video model (%(model)s) is not supported.")
class RngDeviceNotExist(Invalid):
msg_fmt = _("The provided RNG device path: (%(path)s) is not "
"present on the host.")
class RequestedVRamTooHigh(NovaException):
msg_fmt = _("The requested amount of video memory %(req_vram)d is higher "
"than the maximum allowed by flavor %(max_vram)d.")
class InvalidWatchdogAction(Invalid):
msg_fmt = _("Provided watchdog action (%(action)s) is not supported.")
class NoBlockMigrationForConfigDriveInLibVirt(NovaException):
msg_fmt = _("Block migration of instances with config drives is not "
"supported in libvirt.")
class UnshelveException(NovaException):
msg_fmt = _("Error during unshelve instance %(instance_id)s: %(reason)s")
| {
"content_hash": "8461b5a35a84bb1a6a9a900361537e12",
"timestamp": "",
"source": "github",
"line_count": 1553,
"max_line_length": 79,
"avg_line_length": 28.249839021249194,
"alnum_prop": 0.6717496353026987,
"repo_name": "afrolov1/nova",
"id": "a0c5cae87968e5b76c0daf2d4f888291b42ca2f4",
"size": "44604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/exception.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14057622"
},
{
"name": "Shell",
"bytes": "17451"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
"""
These URL routings are used by the example `commodity`, `smartcard` and `i18n_smartcard` as found
in the django-SHOP's tutorials.
This is the simplest way of routing and a good default to start with.
"""
from django.conf.urls import url
from shop.views.catalog import AddToCartView, ProductListView, ProductRetrieveView
from myshop.serializers import ProductSummarySerializer, ProductDetailSerializer
urlpatterns = [
url(r'^$', ProductListView.as_view(
serializer_class=ProductSummarySerializer,
redirect_to_lonely_product=True,
)),
url(r'^(?P<slug>[\w-]+)/?$', ProductRetrieveView.as_view(
serializer_class=ProductDetailSerializer
)),
url(r'^(?P<slug>[\w-]+)/add-to-cart', AddToCartView.as_view()),
]
| {
"content_hash": "fb1908eaafdcc0bb242ed1d9d6d52fdf",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 97,
"avg_line_length": 34.52173913043478,
"alnum_prop": 0.7229219143576826,
"repo_name": "nimbis/django-shop",
"id": "bb94c5eefa43f8037f49032298ae7e53118a8cbd",
"size": "818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/myshop/urls/simple_products.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "7939"
},
{
"name": "HTML",
"bytes": "112667"
},
{
"name": "JavaScript",
"bytes": "48245"
},
{
"name": "Python",
"bytes": "780393"
},
{
"name": "Shell",
"bytes": "583"
}
],
"symlink_target": ""
} |
"""Interfaces for Pylint objects"""
from collections import namedtuple
Confidence = namedtuple("Confidence", ["name", "description"])
# Warning Certainties
HIGH = Confidence("HIGH", "No false positive possible.")
INFERENCE = Confidence("INFERENCE", "Warning based on inference result.")
INFERENCE_FAILURE = Confidence(
"INFERENCE_FAILURE", "Warning based on inference with failures."
)
UNDEFINED = Confidence("UNDEFINED", "Warning without any associated confidence level.")
CONFIDENCE_LEVELS = [HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED]
class Interface:
"""Base class for interfaces."""
@classmethod
def is_implemented_by(cls, instance):
return implements(instance, cls)
def implements(obj, interface):
"""Return true if the give object (maybe an instance or class) implements
the interface.
"""
kimplements = getattr(obj, "__implements__", ())
if not isinstance(kimplements, (list, tuple)):
kimplements = (kimplements,)
for implementedinterface in kimplements:
if issubclass(implementedinterface, interface):
return True
return False
class IChecker(Interface):
"""This is a base interface, not designed to be used elsewhere than for
sub interfaces definition.
"""
def open(self):
"""called before visiting project (i.e set of modules)"""
def close(self):
"""called after visiting project (i.e set of modules)"""
class IRawChecker(IChecker):
"""interface for checker which need to parse the raw file"""
def process_module(self, astroid):
"""process a module
the module's content is accessible via astroid.stream
"""
class ITokenChecker(IChecker):
"""Interface for checkers that need access to the token list."""
def process_tokens(self, tokens):
"""Process a module.
tokens is a list of all source code tokens in the file.
"""
class IAstroidChecker(IChecker):
"""interface for checker which prefers receive events according to
statement type
"""
class IReporter(Interface):
"""reporter collect messages and display results encapsulated in a layout"""
def handle_message(self, msg):
"""Handle the given message object."""
def display_reports(self, layout):
"""display results encapsulated in the layout tree"""
__all__ = ("IRawChecker", "IAstroidChecker", "ITokenChecker", "IReporter")
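# Illustrative sketch (not part of the original module): a hypothetical
# checker advertises the interfaces it supports through ``__implements__``,
# and ``implements()`` / ``Interface.is_implemented_by()`` look it up.
if __name__ == "__main__":
    class ExampleChecker:
        __implements__ = (IAstroidChecker, ITokenChecker)

    checker = ExampleChecker()
    assert implements(checker, IAstroidChecker)
    assert ITokenChecker.is_implemented_by(checker)
    assert not implements(checker, IReporter)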
| {
"content_hash": "07ae3e6d4146ccce47a4980f4ed34578",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 87,
"avg_line_length": 28.67058823529412,
"alnum_prop": 0.6840377513336069,
"repo_name": "ruchee/vimrc",
"id": "e61a049281e0f6a42d4f3a1c5aec457894677615",
"size": "3320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vimfiles/bundle/vim-python/submodules/pylint/pylint/interfaces.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "22028"
},
{
"name": "Blade",
"bytes": "3314"
},
{
"name": "C#",
"bytes": "1734"
},
{
"name": "CSS",
"bytes": "31547"
},
{
"name": "Clojure",
"bytes": "47036"
},
{
"name": "CoffeeScript",
"bytes": "9274"
},
{
"name": "Common Lisp",
"bytes": "54314"
},
{
"name": "D",
"bytes": "11562"
},
{
"name": "Dockerfile",
"bytes": "7620"
},
{
"name": "Elixir",
"bytes": "41696"
},
{
"name": "Emacs Lisp",
"bytes": "10489"
},
{
"name": "Erlang",
"bytes": "137788"
},
{
"name": "F#",
"bytes": "2230"
},
{
"name": "Go",
"bytes": "54655"
},
{
"name": "HTML",
"bytes": "178954"
},
{
"name": "Haml",
"bytes": "39"
},
{
"name": "Haskell",
"bytes": "2031"
},
{
"name": "JavaScript",
"bytes": "9086"
},
{
"name": "Julia",
"bytes": "9540"
},
{
"name": "Kotlin",
"bytes": "8669"
},
{
"name": "Less",
"bytes": "327"
},
{
"name": "Makefile",
"bytes": "87500"
},
{
"name": "Mustache",
"bytes": "3375"
},
{
"name": "Nix",
"bytes": "1860"
},
{
"name": "PHP",
"bytes": "9238"
},
{
"name": "PLpgSQL",
"bytes": "33747"
},
{
"name": "Perl",
"bytes": "84200"
},
{
"name": "PostScript",
"bytes": "3891"
},
{
"name": "Python",
"bytes": "7366233"
},
{
"name": "Racket",
"bytes": "1150"
},
{
"name": "Raku",
"bytes": "21146"
},
{
"name": "Ruby",
"bytes": "133344"
},
{
"name": "SCSS",
"bytes": "327"
},
{
"name": "Sass",
"bytes": "308"
},
{
"name": "Scala",
"bytes": "13125"
},
{
"name": "Shell",
"bytes": "52916"
},
{
"name": "Smarty",
"bytes": "300"
},
{
"name": "Swift",
"bytes": "11436"
},
{
"name": "TypeScript",
"bytes": "4663"
},
{
"name": "Vim Script",
"bytes": "10545492"
},
{
"name": "Vim Snippet",
"bytes": "559139"
}
],
"symlink_target": ""
} |
import scikits.statsmodels.api as sm
import matplotlib.pyplot as plt
import numpy as np
from scikits.statsmodels.sandbox.survival2 import KaplanMeier
#Getting the strike data as an array
dta = sm.datasets.strikes.load()
print 'basic data'
print '\n'
dta = dta.values()[-1]
print dta[range(5),:]
print '\n'
#Create the KaplanMeier object and fit the model
km = KaplanMeier(dta,0)
km.fit()
#show the results
km.plot()
print 'basic model'
print '\n'
km.summary()
print '\n'
#Mutiple survival curves
km2 = KaplanMeier(dta,0,exog=1)
km2.fit()
print 'more than one curve'
print '\n'
km2.summary()
print '\n'
km2.plot()
#with censoring
censoring = np.ones_like(dta[:,0])
censoring[dta[:,0] > 80] = 0
dta = np.c_[dta,censoring]
print 'with censoring'
print '\n'
print dta[range(5),:]
print '\n'
km3 = KaplanMeier(dta,0,exog=1,censoring=2)
km3.fit()
km3.summary()
print '\n'
km3.plot()
#Test for difference of survival curves
log_rank = km3.test_diff([0.0645,-0.03957])
print 'log rank test'
print '\n'
print log_rank
print '\n'
#The zeroth element of log_rank is the chi-square test statistic
#for the difference between the survival curves for exog = 0.0645
#and exog = -0.03957, the index one element is the degrees of freedom for
#the test, and the index two element is the p-value for the test
wilcoxon = km3.test_diff([0.0645,-0.03957], rho=1)
print 'Wilcoxon'
print '\n'
print wilcoxon
print '\n'
#Same info as log_rank, but for the Peto and Peto modification of the
#Gehan-Wilcoxon test
#User specified functions for tests
#A wider range of rates can be accessed by using the 'weight' parameter
#for the test_diff method
#For example, if the desired weights are S(t)*(1-S(t)), where S(t) is a pooled
#estimate for the survival function, this could be computed by doing
def weights(t):
    #must accept one argument, even though it is not used here
s = KaplanMeier(dta,0,censoring=2)
s.fit()
s = s.results[0][0]
s = s * (1 - s)
return s
#KaplanMeier provides an array of times to the weighting function
#internally, so the weighting function must accept one argument
test = km3.test_diff([0.0645,-0.03957], weight=weights)
print 'user specified weights'
print '\n'
print test
print '\n'
#Groups with nan names
#These can be handled by passing the data to KaplanMeier as an array of strings
groups = np.ones_like(dta[:,1])
groups = groups.astype('S4')
groups[dta[:,1] > 0] = 'high'
groups[dta[:,1] <= 0] = 'low'
dta = dta.astype('S4')
dta[:,1] = groups
print 'with nan group names'
print '\n'
print dta[range(5),:]
print '\n'
km4 = KaplanMeier(dta,0,exog=1,censoring=2)
km4.fit()
km4.summary()
print '\n'
km4.plot()
#show all the plots
plt.show()
| {
"content_hash": "94398a2eaaa02bbc3c434306ab35ae11",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 79,
"avg_line_length": 22.206611570247933,
"alnum_prop": 0.7082247860066989,
"repo_name": "wesm/statsmodels",
"id": "0f8d2b42a129704dc75f048e6f3c064c521d7950",
"size": "2730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scikits/statsmodels/sandbox/examples/ex_kaplan_meier.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "C",
"bytes": "11707"
},
{
"name": "Python",
"bytes": "3470843"
},
{
"name": "R",
"bytes": "2168"
}
],
"symlink_target": ""
} |
from ..broker import Broker
class BasicServicesBroker(Broker):
controller = "basic_services"
def authenticate(self, **kwargs):
"""Authenticates the user with NetMRI.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param username: The username of the user as whom to login.
:type username: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param password: The password of the user as whom to login.
:type password: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` %Y-%m-%d %H:%M:%S
:param datetime_format: The format to use for date/time input and output.
:type datetime_format: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param timezone: Date/time input and output will be performed in the specified timezone. Should be specified as HH:MM offset from GMT. For example, -05:00 specifies US Eastern Time, whereas +09:00 specifies Tokyo time. Alternatively, a timezone name may be used. See the API Data Structures page for details. If omitted, the server's configured timezone will be used.
:type timezone: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("authenticate"), kwargs)
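    # Hedged usage sketch (not part of the original broker): assumes an
    # already-constructed broker instance; the credential values below are
    # placeholders, not recommendations.
    #
    #     broker.authenticate(username='admin', password='secret',
    #                         datetime_format='%Y-%m-%d %H:%M:%S')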
def base_uri(self, **kwargs):
"""Returns the base URI for the specified version.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param version: The API version for which the base_uri is needed.
:type version: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("base_uri"), kwargs)
def license_info(self, **kwargs):
"""Returns license information for this NetMRI server.
**Inputs**
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return serial_number: NetMRI serial number.
:rtype serial_number: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return license_id: NetMRI License identifier.
:rtype license_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return license_expiration: NetMRI License expiration.
:rtype license_expiration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return license_type: NetMRI License type
:rtype license_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return mode: NetMRI operation mode. One of 'standalone', 'master' or 'collector'.
:rtype mode: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return maintenance_expiration: Maintenance expiration for appliance.
:rtype maintenance_expiration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_limit: Licensed limit of devices.
:rtype device_limit: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return interface_limit: Licensed limit of interfaces.
:rtype interface_limit: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return spm_limit: Licensed limit of number of ports controlled by SPM.
:rtype spm_limit: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return modules_short_name: Short symbolic names of licensed features.
:rtype modules_short_name: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return modules_support: Support statuses for corresponding modules in modules_short_names.
:rtype modules_support: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return modules_expiration: Expiration times for corresponding modules in modules_short_names.
:rtype modules_expiration: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return modules_name: Long names for corresponding modules in modules_short_names.
:rtype modules_name: Array of String
"""
return self.api_request(self._get_method_fullname("license_info"), kwargs)
def server_info(self, **kwargs):
"""Returns basic information regarding this NetMRI server.
**Inputs**
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param api_versions_only_ind: Only include API version information in the output.
:type api_versions_only_ind: Boolean
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return netmri_version: The NetMRI version number running on this appliance or virtual machine.
:rtype netmri_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return latest_api_version: The most recent API version supported by this NetMRI.
:rtype latest_api_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return requested_api_version: The API version that executed this call.
:rtype requested_api_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return host_name: The configured host name of the NetMRI appliance.
:rtype host_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return operating_mode: Indicates if the NetMRI is running in standalone, collector, or operations center mode.
:rtype operating_mode: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return mgmt_ip: The IPv4 management address of this NetMRI, if configured.
:rtype mgmt_ip: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return mgmt_ip6: The IPv6 management address of this NetMRI, if configured.
:rtype mgmt_ip6: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return scan_ip: The IPv4 SCAN (analysis) address of this NetMRI, if configured.
:rtype scan_ip: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return scan_ip6: The IPv6 SCAN (analysis) address of this NetMRI, if configured.
:rtype scan_ip6: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return operational_status: The status of NetMRI. Usually ready, can also be upgrading. Values might change in the future.
:rtype operational_status: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return supported_api_versions: All API versions supported by this NetMRI.
:rtype supported_api_versions: Array of String
"""
return self.api_request(self._get_method_fullname("server_info"), kwargs)
def server_time(self, **kwargs):
"""Returns the current system time of this NetMRI server.
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("server_time"), kwargs)
def restart(self, **kwargs):
"""Restarts the application.
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("restart"), kwargs)
def consolidate(self, **kwargs):
"""Runs consolidation
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param managers: Comma-separated list of consolidator managers. Must be one of Aggregate, Config, Event, Group, Issue, Job, Normal, Policy, Routing, Settings, Stats, Subnet, Switching, Time, Topology, Voip, Vulnerability, Wireless
:type managers: Array
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param collector: Collector name. When this method is called on an OC, this parameter is required
:type collector: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("consolidate"), kwargs)
def settings_generate(self, **kwargs):
"""Generates xml with current configuration data
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param version: The version of xml to be generated
:type version: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return xml: A string containing the full xml as collected from the running config.
:rtype xml: String
"""
return self.api_request(self._get_method_fullname("settings_generate"), kwargs)
def settings_current(self, **kwargs):
"""Reports the status of an xml configuration file
**Inputs**
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return xml: A string containing the full xml as collected from the running config.
:rtype xml: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return messages: An array of hashes that contain details about the validation process
:rtype messages: Array of Hash
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :return status: A string representation of the status of the request. Will be one of: success, error, pending
:rtype status: String
"""
return self.api_request(self._get_method_fullname("settings_current"), kwargs)
def settings_apply(self, **kwargs):
"""Parses the xml provided by config_id, then applies the changes. You should not need to call this directly!
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param config_id: The configuration id reported when the xml was uploaded to the unit
:type config_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param mods: Modifications for applying
:type mods: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return xml: A string containing the full xml as collected from the running config.
:rtype xml: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return messages: An array of hashes that contain details about the validation process
:rtype messages: Array of Hash
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :return status: A string representation of the status of the request. Will be one of: success, error, pending
:rtype status: String
"""
return self.api_request(self._get_method_fullname("settings_apply"), kwargs)
def settings_status(self, **kwargs):
"""Reports the status of an xml configuration file
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param config_id: The configuration id reported when the xml was uploaded to the unit
:type config_id: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return messages: An array of hashes that contain details about the validation process
:rtype messages: Array of Hash
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :return status: A string representation of the status of the validation. Will be one of: success, error, pending
:rtype status: String
"""
return self.api_request(self._get_method_fullname("settings_status"), kwargs)
def settings_info(self, **kwargs):
"""Shows probe info, running_config, candidate_config, and list of installed dsb
**Inputs**
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return grid_members: Hash of grid members info including master and slaves (probes)
:rtype grid_members: String
"""
return self.api_request(self._get_method_fullname("settings_info"), kwargs)
def set_session_value(self, **kwargs):
"""save data in a cache that is session wise
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param key: key associated with that value - will be used to retrieve the same value
:type key: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param value: value to save in the session cache
:type value: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("set_session_value"), kwargs)
def get_session_value(self, **kwargs):
"""retrieve data in the session cache that formerly saved
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param key: key associated with that value - will be used to retrieve the same value
:type key: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param default_value: Default value returned when the key does not exist in the session. If the key does not exist and the default value is nil, the response is 400 with a record-not-found message
:type default_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return value: value associated with that key
:rtype value: String
"""
return self.api_request(self._get_method_fullname("get_session_value"), kwargs)
| {
"content_hash": "fc31ce263ee986091fe640b588d426a5",
"timestamp": "",
"source": "github",
"line_count": 567,
"max_line_length": 380,
"avg_line_length": 32.79365079365079,
"alnum_prop": 0.5181779068516725,
"repo_name": "infobloxopen/infoblox-netmri",
"id": "74962599ec42048394abdadb766f1b1c091d4d1b",
"size": "18594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "infoblox_netmri/api/broker/v2_9_0/basic_services_broker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2110"
},
{
"name": "Python",
"bytes": "20560"
}
],
"symlink_target": ""
} |
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy
from webob import exc as exceptions
from murano.common.i18n import _
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
_ENFORCER = None
def reset():
global _ENFORCER
if _ENFORCER:
_ENFORCER.clear()
_ENFORCER = None
def set_rules(data, default_rule=None, overwrite=True):
default_rule = default_rule or cfg.CONF.policy_default_rule
if not _ENFORCER:
LOG.debug("Enforcer not present, recreating at rules stage.")
init()
if default_rule:
_ENFORCER.default_rule = default_rule
msg = "Loading rules %s, default: %s, overwrite: %s"
LOG.debug(msg, data, default_rule, overwrite)
if isinstance(data, dict):
rules = policy.Rules.from_dict(data, default_rule)
else:
rules = policy.Rules.load_json(data, default_rule)
_ENFORCER.set_rules(rules, overwrite=overwrite)
def init(default_rule=None, use_conf=True):
global _ENFORCER
if not _ENFORCER:
LOG.debug("Enforcer is not present, recreating.")
_ENFORCER = policy.Enforcer(CONF, use_conf=use_conf)
_ENFORCER.load_rules()
def check(rule, ctxt, target={}, do_raise=True, exc=exceptions.HTTPForbidden):
creds = ctxt.to_dict()
try:
result = _ENFORCER.enforce(rule, target, creds, do_raise, exc)
except Exception:
result = False
raise
else:
return result
finally:
extra = {'policy': {'rule': rule, 'target': target}}
if result:
LOG.info(_("Policy check succeeded for rule "
"'%(rule)s' on target %(target)s"),
{'rule': rule, 'target': repr(target)}, extra=extra)
else:
LOG.info(_("Policy check failed for rule "
"'%(rule)s' on target: %(target)s"),
{'rule': rule, 'target': repr(target)}, extra=extra)
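# Hedged illustration (not from the original module): a typical call site
# might look like
#
#     check('murano:create_environment', request.context,
#           target={'environment_id': env_id})
#
# where the rule name and target keys are assumptions for the sketch.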
def check_is_admin(context):
"""Check if the given context is associated with an admin role.
:param context: Murano request context
:returns: A non-False value if context role is admin.
"""
return check('context_is_admin', context,
context.to_dict(), do_raise=False)
| {
"content_hash": "ff75ffe300e22f16862f13eef82659cc",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 78,
"avg_line_length": 28.375,
"alnum_prop": 0.6145374449339207,
"repo_name": "sajuptpm/murano",
"id": "c36225fd253bab307ed0a3db731ba981fcb51178",
"size": "2943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "murano/common/policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1013"
},
{
"name": "PowerShell",
"bytes": "8634"
},
{
"name": "Python",
"bytes": "928901"
},
{
"name": "Shell",
"bytes": "22704"
}
],
"symlink_target": ""
} |
import json
class Filter(dict):
def __init__(self, function=None, *args):
if function == 'property':
self[function] = args[0]
else:
self[function] = list(args)
def __str__(self):
return json.dumps(self)
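# Hedged illustration (not part of the original module): the helpers defined
# below compose into nested JSON filter expressions, e.g.
#
#     And(Gt(Property('price'), 10), Eq(Property('color'), 'red'))
#
# serializes to
#
#     {"and": [{"gt": [{"property": "price"}, 10]},
#              {"eq": [{"property": "color"}, "red"]}]}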
And = lambda *args: Filter('and', *args)
Or = lambda *args: Filter('or', *args)
Gt = lambda *args: Filter('gt', *args)
Gte = lambda *args: Filter('gte', *args)
Lt = lambda *args: Filter('lt', *args)
Lte = lambda *args: Filter('lte', *args)
Eq = lambda *args: Filter('eq', *args)
Neq = lambda *args: Filter('neq', *args)
Not = lambda arg: Filter('not', arg)
Contains = lambda *args: Filter('contains', *args)
Overlaps = lambda *args: Filter('overlaps', *args)
Property = lambda property: Filter('property', property)
Tags = lambda tags: Filter('tags', *[]) | {
"content_hash": "0e460e3cab7faf06b9609b69016b6d59",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 56,
"avg_line_length": 32.68,
"alnum_prop": 0.616891064871481,
"repo_name": "tamber/tamber-python",
"id": "f5e23231239c4afe8d15c29ddb031fa10789bba0",
"size": "817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tamber/filter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17941"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.utils.module_loading import import_string
_handler_class = None
def get_handler_class():
"""
Get the configured `ievv_i18n_url` handler class.
E.g. import the handler class from the class path configured in the ``IEVV_I18N_URL_HANDLER`` setting.
Returns:
ievv_opensource.ievv_i18n_url.handlers.abstract_handler.AbstractHandler: A handler class.
"""
global _handler_class
if _handler_class is None:
handler_classpath = getattr(settings, 'IEVV_I18N_URL_HANDLER', None)
if not handler_classpath:
raise Exception(
'No ievv_i18n_url_handler configured. Please set the IEVV_I18N_URL_HANDLER. Refer to the docs for '
'ievv_i18n_url in ievv_opensource for more info.')
_handler_class = import_string(handler_classpath)
return _handler_class
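# Hedged illustration (not from the original module): the handler is selected
# by a dotted class path in Django settings, e.g.
#
#     IEVV_I18N_URL_HANDLER = 'myproject.i18n_url_handlers.MyI18nUrlHandler'
#
# The handler class named above is hypothetical; any AbstractHandler subclass
# referenced by its import path will do.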
def get_handler():
return get_handler_class()()
| {
"content_hash": "e25819b90e32972462eebc1279b14a47",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 115,
"avg_line_length": 32.62068965517241,
"alnum_prop": 0.678646934460888,
"repo_name": "appressoas/ievv_opensource",
"id": "8865ff3260a8b721a5e67f78ebc7a2ec571a560e",
"size": "946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ievv_opensource/ievv_i18n_url/i18n_url_utils/get_handler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "199"
},
{
"name": "Dockerfile",
"bytes": "162"
},
{
"name": "HTML",
"bytes": "7544"
},
{
"name": "JavaScript",
"bytes": "719"
},
{
"name": "Less",
"bytes": "27"
},
{
"name": "Python",
"bytes": "614046"
},
{
"name": "SCSS",
"bytes": "199"
},
{
"name": "Shell",
"bytes": "141"
},
{
"name": "TypeScript",
"bytes": "254"
}
],
"symlink_target": ""
} |
from google.appengine.api import users
import json
from raceways.handler import BaseHandler, api_handler, authorized
from raceways.client import StravaClient
from google.appengine.ext import ndb
from raceways import model
class StreamsHandler(BaseHandler):
@authorized
@api_handler
@ndb.toplevel
def get(self):
result = {
'streams': {}
}
streams_result = result['streams']
activity_ids = self.request.GET.getall('activity_id')
resolution = self.request.get('resolution')
stream_keys = []
stream_types = []
activity_ids2 = []
for activity_id in activity_ids:
streams_result[activity_id] = {}
for type in ('latlng', 'altitude'):
stream_key = ndb.Key(
model.Stream,
model.Stream.make_key_string(activity_id, type, resolution=resolution))
stream_keys.append(stream_key)
stream_types.append(type)
activity_ids2.append(activity_id)
streams = yield ndb.get_multi_async(stream_keys)
for activity_id, stream_type, stream in zip(activity_ids2, stream_types, streams):
activity = streams_result[activity_id]
if stream is None:
activity[stream_type] = {}
else:
activity[stream_type] = stream.to_dict()
# self.response.cache_expires(60*60)
# self.response.cache_control = 'private'
# self.response.vary = 'Cookie'
raise ndb.Return(result)
| {
"content_hash": "533f74580e605cfdbe4b41e488a89995",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 91,
"avg_line_length": 33.638297872340424,
"alnum_prop": 0.592662871600253,
"repo_name": "alecf/strava-raceways",
"id": "1929bcb459f7d06d90a619c1943af23b14162510",
"size": "1581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "raceways/handlers/streams.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "20968"
},
{
"name": "CSS",
"bytes": "1415"
},
{
"name": "HTML",
"bytes": "21187"
},
{
"name": "JavaScript",
"bytes": "62373"
},
{
"name": "Python",
"bytes": "87637"
}
],
"symlink_target": ""
} |
"""
this calls test_executable_caller as it should be called for the test to work.
"""
import subprocess
if __name__ == '__main__':
process = subprocess.Popen(
['python', 'test_executable_caller.py','test_executable_callee.py'],
shell = False,
universal_newlines = True
)
exit_status = process.wait()
| {
"content_hash": "9730dde5c6b00e5fdc23ba263ca718bf",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 78,
"avg_line_length": 26.153846153846153,
"alnum_prop": 0.6264705882352941,
"repo_name": "cirosantilli/python-utils",
"id": "9b978469b0ee8ea75c4e009f4d5522ccfc187a4d",
"size": "363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_executable.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "228348"
}
],
"symlink_target": ""
} |
import imutils
def sliding_window(image, stepSize, windowSize):
# slide a window across the image
for y in xrange(0, image.shape[0], stepSize):
for x in xrange(0, image.shape[1], stepSize):
# yield the current window
yield (x, y, image[y:y + windowSize[1], x:x + windowSize[0]])
def sliding_window_variable_stepsize(start_x, start_y, image, stepSize, windowSize):
# slide a window across the image
for y in xrange(start_y, image.shape[0], stepSize):
for x in xrange(start_x, image.shape[1], stepSize):
# yield the current window
yield (x, y, image[y:y + windowSize[1], x:x + windowSize[0]])
def sliding_window_1D(image, stepSize, windowWidth):
#slide a window 1D
for x in xrange(0, image.shape[1], stepSize):
yield (x, image[0:image.shape[0],x:x + windowWidth])
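# Hedged usage sketch (not part of the original module): the synthetic image
# and the window/step sizes below are assumptions chosen for illustration.
if __name__ == '__main__':
    import numpy as np
    image = np.zeros((120, 160), dtype=np.uint8)  # synthetic grayscale frame
    win_w, win_h = 32, 32
    full_windows = 0
    for (x, y, window) in sliding_window(image, stepSize=16,
                                         windowSize=(win_w, win_h)):
        # windows clipped at the right/bottom border come back smaller
        if window.shape[0] == win_h and window.shape[1] == win_w:
            full_windows += 1
    # full_windows now holds the number of full-size 32x32 windows visited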
| {
"content_hash": "178bb964fb34eb816221ca13d170a301",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 85,
"avg_line_length": 42.95,
"alnum_prop": 0.6414435389988359,
"repo_name": "AKAMobi/goods-counter",
"id": "0d5f66500516a7cf8ca25a727e34980cc94a44bf",
"size": "860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/pyimagesearch/helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "51947"
}
],
"symlink_target": ""
} |
import os
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django.utils.deconstruct import deconstructible
__doc__ = """
I needed to efficiently create a mirror of a directory tree (so that
"origin pull" CDNs can automatically pull files). The trick was that
some files could be modified, and some could be identical to the original.
Of course it doesn't make sense to store the exact same data twice on the
file system. So I created SymlinkOrCopyStorage.
SymlinkOrCopyStorage allows you to symlink a file when it's identical to
the original file and to copy the file if it's modified.
Of course, it's impossible to know if a file is modified just by looking
at the file, without knowing what the original file was.
That's what the symlinkWithin parameter is for. It accepts one or more paths
(if multiple, they should be concatenated using a colon (:)).
Files that will be saved using SymlinkOrCopyStorage are then checked on their
location: if they are within one of the symlink_within directories,
they will be symlinked, otherwise they will be copied.
The rationale is that unmodified files will exist in their original location,
e.g. /htdocs/example.com/image.jpg and modified files will be stored in
a temporary directory, e.g. /tmp/image.jpg.
"""
@deconstructible
class SymlinkOrCopyStorage(FileSystemStorage):
"""Stores symlinks to files instead of actual files whenever possible
When a file that's being saved is currently stored in the symlink_within
directory, then symlink the file. Otherwise, copy the file.
"""
def __init__(self, location=settings.MEDIA_ROOT, base_url=settings.MEDIA_URL,
symlink_within=None):
super(SymlinkOrCopyStorage, self).__init__(location, base_url)
self.symlink_within = symlink_within.split(":")
def _save(self, name, content):
full_path_dst = self.path(name)
directory = os.path.dirname(full_path_dst)
if not os.path.exists(directory):
os.makedirs(directory)
elif not os.path.isdir(directory):
raise IOError("%s exists and is not a directory." % directory)
full_path_src = os.path.abspath(content.name)
symlinked = False
# Only symlink if the current platform supports it.
if getattr(os, "symlink", False):
for path in self.symlink_within:
if full_path_src.startswith(path):
os.symlink(full_path_src, full_path_dst)
symlinked = True
break
if not symlinked:
super(SymlinkOrCopyStorage, self)._save(name, content)
return name
| {
"content_hash": "9cb09d0c18f714b4108fed01ece94b31",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 81,
"avg_line_length": 41.215384615384615,
"alnum_prop": 0.706233669279582,
"repo_name": "ZuluPro/django-storages",
"id": "6432190f877c6ee50d3c89765ec67353329c46ae",
"size": "2679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "storages/backends/symlinkorcopy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "150922"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.oep import OpenEnergyPlatformManagementServiceAPIs
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-oep
# USAGE
python energy_services_list_partitions_maximum_set_gen.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = OpenEnergyPlatformManagementServiceAPIs(
credential=DefaultAzureCredential(),
subscription_id="00000000-0000-0000-0000-000000000000",
)
response = client.energy_services.list_partitions(
resource_group_name="rgoep",
resource_name="aaaaaaaaaaaaaaaaaaa",
)
print(response)
# x-ms-original-file: specification/oep/resource-manager/Microsoft.OpenEnergyPlatform/preview/2022-04-04-preview/examples/EnergyServices_ListPartitions_MaximumSet_Gen.json
if __name__ == "__main__":
main()
| {
"content_hash": "e197c0881d5d59b97b4e9d45a6c98501",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 171,
"avg_line_length": 36.21212121212121,
"alnum_prop": 0.7497907949790795,
"repo_name": "Azure/azure-sdk-for-python",
"id": "0c52073fdf9c58133e5b6f3a956d529f877277c9",
"size": "1663",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/oep/azure-mgmt-oep/generated_samples/energy_services_list_partitions_maximum_set_gen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""Source code analyzer for chalice app.
The main point of this module is to analyze your source code
and track which AWS API calls you make.
We can then use this information to create IAM policies
automatically for you.
How it Works
============
This is basically a simplified abstract interpreter.
The type inference is greatly simplified because
we're only interested in boto3 client types.
In a nutshell:
* Create an AST and symbol table from the source code.
* Interpret the AST and track boto3 types. This is governed
by a few simple rules.
* Propagate inferred boto3 types as much as possible. Most of
the basic stuff is handled, for example:
* ``x = y`` if y is a boto3 type, so is x.
* ``a :: (x -> y), where y is a boto3 type, then given ``b = a()``,
b is of type y.
* Map inferred types across function params and return types.
At the end of the analysis, a final walk is performed to collect any
node of type ``Boto3ClientMethodCallType``. This represents an
API call being made. This also lets you be selective about which
API calls you care about. For example, if you want only want to see
which API calls happen in a particular function, only walk that
particular ``FunctionDef`` node.
"""
import ast
import symtable
from typing import Dict, Set, Any, Optional, List, Union, cast # noqa
APICallT = Dict[str, Set[str]]
OptASTSet = Optional[Set[ast.AST]]
ComprehensionNode = Union[ast.DictComp, ast.GeneratorExp, ast.ListComp]
def get_client_calls(source_code):
# type: (str) -> APICallT
"""Return all clients calls made in provided source code.
:returns: A dict of service_name -> set([client calls]).
Example: {"s3": set(["list_objects", "create_bucket"]),
"dynamodb": set(["describe_table"])}
"""
parsed = parse_code(source_code)
t = SymbolTableTypeInfer(parsed)
binder = t.bind_types()
collector = APICallCollector(binder)
api_calls = collector.collect_api_calls(parsed.parsed_ast)
return api_calls
def get_client_calls_for_app(source_code):
# type: (str) -> APICallT
"""Return client calls for a chalice app.
This is similar to ``get_client_calls`` except it will
automatically traverse into chalice views with the assumption
that they will be called.
"""
parsed = parse_code(source_code)
parsed.parsed_ast = AppViewTransformer().visit(parsed.parsed_ast)
ast.fix_missing_locations(parsed.parsed_ast)
t = SymbolTableTypeInfer(parsed)
binder = t.bind_types()
collector = APICallCollector(binder)
api_calls = collector.collect_api_calls(parsed.parsed_ast)
return api_calls
def parse_code(source_code, filename='app.py'):
# type: (str, str) -> ParsedCode
parsed = ast.parse(source_code, filename)
table = symtable.symtable(source_code, filename, 'exec')
return ParsedCode(parsed, ChainedSymbolTable(table, table))
class BaseType(object):
def __repr__(self):
# type: () -> str
return "%s()" % self.__class__.__name__
def __eq__(self, other):
# type: (Any) -> bool
return isinstance(other, self.__class__)
# The next 5 classes are used to track the
# components needed to create a boto3 client.
# While we really only care about boto3 clients we need
# to track all the types it takes to get there:
#
# import boto3 <--- bind "boto3" as the boto3 module type
# c = boto.client <--- bind "c" as the boto3 create client type
# s3 = c('s3') <--- bind 's3' as the boto3 client type, subtype 's3'.
# m = s3.list_objects <--- bind as API call 's3', 'list_objects'
# r = m() <--- bind as API call invoked (what we care about).
#
# That way we can handle (in addition to the case above) things like:
# import boto3; boto3.client('s3').list_objects()
# import boto3; s3 = boto3.client('s3'); s3.list_objects()
class Boto3ModuleType(BaseType):
pass
class Boto3CreateClientType(BaseType):
pass
class Boto3ClientType(BaseType):
def __init__(self, service_name):
# type: (str) -> None
#: The name of the AWS service, e.g. 's3'.
self.service_name = service_name
def __eq__(self, other):
# type: (Any) -> bool
# NOTE: We can't use self.__class__ because of a mypy bug:
# https://github.com/python/mypy/issues/3061
# We can change this back once that bug is fixed.
if not isinstance(other, Boto3ClientType):
return False
return self.service_name == other.service_name
def __repr__(self):
# type: () -> str
return "%s(%s)" % (self.__class__.__name__, self.service_name)
class Boto3ClientMethodType(BaseType):
def __init__(self, service_name, method_name):
# type: (str, str) -> None
self.service_name = service_name
self.method_name = method_name
def __eq__(self, other):
# type: (Any) -> bool
if self.__class__ != other.__class__:
return False
return (
self.service_name == other.service_name and
self.method_name == other.method_name)
def __repr__(self):
# type: () -> str
return "%s(%s, %s)" % (
self.__class__.__name__,
self.service_name,
self.method_name
)
class Boto3ClientMethodCallType(Boto3ClientMethodType):
pass
class TypedSymbol(symtable.Symbol):
inferred_type = None # type: Any
ast_node = None # type: ast.AST
class FunctionType(BaseType):
def __init__(self, return_type):
# type: (Any) -> None
self.return_type = return_type
def __eq__(self, other):
# type: (Any) -> bool
if self.__class__ != other.__class__:
return False
return self.return_type == other.return_type
def __repr__(self):
# type: () -> str
return "%s(%s)" % (
self.__class__.__name__,
self.return_type,
)
class StringLiteral(object):
def __init__(self, value):
# type: (str) -> None
self.value = value
class ParsedCode(object):
def __init__(self, parsed_ast, symbol_table):
# type: (ast.AST, ChainedSymbolTable) -> None
self.parsed_ast = parsed_ast
self.symbol_table = symbol_table
class APICallCollector(ast.NodeVisitor):
"""Traverse a given AST and look for any inferred API call types.
This visitor assumes you've ran type inference on the AST.
It will search through the AST and collect any API calls.
"""
def __init__(self, binder):
# type: (TypeBinder) -> None
self.api_calls = {} # type: APICallT
self._binder = binder
def collect_api_calls(self, node):
# type: (ast.AST) -> APICallT
self.visit(node)
return self.api_calls
def visit(self, node):
# type: (ast.AST) -> None
inferred_type = self._binder.get_type_for_node(node)
if isinstance(inferred_type, Boto3ClientMethodCallType):
self.api_calls.setdefault(inferred_type.service_name, set()).add(
inferred_type.method_name)
ast.NodeVisitor.visit(self, node)
class ChainedSymbolTable(object):
def __init__(self, local_table, global_table):
# type: (symtable.SymbolTable, symtable.SymbolTable) -> None
# If you're in the module scope, then pass in
# the same symbol table for local and global.
self._local_table = local_table
self._global_table = global_table
def new_sub_table(self, local_table):
# type: (symtable.SymbolTable) -> ChainedSymbolTable
# Create a new symbol table using this instances
# local table as the new global table and the passed
# in local table as the new local table.
return self.__class__(local_table, self._local_table)
def get_inferred_type(self, name):
# type: (str) -> Any
# Given a symbol name, check whether a type
# has been inferred.
# The stdlib symtable will already fall back to
# global scope if necessary.
symbol = self._local_table.lookup(name)
if symbol.is_global():
try:
global_symbol = self._global_table.lookup(name)
except KeyError:
# It's not an error if a symbol.is_global()
# but is not in our "_global_table", because
# we're not considering the builtin scope.
# In this case we just say that there is no
# type we've inferred.
return None
return getattr(global_symbol, 'inferred_type', None)
return getattr(symbol, 'inferred_type', None)
def set_inferred_type(self, name, inferred_type):
# type: (str, Any) -> None
symbol = cast(TypedSymbol, self._local_table.lookup(name))
symbol.inferred_type = inferred_type
def lookup_sub_namespace(self, name):
# type: (str) -> ChainedSymbolTable
for child in self._local_table.get_children():
if child.get_name() == name:
return self.__class__(child, self._local_table)
for child in self._global_table.get_children():
if child.get_name() == name:
return self.__class__(child, self._global_table)
raise ValueError("Unknown symbol name: %s" % name)
def get_sub_namespaces(self):
# type: () -> List[symtable.SymbolTable]
return self._local_table.get_children()
def get_name(self):
# type: () -> str
return self._local_table.get_name()
def get_symbols(self):
# type: () -> List[symtable.Symbol]
return self._local_table.get_symbols()
def register_ast_node_for_symbol(self, name, node):
# type: (str, ast.AST) -> None
symbol = cast(TypedSymbol, self._local_table.lookup(name))
symbol.ast_node = node
def lookup_ast_node_for_symbol(self, name):
# type: (str) -> ast.AST
symbol = self._local_table.lookup(name)
if symbol.is_global():
symbol = self._global_table.lookup(name)
try:
return cast(TypedSymbol, symbol).ast_node
except AttributeError:
raise ValueError(
"No AST node registered for symbol: %s" % name)
def has_ast_node_for_symbol(self, name):
# type: (str) -> bool
try:
self.lookup_ast_node_for_symbol(name)
return True
except (ValueError, KeyError):
return False
class TypeBinder(object):
def __init__(self):
# type: () -> None
self._node_to_type = {} # type: Dict[ast.AST, Any]
def get_type_for_node(self, node):
# type: (Any) -> Any
return self._node_to_type.get(node)
def set_type_for_node(self, node, inferred_type):
# type: (Any, Any) -> None
self._node_to_type[node] = inferred_type
class SymbolTableTypeInfer(ast.NodeVisitor):
_SDK_PACKAGE = 'boto3'
_CREATE_CLIENT = 'client'
def __init__(self, parsed_code, binder=None, visited=None):
# type: (ParsedCode, Optional[TypeBinder], OptASTSet) -> None
self._symbol_table = parsed_code.symbol_table
self._current_ast_namespace = parsed_code.parsed_ast
self._node_inference = {} # type: Dict[ast.AST, Any]
if binder is None:
binder = TypeBinder()
if visited is None:
visited = set()
self._binder = binder
self._visited = visited
def bind_types(self):
# type: () -> TypeBinder
self.visit(self._current_ast_namespace)
return self._binder
def known_types(self, scope_name=None):
# type: (Optional[str]) -> Dict[str, Any]
table = None
if scope_name is None:
table = self._symbol_table
else:
table = self._symbol_table.lookup_sub_namespace(scope_name)
return {
s.get_name(): cast(TypedSymbol, s).inferred_type
for s in table.get_symbols()
if hasattr(s, 'inferred_type') and
cast(TypedSymbol, s).inferred_type is not None and
s.is_local()
}
def _set_inferred_type_for_name(self, name, inferred_type):
# type: (str, Any) -> None
self._symbol_table.set_inferred_type(name, inferred_type)
def _set_inferred_type_for_node(self, node, inferred_type):
# type: (Any, Any) -> None
self._binder.set_type_for_node(node, inferred_type)
def _get_inferred_type_for_node(self, node):
# type: (Any) -> Any
return self._binder.get_type_for_node(node)
def _new_inference_scope(self, parsed_code, binder, visited):
# type: (ParsedCode, TypeBinder, Set[ast.AST]) -> SymbolTableTypeInfer
instance = self.__class__(parsed_code, binder, visited)
return instance
def visit_Import(self, node):
# type: (ast.Import) -> None
for child in node.names:
if isinstance(child, ast.alias):
import_name = child.name
if import_name == self._SDK_PACKAGE:
self._set_inferred_type_for_name(
import_name, Boto3ModuleType())
self.generic_visit(node)
def visit_Name(self, node):
# type: (ast.Name) -> None
self._set_inferred_type_for_node(
node,
self._symbol_table.get_inferred_type(node.id)
)
self.generic_visit(node)
def visit_Assign(self, node):
# type: (ast.Assign) -> None
# The LHS gets the inferred type of the RHS.
# We do this post-traversal to let the type inference
# run on the children first.
self.generic_visit(node)
rhs_inferred_type = self._get_inferred_type_for_node(node.value)
if rhs_inferred_type is None:
# Special casing assignment to a string literal.
if isinstance(node.value, ast.Str):
rhs_inferred_type = StringLiteral(node.value.s)
self._set_inferred_type_for_node(node.value, rhs_inferred_type)
for t in node.targets:
if isinstance(t, ast.Name):
self._symbol_table.set_inferred_type(t.id, rhs_inferred_type)
self._set_inferred_type_for_node(node, rhs_inferred_type)
def visit_Attribute(self, node):
# type: (ast.Attribute) -> None
self.generic_visit(node)
lhs_inferred_type = self._get_inferred_type_for_node(node.value)
if lhs_inferred_type is None:
return
elif lhs_inferred_type == Boto3ModuleType():
# Check for attributes such as boto3.client.
if node.attr == self._CREATE_CLIENT:
# This is a "boto3.client" attribute.
self._set_inferred_type_for_node(node, Boto3CreateClientType())
elif isinstance(lhs_inferred_type, Boto3ClientType):
self._set_inferred_type_for_node(
node,
Boto3ClientMethodType(
lhs_inferred_type.service_name,
node.attr
)
)
def visit_Call(self, node):
# type: (ast.Call) -> None
self.generic_visit(node)
# func -> Node that's being called
# args -> Arguments being passed.
inferred_func_type = self._get_inferred_type_for_node(node.func)
if inferred_func_type == Boto3CreateClientType():
# e_0 : B3CCT -> B3CT[S]
# e_1 : S str which is a service name
# e_0(e_1) : B3CT[e_1]
if len(node.args) >= 1:
service_arg = node.args[0]
if isinstance(service_arg, ast.Str):
self._set_inferred_type_for_node(
node, Boto3ClientType(service_arg.s))
elif isinstance(self._get_inferred_type_for_node(service_arg),
StringLiteral):
sub_type = self._get_inferred_type_for_node(service_arg)
inferred_type = Boto3ClientType(sub_type.value)
self._set_inferred_type_for_node(node, inferred_type)
elif isinstance(inferred_func_type, Boto3ClientMethodType):
self._set_inferred_type_for_node(
node,
Boto3ClientMethodCallType(
inferred_func_type.service_name,
inferred_func_type.method_name
)
)
elif isinstance(inferred_func_type, FunctionType):
self._set_inferred_type_for_node(
node, inferred_func_type.return_type)
elif isinstance(node.func, ast.Name) and \
self._symbol_table.has_ast_node_for_symbol(node.func.id):
if node not in self._visited:
self._visited.add(node)
self._infer_function_call(node)
def visit_Lambda(self, node):
# type: (ast.Lambda) -> None
# Lambda is going to be a bit tricky because
# there's a new child namespace (via .get_children()),
# but it's not something that will show up in the
# current symbol table via .lookup().
# For now, we're going to ignore lambda expressions.
pass
def _infer_function_call(self, node):
# type: (Any) -> None
# Here we're calling a function we haven't analyzed
# yet. We're first going to analyze the function.
# This will set the inferred_type on the FunctionDef
# node.
# If we get a FunctionType as the inferred type of the
# function, then we know that the inferred type for
# calling the function is the .return_type type.
function_name = node.func.id
sub_table = self._symbol_table.lookup_sub_namespace(function_name)
ast_node = self._symbol_table.lookup_ast_node_for_symbol(
function_name)
self._map_function_params(sub_table, node, ast_node)
child_infer = self._new_inference_scope(
ParsedCode(ast_node, sub_table), self._binder, self._visited)
child_infer.bind_types()
inferred_func_type = self._get_inferred_type_for_node(ast_node)
self._symbol_table.set_inferred_type(function_name, inferred_func_type)
# And finally the result of this Call() node will be
# the return type from the function we just analyzed.
if isinstance(inferred_func_type, FunctionType):
self._set_inferred_type_for_node(
node, inferred_func_type.return_type)
def _map_function_params(self, sub_table, node, def_node):
# type: (ChainedSymbolTable, Any, Any) -> None
# TODO: Handle the full calling syntax, kwargs, stargs, etc.
# Right now we just handle positional args.
defined_args = def_node.args
for arg, defined in zip(node.args, defined_args.args):
inferred_type = self._get_inferred_type_for_node(arg)
if inferred_type is not None:
name = self._get_name(defined)
sub_table.set_inferred_type(name, inferred_type)
def _get_name(self, node):
# type: (Any) -> str
try:
return getattr(node, 'id')
except AttributeError:
return getattr(node, 'arg')
def visit_FunctionDef(self, node):
# type: (ast.FunctionDef) -> None
if node.name == self._symbol_table.get_name():
# Not using generic_visit() because we don't want to
# visit the decorator_list attr.
for child in node.body:
self.visit(child)
else:
self._symbol_table.register_ast_node_for_symbol(node.name, node)
def visit_AsyncFunctionDef(self, node):
# type: (ast.FunctionDef) -> None
# this type is actually wrong but we can't use the actual type as it's
# not available in python 2
self.visit_FunctionDef(node)
def visit_ClassDef(self, node):
# type: (ast.ClassDef) -> None
# Not implemented yet. We want to ensure we don't
# traverse into the class body for now.
return
def visit_DictComp(self, node):
# type: (ast.DictComp) -> None
self._handle_comprehension(node, 'dictcomp')
def visit_Return(self, node):
# type: (Any) -> None
self.generic_visit(node)
inferred_type = self._get_inferred_type_for_node(node.value)
if inferred_type is not None:
self._set_inferred_type_for_node(node, inferred_type)
# We're making a pretty big assumption there's one return
# type per function. Will likely need to come back to this.
inferred_func_type = FunctionType(inferred_type)
self._set_inferred_type_for_node(self._current_ast_namespace,
inferred_func_type)
def visit_ListComp(self, node):
# type: (ast.ListComp) -> None
# 'listcomp' is the string literal used by python
# to creating the SymbolTable for the corresponding
# list comp function.
self._handle_comprehension(node, 'listcomp')
def visit_GeneratorExp(self, node):
# type: (ast.GeneratorExp) -> None
# Generator expressions are an interesting case.
# They create a new sub scope, but they're not
# explicitly named. Python just creates a table
# with the name "genexpr".
self._handle_comprehension(node, 'genexpr')
def _visit_first_comprehension_generator(self, node):
# type: (ComprehensionNode) -> None
if node.generators:
# first generator's iterator is visited in the current scope
first_generator = node.generators[0]
self.visit(first_generator.iter)
def _collect_comprehension_children(self, node):
# type: (ComprehensionNode) -> List[ast.expr]
if isinstance(node, ast.DictComp):
# dict comprehensions have two values to be checked
child_nodes = [node.key, node.value]
else:
child_nodes = [node.elt]
if node.generators:
first_generator = node.generators[0]
child_nodes.append(first_generator.target)
for if_expr in first_generator.ifs:
child_nodes.append(if_expr)
for generator in node.generators[1:]:
# rest need to be visited in the child scope
child_nodes.append(generator.iter)
child_nodes.append(generator.target)
for if_expr in generator.ifs:
child_nodes.append(if_expr)
return child_nodes
def _visit_comprehension_children(self, node, comprehension_type):
# type: (ComprehensionNode, str) -> None
child_nodes = self._collect_comprehension_children(node)
child_scope = self._get_matching_sub_namespace(comprehension_type,
node.lineno)
if child_scope is None:
# In Python 2 there's no child scope for list comp
# Or we failed to locate the child scope, this happens in Python 2
# when there are multiple comprehensions of the same type in the
# same scope. The line number trick doesn't work as Python 2 always
# passes line number 0, make a best effort
for child_node in child_nodes:
try:
self.visit(child_node)
except KeyError:
pass
return
for child_node in child_nodes:
# visit sub expressions in the child scope
child_table = self._symbol_table.new_sub_table(child_scope)
child_infer = self._new_inference_scope(
ParsedCode(child_node, child_table),
self._binder, self._visited)
child_infer.bind_types()
def _handle_comprehension(self, node, comprehension_type):
# type: (ComprehensionNode, str) -> None
self._visit_first_comprehension_generator(node)
self._visit_comprehension_children(node, comprehension_type)
def _get_matching_sub_namespace(self, name, lineno):
# type: (str, int) -> Optional[symtable.SymbolTable]
namespaces = [t for t in self._symbol_table.get_sub_namespaces()
if t.get_name() == name]
if len(namespaces) == 1:
# if there's only one match for the name, return it
return namespaces[0]
for namespace in namespaces:
# otherwise disambiguate by using the line number
if namespace.get_lineno() == lineno:
return namespace
return None
def visit(self, node):
# type: (Any) -> None
return ast.NodeVisitor.visit(self, node)
class AppViewTransformer(ast.NodeTransformer):
_CHALICE_DECORATORS = [
'route', 'authorizer', 'lambda_function',
'schedule', 'on_s3_event', 'on_sns_message',
'on_sqs_message', 'on_ws_connect', 'on_ws_message',
'on_ws_disconnect',
]
def visit_FunctionDef(self, node):
# type: (ast.FunctionDef) -> Any
if self._is_chalice_view(node):
return self._auto_invoke_view(node)
return node
def _is_chalice_view(self, node):
# type: (ast.FunctionDef) -> bool
# We can certainly improve on this, but this check is more
# of a heuristic for the time being. The ideal way to do this
# is to infer the Chalice type and ensure the function is
# decorated with the Chalice type's route() method.
decorator_list = node.decorator_list
if not decorator_list:
return False
for decorator in decorator_list:
if isinstance(decorator, ast.Call) and \
isinstance(decorator.func, ast.Attribute):
if decorator.func.attr in self._CHALICE_DECORATORS:
return True
return False
def _auto_invoke_view(self, node):
# type: (ast.FunctionDef) -> List[ast.AST]
auto_invoke = ast.Expr(
value=ast.Call(
func=ast.Name(id=node.name, ctx=ast.Load()),
args=[], keywords=[], starargs=None, kwargs=None
)
)
return [node, auto_invoke]
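# Hedged usage sketch (illustration only, not part of the original module).
# The sample source string is an assumption made up to show the intended
# entry point and the shape of its result.
if __name__ == '__main__':
    _sample_source = (
        "import boto3\n"
        "ddb = boto3.client('dynamodb')\n"
        "ddb.describe_table(TableName='example')\n"
    )
    # Expected: {'dynamodb': {'describe_table'}}
    print(get_client_calls(_sample_source))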
| {
"content_hash": "74142f13b22a39e203cbf4e9748ecaf8",
"timestamp": "",
"source": "github",
"line_count": 699,
"max_line_length": 79,
"avg_line_length": 37.798283261802574,
"alnum_prop": 0.5950948109458385,
"repo_name": "awslabs/chalice",
"id": "9b2f75e37667e3104da7e9e2ab554c93ad40bfd5",
"size": "26421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chalice/analyzer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1407"
},
{
"name": "Python",
"bytes": "588372"
}
],
"symlink_target": ""
} |
from pyvdp.merchantsearch import VisaMerchantSearchDispatcher
def send(data):
"""Submits a Merchant Search request.
:param MerchantSearch data: **Required**. Instance of :func:`~pyvdp.merchantsearch.MerchantSearchData`.
:return: A response from VDP.
**Usage:**
.. code-block:: python
from pyvdp.merchantsearch import search, SearchModel
search_attrs = {
"merchantName": "cmu edctn materials cntr",
"merchantStreetAddress": "802 industrial dr",
"merchantCity": "Mount Pleasant",
"merchantState": "MI",
"merchantPostalCode": "48858",
"merchantCountryCode": "840",
"merchantPhoneNumber": "19897747123",
"merchantUrl": "http://www.emc.cmich.edu",
"businessRegistrationId": "386004447",
"acquirerCardAcceptorId": "424295031886",
"acquiringBin": "476197"
}
search_options = {
"maxRecords": "5",
"matchIndicators": "true",
"matchScore": "true",
"proximity": [
"merchantName"
],
"wildCard": [
"merchantName"
]
}
data_kwargs = {
'searchAttrList': SearchModel.MerchantSearchAttrList(**search_attrs),
'searchOptions': SearchModel.MerchantSearchOptions(**search_options),
}
data = SearchModel(**data_kwargs)
result = search.send(data)
print(result)
"""
c = VisaMerchantSearchDispatcher(resource='merchantsearch',
api='',
method='search',
http_verb='POST',
data=data)
return c.send()
| {
"content_hash": "6a4b92508b815d908fb137edaf3a7263",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 107,
"avg_line_length": 33.32142857142857,
"alnum_prop": 0.5096463022508039,
"repo_name": "ppokrovsky/pyvdp",
"id": "55694d502d3427220ebd4ab10af50e8629b46a32",
"size": "1866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvdp/merchantsearch/search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "266062"
}
],
"symlink_target": ""
} |