| code (string, 3 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (1 class) | license (15 values) | size (int64, 3 to 1.05M) |
---|---|---|---|---|---|
# Generated by Django 3.1.13 on 2021-10-22 12:55
from django.db import migrations
import uuid
def gen_uuid(apps, schema_editor):
    MyModel1 = apps.get_model('outdoor', 'course')
    MyModel2 = apps.get_model('outdoor', 'site')
    for row in MyModel1.objects.all():
        row.uuid = uuid.uuid4()
        row.save(update_fields=['uuid'])
    for row in MyModel2.objects.all():
        row.uuid = uuid.uuid4()
        row.save(update_fields=['uuid'])


class Migration(migrations.Migration):

    dependencies = [
        ('outdoor', '0033_auto_20211022_1251'),
    ]

    operations = [
        migrations.RunPython(gen_uuid),
    ]
| GeotrekCE/Geotrek-admin | geotrek/outdoor/migrations/0034_auto_20211022_1255.py | Python | bsd-2-clause | 639 |
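The data migration above registers only a forward function, so Django treats it as irreversible. A minimal sketch of the same operation with an explicit no-op reverse; this variant is an illustration only and is not part of the original migration file:

```python
from django.db import migrations


class Migration(migrations.Migration):
    # Hypothetical variant of the migration above, shown only to illustrate
    # attaching a reverse no-op; gen_uuid is the function defined in that file.
    dependencies = [('outdoor', '0033_auto_20211022_1251')]
    operations = [
        migrations.RunPython(gen_uuid, reverse_code=migrations.RunPython.noop),
    ]
```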
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column
from sqlalchemy.types import String, Integer, Boolean, Text, Date
from webapp.libs.mediahelper import MediaHelper
Base = declarative_base()
class Schedule(Base):
    __tablename__ = "schedule"

    schedule_id = Column(Integer, primary_key=True)
    date = Column("dt", Date, nullable=False)
    content = Column(Text, nullable=False)
    enabled = Column(Boolean)

    def __init__(self, schedule_id=None, date=None, content=None, enabled=None):
        Base.__init__(self)
        self.schedule_id = schedule_id
        self.date = date
        self.content = content
        self.enabled = enabled
| crocodilered/insideout | webapp/libs/models/schedule.py | Python | mit | 712 |
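A small usage sketch for the declarative model above; the in-memory SQLite engine and session setup are assumptions for illustration and are not part of the original module:

```python
from datetime import date

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

# In-memory database purely for demonstration.
engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)  # creates the "schedule" table defined above
session = sessionmaker(bind=engine)()

session.add(Schedule(date=date.today(), content="Evening broadcast", enabled=True))
session.commit()
print(session.query(Schedule).filter_by(enabled=True).count())  # -> 1
```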
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from pyre.components.Component import Component
class Greeter(Component):

    class Inventory(Component.Inventory):

        import pyre.inventory

        greeting = pyre.inventory.str("greeting", default="Hello")

    def __init__(self):
        Component.__init__(self, name="greeter", facility="greeter")
        self.greeting = ''
        return

    def _configure(self):
        Component._configure(self)
        self.greeting = self.inventory.greeting
        return

    def _init(self):
        Component._init(self)
        return

    def _fini(self):
        Component._fini(self)
        return


from pyre.applications.Script import Script


class HelloApp(Script):

    class Inventory(Script.Inventory):

        import pyre.inventory

        name = pyre.inventory.str("name", default="Michael Aivazis")
        name.meta['tip'] = "the name of my friend"

        address = pyre.inventory.str("address")
        address.meta['tip'] = "the address of my friend"

        greeter = pyre.inventory.facility("greeter", default="morning")
        greeter.meta['tip'] = "the facility that manages the generated greeting"

    def main(self, *args, **kwds):
        curator = self.getCurator()
        self._debug.log("greeter: %s" % self.greeter)
        print '%s %s!' % (self.greeter.greeting, self.friend)
        return

    def __init__(self):
        Script.__init__(self, 'hello')
        self.friend = ''
        self.greeter = ''
        return

    def _configure(self):
        Script._configure(self)
        self.friend = self.inventory.name
        self.greeter = self.inventory.greeter
        return


# main
if __name__ == '__main__':
    app = HelloApp()
    app.run()
# version
__id__ = "$Id: hello.py,v 1.4 2005/03/10 21:35:37 aivazis Exp $"
# End of file
| bmi-forum/bmi-pyre | pythia-0.8/packages/pyre/tests/applications/hello.py | Python | gpl-2.0 | 2,202 |
class MultiCodec(object):
    """
    A codec which simply composes together a chain of other codecs
    """
    def __init__(self, *codecs):
        self.codecs = codecs

    def encode(self, data):
        for codec in self.codecs:
            data = codec.encode(data)
        return data

    def decode(self, data):
        for codec in reversed(self.codecs):
            data = codec.decode(data)
        return data
| gamechanger/topical | topical/codecs/multi.py | Python | mit | 426 |
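A short usage sketch of the composition behaviour: `encode` applies the codecs left to right, `decode` unwinds them in reverse. The two codec classes below are hypothetical stand-ins invented for this example and are not part of the topical package:

```python
import base64
import json


class JsonCodec(object):
    def encode(self, data):
        return json.dumps(data).encode("utf-8")

    def decode(self, data):
        return json.loads(data.decode("utf-8"))


class Base64Codec(object):
    def encode(self, data):
        return base64.b64encode(data)

    def decode(self, data):
        return base64.b64decode(data)


codec = MultiCodec(JsonCodec(), Base64Codec())
blob = codec.encode({"topic": "orders"})            # JSON first, then base64
assert codec.decode(blob) == {"topic": "orders"}    # decoded in reverse order
```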
from DIRAC import gConfig, gLogger
from DIRAC.DataManagementSystem.DB.DataIntegrityDB import DataIntegrityDB
def test():
  """ Some test cases
  """
  host = '127.0.0.1'
  user = 'Dirac'
  pwd = 'Dirac'
  db = 'DataIntegrityDB'

  gConfig.setOptionValue( '/Systems/DataManagement/Test/Databases/DataIntegrityDB/Host', host )
  gConfig.setOptionValue( '/Systems/DataManagement/Test/Databases/DataIntegrityDB/DBName', db )
  gConfig.setOptionValue( '/Systems/DataManagement/Test/Databases/DataIntegrityDB/User', user )
  gConfig.setOptionValue( '/Systems/DataManagement/Test/Databases/DataIntegrityDB/Password', pwd )

  diDB = DataIntegrityDB()
  assert diDB._connect()['OK']

  source = 'Test'
  prognosis = 'TestError'
  prodID = 1234
  lfn = '/Test/%08d/File1' % prodID
  fileMetadata1 = {lfn: {'Prognosis': prognosis, 'PFN': 'File1', 'SE': 'Test-SE'}}
  fileOut1 = {'FileID': 1L, 'LFN': lfn, 'PFN': 'File1', 'Prognosis': prognosis,
              'GUID': None, 'SE': 'Test-SE', 'Size': None}
  newStatus = 'Solved'
  newPrognosis = 'AnotherError'

  try:
    gLogger.info( '\n Creating Table\n' )
    # Make sure it is there and it has been created for this test
    result = diDB._checkTable()
    assert result['OK']

    result = diDB._checkTable()
    assert not result['OK']
    assert result['Message'] == 'The requested table already exist'

    result = diDB.insertProblematic( source, fileMetadata1 )
    assert result['OK']
    assert result['Value'] == {'Successful': {lfn: True}, 'Failed': {}}

    result = diDB.insertProblematic( source, fileMetadata1 )
    assert result['OK']
    assert result['Value'] == {'Successful': {lfn: 'Already exists'}, 'Failed': {}}

    result = diDB.getProblematicsSummary()
    assert result['OK']
    assert result['Value'] == {'TestError': {'New': 1}}

    result = diDB.getDistinctPrognosis()
    assert result['OK']
    assert result['Value'] == ['TestError']

    result = diDB.getProblematic()
    assert result['OK']
    assert result['Value'] == fileOut1

    result = diDB.incrementProblematicRetry( result['Value']['FileID'] )
    assert result['OK']
    assert result['Value'] == 1

    result = diDB.getProblematic()
    assert result['OK']
    assert result['Value'] == fileOut1

    result = diDB.getPrognosisProblematics( prognosis )
    assert result['OK']
    assert result['Value'] == [fileOut1]

    result = diDB.getTransformationProblematics( prodID )
    assert result['OK']
    assert result['Value'][lfn] == 1

    result = diDB.setProblematicStatus( 1, newStatus )
    assert result['OK']
    assert result['Value'] == 1

    result = diDB.changeProblematicPrognosis( 1, newPrognosis )
    assert result['OK']
    assert result['Value'] == 1

    result = diDB.getPrognosisProblematics( prognosis )
    assert result['OK']
    assert result['Value'] == []

    result = diDB.removeProblematic( 1 )
    assert result['OK']
    assert result['Value'] == 1

    result = diDB.getProblematicsSummary()
    assert result['OK']
    assert result['Value'] == {}

    gLogger.info( '\n Removing Table\n' )
    result = diDB._update( 'DROP TABLE `%s`' % diDB.tableName )
    assert result['OK']

    gLogger.info( '\n OK\n' )

  except AssertionError:
    print 'ERROR ',
    if not result['OK']:
      print result['Message']
    else:
      print result
    sys.exit( 1 )


if __name__ == '__main__':
  import sys
  import os
  from DIRAC.Core.Base import Script
  Script.parseCommandLine()
  gLogger.setLevel( 'VERBOSE' )

  if 'PYTHONOPTIMIZE' in os.environ and os.environ['PYTHONOPTIMIZE']:
    gLogger.info( 'Unset python optimization "PYTHONOPTIMIZE"' )
    sys.exit( 0 )

  test()
| Andrew-McNab-UK/DIRAC | tests/Integration/DataManagementSystem/Test_DataIntegrityDB.py | Python | gpl-3.0 | 3,658 |
import Pyro4
import socket
import sys
import yasnac.remote.erc as erc
sys.excepthook = Pyro4.util.excepthook
def main() :
    # Need to get local ip address in order to bind the pyro daemon..
    # socket.gethostbyname(socket.gethostname()) gives 127.0.0.1.
    # So do the gross thing
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("sudoroom.org", 80))
    localipaddr = s.getsockname()[0]
    s.close()

    robot = erc.ERC()

    Pyro4.config.SERIALIZER = "pickle"
    Pyro4.config.SERIALIZERS_ACCEPTED = {"json","marshal","serpent","pickle"}

    Pyro4.Daemon.serveSimple(
        {
            robot: "sudoroom.robot.yasnac"
        },
        host = localipaddr,
        ns = False )


if __name__== "__main__" :
    main()
 | b3sigma/robjoy | serve_erc.py | Python | gpl-3.0 | 767 |
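For context, a client-side sketch of talking to the daemon started above. The host and port in the URI are placeholders (`Pyro4.Daemon.serveSimple` prints the real URI at startup), and the serializer has to match the server's pickle setting:

```python
import Pyro4

Pyro4.config.SERIALIZER = "pickle"  # must match the server configuration above

# Placeholder host/port: substitute the URI printed by serveSimple() on the server.
robot = Pyro4.Proxy("PYRO:sudoroom.robot.yasnac@192.168.1.50:9090")
# Any method exposed by yasnac.remote.erc.ERC can now be invoked remotely;
# which methods exist depends entirely on that class.
```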
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright 2015 Vauxoo
# Author: Osval Reyes, Yanina Aular
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
| ddico/rma | rma/__init__.py | Python | agpl-3.0 | 939 |
class Solution(object):
    def count_bits(self, n):
        c = (n - ((n >> 1) & 0o33333333333) - ((n >> 2) & 0o11111111111))
        return ((c + (c >> 3)) & 0o30707070707) % 63

    def countBits(self, num):
        """
        :type num: int
        :rtype: List[int]
        """
        return map(self.count_bits, xrange(num + 1))
| ckclark/leetcode | py/counting-bits.py | Python | apache-2.0 | 336 |
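A quick sanity check for the bit-twiddling above (the solution targets Python 2, hence `xrange` and the list-returning `map`); the expected list is simply the popcounts of 0 through 5:

```python
sol = Solution()
print sol.countBits(5)  # [0, 1, 1, 2, 1, 2] -- number of set bits in 0..5
# count_bits() is the classic octal-mask trick: the masks sum bits within
# 3-bit groups, and the final % 63 folds the group sums into one count.
```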
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .. import users
from ..models import GitHubCore
class IssueEvent(GitHubCore):
    """The :class:`IssueEvent <IssueEvent>` object. This specifically deals
    with events described in the
    `Issues\>Events <http://developer.github.com/v3/issues/events>`_ section of
    the GitHub API.

    Two event instances can be checked like so::

        e1 == e2
        e1 != e2

    And is equivalent to::

        e1.commit_id == e2.commit_id
        e1.commit_id != e2.commit_id

    """
    def _update_attributes(self, event):
        # The type of event:
        #   ('closed', 'reopened', 'subscribed', 'merged', 'referenced',
        #    'mentioned', 'assigned')
        #: The type of event, e.g., closed
        self.event = self._get_attribute(event, 'event')

        #: SHA of the commit.
        self.commit_id = self._get_attribute(event, 'commit_id')

        self._api = self._get_attribute(event, 'url')

        #: :class:`Issue <github3.issues.Issue>` where this comment was made.
        from .issue import Issue
        self.issue = self._class_attribute(event, 'issue', Issue, self)

        #: :class:`User <github3.users.User>` who caused this event.
        self.actor = self._class_attribute(
            event, 'actor', users.ShortUser, self,
        )

        #: Number of comments
        self.comments = self._get_attribute(event, 'comments')

        #: datetime object representing when the event was created.
        self.created_at = self._strptime_attribute(event, 'created_at')

        #: Dictionary of links for the pull request
        self.pull_request = self._get_attribute(event, 'pull_request', {})

        #: Dictionary containing label details
        self.label = self._get_attribute(event, 'label', {})

        #: The integer ID of the event
        self.id = self._get_attribute(event, 'id')

        #: :class:`User <github3.users.User>` that is assigned
        self.assignee = self._class_attribute(
            event, 'assignee', users.ShortUser, self,
        )

        #: Dictionary containing milestone details
        self.milestone = self._get_attribute(event, 'milestone', {})

        #: Dictionary containing to and from attributes
        self.rename = self._get_attribute(event, 'rename', {})

        self._uniq = self.commit_id

    def _repr(self):
        return '<Issue Event [{0} by {1}]>'.format(
            self.event, self.actor
        )
| balloob/github3.py | github3/issues/event.py | Python | bsd-3-clause | 2,480 |
from mock import patch
import mock
from builtins import bytes
from lxml import etree
from pytest import raises
from collections import namedtuple
from kiwi.xml_description import XMLDescription
from kiwi.exceptions import (
    KiwiSchemaImportError,
    KiwiValidationError,
    KiwiDescriptionInvalid,
    KiwiDataStructureError,
    KiwiDescriptionConflict,
    KiwiCommandNotFound,
    KiwiExtensionError
)


class TestSchema:
    def setup(self):
        test_xml = bytes(
            b"""<?xml version="1.0" encoding="utf-8"?>
            <image schemaversion="1.4" name="bob">
                <description type="system">
                    <author>John Doe</author>
                    <contact>[email protected]</contact>
                    <specification>
                        say hello
                    </specification>
                </description>
                <preferences>
                    <packagemanager>zypper</packagemanager>
                    <version>1.1.1</version>
                    <type image="ext3"/>
                </preferences>
                <repository type="rpm-md">
                    <source path="repo"/>
                </repository>
            </image>"""
        )
        test_xml_extension = bytes(
            b"""<?xml version="1.0" encoding="utf-8"?>
            <image schemaversion="1.4" name="bob">
                <description type="system">
                    <author>John Doe</author>
                    <contact>[email protected]</contact>
                    <specification>
                        say hello
                    </specification>
                </description>
                <preferences>
                    <packagemanager>zypper</packagemanager>
                    <version>1.1.1</version>
                    <type image="ext3"/>
                </preferences>
                <repository type="rpm-md">
                    <source path="repo"/>
                </repository>
                <extension xmlns:my_plugin="http://www.my_plugin.com">
                    <my_plugin:my_feature>
                        <my_plugin:title name="cool stuff"/>
                    </my_plugin:my_feature>
                </extension>
            </image>"""
        )
        test_xml_extension_not_unique = bytes(
            b"""<?xml version="1.0" encoding="utf-8"?>
            <image schemaversion="1.4" name="bob">
                <description type="system">
                    <author>John Doe</author>
                    <contact>[email protected]</contact>
                    <specification>
                        say hello
                    </specification>
                </description>
                <preferences>
                    <packagemanager>zypper</packagemanager>
                    <version>1.1.1</version>
                    <type image="ext3"/>
                </preferences>
                <repository type="rpm-md">
                    <source path="repo"/>
                </repository>
                <extension xmlns:my_plugin="http://www.my_plugin.com">
                    <my_plugin:toplevel_a/>
                    <my_plugin:toplevel_b/>
                </extension>
            </image>"""
        )
        test_xml_extension_invalid = bytes(
            b"""<?xml version="1.0" encoding="utf-8"?>
            <image schemaversion="1.4" name="bob">
                <description type="system">
                    <author>John Doe</author>
                    <contact>[email protected]</contact>
                    <specification>
                        say hello
                    </specification>
                </description>
                <preferences>
                    <packagemanager>zypper</packagemanager>
                    <version>1.1.1</version>
                    <type image="ext3"/>
                </preferences>
                <repository type="rpm-md">
                    <source path="repo"/>
                </repository>
                <extension xmlns:my_plugin="http://www.my_plugin.com">
                    <my_plugin:my_feature>
                        <my_plugin:title name="cool stuff" unknown_attr="foo"/>
                    </my_plugin:my_feature>
                </extension>
            </image>"""
        )
        self.description_from_file = XMLDescription(
            description='../data/example_config.xml'
        )
        self.description_from_data = XMLDescription(xml_content=test_xml)
        self.extension_description_from_data = XMLDescription(
            xml_content=test_xml_extension
        )
        self.extension_multiple_toplevel_description_from_data = XMLDescription(
            xml_content=test_xml_extension_not_unique
        )
        self.extension_invalid_description_from_data = XMLDescription(
            xml_content=test_xml_extension_invalid
        )

    def test_constructor_conflict(self):
        with raises(KiwiDescriptionConflict):
            XMLDescription(description='description', xml_content='content')

    def test_load_schema_from_xml_content(self):
        schema = etree.parse('../../kiwi/schema/kiwi.rng')
        lookup = '{http://relaxng.org/ns/structure/1.0}attribute'
        for attribute in schema.iter(lookup):
            if attribute.get('name') == 'schemaversion':
                schemaversion = attribute.find(
                    '{http://relaxng.org/ns/structure/1.0}value'
                ).text
        parsed = self.description_from_data.load()
        assert parsed.get_schemaversion() == schemaversion

    @patch('lxml.etree.RelaxNG')
    def test_load_schema_import_error(self, mock_relax):
        mock_relax.side_effect = KiwiSchemaImportError(
            'ImportError'
        )
        with raises(KiwiSchemaImportError):
            self.description_from_file.load()

    @patch('lxml.isoschematron.Schematron')
    @patch('lxml.etree.RelaxNG')
    @patch('lxml.etree.parse')
    def test_load_schema_validation_error_from_file(
        self, mock_parse, mock_relax, mock_schematron
    ):
        mock_validate = mock.Mock()
        mock_validate.validate.side_effect = KiwiValidationError(
            'ValidationError'
        )
        mock_relax.return_value = mock_validate
        mock_schematron.return_value = mock_validate
        with raises(KiwiValidationError):
            self.description_from_file.load()

    @patch('lxml.isoschematron.Schematron')
    @patch('lxml.etree.RelaxNG')
    @patch('lxml.etree.parse')
    @patch('kiwi.system.setup.Command.run')
    def test_load_schema_description_from_file_invalid(
        self, mock_command, mock_parse, mock_relax, mock_schematron
    ):
        mock_rng_validate = mock.Mock()
        mock_rng_validate.validate = mock.Mock(
            return_value=False
        )
        mock_sch_validate = mock.Mock()
        mock_sch_validate.validate = mock.Mock(
            return_value=False
        )
        validation_report = namedtuple(
            'report', ['text']
        )
        name_spaces = namedtuple(
            'nspaces', ['nsmap']
        )
        mock_validation_report = mock.Mock()
        mock_validation_report.getroot = mock.Mock(
            return_value=name_spaces(nsmap="")
        )
        mock_validation_report.xpath = mock.Mock(
            return_value=[
                validation_report(text='wrong attribute 1'),
                validation_report(text='wrong attribute 2')
            ]
        )
        mock_sch_validate.validation_report = mock_validation_report
        mock_relax.return_value = mock_rng_validate
        mock_schematron.return_value = mock_sch_validate
        command_run = namedtuple(
            'command', ['output', 'error', 'returncode']
        )
        mock_command.return_value = command_run(
            output='jing output\n',
            error='',
            returncode=1
        )
        with raises(KiwiDescriptionInvalid):
            self.description_from_file.load()

    @patch('lxml.isoschematron.Schematron')
    @patch('lxml.etree.RelaxNG')
    @patch('lxml.etree.parse')
    @patch('kiwi.system.setup.Command.run')
    def test_load_schema_description_from_data_invalid(
        self, mock_command, mock_parse, mock_relax, mock_schematron
    ):
        mock_rng_validate = mock.Mock()
        mock_rng_validate.validate = mock.Mock(
            return_value=False
        )
        mock_sch_validate = mock.Mock()
        mock_sch_validate.validate = mock.Mock(
            return_value=False
        )
        validation_report = namedtuple(
            'report', ['text']
        )
        name_spaces = namedtuple(
            'nspaces', ['nsmap']
        )
        mock_validation_report = mock.Mock()
        mock_validation_report.getroot = mock.Mock(
            return_value=name_spaces(nsmap="")
        )
        mock_validation_report.xpath = mock.Mock(
            return_value=[
                validation_report(text='wrong attribute 1'),
                validation_report(text='wrong attribute 2')
            ]
        )
        mock_sch_validate.validation_report = mock_validation_report
        mock_relax.return_value = mock_rng_validate
        mock_schematron.return_value = mock_sch_validate
        command_run = namedtuple(
            'command', ['output', 'error', 'returncode']
        )
        mock_command.return_value = command_run(
            output='jing output\n',
            error='',
            returncode=1
        )
        with raises(KiwiDescriptionInvalid):
            self.description_from_data.load()

    @patch('lxml.isoschematron.Schematron')
    @patch('lxml.etree.RelaxNG')
    @patch('lxml.etree.parse')
    @patch('kiwi.system.setup.Command.run')
    def test_load_schema_description_from_data_invalid_no_jing(
        self, mock_command, mock_parse, mock_relax, mock_schematron
    ):
        mock_rng_validate = mock.Mock()
        mock_rng_validate.validate = mock.Mock(
            return_value=False
        )
        mock_sch_validate = mock.Mock()
        mock_sch_validate.validate = mock.Mock(
            return_value=True
        )
        mock_relax.return_value = mock_rng_validate
        mock_schematron.return_value = mock_sch_validate
        mock_command.side_effect = KiwiCommandNotFound('No jing command')
        with raises(KiwiDescriptionInvalid):
            self.description_from_data.load()

    @patch('lxml.isoschematron.Schematron')
    @patch('lxml.etree.RelaxNG')
    @patch('lxml.etree.parse')
    @patch('kiwi.xml_parse.parse')
    def test_load_data_structure_error(
        self, mock_xml_parse, mock_etree_parse, mock_relax, mock_schematron
    ):
        mock_rng_validate = mock.Mock()
        mock_rng_validate.validate = mock.Mock(
            return_value=True
        )
        mock_sch_validate = mock.Mock()
        mock_sch_validate.validate = mock.Mock(
            return_value=True
        )
        mock_relax.return_value = mock_rng_validate
        mock_schematron.return_value = mock_sch_validate
        mock_xml_parse.side_effect = KiwiDataStructureError(
            'DataStructureError'
        )
        with raises(KiwiDataStructureError):
            self.description_from_file.load()

    @patch('kiwi.xml_description.Command.run')
    def test_load_extension(self, mock_command):
        command_output = mock.Mock()
        command_output.output = 'file://../data/my_plugin.rng'
        mock_command.return_value = command_output
        self.extension_description_from_data.load()
        mock_command.assert_called_once_with(
            ['xmlcatalog', '/etc/xml/catalog', 'http://www.my_plugin.com']
        )
        xml_data = self.extension_description_from_data.get_extension_xml_data(
            'my_plugin'
        )
        assert xml_data.getroot()[0].get('name') == 'cool stuff'

    def test_load_extension_multiple_toplevel_error(self):
        with raises(KiwiExtensionError):
            self.extension_multiple_toplevel_description_from_data.load()

    @patch('kiwi.xml_description.Command.run')
    def test_load_extension_schema_error(self, mock_command):
        mock_command.side_effect = Exception
        with raises(KiwiExtensionError):
            self.extension_description_from_data.load()

    @patch('kiwi.xml_description.Command.run')
    def test_load_extension_validation_error(self, mock_command):
        command_output = mock.Mock()
        command_output.output = 'file://../data/my_plugin.rng'
        mock_command.return_value = command_output
        with raises(KiwiExtensionError):
            self.extension_invalid_description_from_data.load()
| b1-systems/kiwi | test/unit/xml_description_test.py | Python | gpl-3.0 | 12,634 |
"""
Django settings for rms project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#ue7m&$hsllz#waww*brofid#5%fhq6qkpw2ttwgny5v@glzy8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'app.apps.AppConfig',
]

MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'rms.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'rms.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| iu5team/rms | rms/settings.py | Python | mit | 3,227 |
from setuptools import setup, find_packages
setup(name='BIOMD0000000223',
      version=20140916,
      description='BIOMD0000000223 from BioModels',
      url='http://www.ebi.ac.uk/biomodels-main/BIOMD0000000223',
      maintainer='Stanley Gu',
      maintainer_url='[email protected]',
      packages=find_packages(),
      package_data={'': ['*.xml', 'README.md']},
      )
 | biomodels/BIOMD0000000223 | setup.py | Python | cc0-1.0 | 377 |
from __future__ import unicode_literals
from django.apps import AppConfig
class RootConfig(AppConfig):
    name = 'root'
| PyConPune/pune.pycon.org | root/apps.py | Python | mit | 124 |
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
#
# __main__.py
# @Author : Gustavo F ([email protected])
# @Link : https://github.com/sharkguto
# @Date : 17/02/2019 10:13:23
import time
from ims24.services.extractor import Extractor
from ims24 import logger
from ims24.services.haus import ExtractorHaus
def main():
    """
    Main loop that extract information from web,
    transform to csv and save on a database using
    sqlalchemy.

    The visualize() func is to plot some charts
    """
    logger.info("Start ETL ...")
    start_time = time.time()
    extractor = Extractor()
    result = extractor.return_data()
    extractor_haus = ExtractorHaus(list_haus=result)
    logger.info("End ETL ...")
    elapsed_time = time.time() - start_time
    logger.info("Elapsed Time: %s", elapsed_time)


if __name__ == "__main__":
    main()
| fclesio/learning-space | Python/ims24/ims24/__main__.py | Python | gpl-2.0 | 857 |
import mock
from datetime import datetime
from django.contrib.auth.models import User
from django.test import TestCase
from mediaviewer.models.file import File
from mediaviewer.models.filenamescrapeformat import FilenameScrapeFormat
from mediaviewer.models.path import Path
from mediaviewer.models.usersettings import UserSettings
from mediaviewer.models.posterfile import PosterFile
from mediaviewer.models.datatransmission import DataTransmission
class TestGetScrapedNameReplacements(TestCase):
    ''' The purpose of this test is to test the period and hyphen substitutions '''
    def setUp(self):
        self.path = Path()
        self.path.override_display_name = None

        self.scraper = FilenameScrapeFormat()
        self.scraper.useSearchTerm = True

        self.file = File()
        self.file.filename = 'This.is.a.sample.file'
        self.file.override_filename = None
        self.file.rawSearchString = lambda: self.file.filename
        self.file.filenamescrapeformat = self.scraper
        self.file.path = self.path

    def test_filename_contains_period_with_subPeriod_scraper(self):
        self.scraper.subPeriods = True

        expected = 'This Is A Sample File'
        actual = self.file.getScrapedName()
        self.assertEqual(expected, actual)

    def test_filename_contains_hyphen_with_subPeriod_scraper(self):
        self.file.filename = 'This-is-a-sample-file'
        self.scraper.subPeriods = True

        expected = 'This Is A Sample File'
        actual = self.file.getScrapedName()
        self.assertEqual(expected, actual)

    def test_filename_contains_period_without_subPeriod_scraper(self):
        self.scraper.subPeriods = False

        expected = 'This.is.a.sample.file'
        actual = self.file.getScrapedName()
        self.assertEqual(expected, actual)

    def test_filename_contains_hyphen_without_subPeriod_scraper(self):
        self.file.filename = 'This-is-a-sample-file'
        self.scraper.subPeriods = False

        expected = 'This-is-a-sample-file'
        actual = self.file.getScrapedName()
        self.assertEqual(expected, actual)


class TestGetScrapedNameOverrideFileName(TestCase):
    def setUp(self):
        self.path = Path()
        self.path.override_display_name = None

        self.file = File()
        self.file.path = self.path

    def test_no_override_filename(self):
        self.file.filename = 'This.is.a.sample.file'
        self.file.override_filename = None

        expected = 'This.is.a.sample.file'
        actual = self.file.getScrapedName()
        self.assertEqual(expected, actual)

    def test_override_filename(self):
        self.file.filename = 'This.is.a.sample.file'
        self.file.override_filename = 'overrided file name'

        expected = 'overrided file name'
        actual = self.file.getScrapedName()
        self.assertEqual(expected, actual)

    def test_scrapedName_uses_path_override(self):
        self.path.override_display_name = 'overrided path name'
        self.file.filename = 'This.is.a.sample.file'
        self.file.override_filename = None

        expected = 'overrided path name'
        actual = self.file.getScrapedName()
        self.assertEqual(expected, actual)

    def test_scrapedName_uses_overrided_file_name(self):
        self.path.override_display_name = 'overrided path name'
        self.file.filename = 'This.is.a.sample.file'
        self.file.override_filename = 'overrided file name'

        expected = 'overrided file name'
        actual = self.file.getScrapedName()
        self.assertEqual(expected, actual)


class TestNew(TestCase):
    def setUp(self):
        self.filter_patcher = mock.patch('mediaviewer.models.file.UserSettings.objects.filter')
        self.mock_filter = self.filter_patcher.start()

        self.createLastWatchedMessage_patcher = mock.patch('mediaviewer.models.file.Message.createLastWatchedMessage')
        self.mock_createLastWatchedMessage = self.createLastWatchedMessage_patcher.start()

        self.mock_setting = mock.MagicMock(UserSettings)
        self.mock_settings_queryset = [self.mock_setting]
        self.mock_filter.return_value.all.return_value = self.mock_settings_queryset

        self.path = Path.new('local_path', 'remote_path', False)
        self.path.save()

    def tearDown(self):
        self.filter_patcher.stop()
        self.createLastWatchedMessage_patcher.stop()

    def test_(self):
        new_file = File.new('test_filename',
                            self.path)

        self.mock_filter.assert_called_once_with(last_watched=self.path)
        self.mock_createLastWatchedMessage.assert_called_once_with(self.mock_setting.user, new_file)


class TestDestroyPosterFile(TestCase):
    def setUp(self):
        self.get_patcher = mock.patch('mediaviewer.models.file.PosterFile.objects.get')
        self.mock_get = self.get_patcher.start()

        self.log_patcher = mock.patch('mediaviewer.models.file.log')
        self.mock_log = self.log_patcher.start()

        self.path = Path.new('local_path', 'remote_path', False)
        self.path.save()
        self.file = File.new('test_filename',
                             self.path)

        self.posterfile = mock.MagicMock(PosterFile)
        self.mock_get.return_value = self.posterfile

    def tearDown(self):
        self.get_patcher.stop()
        self.log_patcher.stop()

    def test_valid(self):
        self.file.destroyPosterFile()
        self.mock_get.assert_called_once_with(file=self.file)
        self.posterfile.delete.assert_called_once_with()

    def test_no_posterfile(self):
        self.mock_get.side_effect = PosterFile.DoesNotExist

        self.file.destroyPosterFile()
        self.mock_get.assert_called_once_with(file=self.file)
        self.assertFalse(self.posterfile.delete.called)
        self.mock_log.debug.assert_any_call('Posterfile does not exist. Continuing.')

    def test_other_exception(self):
        self.mock_get.side_effect = Exception

        self.file.destroyPosterFile()
        self.mock_get.assert_called_once_with(file=self.file)
        self.assertFalse(self.posterfile.delete.called)
        self.mock_log.error.assert_any_call('Got an error destroying posterfile')


class TestIsFileNotPath(TestCase):
    def setUp(self):
        self.filter_patcher = mock.patch('mediaviewer.models.file.UserSettings.objects.filter')
        self.mock_filter = self.filter_patcher.start()

        self.createLastWatchedMessage_patcher = mock.patch('mediaviewer.models.file.Message.createLastWatchedMessage')
        self.mock_createLastWatchedMessage = self.createLastWatchedMessage_patcher.start()

        self.mock_setting = mock.MagicMock(UserSettings)
        self.mock_settings_queryset = [self.mock_setting]
        self.mock_filter.return_value.all.return_value = self.mock_settings_queryset

        self.path = Path.new('local_path', 'remote_path', False)
        self.path.save()
        self.new_file = File.new('test_filename',
                                 self.path)

    def tearDown(self):
        self.filter_patcher.stop()
        self.createLastWatchedMessage_patcher.stop()

    def test_isFile(self):
        self.assertTrue(self.new_file.isFile)

    def test_not_isPath(self):
        self.assertFalse(self.new_file.isPath)


class TestProperty(TestCase):
    def setUp(self):
        self.filter_patcher = mock.patch('mediaviewer.models.file.UserSettings.objects.filter')
        self.mock_filter = self.filter_patcher.start()

        self.createLastWatchedMessage_patcher = mock.patch('mediaviewer.models.file.Message.createLastWatchedMessage')
        self.mock_createLastWatchedMessage = self.createLastWatchedMessage_patcher.start()

        self.mock_setting = mock.MagicMock(UserSettings)
        self.mock_settings_queryset = [self.mock_setting]
        self.mock_filter.return_value.all.return_value = self.mock_settings_queryset

        self.path = Path.new('local_path', 'remote_path', False)
        self.path.save()
        self.another_path = Path.new('local_another_path', 'remote_another_path', False)
        self.another_path.save()

        self.new_file = File.new('test_filename',
                                 self.path)
        self.new_posterfile = PosterFile.new(file=self.new_file)

    def tearDown(self):
        self.filter_patcher.stop()
        self.createLastWatchedMessage_patcher.stop()

    def test_get_pathid(self):
        self.assertEqual(self.new_file.pathid, self.path.id)

    def test_set_pathid(self):
        self.new_file.pathid = self.another_path.id
        self.assertEqual(self.new_file.path, self.another_path)

    def test_get_posterfile(self):
        self.assertEqual(self.new_file.posterfile, self.new_posterfile)


class TestDateCreatedForSpan(TestCase):
    def setUp(self):
        self.filter_patcher = mock.patch('mediaviewer.models.file.UserSettings.objects.filter')
        self.mock_filter = self.filter_patcher.start()

        self.createLastWatchedMessage_patcher = mock.patch('mediaviewer.models.file.Message.createLastWatchedMessage')
        self.mock_createLastWatchedMessage = self.createLastWatchedMessage_patcher.start()

        self.mock_setting = mock.MagicMock(UserSettings)
        self.mock_settings_queryset = [self.mock_setting]
        self.mock_filter.return_value.all.return_value = self.mock_settings_queryset

        self.path = Path.new('local_path', 'remote_path', False)
        self.path.save()
        self.another_path = Path.new('local_another_path', 'remote_another_path', False)
        self.another_path.save()

        self.new_file = File.new('test_filename',
                                 self.path)
        self.new_file.datecreated = datetime(2018, 5, 12)

    def tearDown(self):
        self.filter_patcher.stop()
        self.createLastWatchedMessage_patcher.stop()

    def test_(self):
        self.assertEqual(self.new_file.dateCreatedForSpan(), '2018-05-12T00:00:00')


class TestCamelCasedProperties(TestCase):
    def setUp(self):
        self.filter_patcher = mock.patch('mediaviewer.models.file.UserSettings.objects.filter')
        self.mock_filter = self.filter_patcher.start()

        self.createLastWatchedMessage_patcher = mock.patch('mediaviewer.models.file.Message.createLastWatchedMessage')
        self.mock_createLastWatchedMessage = self.createLastWatchedMessage_patcher.start()

        self.mock_setting = mock.MagicMock(UserSettings)
        self.mock_settings_queryset = [self.mock_setting]
        self.mock_filter.return_value.all.return_value = self.mock_settings_queryset

        self.path = Path.new('local_path', 'remote_path', False)
        self.path.save()
        self.another_path = Path.new('local_another_path', 'remote_another_path', False)
        self.another_path.save()

        self.datatransmission = DataTransmission()

        self.new_file = File.new('test_filename',
                                 self.path)
        self.new_file.datatransmission = self.datatransmission

    def tearDown(self):
        self.filter_patcher.stop()
        self.createLastWatchedMessage_patcher.stop()

    def test_fileName(self):
        self.assertEqual(self.new_file.fileName, self.new_file.filename)

    def test_dataTransmission(self):
        self.assertEqual(self.new_file.dataTransmission, self.new_file.datatransmission)


class TestDownloadLink(TestCase):
    def setUp(self):
        self.filter_patcher = mock.patch('mediaviewer.models.file.UserSettings.objects.filter')
        self.mock_filter = self.filter_patcher.start()

        self.createLastWatchedMessage_patcher = mock.patch('mediaviewer.models.file.Message.createLastWatchedMessage')
        self.mock_createLastWatchedMessage = self.createLastWatchedMessage_patcher.start()

        self.LOCAL_IP_patcher = mock.patch('mediaviewer.models.file.LOCAL_IP', 'test_local_ip')
        self.LOCAL_IP_patcher.start()

        self.BANGUP_IP_patcher = mock.patch('mediaviewer.models.file.BANGUP_IP', 'test_bangup_ip')
        self.BANGUP_IP_patcher.start()

        self.WAITER_HEAD_patcher = mock.patch('mediaviewer.models.file.WAITER_HEAD', 'test_local_ip')
        self.WAITER_HEAD_patcher.start()

        #TODO: Finish this!!!
        self.LOCAL_IP_patcher = mock.patch('mediaviewer.models.file.LOCAL_IP', 'test_local_ip')
        self.LOCAL_IP_patcher.start()

        self.LOCAL_IP_patcher = mock.patch('mediaviewer.models.file.LOCAL_IP', 'test_local_ip')
        self.LOCAL_IP_patcher.start()

        self.LOCAL_IP_patcher = mock.patch('mediaviewer.models.file.LOCAL_IP', 'test_local_ip')
        self.LOCAL_IP_patcher.start()

        self.mock_setting = mock.MagicMock(UserSettings)
        self.mock_settings_queryset = [self.mock_setting]
        self.mock_filter.return_value.all.return_value = self.mock_settings_queryset

        self.path = Path.new('local_path', 'remote_path', False)
        self.path.save()
        self.another_path = Path.new('local_another_path', 'remote_another_path', False)
        self.another_path.save()

        self.datatransmission = DataTransmission()

        self.new_file = File.new('test_filename',
                                 self.path)
        self.new_file.datatransmission = self.datatransmission

        self.user = mock.MagicMock(User)
        self.user_settings = mock.MagicMock(UserSettings)
        self.user.settings.return_value = self.user_settings

    def tearDown(self):
        self.filter_patcher.stop()
        self.createLastWatchedMessage_patcher.stop()
        self.BANGUP_IP_patcher.stop()
        self.LOCAL_IP_patcher.stop()

    def test_local_ip(self):
        self.user_settings.ip_format = 'test_local_ip'
| kyokley/MediaViewer | mediaviewer/tests/models/test_file.py | Python | mit | 13,668 |
#!flask/bin/python
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from app import db
import os.path
db.create_all()
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
    api.create(SQLALCHEMY_MIGRATE_REPO, 'database2 repository')
    api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
else:
    api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,
                        api.version(SQLALCHEMY_MIGRATE_REPO))
| lawrluor/matchstats | db_create.py | Python | bsd-3-clause | 518 |
import copy
import sys
def configMake(L, N, prevList, totList):
    if L==1:
        endList = [copy.deepcopy(prevList), N]
        totList.append(unfold(endList))
        return [N]
    if N==0:
        return configMake(L-1, 0, [copy.deepcopy(prevList), 0], totList)
    if L==N:
        return configMake(L-1, N-1, [copy.deepcopy(prevList), 1], totList)
    return [configMake(L-1, N, [copy.deepcopy(prevList), 0], totList), configMake(L-1, N-1, [copy.deepcopy(prevList), 1], totList)]


def adjSum(candList):
    listLen = len(candList)
    total = 0
    for index in range(0, listLen):
        total += candList[index-1]*candList[index]
    return total


def unfold(candList):
    if isinstance(candList, list):
        if len(candList)==2:
            return unfold(candList[0])+unfold(candList[1])
        if len(candList)==1:
            return candList
        if len(candList)==0:
            return []
    return [candList]


def listCollate(candList):
    maxItem = 0
    for index in candList:
        if index > maxItem:
            maxItem = index
    outPut = []
    for size in range(0, maxItem+1):
        numCounts = 0
        for index in candList:
            if index == size:
                numCounts += 1
        outPut.append((size, numCounts))
    return outPut


def genCorrFn(L, N):
    totList = []
    allStates = configMake(L, N, [], totList)
    restStates = []
    weightList = []
    maxAdj = 0
    for state in totList:
        if state[0]==1:
            restStates.append((state, adjSum(state)))
            if restStates[-1][1]>maxAdj:
                maxAdj = restStates[-1][1]
            weightList.append(restStates[-1][1])
    partFnList = listCollate(weightList)
    print(partFnList)
    partitionFn = "("
    for pair in partFnList:
        partitionFn += str(pair[1])+" Exp["+str(pair[0]-maxAdj)+"b] + "
    partitionFn += "0)"
    print(partitionFn)
    finalOut = "{"
    for shift in range(0, L-L/2):
        tempList = []
        for config in restStates:
            if config[0][shift] == 1:
                tempList.append(config[1])
        stateDist = listCollate(tempList)
        outSum = "{"+str(shift)+", ("
        for pair in stateDist:
            outSum += str(pair[1])+" Exp["+str(pair[0]-maxAdj)+"b] + "
        outSum += "0)/"+partitionFn+"}"
        finalOut += outSum
        if shift != L-L/2-1:
            finalOut += ", "
    finalOut+="}"
    return finalOut


L = int(sys.argv[1])
with open("corrFnResults.m", 'w') as f:
    f.write("{")
    for n in range(2, L-2):
        f.write("{"+str(n)+"/"+str(L)+", "+genCorrFn(L, n)+"}, ")
    f.write(genCorrFn(L, L-2) + "}")
| joshuahellier/PhDStuff | codes/thesisCodes/correlationFunctions/exactDist.py | Python | mit | 2,644 |
"""
Windows Process Control
winprocess.run launches a child process and returns the exit code.
Optionally, it can:
redirect stdin, stdout & stderr to files
run the command as another user
limit the process's running time
control the process window (location, size, window state, desktop)
Works on Windows NT, 2000 & XP. Requires Mark Hammond's win32
extensions.
This code is free for any purpose, with no warranty of any kind.
-- John B. Dell'Aquila <[email protected]>
"""
import win32api, win32process, win32security
import win32event, win32con, msvcrt, win32gui
def logonUser(loginString):
    """
    Login as specified user and return handle.
    loginString: 'Domain\nUser\nPassword'; for local
        login use . or empty string as domain
        e.g. '.\nadministrator\nsecret_password'
    """
    domain, user, passwd = loginString.split('\n')
    return win32security.LogonUser(
        user,
        domain,
        passwd,
        win32con.LOGON32_LOGON_INTERACTIVE,
        win32con.LOGON32_PROVIDER_DEFAULT
        )


class Process:
    """
    A Windows process.
    """
    def __init__(self, cmd, login=None,
                 hStdin=None, hStdout=None, hStderr=None,
                 show=1, xy=None, xySize=None,
                 desktop=None):
        """
        Create a Windows process.
        cmd:     command to run
        login:   run as user 'Domain\nUser\nPassword'
        hStdin, hStdout, hStderr:
                 handles for process I/O; default is caller's stdin,
                 stdout & stderr
        show:    wShowWindow (0=SW_HIDE, 1=SW_NORMAL, ...)
        xy:      window offset (x, y) of upper left corner in pixels
        xySize:  window size (width, height) in pixels
        desktop: lpDesktop - name of desktop e.g. 'winsta0\\default'
                 None = inherit current desktop
                 '' = create new desktop if necessary

        User calling login requires additional privileges:
          Act as part of the operating system [not needed on Windows XP]
          Increase quotas
          Replace a process level token
        Login string must EITHER be an administrator's account
        (ordinary user can't access current desktop - see Microsoft
        Q165194) OR use desktop='' to run another desktop invisibly
        (may be very slow to startup & finalize).
        """
        si = win32process.STARTUPINFO()
        si.dwFlags = (win32con.STARTF_USESTDHANDLES ^
                      win32con.STARTF_USESHOWWINDOW)
        if hStdin is None:
            si.hStdInput = win32api.GetStdHandle(win32api.STD_INPUT_HANDLE)
        else:
            si.hStdInput = hStdin
        if hStdout is None:
            si.hStdOutput = win32api.GetStdHandle(win32api.STD_OUTPUT_HANDLE)
        else:
            si.hStdOutput = hStdout
        if hStderr is None:
            si.hStdError = win32api.GetStdHandle(win32api.STD_ERROR_HANDLE)
        else:
            si.hStdError = hStderr
        si.wShowWindow = show
        if xy is not None:
            si.dwX, si.dwY = xy
            si.dwFlags ^= win32con.STARTF_USEPOSITION
        if xySize is not None:
            si.dwXSize, si.dwYSize = xySize
            si.dwFlags ^= win32con.STARTF_USESIZE
        if desktop is not None:
            si.lpDesktop = desktop
        procArgs = (None,  # appName
                    cmd,  # commandLine
                    None,  # processAttributes
                    None,  # threadAttributes
                    1,  # bInheritHandles
                    win32process.CREATE_NEW_CONSOLE,  # dwCreationFlags
                    None,  # newEnvironment
                    None,  # currentDirectory
                    si)  # startupinfo
        if login is not None:
            hUser = logonUser(login)
            win32security.ImpersonateLoggedOnUser(hUser)
            procHandles = win32process.CreateProcessAsUser(hUser, *procArgs)
            win32security.RevertToSelf()
        else:
            procHandles = win32process.CreateProcess(*procArgs)
        self.hProcess, self.hThread, self.PId, self.TId = procHandles

    def wait(self, mSec=None):
        """
        Wait for process to finish or for specified number of
        milliseconds to elapse.
        """
        if mSec is None:
            mSec = win32event.INFINITE
        return win32event.WaitForSingleObject(self.hProcess, mSec)

    def kill(self, gracePeriod=5000):
        """
        Kill process. Try for an orderly shutdown via WM_CLOSE.  If
        still running after gracePeriod (5 sec. default), terminate.
        """
        win32gui.EnumWindows(self.__close__, 0)
        if self.wait(gracePeriod) != win32event.WAIT_OBJECT_0:
            win32process.TerminateProcess(self.hProcess, 0)
        win32api.Sleep(100)  # wait for resources to be released

    def __close__(self, hwnd, dummy):
        """
        EnumWindows callback - sends WM_CLOSE to any window
        owned by this process.
        """
        TId, PId = win32process.GetWindowThreadProcessId(hwnd)
        if PId == self.PId:
            win32gui.PostMessage(hwnd, win32con.WM_CLOSE, 0, 0)

    def exitCode(self):
        """
        Return process exit code.
        """
        return win32process.GetExitCodeProcess(self.hProcess)


def run(cmd, mSec=None, stdin=None, stdout=None, stderr=None, **kw):
    """
    Run cmd as a child process and return exit code.
    mSec:  terminate cmd after specified number of milliseconds
    stdin, stdout, stderr:
           file objects for child I/O (use hStdin etc. to attach
           handles instead of files); default is caller's stdin,
           stdout & stderr;
    kw:    see Process.__init__ for more keyword options
    """
    if stdin is not None:
        kw['hStdin'] = msvcrt.get_osfhandle(stdin.fileno())
    if stdout is not None:
        kw['hStdout'] = msvcrt.get_osfhandle(stdout.fileno())
    if stderr is not None:
        kw['hStderr'] = msvcrt.get_osfhandle(stderr.fileno())
    child = Process(cmd, **kw)
    if child.wait(mSec) != win32event.WAIT_OBJECT_0:
        child.kill()
        raise WindowsError, 'process timeout exceeded'
    return child.exitCode()


if __name__ == '__main__':
    # Pipe commands to a shell and display the output in notepad
    print 'Testing winprocess.py...'

    import tempfile

    timeoutSeconds = 15
    cmdString = """\
REM      Test of winprocess.py piping commands to a shell.\r
REM      This window will close in %d seconds.\r
vol\r
net user\r
_this_is_a_test_of_stderr_\r
""" % timeoutSeconds

    cmd, out = tempfile.TemporaryFile(), tempfile.TemporaryFile()
    cmd.write(cmdString)
    cmd.seek(0)
    print 'CMD.EXE exit code:', run('cmd.exe', show=0, stdin=cmd,
                                    stdout=out, stderr=out)
    cmd.close()
    print 'NOTEPAD exit code:', run('notepad.exe %s' % out.file.name,
                                    show=win32con.SW_MAXIMIZE,
                                    mSec=timeoutSeconds*1000)
    out.close()
| alexei-matveev/ccp1gui | jobmanager/winprocess.py | Python | gpl-2.0 | 7,039 |
import curses
import logging
import common
from pycurses_widgets import Screen, StatusBar, CommandBar, TitleBar, TextPanel, TabPanel, ItemList
from .commandhandler import CommandHandler
class Window(Screen):
    def __init__(self, win):
        super(Window, self).__init__(win)

        self.title = TitleBar(self)
        self.handler = CommandHandler(self)

        self.main = TabPanel(self)
        self.main.create_tab(ItemList, 'list')
        self.main.tabs['list'].set_selected(self.handler.item_selected)
        self.main.create_tab(TextPanel, 'help')
        self.main.tabs['help'].add_line('Help !')

        self.status = StatusBar(self)
        self.command = CommandBar(self)

        self.main.register_event('<KEY_RESIZE>', self.screen.redraw)
        self.main.register_event('<KEY_TAB>', self.show_next_tab)
        self.main.register_event('<KEY_BTAB>', self.show_prev_tab)
        self.main.register_event(':', self.handler.run_command)
        self.main.register_event('d', self.handler.delete_tab)
        self.main.register_event('D', self.handler.delete_message)
        self.main.register_event('U', self.handler.undo_delete)
        self.main.register_event('$', self.handler.toggle_show_deleted)
        self.main.register_event('/', self.handler.run_search)

        self.redraw()

    def set_status(self, text):
        self.status.set_text(text)

    def set_title(self, text):
        self.title.set_text(text)

    def show_next_tab(self, event=None):
        self.main.show_next_tab()
        self.update_title()

    def show_prev_tab(self, event=None):
        self.main.show_prev_tab()
        self.update_title()

    def update_title(self):
        title = ''
        for tab in self.main.childs:
            if title != '':
                title += ' '
            if tab.name == self.main.current.name:
                title += '[%s]' % tab.name
            else:
                title += tab.name
        self.set_title('%s v%s %s' % (common.PROGNAME, common.PROGVERSION, title))

    def read(self, c):
        return self.command.read(c, self.validate_command_input)

    def validate_command_input(self, c):
        (y, x) = self.command.get_pos()
        self.set_status('(%i, %i) : <%s>' % (y, x, c.strip()))
        return True

    def run(self):
        curses.curs_set(0)
        self.main.handle_events()
| franckv/pygmail | src/ui/ncurses/window.py | Python | gpl-3.0 | 2,371 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import datetime
from django.test import TestCase
from base.tests.factories.academic_calendar import AcademicCalendarFactory
from base.tests.factories.academic_year import AcademicYearFactory
from base.tests.factories.offer_year_calendar import OfferYearCalendarFactory
from base.tests.factories.offer_year import OfferYearFactory
from base.templatetags import offer_year_calendar_display as offer_year_calendar_display_tag
today = datetime.date.today()
today2 = datetime.datetime.today()
class OfferYearCalendarDisplayTagTest(TestCase):

    def setUp(self):
        self.build_current_offer_yr_calendar()
        self.build_old_offer_yr_calendar()

    def build_old_offer_yr_calendar(self):
        self.previous_academic_year = AcademicYearFactory(start_date=today.replace(year=today.year - 3),
                                                          end_date=today.replace(year=today.year - 2),
                                                          year=today.year - 3)
        an_previous_academic_calendar = AcademicCalendarFactory(academic_year=self.previous_academic_year)
        self.a_previous_offer_year = OfferYearFactory(academic_year=self.previous_academic_year)
        self.a_previous_offer_yr_calendar = OfferYearCalendarFactory(academic_calendar=an_previous_academic_calendar,
                                                                     offer_year=self.a_previous_offer_year)

    def build_current_offer_yr_calendar(self):
        self.current_academic_year = AcademicYearFactory(start_date=today,
                                                         end_date=today.replace(year=today.year + 1),
                                                         year=today.year)
        an_academic_calendar = AcademicCalendarFactory(academic_year=self.current_academic_year)
        self.a_current_offer_year = OfferYearFactory(academic_year=self.current_academic_year)
        self.a_current_offer_yr_calendar = OfferYearCalendarFactory(academic_calendar=an_academic_calendar,
                                                                    offer_year=self.a_current_offer_year)

    def test_current_offer_year_calendar_display_style(self):
        css_style = offer_year_calendar_display_tag\
            .offer_year_calendar_display(self.a_current_offer_yr_calendar.start_date,
                                         self.a_current_offer_yr_calendar.end_date)
        self.assertEqual(css_style, offer_year_calendar_display_tag.CURRENT_EVENT_CSS_STYLE)

    def test_not_current_offer_year_calendar_display_style(self):
        css_style = offer_year_calendar_display_tag\
            .offer_year_calendar_display(self.a_previous_offer_yr_calendar.start_date,
                                         self.a_previous_offer_yr_calendar.end_date)
        self.assertEqual(css_style, offer_year_calendar_display_tag.NOT_CURRENT_EVENT_CSS_STYLE)
| uclouvain/osis_louvain | base/tests/templatetags/test_offer_year_calendar_display.py | Python | agpl-3.0 | 4,161 |
'''
@date Mar 28, 2010
@author: Matthew A. Todd
This file is part of Test Parser
by Matthew A. Todd
Test Parser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Test Parser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Test Parser. If not, see <http://www.gnu.org/licenses/>.
'''
import tkinter as tk
from tkinter import ttk
from TestParser.Common import Observer
from TestParser.Common.Constants import CONSTANTS
class TkResultView(Observer.Observer):
'''
@date Aug 28, 2010
@author matcat
'''
tagColors = {'error' : 'red',
'fatalerror' : 'red',
'fail' : 'red',
'pass' : 'green',
'ok' : 'green',
'message' : 'white',
'suite' : 'green',
'testresults' : 'green',
'testcase' : 'green'}
def __init__(self, parent, model, controller):
'''
Constructor
@param parent the tkinter parent item for this view
'''
self.parent = parent
self.model = model
self.model.registerObserver(self)
self.controller = controller
self._setupUi()
#
## callbacks and initialization
#
def close(self, data=None):
self.controller.closeView(self)
def rerun(self, data=None):
'''
auto-expand.
It appears there is no way to deselect items in a treeview.
Also, expanding an item selects it. So if the user has expanded
any items, there will be a selection. So either something has
been selected or nothing has been expanded. But as it turns out,
after an auto-expansion nothing is selected. So the first auto-expand
will require something be selected. But its possible for all the
rest to not have anything selected.
Scroll position will be reset to position prior to rerun after.
@bug If the scroll position is set to the very end, it won't be
set to the correct position, for some reason. Note that this
worked fine in a test program I wrote using a ListView.
@warning This will likely scroll to the wrong line if the number of
lines visible changes (which is possible.)
'''
CONSTANTS.logger.debug("start of TkResultView.reRun()")
if CONSTANTS.autoExpand:
# get scroll position
scrollPos = self.tree.yview()
CONSTANTS.logger.debug("scrollPos = %s" % str(scrollPos))
selectedItems = self.tree.selection()
if len(selectedItems) == 0:
selectedItems = []
for root in self.tree.get_children():
selectedItems += self._getExpandedItems(root)
CONSTANTS.logger.debug("selectedItems = %s" % str(selectedItems))
itemsToExpand = []
for item in selectedItems:
itemsToExpand.append(self._computeItemPath(item))
self.controller.runPrevious()
if CONSTANTS.autoExpand:
CONSTANTS.logger.debug("itemsToExpand:\t %s" % itemsToExpand)
self._expandItems(itemsToExpand)
# reset scroll position
if scrollPos[1] != 1.0:
CONSTANTS.logger.debug("setting scroll position to %d" % scrollPos[0])
self.tree.yview(tk.MOVETO, scrollPos[0])
CONSTANTS.logger.debug("end of TkResultView.reRun()")
def about(self, data=None):
self.controller.displayAboutDialog(self.parent)
def _setupUi(self):
'''
Note that treeviews have an extra column for some reason, which is why we
just specify one less.
'''
frame = tk.Frame(self.parent)
frame.pack(expand=True, fill=tk.BOTH)
frame.grid_rowconfigure(0, weight=1)
frame.grid_columnconfigure(0, weight=1)
self.parent.title("Test Parser - %s" % self.model.runnerName())
# menu
menubar = tk.Menu(self.parent)
filemenu = tk.Menu(menubar, tearoff=0)
filemenu.add_command(label="Quit", command=self.close, accelerator="Ctrl+Q")
self.parent.bind("<Control-q>", self.close)
self.parent.protocol('WM_DELETE_WINDOW', self.close) # call our custom func when (x) button pressed
menubar.add_cascade(label="File", menu=filemenu)
runmenu = tk.Menu(menubar, tearoff=0)
runmenu.add_command(label="Rerun", command=self.rerun, accelerator="Ctrl+R")
self.parent.bind("<Control-r>", self.rerun)
menubar.add_cascade(label="Run", menu=runmenu)
helpmenu = tk.Menu(menubar, tearoff=0)
helpmenu.add_command(label="About", command=self.about, accelerator="F1")
self.parent.bind("<F1>", self.about)
menubar.add_cascade(label="Help", menu=helpmenu)
self.parent.config(menu=menubar)
# tree
treeCols = ('Name', 'File', 'Line', 'Info')
self.tree = ttk.Treeview(frame, columns=treeCols)
for col in treeCols:
self.tree.heading(col, text=col)
self.tree.tag_configure('green', background='green')
self.tree.tag_configure('red', background='red')
self.tree.grid(row = 0, column=0, sticky=tk.N+tk.S+tk.E+tk.W)
# scrollbars
vertScrollbar = tk.Scrollbar(frame)
vertScrollbar.grid(row= 0, column=1, sticky=tk.N+tk.S)
self.tree.config(yscrollcommand=vertScrollbar.set)
vertScrollbar.config(command=self.tree.yview)
horizScrollbar = tk.Scrollbar(frame, orient=tk.HORIZONTAL)
horizScrollbar.grid(row=1, column=0, sticky=tk.E+tk.W)
self.tree.config(xscrollcommand=horizScrollbar.set)
horizScrollbar.config(command=self.tree.xview)
#
## result displaying
#
def update(self):
'''
For observer.
'''
self._updateTreeWidget(self.controller.getResults())
def _clearTreeWidget(self):
for root in self.tree.get_children():
self.tree.delete(root)
def _updateTreeWidget(self, results):
self._clearTreeWidget()
self._displayResults('', results)
def _displayResults(self, parentId, result):
'''
@param parent is the parent item id in the tree
@param results results is all the Test Results data we want to
display below given parent item
@return returns tag/color that should
propagate up (eg: we had an error, therefore error's color
should work its way up)
@date Aug 29, 2010
'''
id = self.tree.insert(parentId, 'end', text=result.type,
values=self._getDisplayData(result))
currItemColor = TkResultView.tagColors[result.type.lower()]
returnedColors = [currItemColor]
for child in result.getChildren():
temp = self._displayResults(id, child)
if temp is not None:
returnedColors.append(temp)
color = self._getHighestPriorityColor(returnedColors)
self.tree.item(id, tags=(color))
return color
def _getHighestPriorityColor(self, returnedColors):
'''
Determines which tag/color should propagate up.
'''
if 'red' in returnedColors:
return 'red'
elif 'green' in returnedColors:
return 'green'
else:
return 'white'
def _getDisplayData(self, result):
'''
Takes a TestComposite and returns its data in a tuple that can
be used by the tkinter functions.
@param result TestComposite data to display
@date Aug 29, 2010
'''
name = file = line = info = ""
for infotype, data in result.getRelevantDisplayData():
if infotype == "name":
name = data
elif infotype == "file":
file = data
elif infotype == "line":
line = data
elif infotype == "info":
info = data
elif infotype == "time":
if data is not None:
info = data
return (name, file, line, info)
#
## auto expand
#
def _computeItemPath(self, itemId):
'''
Compute and return the path from root to given item.
Path is a list. The first item is the root and each element
is a child of its predecessor item.
Builds path from head to tail.
@date Jul 30, 2010
'''
path = []
while itemId != '':
path.insert(0, self._getItemData(itemId))
itemId = self.tree.parent(itemId)
return path
def _getExpandedItems(self, currId, parentId = None):
'''
        Returns a list of item ids that are open/displayed/expanded.
        Does not include each item's path.
        We only add items that have no expanded children, because an item's
        parents are expanded as part of expanding the item itself, which
        avoids redundant expansion.
        Base case: no children have been expanded and the parent is expanded.
        We use this counter-intuitive rule ('if parent expanded, add current')
        in order to get the right items to expand; the intuitive rule
        ('if current expanded, add current') expanded one level too few.
        The main disadvantage is that we end up with a lot of items being
        expanded, but there does not seem to be a way around that. Also,
        because tkinter's treeview automatically scrolls to the expanded item,
        it will end up scrolling all the way down (unless we counteract that
        somewhere else).
        In tkinter's treeview, leaf items are not considered expanded.
@return list of items to expand (not paths)
@date Sep 1, 2010
'''
itemsToExpand = []
if self._itemExpanded(currId):
for childId in self.tree.get_children(currId):
itemsToExpand += self._getExpandedItems(childId, currId)
        # if no children have been expanded, then itemsToExpand will be empty
# at this point.
if len(itemsToExpand) == 0 and self._itemExpanded(parentId):
itemsToExpand.append(currId)
return itemsToExpand
def _itemExpanded(self, itemId):
        if itemId is None:
return False
return self.tree.item(itemId)['open'] == True
def _getItemData(self, itemId):
'''
Returns a tuple (type, name, file, line time) of
the data contained within item.
@date Aug 30, 2010
'''
itemData = self.tree.item(itemId)
return (itemData['text'],) + tuple(itemData['values'])
def _expandItems(self, itemsToExpand):
'''
Expand all items so user can see them
@param itemsToExpand a list of item paths to expand along.
@date Aug 30, 2010
'''
for root in self.tree.get_children():
for path in itemsToExpand:
CONSTANTS.logger.debug("path = %s" % path)
self._expandPath(path, root)
def _expandPath(self, path, currId, parentId=None):
'''
Expands a path.
        Because multiple children can satisfy the requirements at any
        particular level (think multiple unnamed suites), we use a
        backtracking algorithm.
        Because of tkinter's see(), we only need to call it on the final item
        in the list, so we walk down to the final item and then call see().
        @param path List of items. The path goes from parent to child.
        @param currId Id of the current item in the tree. The path starts at this item.
        @param parentId Id of the parent of the current item. Used so that
            we don't expand one level too far and don't have to call extra see()'s
            (while recursing back up).
        @return True if we've expanded the final item (no more backtracking, just
            go back up the stack frames).
        @date Aug 30, 2010
'''
if len(path) == 0:
if parentId != None:
self.tree.see(parentId)
return True
if path[0] == self._getItemData(currId):
children = self.tree.get_children(currId)
# no children and last item in path
if len(children) == 0 and len(path) == 1:
self.tree.see(currId)
return True
for childId in children:
if self._expandPath(path[1:], childId, currId):
return True
return False
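# A minimal illustrative sketch of the data that flows between
# _computeItemPath() and _expandPath(); the concrete values below are
# hypothetical. Each path is a list of item-data tuples ordered root -> leaf,
# where each tuple is (type, Name, File, Line, Info) as built by
# _getItemData(), e.g.:
#
#   path = [('Suite', 'AllTests', '', '', ''),
#           ('TestCase', 'testFoo', 'foo.cpp', '12', '')]
#
# _expandPath(path, rootId) then backtracks through matching children and
# calls see() on the final matching item so that it scrolls into view.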
| matcatc/Test_Parser | src/TestParser/View/Tkinter/TkResultView.py | Python | gpl-3.0 | 13,664 |
# coding: utf-8
import types
from girlfriend.workflow.gfworkflow import (
Workflow,
Context
)
from girlfriend.util.lang import ObjDictModel
from girlfriend.util.config import Config
from girlfriend.plugin import plugin_mgr
class WorkflowBuilder(object):
def __init__(self):
self._clazz = Workflow
self._options = ObjDictModel()
self._units = tuple()
self._plugin_mgr = plugin_mgr
self._config = Config()
self._context_factory = Context
self._listeners = tuple()
self._logger = None
def build(self):
        # If the workflow units are given as a function, expand them by calling it with the options
if isinstance(self._units, types.FunctionType):
self._units = self._units(self._options)
workflow = self._clazz(
self._units,
config=self._config,
plugin_mgr=self._plugin_mgr,
context_factory=self._context_factory,
logger=self._logger
)
if self._listeners:
for listener in self._listeners:
workflow.add_listener(listener)
return workflow
def clazz(self, clazz):
"""工作流类对象
"""
self._clazz = clazz
return self
def options(self, options):
"""生成工作流单元序列的选项
"""
self._options = options
return self
def units(self, units):
"""工作流单元列表
"""
self._units = units
return self
def plugin_mgr(self, plugin_mgr):
"""插件管理器
"""
self._plugin_mgr = plugin_mgr
return self
def config(self, config):
"""配置信息
"""
self._config = config
return self
def context_factory(self, context_factory):
"""上下文工厂
"""
self._context_factory = context_factory
return self
def listeners(self, listeners):
"""监听器
"""
self._listeners = listeners
return self
def logger(self, logger):
"""日志对象
"""
self._logger = logger
return self
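# A minimal usage sketch of the builder; 'my_units' is a hypothetical unit
# list (or a function of the options), the rest relies only on the imports
# above:
#
#   workflow = (WorkflowBuilder()
#               .options(ObjDictModel())
#               .units(my_units)
#               .config(Config())
#               .listeners([])
#               .build())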
| chihongze/girlfriend | girlfriend/workflow/builder/__init__.py | Python | mit | 2,180 |
"""
Matplotlib Animation Example
author: Jake Vanderplas
email: [email protected]
website: http://jakevdp.github.com
license: BSD
Please feel free to use and modify this, but keep the above information. Thanks!
"""
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
import math
# Initializing number of dots
N = 2
# Creating dot class
class dot(object):
def __init__(self):
self.x = 10 * np.random.random_sample()
self.y = 10 * np.random.random_sample()
self.velx = self.generate_new_vel()
self.vely = self.generate_new_vel()
def generate_new_vel(self):
return (np.random.random_sample() - 0.5) / 5
def move(self):
def distance(x1, y1, x2, y2):
return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
def inside(x1, y1):
if distance(x1, y1, 5, 5) <= 1:
return True
else:
return False
def calc_dist(d):
ret = 0
for x in dots:
if inside(x.x, x.y) and x != d:
ret = ret + distance(x.x, x.y, d.x, d.y)
return ret
# if dot is inside the circle it tries to maximize the distances to
# other dots inside circle
if inside(self.x, self.y):
dist = calc_dist(self)
for i in range(1, 10):
self.velx = self.generate_new_vel()
self.vely = self.generate_new_vel()
self.x = self.x + self.velx
self.y = self.y + self.vely
if calc_dist(self) <= dist or not inside(self.x, self.y):
self.x = self.x - self.velx
self.y = self.y - self.vely
else:
if np.random.random_sample() < 0.95:
self.x = self.x + self.velx
self.y = self.y + self.vely
else:
self.velx = self.generate_new_vel()
self.vely = self.generate_new_vel()
self.x = self.x + self.velx
self.y = self.y + self.vely
if self.x >= 10:
self.x = 10
self.velx = -1 * self.velx
if self.x <= 0:
self.x = 0
self.velx = -1 * self.velx
if self.y >= 10:
self.y = 10
self.vely = -1 * self.vely
if self.y <= 0:
self.y = 0
self.vely = -1 * self.vely
# Initializing dots
dots = [dot() for i in range(N)]
# First set up the figure, the axis, and the plot element we want to animate
fig = plt.figure()
ax = plt.axes(xlim=(0, 10), ylim=(-10, 10))
d, = ax.plot([dot.x for dot in dots],
[dot.y for dot in dots], 'ro')
#xy = ax.plot(np.arange(0,10, 0.1), 6 * np.sin(2 * np.arange(0,10, 0.1)))
# animation function. This is called sequentially
def animate(i):
for dot in dots:
dot.move()
d.set_data([dot.x for dot in dots],
[dot.y for dot in dots])
return d,
# call the animator. Passing blit=True would make it re-draw only the parts that have changed.
anim = animation.FuncAnimation(fig, animate, frames=200, interval=20)
#anim.save("123.mp4")
plt.show() | asurafire/PSO | animation_test.py | Python | mit | 3,252 |
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
import os
import imp
import sys
import salt
import logging
import tempfile
# Import salt libs
from salt.exceptions import LoaderError
from salt.template import check_render_pipe_str
from salt.utils.decorators import Depends
log = logging.getLogger(__name__)
SALT_BASE_PATH = os.path.dirname(salt.__file__)
LOADED_BASE_NAME = 'salt.loaded'
def _create_loader(
opts,
ext_type,
tag,
int_type=None,
ext_dirs=True,
ext_type_dirs=None,
base_path=None,
loaded_base_name=None,
mod_type_check=None):
'''
Creates Loader instance
Order of module_dirs:
cli flag -m MODULE_DIRS
opts[ext_type_dirs],
extension types,
base types.
'''
sys_types = os.path.join(base_path or SALT_BASE_PATH, int_type or ext_type)
ext_types = os.path.join(opts['extension_modules'], ext_type)
ext_type_types = []
if ext_dirs:
if ext_type_dirs is None:
ext_type_dirs = '{0}_dirs'.format(tag)
if ext_type_dirs in opts:
ext_type_types.extend(opts[ext_type_dirs])
cli_module_dirs = []
    # The dirs can be any module dir, or an in-tree _{ext_type} dir
for _dir in opts.get('module_dirs', []):
# Prepend to the list to match cli argument ordering
maybe_dir = os.path.join(_dir, ext_type)
        if os.path.isdir(maybe_dir):
cli_module_dirs.insert(0, maybe_dir)
continue
maybe_dir = os.path.join(_dir, '_{0}'.format(ext_type))
        if os.path.isdir(maybe_dir):
cli_module_dirs.insert(0, maybe_dir)
if loaded_base_name is None:
loaded_base_name = LOADED_BASE_NAME
if mod_type_check is None:
mod_type_check = _mod_type
module_dirs = cli_module_dirs + ext_type_types + [ext_types, sys_types]
_generate_module('{0}.int'.format(loaded_base_name))
_generate_module('{0}.int.{1}'.format(loaded_base_name, tag))
_generate_module('{0}.ext'.format(loaded_base_name))
_generate_module('{0}.ext.{1}'.format(loaded_base_name, tag))
return Loader(
module_dirs,
opts,
tag,
loaded_base_name=loaded_base_name,
mod_type_check=mod_type_check
)
def minion_mods(opts, context=None, whitelist=None):
'''
Returns the minion modules
'''
load = _create_loader(opts, 'modules', 'module')
if context is None:
context = {}
pack = {'name': '__context__',
'value': context}
if not whitelist:
whitelist = opts.get('whitelist_modules', None)
functions = load.gen_functions(
pack,
whitelist=whitelist,
provider_overrides=True
)
# Enforce dependencies of module functions from "functions"
Depends.enforce_dependencies(functions)
return functions
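# A brief usage sketch; the config path and the 'test.ping' function are
# typical examples rather than something this module guarantees:
#
#   import salt.config
#   __opts__ = salt.config.minion_config('/etc/salt/minion')
#   __salt__ = minion_mods(__opts__)
#   __salt__['test.ping']()  # functions are keyed as 'module.function'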
def raw_mod(opts, name, functions):
'''
Returns a single module loaded raw and bypassing the __virtual__ function
'''
load = _create_loader(opts, 'modules', 'rawmodule')
return load.gen_module(name, functions)
def returners(opts, functions, whitelist=None):
'''
Returns the returner modules
'''
load = _create_loader(opts, 'returners', 'returner')
pack = {'name': '__salt__',
'value': functions}
return load.gen_functions(pack, whitelist=whitelist)
def pillars(opts, functions):
'''
Returns the pillars modules
'''
load = _create_loader(opts, 'pillar', 'pillar')
pack = {'name': '__salt__',
'value': functions}
return load.filter_func('ext_pillar', pack)
def tops(opts):
'''
Returns the tops modules
'''
    if 'master_tops' not in opts:
return {}
whitelist = opts['master_tops'].keys()
load = _create_loader(opts, 'tops', 'top')
return load.filter_func('top', whitelist=whitelist)
def wheels(opts, whitelist=None):
'''
Returns the wheels modules
'''
load = _create_loader(opts, 'wheel', 'wheel')
return load.gen_functions(whitelist=whitelist)
def outputters(opts):
'''
Returns the outputters modules
'''
load = _create_loader(
opts,
'output',
'output',
ext_type_dirs='outputter_dirs')
return load.filter_func('output')
def auth(opts, whitelist=None):
'''
Returns the auth modules
'''
load = _create_loader(opts, 'auth', 'auth')
return load.gen_functions(whitelist=whitelist)
def fileserver(opts, backends):
'''
Returns the file server modules
'''
load = _create_loader(opts, 'fileserver', 'fileserver')
ret = load.gen_functions(whitelist=backends)
return ret
def roster(opts, whitelist=None):
'''
Returns the roster modules
'''
load = _create_loader(opts, 'roster', 'roster')
ret = load.gen_functions(whitelist=whitelist)
return ret
def states(opts, functions, whitelist=None):
'''
Returns the state modules
'''
load = _create_loader(opts, 'states', 'states')
pack = {'name': '__salt__',
'value': functions}
return load.gen_functions(pack, whitelist=whitelist)
def search(opts, returners, whitelist=None):
'''
Returns the search modules
'''
load = _create_loader(opts, 'search', 'search')
pack = {'name': '__ret__',
'value': returners}
return load.gen_functions(pack, whitelist=whitelist)
def log_handlers(opts):
'''
Returns the custom logging handler modules
'''
load = _create_loader(
opts,
'log_handlers',
'log_handlers',
int_type='handlers',
base_path=os.path.join(SALT_BASE_PATH, 'log')
)
return load.filter_func('setup_handlers')
def ssh_wrapper(opts, functions=None):
'''
    Returns the custom ssh wrapper modules
'''
if functions is None:
functions = {}
load = _create_loader(
opts,
'wrapper',
'wrapper',
base_path=os.path.join(SALT_BASE_PATH, os.path.join(
'client',
'ssh'))
)
pack = {'name': '__salt__',
'value': functions}
return load.gen_functions(pack)
def render(opts, functions):
'''
Returns the render modules
'''
load = _create_loader(
opts, 'renderers', 'render', ext_type_dirs='render_dirs'
)
pack = {'name': '__salt__',
'value': functions}
rend = load.filter_func('render', pack)
if not check_render_pipe_str(opts['renderer'], rend):
err = ('The renderer {0} is unavailable, this error is often because '
'the needed software is unavailable'.format(opts['renderer']))
log.critical(err)
raise LoaderError(err)
return rend
def grains(opts):
'''
Return the functions for the dynamic grains and the values for the static
grains.
'''
if 'conf_file' in opts:
pre_opts = {}
pre_opts.update(salt.config.load_config(
opts['conf_file'], 'SALT_MINION_CONFIG',
salt.config.DEFAULT_MINION_OPTS['conf_file']
))
default_include = pre_opts.get(
'default_include', opts['default_include']
)
include = pre_opts.get('include', [])
pre_opts.update(salt.config.include_config(
default_include, opts['conf_file'], verbose=False
))
pre_opts.update(salt.config.include_config(
include, opts['conf_file'], verbose=True
))
if 'grains' in pre_opts:
opts['grains'] = pre_opts['grains']
else:
opts['grains'] = {}
else:
opts['grains'] = {}
load = _create_loader(opts, 'grains', 'grain', ext_dirs=False)
grains_info = load.gen_grains()
grains_info.update(opts['grains'])
return grains_info
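# A short usage sketch; 'os' and 'os_family' are common grain names, shown
# here only as an assumption about typical data:
#
#   __grains__ = grains(__opts__)
#   __grains__.get('os'), __grains__.get('os_family')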
def call(fun, **kwargs):
'''
Directly call a function inside a loader directory
'''
args = kwargs.get('args', [])
dirs = kwargs.get('dirs', [])
module_dirs = [os.path.join(SALT_BASE_PATH, 'modules')] + dirs
load = Loader(module_dirs)
return load.call(fun, args)
def runner(opts):
'''
    Returns the runner modules
'''
load = _create_loader(
opts, 'runners', 'runner', ext_type_dirs='runner_dirs'
)
return load.gen_functions()
def _generate_module(name):
if name in sys.modules:
return
code = "'''Salt loaded {0} parent module'''".format(name.split('.')[-1])
module = imp.new_module(name)
exec code in module.__dict__
sys.modules[name] = module
def _mod_type(module_path):
if module_path.startswith(SALT_BASE_PATH):
return 'int'
return 'ext'
def in_pack(pack, name):
'''
    Return True if the passed name is in the pack
'''
if isinstance(pack, list):
for chunk in pack:
if not isinstance(chunk, dict):
continue
try:
if name == chunk['name']:
return True
except KeyError:
pass
elif isinstance(pack, dict):
try:
if name == pack['name']:
return True
except KeyError:
pass
return False
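# Illustrative examples of in_pack() (the pack contents are hypothetical):
#   in_pack({'name': '__salt__', 'value': {}}, '__salt__') -> True
#   in_pack([{'name': '__ret__', 'value': {}}], '__pillar__') -> False
#   in_pack(None, '__salt__') -> False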
class Loader(object):
'''
Used to load in arbitrary modules from a directory, the Loader can
also be used to only load specific functions from a directory, or to
call modules in an arbitrary directory directly.
'''
def __init__(self,
module_dirs,
opts=None,
tag='module',
loaded_base_name=None,
mod_type_check=None):
self.module_dirs = module_dirs
if opts is None:
opts = {}
self.tag = tag
if 'grains' in opts:
self.grains = opts['grains']
else:
self.grains = {}
if 'pillar' in opts:
self.pillar = opts['pillar']
else:
self.pillar = {}
self.opts = self.__prep_mod_opts(opts)
self.loaded_base_name = loaded_base_name or LOADED_BASE_NAME
self.mod_type_check = mod_type_check or _mod_type
def __prep_mod_opts(self, opts):
'''
Strip out of the opts any logger instance
'''
mod_opts = {}
for key, val in opts.items():
if key in ('logger', 'grains'):
continue
mod_opts[key] = val
return mod_opts
def call(self, fun, arg=None):
'''
Call a function in the load path.
'''
if arg is None:
arg = []
name = fun[:fun.rindex('.')]
try:
fn_, path, desc = imp.find_module(name, self.module_dirs)
mod = imp.load_module(name, fn_, path, desc)
except ImportError:
if self.opts.get('cython_enable', True) is True:
# The module was not found, try to find a cython module
try:
import pyximport
pyximport.install()
for mod_dir in self.module_dirs:
for fn_ in os.listdir(mod_dir):
if name == fn_[:fn_.rindex('.')]:
# Found it, load the mod and break the loop
mod = pyximport.load_module(
name, os.path.join(mod_dir, fn_)
)
return getattr(
mod, fun[fun.rindex('.') + 1:])(*arg)
except ImportError:
log.info('Cython is enabled in options though it\'s not '
'present in the system path. Skipping Cython '
'modules.')
return getattr(mod, fun[fun.rindex('.') + 1:])(*arg)
def gen_module(self, name, functions, pack=None):
'''
Load a single module and pack it with the functions passed
'''
full = ''
mod = None
for mod_dir in self.module_dirs:
if not os.path.isabs(mod_dir):
continue
if not os.path.isdir(mod_dir):
continue
fn_ = os.path.join(mod_dir, name)
if os.path.isdir(fn_):
full = fn_
else:
for ext in ('.py', '.pyo', '.pyc', '.so'):
full_test = '{0}{1}'.format(fn_, ext)
if os.path.isfile(full_test):
full = full_test
if not full:
return None
cython_enabled = False
if self.opts.get('cython_enable', True) is True:
try:
import pyximport
pyximport.install()
cython_enabled = True
except ImportError:
log.info('Cython is enabled in the options but not present '
'in the system path. Skipping Cython modules.')
try:
if full.endswith('.pyx') and cython_enabled:
# If there's a name which ends in .pyx it means the above
# cython_enabled is True. Continue...
mod = pyximport.load_module(name, full, tempfile.gettempdir())
else:
fn_, path, desc = imp.find_module(name, self.module_dirs)
mod = imp.load_module(
'{0}.{1}.{2}.{3}'.format(
self.loaded_base_name,
self.mod_type_check(path),
self.tag,
name
), fn_, path, desc
)
except ImportError:
log.debug(
'Failed to import {0} {1}:\n'.format(
self.tag, name
),
exc_info=True
)
return mod
except Exception:
log.warning(
'Failed to import {0} {1}, this is due most likely to a '
'syntax error:\n'.format(
self.tag, name
),
exc_info=True
)
return mod
if hasattr(mod, '__opts__'):
mod.__opts__.update(self.opts)
else:
mod.__opts__ = self.opts
mod.__grains__ = self.grains
if pack:
if isinstance(pack, list):
for chunk in pack:
try:
setattr(mod, chunk['name'], chunk['value'])
except KeyError:
pass
else:
setattr(mod, pack['name'], pack['value'])
# Call a module's initialization method if it exists
if hasattr(mod, '__init__'):
if callable(mod.__init__):
try:
mod.__init__(self.opts)
except TypeError:
pass
funcs = {}
module_name = mod.__name__[mod.__name__.rindex('.') + 1:]
if getattr(mod, '__load__', False) is not False:
log.info(
'The functions from module {0!r} are being loaded from the '
'provided __load__ attribute'.format(
module_name
)
)
for attr in getattr(mod, '__load__', dir(mod)):
if attr.startswith('_'):
# private functions are skipped
continue
if callable(getattr(mod, attr)):
func = getattr(mod, attr)
if hasattr(func, '__bases__'):
if 'BaseException' in func.__bases__:
# the callable object is an exception, don't load it
continue
# Let's get the function name.
# If the module has the __func_alias__ attribute, it must be a
# dictionary mapping in the form of(key -> value):
# <real-func-name> -> <desired-func-name>
#
            # It defaults, of course, to the found callable attribute name
# if no alias is defined.
funcname = getattr(mod, '__func_alias__', {}).get(attr, attr)
funcs['{0}.{1}'.format(module_name, funcname)] = func
self._apply_outputter(func, mod)
if not hasattr(mod, '__salt__'):
mod.__salt__ = functions
try:
context = sys.modules[
functions[functions.keys()[0]].__module__
].__context__
except AttributeError:
context = {}
mod.__context__ = context
return funcs
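    # Illustrative only: assuming a standard module such as 'cmd' exists on
    # the search path, a call like loader.gen_module('cmd', {}) would return
    # a dict of the form {'cmd.run': <function>, 'cmd.run_all': <function>, ...}.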
def gen_functions(self, pack=None, virtual_enable=True, whitelist=None,
provider_overrides=False):
'''
Return a dict of functions found in the defined module_dirs
'''
log.debug('loading {0} in {1}'.format(self.tag, self.module_dirs))
names = {}
modules = []
funcs = {}
disable = set(self.opts.get('disable_{0}s'.format(self.tag), []))
cython_enabled = False
if self.opts.get('cython_enable', True) is True:
try:
import pyximport
pyximport.install()
cython_enabled = True
except ImportError:
log.info('Cython is enabled in the options but not present '
'in the system path. Skipping Cython modules.')
for mod_dir in self.module_dirs:
if not os.path.isabs(mod_dir):
log.debug(
'Skipping {0}, it is not an absolute path'.format(
mod_dir
)
)
continue
if not os.path.isdir(mod_dir):
log.debug(
'Skipping {0}, it is not a directory'.format(
mod_dir
)
)
continue
for fn_ in os.listdir(mod_dir):
if fn_.startswith('_'):
# skip private modules
# log messages omitted for obviousness
continue
if fn_.split('.')[0] in disable:
log.debug(
'Skipping {0}, it is disabled by configuration'.format(
fn_
)
)
continue
if (fn_.endswith(('.py', '.pyc', '.pyo', '.so'))
or (cython_enabled and fn_.endswith('.pyx'))
or os.path.isdir(os.path.join(mod_dir, fn_))):
extpos = fn_.rfind('.')
if extpos > 0:
_name = fn_[:extpos]
else:
_name = fn_
names[_name] = os.path.join(mod_dir, fn_)
else:
log.debug(
'Skipping {0}, it does not end with an expected '
'extension'.format(
fn_
)
)
for name in names:
try:
if names[name].endswith('.pyx'):
# If there's a name which ends in .pyx it means the above
# cython_enabled is True. Continue...
mod = pyximport.load_module(
'{0}.{1}.{2}.{3}'.format(
self.loaded_base_name,
self.mod_type_check(names[name]),
self.tag,
name
), names[name], tempfile.gettempdir()
)
else:
fn_, path, desc = imp.find_module(name, self.module_dirs)
mod = imp.load_module(
'{0}.{1}.{2}.{3}'.format(
self.loaded_base_name,
self.mod_type_check(path),
self.tag,
name
), fn_, path, desc
)
# reload all submodules if necessary
submodules = [
getattr(mod, sname) for sname in dir(mod) if
isinstance(getattr(mod, sname), mod.__class__)
]
                # reload only custom "sub"modules, i.e. submodules of the
                # parent module that are still available on disk (i.e. not
                # removed during sync_modules)
for submodule in submodules:
try:
smname = '{0}.{1}.{2}'.format(
self.loaded_base_name,
self.tag,
name
)
smfile = '{0}.py'.format(
os.path.splitext(submodule.__file__)[0]
)
if submodule.__name__.startswith(smname) and \
os.path.isfile(smfile):
reload(submodule)
except AttributeError:
continue
except ImportError:
log.debug(
'Failed to import {0} {1}, this is most likely NOT a '
'problem:\n'.format(
self.tag, name
),
exc_info=True
)
continue
except Exception:
log.warning(
'Failed to import {0} {1}, this is due most likely to a '
'syntax error. Traceback raised:\n'.format(
self.tag, name
),
exc_info=True
)
continue
modules.append(mod)
for mod in modules:
virtual = ''
if hasattr(mod, '__opts__'):
mod.__opts__.update(self.opts)
else:
mod.__opts__ = self.opts
mod.__grains__ = self.grains
mod.__pillar__ = self.pillar
if pack:
if isinstance(pack, list):
for chunk in pack:
if not isinstance(chunk, dict):
continue
try:
setattr(mod, chunk['name'], chunk['value'])
except KeyError:
pass
else:
setattr(mod, pack['name'], pack['value'])
# Call a module's initialization method if it exists
if hasattr(mod, '__init__'):
if callable(mod.__init__):
try:
mod.__init__(self.opts)
except TypeError:
pass
# Trim the full pathname to just the module
# this will be the short name that other salt modules and state
# will refer to it as.
module_name = mod.__name__.rsplit('.', 1)[-1]
if virtual_enable:
# if virtual modules are enabled, we need to look for the
# __virtual__() function inside that module and run it.
# This function will return either a new name for the module,
                # an empty string (it won't be loaded, but you just need to
                # check against the same python type, a string), or False.
# This allows us to have things like the pkg module working on
# all platforms under the name 'pkg'. It also allows for
# modules like augeas_cfg to be referred to as 'augeas', which
# would otherwise have namespace collisions. And finally it
# allows modules to return False if they are not intended to
# run on the given platform or are missing dependencies.
try:
if hasattr(mod, '__virtual__'):
if callable(mod.__virtual__):
virtual = mod.__virtual__()
if not virtual:
# if __virtual__() evaluates to false then the
# module wasn't meant for this platform or it's
# not supposed to load for some other reason.
# Some modules might accidentally return None
# and are improperly loaded
if virtual is None:
log.warning(
'{0}.__virtual__() is wrongly '
'returning `None`. It should either '
'return `True`, `False` or a new '
'name. If you\'re the developer '
'of the module {1!r}, please fix '
'this.'.format(
mod.__name__,
module_name
)
)
continue
if virtual is not True and module_name != virtual:
# If __virtual__ returned True the module will
# be loaded with the same name, if it returned
# other value than `True`, it should be a new
# name for the module.
# Update the module name with the new name
log.debug(
'Loaded {0} as virtual {1}'.format(
module_name, virtual
)
)
if not hasattr(mod, '__virtualname__'):
salt.utils.warn_until(
'Hydrogen',
'The {0!r} module is renaming itself '
                                        'in its __virtual__() function ({1} '
                                        '=> {2}). Please set its virtual '
'name as the \'__virtualname__\' '
'module attribute. Example: '
'"__virtualname__ = {2!r}"'.format(
mod.__name__,
module_name,
virtual
)
)
module_name = virtual
elif virtual and hasattr(mod, '__virtualname__'):
module_name = mod.__virtualname__
except KeyError:
# Key errors come out of the virtual function when passing
# in incomplete grains sets, these can be safely ignored
# and logged to debug, still, it includes the traceback to
# help debugging.
log.debug(
'KeyError when loading {0}'.format(module_name),
exc_info=True
)
except Exception:
# If the module throws an exception during __virtual__()
# then log the information and continue to the next.
log.exception(
'Failed to read the virtual function for '
'{0}: {1}'.format(
self.tag, module_name
)
)
continue
if whitelist:
# If a whitelist is defined then only load the module if it is
# in the whitelist
if module_name not in whitelist:
continue
if getattr(mod, '__load__', False) is not False:
log.info(
'The functions from module {0!r} are being loaded from '
'the provided __load__ attribute'.format(
module_name
)
)
for attr in getattr(mod, '__load__', dir(mod)):
if attr.startswith('_'):
# skip private attributes
# log messages omitted for obviousness
continue
if callable(getattr(mod, attr)):
# check to make sure this is callable
func = getattr(mod, attr)
if isinstance(func, type):
# skip callables that might be exceptions
if any(['Error' in func.__name__,
'Exception' in func.__name__]):
continue
# now that callable passes all the checks, add it to the
# library of available functions of this type
# Let's get the function name.
# If the module has the __func_alias__ attribute, it must
# be a dictionary mapping in the form of(key -> value):
# <real-func-name> -> <desired-func-name>
#
                # It defaults, of course, to the found callable attribute
# name if no alias is defined.
funcname = getattr(mod, '__func_alias__', {}).get(
attr, attr
)
# functions are namespaced with their module name
module_func_name = '{0}.{1}'.format(module_name, funcname)
funcs[module_func_name] = func
log.trace(
'Added {0} to {1}'.format(module_func_name, self.tag)
)
self._apply_outputter(func, mod)
# Handle provider overrides
if provider_overrides and self.opts.get('providers', False):
if isinstance(self.opts['providers'], dict):
for mod, provider in self.opts['providers'].items():
newfuncs = raw_mod(self.opts, provider, funcs)
if newfuncs:
for newfunc in newfuncs:
f_key = '{0}{1}'.format(
mod, newfunc[newfunc.rindex('.'):]
)
funcs[f_key] = newfuncs[newfunc]
# now that all the functions have been collected, iterate back over
# the available modules and inject the special __salt__ namespace that
# contains these functions.
for mod in modules:
if not hasattr(mod, '__salt__') or (
not in_pack(pack, '__salt__') and
not str(mod.__name__).startswith('salt.loaded.int.grain')
):
mod.__salt__ = funcs
elif not in_pack(pack, '__salt__') and str(mod.__name__).startswith('salt.loaded.int.grain'):
mod.__salt__.update(funcs)
return funcs
def _apply_outputter(self, func, mod):
'''
Apply the __outputter__ variable to the functions
'''
if hasattr(mod, '__outputter__'):
outp = mod.__outputter__
if func.__name__ in outp:
func.__outputter__ = outp[func.__name__]
def filter_func(self, name, pack=None, whitelist=None):
'''
        Filter a specific function out of the functions; this is used to load
        the returners for the salt minion
'''
funcs = {}
if pack:
gen = self.gen_functions(pack, whitelist=whitelist)
else:
gen = self.gen_functions(whitelist=whitelist)
for key, fun in gen.items():
if key[key.index('.') + 1:] == name:
funcs[key[:key.index('.')]] = fun
return funcs
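    # For example, filter_func('render') turns generated entries such as
    # 'py.render' and 'yaml.render' into {'py': <func>, 'yaml': <func>}
    # (the module names here are illustrative).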
def chop_mods(self):
'''
Chop off the module names so that the raw functions are exposed,
used to generate the grains
'''
funcs = {}
for key, fun in self.gen_functions().items():
            funcs[key[key.rindex('.') + 1:]] = fun
return funcs
def gen_grains(self):
'''
Read the grains directory and execute all of the public callable
members. Then verify that the returns are python dict's and return
a dict containing all of the returned values.
'''
grains_data = {}
funcs = self.gen_functions()
for key, fun in funcs.items():
if key[key.index('.') + 1:] == 'core':
continue
try:
ret = fun()
except Exception:
log.critical(
'Failed to load grains defined in grain file {0} in '
'function {1}, error:\n'.format(
key, fun
),
exc_info=True
)
continue
if not isinstance(ret, dict):
continue
grains_data.update(ret)
for key, fun in funcs.items():
if key[key.index('.') + 1:] != 'core':
continue
ret = fun()
if not isinstance(ret, dict):
continue
grains_data.update(ret)
return grains_data
| victorywang80/Maintenance | saltstack/src/salt/loader.py | Python | apache-2.0 | 34,104 |
from django.shortcuts import render, get_object_or_404, redirect, \
HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.http import HttpResponseForbidden, JsonResponse
from django.core.exceptions import SuspiciousOperation, ValidationError, \
ImproperlyConfigured
from helpers.form_helpers import extract_model_field_meta_data
from businessareas.models import BusinessAreaDb, NotionalTableDb, \
AvailableNotionalTableSettingDb
from projects.read_write_project import read_project
from .models import ProjectDb, UserSettingDb
from .forms import ProjectForm, ConfirmDeleteForm
from .internal_representation_classes import FedsDateSetting, FedsSetting, \
FedsTitleDescription
FORBIDDEN_MESSAGE = 'Forbidden'
@login_required
def list_projects(request):
try:
projects = ProjectDb.objects.filter(user=request.user)
except ProjectDb.DoesNotExist:
projects = False
return render(
request,
'projects/list_projects.html',
{'projects': projects}
)
@login_required
def show_project(request, project_id):
# Check that the user has view access.
if not user_can_view_project(request, project_id):
return HttpResponseForbidden()
# Get project's basic deets.
project_db = get_object_or_404(ProjectDb, pk=project_id)
project = read_project(project_db.pk)
setting_values = FedsSetting.generate_js_settings_values_object()
visibility_testers = FedsSetting.generate_js_visibility_testers_object()
return render(request, 'projects/show_project.html',
{
'project': project,
'settings_values': setting_values,
'visibility_testers': visibility_testers,
})
def user_can_view_project(request, project_id):
# Check whether the user has access to the project.
# TODO: replace with permission check?
# Is the owner of the project, or is staff.
current_user = request.user
project = get_object_or_404(ProjectDb, pk=project_id)
project_owner = project.user
if current_user == project_owner:
return True
    if current_user.is_staff:
return True
return False
def user_can_delete_project(request, project_id):
return user_can_view_project(request, project_id)
def user_can_edit_project(request, project_id):
return user_can_view_project(request, project_id)
def user_can_create_project(request):
# Any user can create a project.
return True
@login_required
def create_project(request):
""" Create a new project. """
if not user_can_create_project(request):
return HttpResponseForbidden(FORBIDDEN_MESSAGE)
if request.method == 'GET':
        # Create an empty project form.
# Get the first business area.
business_area = BusinessAreaDb.objects.all().order_by('title')[0]
# Set initial, otherwise form fields have 'None' in them. Strange.
form = ProjectForm(
initial={'title': '', 'description': '',
'business_area': business_area}
# 'slug': '',
)
return render(
request,
'projects/create_project.html',
{
'form': form,
'model_field_meta_data':
extract_model_field_meta_data(form,
['help_text', 'max_length']),
}
)
if request.method != 'POST':
raise SuspiciousOperation(
'Bad HTTP op in create_edit_project: {op}'.format(
op=request.method))
# It's a POST.
form = ProjectForm(request.POST)
# Apply form validation. This does not apply model validation.
if form.is_valid():
        # Create an empty ProjectDb object.
project = ProjectDb()
# Copy data from form fields into model instance.
cleaned_data = form.cleaned_data
project.user = request.user
project.title = cleaned_data['title']
project.description = cleaned_data['description']
# project.slug = cleaned_data['slug']
project.business_area = cleaned_data['business_area']
project.save()
# For new record, save will add the new id to the model instance.
# TODO: Replace explicit link.
return redirect('/projects/{project_id}'.format(project_id=project.pk))
# There were data errors.
# Render the form again.
return render(
request,
'projects/create_project.html',
{
'form': form,
'model_field_meta_data':
extract_model_field_meta_data(form,
['help_text', 'max_length']),
}
)
@login_required
def delete_project(request, project_id):
""" Delete a project. """
if not user_can_delete_project(request, project_id):
return HttpResponseForbidden(FORBIDDEN_MESSAGE)
if request.method == 'GET':
# User is asking to delete. Show the confirmation form.
form = ConfirmDeleteForm()
project = ProjectDb.objects.get(pk=project_id)
return render(
request,
'projects/delete_project.html',
{
'form': form,
'project_id': project_id, # id is passed for Cancel link.
'project_title': project.title,
'model_field_meta_data':
extract_model_field_meta_data(form, ['help_text']),
}
)
if request.method != 'POST':
raise SuspiciousOperation(
'Bad HTTP op in delete_project: {op}'.format(op=request.method))
# It's a POST.
form = ConfirmDeleteForm(request.POST)
if form.is_valid():
cleaned_data = form.cleaned_data
confirm_is_checked = cleaned_data['confirm']
if confirm_is_checked:
# TODO: delete linked records.
project = get_object_or_404(ProjectDb, pk=project_id)
project.delete()
messages.success(request, 'Project deleted.')
# TODO: replace explicit link.
return redirect('home')
# Not confirmed.
messages.info(request, 'Deletion not confirmed.')
return redirect('projects:show_project', project_id=project_id)
# Form was not valid. This should not happen.
raise ValidationError('Huh? Delete form not is_valid().')
@login_required
def clone_project(request):
return HttpResponse('Heretical!')
@login_required
def request_setting_widget(request):
""" Get a widget for a setting to show in a form. """
try:
# Identify the project and setting.
project_id, setting_machine_name, project = get_setting_info(request)
# Get the widget code.
widget_html, validators = FedsSetting.setting_machine_names[
setting_machine_name].display_widget()
result = {
'status': 'ok',
'widgethtml': widget_html,
'validators': validators,
}
return JsonResponse(result)
except Exception as e:
return JsonResponse({'status': 'Error: ' + e.__str__()})
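# A hypothetical way to exercise the view above from a Django test; the URL
# and the setting machine name are assumptions, not defined by this module:
#
#   from django.test import Client
#   client = Client()
#   client.force_login(some_user)
#   response = client.post('/projects/request_setting_widget/',
#                          {'projectid': 1, 'machinename': 'num_customers'})
#   response.json()  # -> {'status': 'ok', 'widgethtml': ..., 'validators': ...}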
@login_required
def get_setting_info(request):
"""
Get info identifying the setting from the request.
:param request:
:return:
"""
# Is the project id given?
project_id = request.POST.get('projectid', None)
if project_id is None:
        raise LookupError('get_setting_info: project id missing.')
# Can use user edit the project?
if not user_can_edit_project(request, project_id):
        raise PermissionError('get_setting_info: permission denied.')
# Is the settings's machine name given?
setting_machine_name = request.POST.get('machinename', None)
if setting_machine_name is None:
        raise LookupError('get_setting_info: machine name missing.')
# Load the FedsXXXSettings for the project.
project = read_project(project_id)
# Is the machine name defined?
if setting_machine_name not in FedsSetting.setting_machine_names:
message = 'get_setting_info: machine name "{mn}" unknown.'
raise LookupError(message.format(mn=setting_machine_name))
return project_id, setting_machine_name, project
@login_required
def save_setting(request):
""" Save a setting to the database. """
try:
# Identify the project and setting.
project_id, setting_machine_name, project = get_setting_info(request)
# Get the new value
new_value = request.POST.get('newValue', None)
if new_value is None:
msg = 'New value missing. Proj: {proj_id}, machine name: {mn}.'
raise ValueError(msg.format(proj_id=project_id,
mn=setting_machine_name))
# Get project DB record.
project_db = ProjectDb.objects.get(pk=project_id)
# Lookup existing records with given project and machine name.
user_setting_db_recs = UserSettingDb.objects.filter(
project=project_db,
machine_name=setting_machine_name
)
# There should only be one.
if user_setting_db_recs.count() > 1:
msg = 'Too many user settings. Proj: {proj_id}, machine name: {mn}.'
raise LookupError(msg.format(proj_id=project_id,
mn=setting_machine_name))
if user_setting_db_recs.count() == 0:
# Make a new record.
user_setting_db = UserSettingDb(
project=project_db,
machine_name=setting_machine_name,
value=new_value
)
else:
# Update
user_setting_db = user_setting_db_recs[0]
user_setting_db.value = new_value
user_setting_db.save()
# Done.
return JsonResponse({'status': 'ok'})
except Exception as e:
return JsonResponse({'status': 'Error: ' + e.__str__()})
@login_required
def load_setting_deets(request):
"""
Load the deets for a single setting.
:param request:
:return: HTML to show the deets.
"""
try:
# Identify the project and setting.
project_id, setting_machine_name, project = get_setting_info(request)
# Is it in the machine names list?
if setting_machine_name not in FedsSetting.setting_machine_names:
raise ReferenceError('load_setting_deets: cannot find "{mn}"'
.format(mn=setting_machine_name))
# Get the deets.
html = FedsSetting.setting_machine_names[
setting_machine_name].display_deets()
return JsonResponse({'status': 'ok', 'deets': html})
except Exception as e:
return JsonResponse({'status': 'Error: ' + e.__str__()})
@login_required
def request_title_description_widget(request):
""" Get a widget for the project title and description. """
try:
# Get the project id.
project_id = request.POST.get('projectid', None)
if project_id is None:
raise LookupError('title desc widget: project id missing.')
project_db = ProjectDb.objects.get(pk=project_id)
# Can user edit the project?
if not user_can_edit_project(request, project_id):
raise PermissionError('title desc widget: permission denied.')
widget = FedsTitleDescription(project_id, project_db.title,
project_db.description)
widget_html = widget.display_widget()
result = {
'status': 'ok',
'widgethtml': widget_html,
}
return JsonResponse(result)
except Exception as e:
return JsonResponse({'status': 'Error: ' + e.__str__()})
@login_required
def save_title_description(request):
""" Save project title and description to the database. """
try:
# Get required data.
project_id = request.POST.get('projectid', None)
if project_id is None:
raise LookupError('Project id missing.')
project_title = request.POST.get('title', None)
if project_title is None:
raise LookupError('Project title.')
project_description = request.POST.get('description', None)
if project_description is None:
raise LookupError('Description missing.')
# Can user edit the project?
if not user_can_edit_project(request, project_id):
raise PermissionError('Permission denied.')
project_db = ProjectDb.objects.get(pk=project_id)
project_db.title = project_title
project_db.description = project_description
project_db.save()
return JsonResponse({'status': 'ok'})
except Exception as e:
return JsonResponse({'status': 'Error: save title desc:' + e.__str__()})
| kieranmathieson/feds | projects/views.py | Python | apache-2.0 | 12,870 |
# -*- coding: utf-8 -*-
class Charset(object):
common_name = 'NotoSansKhmer-Regular'
native_name = ''
def glyphs(self):
glyphs = []
glyphs.append(0x008C) #uni17E6
glyphs.append(0x008D) #uni17E7
glyphs.append(0x008A) #uni17E4
glyphs.append(0x0085) #uni17DB
glyphs.append(0x0088) #uni17E2
glyphs.append(0x0089) #uni17E3
glyphs.append(0x0086) #uni17E0
glyphs.append(0x001F) #less
glyphs.append(0x0035) #uni1789
glyphs.append(0x0007) #dollar
glyphs.append(0x008E) #uni17E8
glyphs.append(0x008F) #uni17E9
glyphs.append(0x0017) #four
glyphs.append(0x008B) #uni17E5
glyphs.append(0x000F) #comma
glyphs.append(0x009D) #uni17D2178C
glyphs.append(0x009C) #uni17D2178B
glyphs.append(0x009B) #uni17D2178A
glyphs.append(0x00A0) #uni17D2178F
glyphs.append(0x009F) #uni17D2178E
glyphs.append(0x009E) #uni17D2178D
glyphs.append(0x00C3) #uni17B9.r
glyphs.append(0x0102) #glyph00258
glyphs.append(0x00CB) #uni17D2178C.n
glyphs.append(0x00F9) #glyph00249
glyphs.append(0x00C2) #uni17B8.r
glyphs.append(0x00FA) #glyph00250
glyphs.append(0x00FD) #glyph00253
glyphs.append(0x0087) #uni17E1
glyphs.append(0x00FF) #glyph00255
glyphs.append(0x00FE) #glyph00254
glyphs.append(0x0101) #glyph00257
glyphs.append(0x0100) #glyph00256
glyphs.append(0x0066) #uni17BC
glyphs.append(0x0065) #uni17BB
glyphs.append(0x0064) #uni17BA
glyphs.append(0x00B6) #uni17B9.a
glyphs.append(0x00C9) #uni17D2178A.n
glyphs.append(0x0069) #uni17BF
glyphs.append(0x0068) #uni17BE
glyphs.append(0x0067) #uni17BD
glyphs.append(0x00E3) #glyph00227
glyphs.append(0x004C) #uni17A0
glyphs.append(0x005F) #uni17B3
glyphs.append(0x005E) #uni17B2
glyphs.append(0x002B) #guillemotright
glyphs.append(0x005C) #uni17B0
glyphs.append(0x0061) #uni17B7
glyphs.append(0x0021) #greater
glyphs.append(0x00CD) #uni17D2178A.r
glyphs.append(0x0063) #uni17B9
glyphs.append(0x0062) #uni17B8
glyphs.append(0x0002) #nonmarkingreturn
glyphs.append(0x0053) #uni17A7
glyphs.append(0x0106) #uni200C
glyphs.append(0x0104) #glyph00260
glyphs.append(0x0018) #five
glyphs.append(0x00BA) #uni17D0.a
glyphs.append(0x00CF) #uni17D21798.r
glyphs.append(0x0107) #uni200D
glyphs.append(0x0051) #uni17A5
glyphs.append(0x0001) #.null
glyphs.append(0x0099) #uni17D21789
glyphs.append(0x0098) #uni17D21788
glyphs.append(0x0025) #bar
glyphs.append(0x00D3) #uni17BB.n2
glyphs.append(0x0093) #uni17D21783
glyphs.append(0x0092) #uni17D21782
glyphs.append(0x0091) #uni17D21781
glyphs.append(0x0090) #uni17D21780
glyphs.append(0x0097) #uni17D21787
glyphs.append(0x0096) #uni17D21786
glyphs.append(0x0095) #uni17D21785
glyphs.append(0x0094) #uni17D21784
glyphs.append(0x00C4) #uni17BA.r
glyphs.append(0x00B3) #uni17BD.b
glyphs.append(0x00C6) #uni17C9.r
glyphs.append(0x00B7) #uni17BA.a
glyphs.append(0x00DE) #glyph00222
glyphs.append(0x000C) #parenright
glyphs.append(0x006B) #uni17C1
glyphs.append(0x006C) #uni17C2
glyphs.append(0x006D) #uni17C3
glyphs.append(0x006E) #uni17C4
glyphs.append(0x006F) #uni17C5
glyphs.append(0x0070) #uni17C6
glyphs.append(0x0071) #uni17C7
glyphs.append(0x0072) #uni17C8
glyphs.append(0x0073) #uni17C9
glyphs.append(0x0105) #glyph00261
glyphs.append(0x0013) #zero
glyphs.append(0x000A) #quotesingle
glyphs.append(0x00C7) #uni17CD.r
glyphs.append(0x0024) #braceleft
glyphs.append(0x00A9) #uni17D21798
glyphs.append(0x00AA) #uni17D21799
glyphs.append(0x0003) #space
glyphs.append(0x00A1) #uni17D21790
glyphs.append(0x00A2) #uni17D21791
glyphs.append(0x00A3) #uni17D21792
glyphs.append(0x00A4) #uni17D21793
glyphs.append(0x00A5) #uni17D21794
glyphs.append(0x00A6) #uni17D21795
glyphs.append(0x00A7) #uni17D21796
glyphs.append(0x00A8) #uni17D21797
glyphs.append(0x0009) #ampersand
glyphs.append(0x0012) #slash
glyphs.append(0x00CE) #uni17D21797.r
glyphs.append(0x00B2) #uni17BC.b
glyphs.append(0x00FB) #glyph00251
glyphs.append(0x0103) #glyph00259
glyphs.append(0x00D9) #glyph00217
glyphs.append(0x00C1) #uni17B7.r
glyphs.append(0x00BC) #uni1794.a
glyphs.append(0x00AB) #uni17D2179A
glyphs.append(0x0004) #exclam
glyphs.append(0x00AD) #uni17D2179C
glyphs.append(0x00B4) #uni17B7.a
glyphs.append(0x00AE) #uni17D2179F
glyphs.append(0x0083) #uni17D9
glyphs.append(0x00D6) #uni17D21798.b
glyphs.append(0x00C8) #uni17B717CD.r
glyphs.append(0x0082) #uni17D8
glyphs.append(0x00C5) #uni17C6.r
glyphs.append(0x0020) #equal
glyphs.append(0x0074) #uni17CA
glyphs.append(0x0075) #uni17CB
glyphs.append(0x0076) #uni17CC
glyphs.append(0x000E) #plus
glyphs.append(0x0000) #.notdef
glyphs.append(0x0079) #uni17CF
glyphs.append(0x001B) #eight
glyphs.append(0x005A) #uni17AE
glyphs.append(0x006A) #uni17C0
glyphs.append(0x0054) #uni17A8
glyphs.append(0x0055) #uni17A9
glyphs.append(0x00DB) #glyph00219
glyphs.append(0x004E) #uni17A2
glyphs.append(0x004F) #uni17A3
glyphs.append(0x0010) #hyphen
glyphs.append(0x004D) #uni17A1
glyphs.append(0x0052) #uni17A6
glyphs.append(0x0011) #period
glyphs.append(0x0050) #uni17A4
glyphs.append(0x0014) #one
glyphs.append(0x0027) #asciitilde
glyphs.append(0x001D) #colon
glyphs.append(0x00D4) #uni17BC.n2
glyphs.append(0x0028) #zwsp
glyphs.append(0x00AC) #uni17D2179B
glyphs.append(0x000B) #parenleft
glyphs.append(0x00D7) #uni17D217A0.b
glyphs.append(0x00D8) #glyph00216
glyphs.append(0x00DA) #glyph00218
glyphs.append(0x0022) #question
glyphs.append(0x0015) #two
glyphs.append(0x00CC) #uni17D217A0.n
glyphs.append(0x00D2) #uni17BD.n
glyphs.append(0x002A) #uni00AD
glyphs.append(0x00D0) #uni17BB.n
glyphs.append(0x00F8) #glyph00248
glyphs.append(0x00B1) #uni17BB.b
glyphs.append(0x00E4) #glyph00228
glyphs.append(0x00E5) #glyph00229
glyphs.append(0x009A) #uni17D21789.a
glyphs.append(0x00DC) #glyph00220
glyphs.append(0x00DD) #glyph00221
glyphs.append(0x00C0) #uni17C0.b
glyphs.append(0x00DF) #glyph00223
glyphs.append(0x00E0) #glyph00224
glyphs.append(0x00E1) #glyph00225
glyphs.append(0x00E2) #glyph00226
glyphs.append(0x00BB) #uni1789.a
glyphs.append(0x00AF) #uni17D217A0
glyphs.append(0x00B0) #uni17D217A2
glyphs.append(0x0029) #guillemotleft
glyphs.append(0x00FC) #glyph00252
glyphs.append(0x00BD) #uni17D2179A.b
glyphs.append(0x00B8) #uni17C6.a
glyphs.append(0x001C) #nine
glyphs.append(0x0016) #three
glyphs.append(0x0057) #uni17AB
glyphs.append(0x0058) #uni17AC
glyphs.append(0x0056) #uni17AA
glyphs.append(0x005B) #uni17AF
glyphs.append(0x00F0) #glyph00240
glyphs.append(0x0059) #uni17AD
glyphs.append(0x0026) #braceright
glyphs.append(0x00B9) #uni17CE.a
glyphs.append(0x001A) #seven
glyphs.append(0x00F1) #glyph00241
glyphs.append(0x00EF) #glyph00239
glyphs.append(0x001E) #semicolon
glyphs.append(0x00EE) #glyph00238
glyphs.append(0x005D) #uni17B1
glyphs.append(0x0047) #uni179B
glyphs.append(0x0048) #uni179C
glyphs.append(0x0046) #uni179A
glyphs.append(0x004B) #uni179F
glyphs.append(0x0023) #at
glyphs.append(0x0049) #uni179D
glyphs.append(0x004A) #uni179E
glyphs.append(0x00ED) #glyph00237
glyphs.append(0x00D5) #uni17BD.n2
glyphs.append(0x00EB) #glyph00235
glyphs.append(0x00EA) #glyph00234
glyphs.append(0x00E9) #glyph00233
glyphs.append(0x00E8) #glyph00232
glyphs.append(0x00E7) #glyph00231
glyphs.append(0x0060) #uni17B6
glyphs.append(0x0031) #uni1785
glyphs.append(0x0030) #uni1784
glyphs.append(0x0033) #uni1787
glyphs.append(0x0032) #uni1786
glyphs.append(0x002D) #uni1781
glyphs.append(0x002C) #uni1780
glyphs.append(0x002F) #uni1783
glyphs.append(0x002E) #uni1782
glyphs.append(0x0084) #uni17DA
glyphs.append(0x0019) #six
glyphs.append(0x0008) #percent
glyphs.append(0x0034) #uni1788
glyphs.append(0x00B5) #uni17B8.a
glyphs.append(0x0077) #uni17CD
glyphs.append(0x00E6) #glyph00230
glyphs.append(0x0078) #uni17CE
glyphs.append(0x0005) #quotedbl
glyphs.append(0x00BF) #uni17BF.b
glyphs.append(0x00D1) #uni17BC.n
glyphs.append(0x0006) #numbersign
glyphs.append(0x000D) #asterisk
glyphs.append(0x0108) #uni25CC
glyphs.append(0x003A) #uni178E
glyphs.append(0x0039) #uni178D
glyphs.append(0x003B) #uni178F
glyphs.append(0x0036) #uni178A
glyphs.append(0x00BE) #uni17B717CD
glyphs.append(0x0038) #uni178C
glyphs.append(0x0037) #uni178B
glyphs.append(0x007B) #uni17D1
glyphs.append(0x007A) #uni17D0
glyphs.append(0x007D) #uni17D3
glyphs.append(0x007C) #uni17D2
glyphs.append(0x007F) #uni17D5
glyphs.append(0x007E) #uni17D4
glyphs.append(0x0081) #uni17D7
glyphs.append(0x0080) #uni17D6
glyphs.append(0x003E) #uni1792
glyphs.append(0x003F) #uni1793
glyphs.append(0x003C) #uni1790
glyphs.append(0x003D) #uni1791
glyphs.append(0x0042) #uni1796
glyphs.append(0x0043) #uni1797
glyphs.append(0x0040) #uni1794
glyphs.append(0x0041) #uni1795
glyphs.append(0x00F2) #glyph00242
glyphs.append(0x00F3) #glyph00243
glyphs.append(0x0044) #uni1798
glyphs.append(0x0045) #uni1799
glyphs.append(0x00F6) #glyph00246
glyphs.append(0x00F7) #glyph00247
glyphs.append(0x00F4) #glyph00244
glyphs.append(0x00F5) #glyph00245
glyphs.append(0x00EC) #glyph00236
glyphs.append(0x00CA) #uni17D2178B.n
return glyphs
| davelab6/pyfontaine | fontaine/charsets/noto_glyphs/notosanskhmer_regular.py | Python | gpl-3.0 | 11,163 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import sys
import threading
import types
import numpy as np
import six
from six.moves import map # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.core.protobuf import config_pb2
# pywrap_tensorflow must be imported first to avoid profobuf issues.
# (b/143110113)
# pylint: disable=invalid-import-order,g-bad-import-order,unused-import
from tensorflow.python import pywrap_tensorflow
from tensorflow.python import pywrap_tfe
# pylint: enable=invalid-import-order,g-bad-import-order,unused-import
from tensorflow.python import tf2
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import monitoring
from tensorflow.python.eager import tape
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import registry
from tensorflow.python.framework import tensor_conversion_registry
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import traceable_stack
from tensorflow.python.framework import versions
from tensorflow.python.ops import control_flow_util
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.profiler import trace
from tensorflow.python.types import core as core_tf_types
from tensorflow.python.types import internal
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import function_utils
from tensorflow.python.util import lock_util
from tensorflow.python.util import memory
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_stack
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import kwarg_only
from tensorflow.python.util.tf_export import tf_export
ag_ctx = LazyLoader(
"ag_ctx", globals(),
"tensorflow.python.autograph.core.ag_ctx")
# Temporary global switches determining if we should enable the work-in-progress
# calls to the C API. These will be removed once all functionality is supported.
_USE_C_API = True
_USE_C_SHAPES = True
_api_usage_gauge = monitoring.BoolGauge(
"/tensorflow/api/ops_eager_execution",
"Whether ops.enable_eager_execution() is called.")
_tensor_equality_api_usage_gauge = monitoring.BoolGauge(
"/tensorflow/api/enable_tensor_equality",
"Whether ops.enable_tensor_equality() is called.")
_control_flow_api_gauge = monitoring.BoolGauge(
"/tensorflow/api/enable_control_flow_v2",
"Whether enable_control_flow_v2() is called.")
_tf_function_api_guage = monitoring.BoolGauge(
"/tensorflow/api/tf_function",
"Whether tf.function() is used.")
# pylint: disable=protected-access
_DTYPES_INTERN_TABLE = dtypes._INTERN_TABLE
# pylint: enable=protected-access
def tensor_id(tensor):
"""Returns a unique identifier for this Tensor."""
return tensor._id # pylint: disable=protected-access
class _UserDeviceSpec(object):
"""Store user-specified device and provide computation of merged device."""
def __init__(self, device_name_or_function):
self._device_name_or_function = device_name_or_function
self.display_name = str(self._device_name_or_function)
self.function = device_name_or_function
self.raw_string = None
if isinstance(device_name_or_function, pydev.MergeDevice):
self.is_null_merge = device_name_or_function.is_null_merge
elif callable(device_name_or_function):
self.is_null_merge = False
dev_func = self._device_name_or_function
func_name = function_utils.get_func_name(dev_func)
func_code = function_utils.get_func_code(dev_func)
if func_code:
fname = func_code.co_filename
lineno = func_code.co_firstlineno
else:
fname = "unknown"
lineno = -1
self.display_name = "%s<%s, %d>" % (func_name, fname, lineno)
elif device_name_or_function is None:
# NOTE(taylorrobie): This MUST be False. None signals a break in the
# device stack, so `is_null_merge` must be False for such a case to
# allow callers to safely skip over null merges without missing a None.
self.is_null_merge = False
else:
self.raw_string = device_name_or_function
self.function = pydev.merge_device(device_name_or_function)
self.is_null_merge = self.function.is_null_merge
# We perform this check in __init__ because it is of non-trivial cost,
# and self.string_merge is typically called many times.
self.fast_string_merge = isinstance(self.function, pydev.MergeDevice)
def string_merge(self, node_def):
if self.fast_string_merge:
return self.function.shortcut_string_merge(node_def)
return compat.as_str(_device_string(self.function(node_def)))
class NullContextmanager(object):
def __init__(self, *args, **kwargs):
pass
def __enter__(self):
pass
def __exit__(self, type_arg, value_arg, traceback_arg):
return False # False values do not suppress exceptions
def _override_helper(clazz_object, operator, func):
"""Overrides (string) operator on Tensors to call func.
Args:
clazz_object: the class to override for; either Tensor or SparseTensor.
operator: the string name of the operator to override.
func: the function that replaces the overridden operator.
Raises:
ValueError: If operator is not allowed to be overwritten.
"""
if operator not in Tensor.OVERLOADABLE_OPERATORS:
raise ValueError("Overriding %s is disallowed" % operator)
setattr(clazz_object, operator, func)
def _as_graph_element(obj):
"""Convert `obj` to a graph element if possible, otherwise return `None`.
Args:
obj: Object to convert.
Returns:
The result of `obj._as_graph_element()` if that method is available;
otherwise `None`.
"""
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return None
# Deprecated - do not use.
# This API exists to avoid breaking estimator and tensorflow-mesh, which depend
# on this internal API. The stub should be safe to use after TF 2.3 is released.
def is_dense_tensor_like(t):
return isinstance(t, core_tf_types.Tensor)
def uid():
"""A unique (within this program execution) integer."""
return pywrap_tfe.TFE_Py_UID()
def numpy_text(tensor, is_repr=False):
"""Human readable representation of a tensor's numpy value."""
if tensor.dtype.is_numpy_compatible:
# pylint: disable=protected-access
text = repr(tensor._numpy()) if is_repr else str(tensor._numpy())
# pylint: enable=protected-access
else:
text = "<unprintable>"
if "\n" in text:
text = "\n" + text
return text
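# A small sketch of `numpy_text` on an eager tensor, assuming an eager
# TensorFlow runtime is available; graph tensors have no `_numpy()` and would
# not work here. The helper below is illustrative only and is never called.
def _example_numpy_text():
  import tensorflow as tf  # assumption: the public tf API is importable
  t = tf.constant([1, 2, 3])
  # str-style text ("[1 2 3]") and repr-style text for the same tensor.
  return numpy_text(t), numpy_text(t, is_repr=True)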
@tf_export(v1=["enable_tensor_equality"])
def enable_tensor_equality():
"""Compare Tensors with element-wise comparison and thus be unhashable.
Comparing tensors with element-wise allows comparisons such as
tf.Variable(1.0) == 1.0. Element-wise equality implies that tensors are
unhashable. Thus tensors can no longer be directly used in sets or as a key in
a dictionary.
"""
_tensor_equality_api_usage_gauge.get_cell().set(True)
Tensor._USE_EQUALITY = True # pylint: disable=protected-access
@tf_export(v1=["disable_tensor_equality"])
def disable_tensor_equality():
"""Compare Tensors by their id and be hashable.
This is a legacy behaviour of TensorFlow and is highly discouraged.
"""
_tensor_equality_api_usage_gauge.get_cell().set(False)
Tensor._USE_EQUALITY = False # pylint: disable=protected-access
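# A minimal sketch of how the two switches above change `==` semantics,
# assuming the public `tf` API is importable (they are exported as
# `tf.compat.v1.enable_tensor_equality` / `disable_tensor_equality`).
# Illustrative only; never called in this module.
def _example_tensor_equality_toggle():
  import tensorflow as tf
  x = tf.constant([1.0, 2.0])
  tf.compat.v1.disable_tensor_equality()
  by_id = (x == tf.constant([1.0, 2.0]))        # False: compared by object id
  tf.compat.v1.enable_tensor_equality()         # restore the TF2 default
  elementwise = (x == tf.constant([1.0, 2.0]))  # tf.Tensor([ True  True])
  return by_id, elementwise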
# TODO(mdan): This object should subclass Symbol, not just Tensor.
@tf_export("Tensor")
class Tensor(internal.NativeObject, core_tf_types.Tensor):
"""A tensor is a multidimensional array of elements represented by a
`tf.Tensor` object. All elements are of a single known data type.
When writing a TensorFlow program, the main object that is
manipulated and passed around is the `tf.Tensor`.
A `tf.Tensor` has the following properties:
* a single data type (float32, int32, or string, for example)
* a shape
TensorFlow supports eager execution and graph execution. In eager
execution, operations are evaluated immediately. In graph
execution, a computational graph is constructed for later
evaluation.
TensorFlow defaults to eager execution. In the example below, the
matrix multiplication results are calculated immediately.
>>> # Compute some values using a Tensor
>>> c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
>>> d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
>>> e = tf.matmul(c, d)
>>> print(e)
tf.Tensor(
[[1. 3.]
[3. 7.]], shape=(2, 2), dtype=float32)
Note that during eager execution, you may discover your `Tensors` are actually
of type `EagerTensor`. This is an internal detail, but it does give you
access to a useful function, `numpy`:
>>> type(e)
<class '...ops.EagerTensor'>
>>> print(e.numpy())
[[1. 3.]
[3. 7.]]
In TensorFlow, `tf.function`s are a common way to define graph execution.
A Tensor's shape (that is, the rank of the Tensor and the size of
each dimension) may not always be fully known. In `tf.function`
definitions, the shape may only be partially known.
Most operations produce tensors of fully-known shapes if the shapes of their
inputs are also fully known, but in some cases it's only possible to find the
shape of a tensor at execution time.
A number of specialized tensors are available: see `tf.Variable`,
`tf.constant`, `tf.placeholder`, `tf.sparse.SparseTensor`, and
`tf.RaggedTensor`.
For more on Tensors, see the [guide](https://tensorflow.org/guide/tensor).
"""
# List of Python operators that we allow to override.
OVERLOADABLE_OPERATORS = {
# Binary.
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__div__",
"__rdiv__",
"__truediv__",
"__rtruediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__rmod__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__ne__",
"__eq__",
"__and__",
"__rand__",
"__or__",
"__ror__",
"__xor__",
"__rxor__",
"__getitem__",
"__pow__",
"__rpow__",
# Unary.
"__invert__",
"__neg__",
"__abs__",
"__matmul__",
"__rmatmul__"
}
# Whether to allow hashing or numpy-style equality
_USE_EQUALITY = tf2.enabled()
def __init__(self, op, value_index, dtype):
"""Creates a new `Tensor`.
Args:
op: An `Operation`. `Operation` that computes this tensor.
value_index: An `int`. Index of the operation's endpoint that produces
this tensor.
dtype: A `DType`. Type of elements stored in this tensor.
Raises:
TypeError: If the op is not an `Operation`.
"""
if not isinstance(op, Operation):
raise TypeError("op needs to be an Operation: %s" % op)
self._op = op
self._value_index = value_index
self._dtype = dtypes.as_dtype(dtype)
# This will be set by self._as_tf_output().
self._tf_output = None
# This will be set by self.shape().
self._shape_val = None
# List of operations that use this Tensor as input. We maintain this list
# to easily navigate a computation graph.
self._consumers = []
self._id = uid()
self._name = None
@staticmethod
def _create_with_tf_output(op, value_index, dtype, tf_output):
ret = Tensor(op, value_index, dtype)
ret._tf_output = tf_output
return ret
@property
def op(self):
"""The `Operation` that produces this tensor as an output."""
return self._op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._dtype
@property
def graph(self):
"""The `Graph` that contains this tensor."""
return self._op.graph
@property
def name(self):
"""The string name of this tensor."""
if self._name is None:
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
self._name = "%s:%d" % (self._op.name, self._value_index)
return self._name
@property
def device(self):
"""The name of the device on which this tensor will be produced, or None."""
return self._op.device
@property
def shape(self):
"""Returns a `tf.TensorShape` that represents the shape of this tensor.
>>> t = tf.constant([1,2,3,4,5])
>>> t.shape
TensorShape([5])
`tf.Tensor.shape` is equivalent to `tf.Tensor.get_shape()`.
In a `tf.function` or when building a model using
`tf.keras.Input`, they return the build-time shape of the
tensor, which may be partially unknown.
A `tf.TensorShape` is not a tensor. Use `tf.shape(t)` to get a tensor
containing the shape, calculated at runtime.
See `tf.Tensor.get_shape()`, and `tf.TensorShape` for details and examples.
"""
if self._shape_val is None:
self._shape_val = self._c_api_shape()
return self._shape_val
def _c_api_shape(self):
"""Returns the TensorShape of this tensor according to the C API."""
c_graph = self._op._graph._c_graph # pylint: disable=protected-access
shape_vec, unknown_shape = pywrap_tf_session.TF_GraphGetTensorShapeHelper(
c_graph, self._as_tf_output())
if unknown_shape:
return tensor_shape.unknown_shape()
else:
shape_vec = [None if d == -1 else d for d in shape_vec]
return tensor_shape.TensorShape(shape_vec)
@property
def _shape(self):
logging.warning("Tensor._shape is private, use Tensor.shape "
"instead. Tensor._shape will eventually be removed.")
return self.shape
@_shape.setter
def _shape(self, value):
raise ValueError(
"Tensor._shape cannot be assigned, use Tensor.set_shape instead.")
def _disallow_when_autograph_disabled(self, task):
raise errors.OperatorNotAllowedInGraphError(
"{} is not allowed: AutoGraph is disabled in this function."
" Try decorating it directly with @tf.function.".format(task))
def _disallow_when_autograph_enabled(self, task):
raise errors.OperatorNotAllowedInGraphError(
"{} is not allowed: AutoGraph did convert this function. This might"
" indicate you are trying to use an unsupported feature.".format(task))
def _disallow_in_graph_mode(self, task):
raise errors.OperatorNotAllowedInGraphError(
"{} is not allowed in Graph execution. Use Eager execution or decorate"
" this function with @tf.function.".format(task))
def _disallow_bool_casting(self):
if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
self._disallow_when_autograph_disabled(
"using a `tf.Tensor` as a Python `bool`")
elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:
self._disallow_when_autograph_enabled(
"using a `tf.Tensor` as a Python `bool`")
else:
# Default: V1-style Graph execution.
self._disallow_in_graph_mode("using a `tf.Tensor` as a Python `bool`")
def _disallow_iteration(self):
if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
self._disallow_when_autograph_disabled("iterating over `tf.Tensor`")
elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:
self._disallow_when_autograph_enabled("iterating over `tf.Tensor`")
else:
# Default: V1-style Graph execution.
self._disallow_in_graph_mode("iterating over `tf.Tensor`")
def __iter__(self):
if not context.executing_eagerly():
self._disallow_iteration()
shape = self._shape_tuple()
if shape is None:
raise TypeError("Cannot iterate over a tensor with unknown shape.")
if not shape:
raise TypeError("Cannot iterate over a scalar tensor.")
if shape[0] is None:
raise TypeError(
"Cannot iterate over a tensor with unknown first dimension.")
return _TensorIterator(self, shape[0])
def _shape_as_list(self):
if self.shape.ndims is not None:
return [dim.value for dim in self.shape.dims]
else:
return None
def _shape_tuple(self):
shape = self._shape_as_list()
if shape is None:
return None
return tuple(shape)
def _rank(self):
"""Integer rank of this Tensor, if known, else None.
Returns:
Integer rank or None
"""
return self.shape.ndims
def get_shape(self):
"""Returns a `tf.TensorShape` that represents the shape of this tensor.
In eager execution the shape is always fully-known.
>>> a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
>>> print(a.shape)
(2, 3)
`tf.Tensor.get_shape()` is equivalent to `tf.Tensor.shape`.
When executing in a `tf.function` or building a model using
`tf.keras.Input`, `Tensor.shape` may return a partial shape (including
`None` for unknown dimensions). See `tf.TensorShape` for more details.
>>> inputs = tf.keras.Input(shape = [10])
>>> # Unknown batch size
>>> print(inputs.shape)
(None, 10)
The shape is computed using shape inference functions that are
registered for each `tf.Operation`.
The returned `tf.TensorShape` is determined at *build* time, without
executing the underlying kernel. It is not a `tf.Tensor`. If you need a
shape *tensor*, either convert the `tf.TensorShape` to a `tf.constant`, or
use the `tf.shape(tensor)` function, which returns the tensor's shape at
*execution* time.
    This is useful for debugging and providing early errors. For
    example, when tracing a `tf.function`, no ops are being executed, and
    shapes may be unknown (see the [Concrete Functions
Guide](https://www.tensorflow.org/guide/concrete_function) for details).
>>> @tf.function
... def my_matmul(a, b):
... result = a@b
... # the `print` executes during tracing.
... print("Result shape: ", result.shape)
... return result
The shape inference functions propagate shapes to the extent possible:
>>> f = my_matmul.get_concrete_function(
... tf.TensorSpec([None,3]),
... tf.TensorSpec([3,5]))
Result shape: (None, 5)
    Tracing may fail if a shape mismatch can be detected:
>>> cf = my_matmul.get_concrete_function(
... tf.TensorSpec([None,3]),
... tf.TensorSpec([4,5]))
Traceback (most recent call last):
...
ValueError: Dimensions must be equal, but are 3 and 4 for 'matmul' (op:
'MatMul') with input shapes: [?,3], [4,5].
In some cases, the inferred shape may have unknown dimensions. If
the caller has additional information about the values of these
dimensions, `tf.ensure_shape` or `Tensor.set_shape()` can be used to augment
the inferred shape.
>>> @tf.function
... def my_fun(a):
... a = tf.ensure_shape(a, [5, 5])
... # the `print` executes during tracing.
... print("Result shape: ", a.shape)
... return a
>>> cf = my_fun.get_concrete_function(
... tf.TensorSpec([None, None]))
Result shape: (5, 5)
Returns:
A `tf.TensorShape` representing the shape of this tensor.
"""
return self.shape
def set_shape(self, shape):
"""Updates the shape of this tensor.
Note: It is recommended to use `tf.ensure_shape` instead of
`Tensor.set_shape`, because `tf.ensure_shape` provides better checking for
programming errors and can create guarantees for compiler
optimization.
With eager execution this operates as a shape assertion.
Here the shapes match:
>>> t = tf.constant([[1,2,3]])
>>> t.set_shape([1, 3])
Passing a `None` in the new shape allows any value for that axis:
>>> t.set_shape([1,None])
An error is raised if an incompatible shape is passed.
>>> t.set_shape([1,5])
Traceback (most recent call last):
...
ValueError: Tensor's shape (1, 3) is not compatible with supplied
shape [1, 5]
When executing in a `tf.function`, or building a model using
`tf.keras.Input`, `Tensor.set_shape` will *merge* the given `shape` with
the current shape of this tensor, and set the tensor's shape to the
merged value (see `tf.TensorShape.merge_with` for details):
>>> t = tf.keras.Input(shape=[None, None, 3])
>>> print(t.shape)
(None, None, None, 3)
Dimensions set to `None` are not updated:
>>> t.set_shape([None, 224, 224, None])
>>> print(t.shape)
(None, 224, 224, 3)
The main use case for this is to provide additional shape information
that cannot be inferred from the graph alone.
    For example, if you know all the images in a dataset have shape [28,28,3] you
    can set it with `set_shape`:
>>> @tf.function
... def load_image(filename):
... raw = tf.io.read_file(filename)
... image = tf.image.decode_png(raw, channels=3)
... # the `print` executes during tracing.
... print("Initial shape: ", image.shape)
... image.set_shape([28, 28, 3])
... print("Final shape: ", image.shape)
... return image
Trace the function, see the [Concrete Functions
Guide](https://www.tensorflow.org/guide/concrete_function) for details.
>>> cf = load_image.get_concrete_function(
... tf.TensorSpec([], dtype=tf.string))
Initial shape: (None, None, 3)
Final shape: (28, 28, 3)
    Similarly, the `tf.io.parse_tensor` function could return a tensor with
    any shape; even the rank (`tf.rank`) may be unknown. If you know that all
    your serialized tensors will be 2d, set it with `set_shape`:
>>> @tf.function
... def my_parse(string_tensor):
... result = tf.io.parse_tensor(string_tensor, out_type=tf.float32)
... # the `print` executes during tracing.
... print("Initial shape: ", result.shape)
... result.set_shape([None, None])
... print("Final shape: ", result.shape)
... return result
Trace the function
>>> concrete_parse = my_parse.get_concrete_function(
... tf.TensorSpec([], dtype=tf.string))
Initial shape: <unknown>
Final shape: (None, None)
Make sure it works:
>>> t = tf.ones([5,3], dtype=tf.float32)
>>> serialized = tf.io.serialize_tensor(t)
>>> print(serialized.dtype)
<dtype: 'string'>
>>> print(serialized.shape)
()
>>> t2 = concrete_parse(serialized)
>>> print(t2.shape)
(5, 3)
Caution: `set_shape` ensures that the applied shape is compatible with
the existing shape, but it does not check at runtime. Setting
incorrect shapes can result in inconsistencies between the
statically-known graph and the runtime value of tensors. For runtime
    validation of the shape, use `tf.ensure_shape` instead; it also updates the
    statically-known `shape` of the tensor.
>>> # Serialize a rank-3 tensor
>>> t = tf.ones([5,5,5], dtype=tf.float32)
>>> serialized = tf.io.serialize_tensor(t)
>>> # The function still runs, even though it `set_shape([None,None])`
>>> t2 = concrete_parse(serialized)
>>> print(t2.shape)
(5, 5, 5)
Args:
shape: A `TensorShape` representing the shape of this tensor, a
`TensorShapeProto`, a list, a tuple, or None.
Raises:
ValueError: If `shape` is not compatible with the current shape of
this tensor.
"""
# Reset cached shape.
self._shape_val = None
# We want set_shape to be reflected in the C API graph for when we run it.
if not isinstance(shape, tensor_shape.TensorShape):
shape = tensor_shape.TensorShape(shape)
dim_list = []
if shape.dims is None:
unknown_shape = True
else:
unknown_shape = False
for dim in shape.dims:
if dim.value is None:
dim_list.append(-1)
else:
dim_list.append(dim.value)
try:
pywrap_tf_session.TF_GraphSetTensorShape_wrapper(
self._op._graph._c_graph, # pylint: disable=protected-access
self._as_tf_output(),
dim_list,
unknown_shape)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
@property
def value_index(self):
"""The index of this tensor in the outputs of its `Operation`."""
return self._value_index
def consumers(self):
"""Returns a list of `Operation`s that consume this tensor.
Returns:
A list of `Operation`s.
"""
consumer_names = pywrap_tf_session.TF_OperationOutputConsumers_wrapper(
self._as_tf_output())
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(name)
for name in consumer_names
]
# pylint: enable=protected-access
def _as_node_def_input(self):
"""Return a value to use for the NodeDef "input" attribute.
The returned string can be used in a NodeDef "input" attribute
to indicate that the NodeDef uses this Tensor as input.
Raises:
ValueError: if this Tensor's Operation does not have a name.
Returns:
a string.
"""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
if self._value_index == 0:
return self._op.name
else:
return "%s:%d" % (self._op.name, self._value_index)
def _as_tf_output(self):
# pylint: disable=protected-access
# NOTE: Beyond preventing unnecessary (re-)allocation, the cached object
# also guarantees that a dictionary of tf_output objects will retain a
# deterministic (yet unsorted) order which prevents memory blowup in the
# cache of executor(s) stored for every session.
if self._tf_output is None:
self._tf_output = c_api_util.tf_output(self.op._c_op, self.value_index)
return self._tf_output
# pylint: enable=protected-access
def __str__(self):
return "Tensor(\"%s\"%s%s%s)" % (
self.name,
(", shape=%s" %
self.get_shape()) if self.get_shape().ndims is not None else "",
(", dtype=%s" % self._dtype.name) if self._dtype else "",
(", device=%s" % self.device) if self.device else "")
def __repr__(self):
return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(),
self._dtype.name)
def __hash__(self):
g = getattr(self, "graph", None)
if (Tensor._USE_EQUALITY and executing_eagerly_outside_functions() and
(g is None or g.building_function)):
raise TypeError("Tensor is unhashable. "
"Instead, use tensor.ref() as the key.")
else:
return id(self)
def __copy__(self):
# TODO(b/77597810): get rid of Tensor copies.
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
# NOTE(mrry): This enables the Tensor's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Tensor class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Tensors interact
# with ndarrays.
__array_priority__ = 100
def __array__(self):
raise NotImplementedError(
"Cannot convert a symbolic Tensor ({}) to a numpy array."
" This error may indicate that you're trying to pass a Tensor to"
" a NumPy call, which is not supported".format(self.name))
def __len__(self):
raise TypeError("len is not well defined for symbolic Tensors. ({}) "
"Please call `x.shape` rather than `len(x)` for "
"shape information.".format(self.name))
# TODO(mdan): This convoluted machinery is hard to maintain. Clean up.
@staticmethod
def _override_operator(operator, func):
_override_helper(Tensor, operator, func)
def __bool__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This overload raises a `TypeError` when the user inadvertently
treats a `Tensor` as a boolean (most commonly in an `if` or `while`
statement), in code that was not converted by AutoGraph. For example:
```python
if tf.constant(True): # Will raise.
# ...
if tf.constant(5) < tf.constant(7): # Will raise.
# ...
```
Raises:
`TypeError`.
"""
self._disallow_bool_casting()
def __nonzero__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This is the Python 2.x counterpart to `__bool__()` above.
Raises:
`TypeError`.
"""
self._disallow_bool_casting()
def eval(self, feed_dict=None, session=None):
"""Evaluates this tensor in a `Session`.
Note: If you are not using `compat.v1` libraries, you should not need this,
(or `feed_dict` or `Session`). In eager execution (or within `tf.function`)
you do not need to call `eval`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `Tensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this tensor. If
none, the default session will be used.
Returns:
A numpy array corresponding to the value of this tensor.
"""
return _eval_using_default_session(self, feed_dict, self.graph, session)
@deprecation.deprecated(None, "Use ref() instead.")
def experimental_ref(self):
return self.ref()
def ref(self):
# tf.Variable also has the same ref() API. If you update the
# documentation here, please update tf.Variable.ref() as well.
"""Returns a hashable reference object to this Tensor.
The primary use case for this API is to put tensors in a set/dictionary.
    We can't put tensors in a set/dictionary as `tensor.__hash__()` is no longer
    available as of TensorFlow 2.0.
    The following will raise an exception starting in 2.0:
>>> x = tf.constant(5)
>>> y = tf.constant(10)
>>> z = tf.constant(10)
>>> tensor_set = {x, y, z}
Traceback (most recent call last):
...
TypeError: Tensor is unhashable. Instead, use tensor.ref() as the key.
>>> tensor_dict = {x: 'five', y: 'ten'}
Traceback (most recent call last):
...
TypeError: Tensor is unhashable. Instead, use tensor.ref() as the key.
Instead, we can use `tensor.ref()`.
>>> tensor_set = {x.ref(), y.ref(), z.ref()}
>>> x.ref() in tensor_set
True
>>> tensor_dict = {x.ref(): 'five', y.ref(): 'ten', z.ref(): 'ten'}
>>> tensor_dict[y.ref()]
'ten'
Also, the reference object provides `.deref()` function that returns the
original Tensor.
>>> x = tf.constant(5)
>>> x.ref().deref()
<tf.Tensor: shape=(), dtype=int32, numpy=5>
"""
return object_identity.Reference(self)
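# A short graph-mode sketch of the `Tensor` attributes defined above (`op`,
# `name`, `shape`, `consumers`), assuming the public `tf` API is importable.
# Illustrative only; never called in this module.
def _example_graph_tensor_attributes():
  import tensorflow as tf
  g = tf.Graph()
  with g.as_default():
    a = tf.constant([[1.0, 2.0]], name="a")  # symbolic (graph) Tensor
    b = tf.identity(a, name="b")
    assert a.name == "a:0"                   # "<op name>:<output index>"
    assert a.shape.as_list() == [1, 2]
    assert b.op in a.consumers()             # `b`'s op consumes `a`
  return g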
# TODO(agarwal): consider getting rid of this.
# TODO(mdan): This object should not subclass ops.Tensor.
class _EagerTensorBase(Tensor):
"""Base class for EagerTensor."""
# __complex__, __int__, __float__ and __index__ may copy the tensor to CPU and
# only work for scalars; values are cast as per numpy.
def __complex__(self):
return complex(self._numpy())
def __int__(self):
return int(self._numpy())
def __long__(self):
return long(self._numpy())
def __float__(self):
return float(self._numpy())
def __index__(self):
return self._numpy().__index__()
def __bool__(self):
return bool(self._numpy())
__nonzero__ = __bool__
def __format__(self, format_spec):
return self._numpy().__format__(format_spec)
def __reduce__(self):
return convert_to_tensor, (self._numpy(),)
def __copy__(self):
# Eager Tensors are immutable so it's safe to return themselves as a copy.
return self
def __deepcopy__(self, memo):
# Eager Tensors are immutable so it's safe to return themselves as a copy.
del memo
return self
def __str__(self):
return "tf.Tensor(%s, shape=%s, dtype=%s)" % (numpy_text(self), self.shape,
self.dtype.name)
def __repr__(self):
return "<tf.Tensor: shape=%s, dtype=%s, numpy=%s>" % (
self.shape, self.dtype.name, numpy_text(self, is_repr=True))
def __len__(self):
"""Returns the length of the first dimension in the Tensor."""
if not self.shape.ndims:
raise TypeError("Scalar tensor has no `len()`")
# pylint: disable=protected-access
try:
return self._shape_tuple()[0]
except core._NotOkStatusException as e:
six.raise_from(core._status_to_exception(e.code, e.message), None)
def __array__(self):
return self._numpy()
def _numpy_internal(self):
raise NotImplementedError()
def _numpy(self):
try:
return self._numpy_internal()
except core._NotOkStatusException as e: # pylint: disable=protected-access
six.raise_from(core._status_to_exception(e.code, e.message), None) # pylint: disable=protected-access
@property
def dtype(self):
# Note: using the intern table directly here as this is
# performance-sensitive in some models.
return dtypes._INTERN_TABLE[self._datatype_enum()] # pylint: disable=protected-access
def numpy(self):
"""Copy of the contents of this Tensor into a NumPy array or scalar.
Unlike NumPy arrays, Tensors are immutable, so this method has to copy
the contents to ensure safety. Use `memoryview` to get a readonly
view of the contents without doing a copy:
>>> t = tf.constant([42])
>>> np.array(memoryview(t))
array([42], dtype=int32)
Note that `memoryview` is only zero-copy for Tensors on CPU. If a Tensor
is on GPU, it will have to be transferred to CPU first in order for
`memoryview` to work.
Returns:
A NumPy array of the same shape and dtype or a NumPy scalar, if this
Tensor has rank 0.
Raises:
ValueError: If the dtype of this Tensor does not have a compatible
NumPy dtype.
"""
# TODO(slebedev): Consider avoiding a copy for non-CPU or remote tensors.
maybe_arr = self._numpy() # pylint: disable=protected-access
return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr
@property
def backing_device(self):
"""Returns the name of the device holding this tensor's memory.
`.backing_device` is usually the same as `.device`, which returns
the device on which the kernel of the operation that produced this tensor
ran. However, some operations can produce tensors on a different device
(e.g., an operation that executes on the GPU but produces output tensors
in host memory).
"""
raise NotImplementedError()
def _datatype_enum(self):
raise NotImplementedError()
def _shape_tuple(self):
"""The shape of this Tensor, as a tuple.
    This is more performant than tuple(shape().as_list()) as it avoids
    two list creations and one object creation. Marked private for now as,
    from an API
perspective, it would be better to have a single performant way of
getting a shape rather than exposing shape() and shape_tuple()
(and heaven forbid, shape_list() etc. as well!). Punting on that for now,
but ideally one would work things out and remove the need for this method.
Returns:
tuple with the shape.
"""
raise NotImplementedError()
def _rank(self):
"""Integer rank of this Tensor.
Unlike regular Tensors, the rank is always known for EagerTensors.
This is more performant than len(self._shape_tuple())
Returns:
Integer rank
"""
raise NotImplementedError()
def _num_elements(self):
"""Number of elements of this Tensor.
Unlike regular Tensors, the number of elements is always known for
EagerTensors.
This is more performant than tensor.shape.num_elements
Returns:
Long - num elements in the tensor
"""
raise NotImplementedError()
def _copy_to_device(self, device_name): # pylint: disable=redefined-outer-name
raise NotImplementedError()
@staticmethod
def _override_operator(name, func):
setattr(_EagerTensorBase, name, func)
def _copy_nograd(self, ctx=None, device_name=None):
"""Copies tensor to dest device, but doesn't record the operation."""
# Creates a new tensor on the dest device.
if ctx is None:
ctx = context.context()
if device_name is None:
device_name = ctx.device_name
# pylint: disable=protected-access
try:
ctx.ensure_initialized()
new_tensor = self._copy_to_device(device_name)
except core._NotOkStatusException as e:
six.raise_from(core._status_to_exception(e.code, e.message), None)
return new_tensor
def _copy(self, ctx=None, device_name=None):
"""Copies tensor to dest device."""
new_tensor = self._copy_nograd(ctx, device_name)
# Record the copy on tape and define backprop copy as well.
if context.executing_eagerly():
self_device = self.device
def grad_fun(dresult):
return [
dresult._copy(device_name=self_device)
if hasattr(dresult, "_copy") else dresult
]
tape.record_operation("_copy", [new_tensor], [self], grad_fun)
return new_tensor
# pylint: enable=protected-access
@property
def shape(self):
if self._tensor_shape is None: # pylint: disable=access-member-before-definition
# pylint: disable=protected-access
try:
# `_tensor_shape` is declared and defined in the definition of
# `EagerTensor`, in C.
self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple())
except core._NotOkStatusException as e:
six.raise_from(core._status_to_exception(e.code, e.message), None)
return self._tensor_shape
def get_shape(self):
"""Alias of Tensor.shape."""
return self.shape
def _shape_as_list(self):
"""The shape of the tensor as a list."""
return list(self._shape_tuple())
@property
def ndim(self):
"""Returns the number of Tensor dimensions."""
return self.shape.ndims
@deprecation.deprecated(None, "Use tf.identity instead.")
def cpu(self):
"""A copy of this Tensor with contents backed by host memory."""
return self._copy(context.context(), "CPU:0")
@deprecation.deprecated(None, "Use tf.identity instead.")
def gpu(self, gpu_index=0):
"""A copy of this Tensor with contents backed by memory on the GPU.
    Args:
gpu_index: Identifies which GPU to place the contents on the returned
Tensor in.
Returns:
A GPU-memory backed Tensor object initialized with the same contents
as this Tensor.
"""
return self._copy(context.context(), "GPU:" + str(gpu_index))
def set_shape(self, shape):
if not self.shape.is_compatible_with(shape):
raise ValueError(
"Tensor's shape %s is not compatible with supplied shape %s" %
(self.shape, shape))
# Methods not supported / implemented for Eager Tensors.
@property
def op(self):
raise AttributeError(
"Tensor.op is meaningless when eager execution is enabled.")
@property
def graph(self):
raise AttributeError(
"Tensor.graph is meaningless when eager execution is enabled.")
@property
def name(self):
raise AttributeError(
"Tensor.name is meaningless when eager execution is enabled.")
@property
def value_index(self):
raise AttributeError(
"Tensor.value_index is meaningless when eager execution is enabled.")
def consumers(self):
raise NotImplementedError(
"Tensor.consumers is meaningless when eager execution is enabled.")
def _add_consumer(self, consumer):
raise NotImplementedError(
"_add_consumer not supported when eager execution is enabled.")
def _as_node_def_input(self):
raise NotImplementedError(
"_as_node_def_input not supported when eager execution is enabled.")
def _as_tf_output(self):
raise NotImplementedError(
"_as_tf_output not supported when eager execution is enabled.")
def eval(self, feed_dict=None, session=None):
raise NotImplementedError(
"eval is not supported when eager execution is enabled, "
"is .numpy() what you're looking for?")
# This call creates an EagerTensor class, as a subclass of _EagerTensorBase, and
# registers it with the current module.
EagerTensor = pywrap_tfe.TFE_Py_InitEagerTensor(_EagerTensorBase)
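# A small sketch of the eager-tensor behaviour implemented by
# `_EagerTensorBase` above: Python scalar conversion, `.numpy()` copies, and
# zero-copy `memoryview` access on CPU. Assumes an eager TensorFlow runtime;
# illustrative only and never called in this module.
def _example_eager_tensor_basics():
  import numpy as np  # local import keeps the sketch self-contained
  import tensorflow as tf
  t = tf.constant([1, 2, 3])
  arr = t.numpy()                 # a copy; mutating it leaves `t` unchanged
  arr[0] = 99
  scalar = int(tf.constant(7))    # __int__ works for scalar tensors
  view = np.array(memoryview(t))  # read-only, zero-copy for CPU tensors
  return t, arr, scalar, view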
@tf_export(v1=["convert_to_tensor"])
@dispatch.add_dispatch_support
def convert_to_tensor_v1_with_dispatch(
value,
dtype=None,
name=None,
preferred_dtype=None,
dtype_hint=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
```python
import numpy as np
def my_func(arg):
arg = tf.convert_to_tensor(arg, dtype=tf.float32)
return tf.matmul(arg, arg) + arg
# The following calls are equivalent.
value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
```
This function can be useful when composing a new operation in Python
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
and scalars in addition to `Tensor` objects.
Note: This function diverges from default Numpy behavior for `float` and
`string` types when `None` is present in a Python list or scalar. Rather
than silently converting `None` values, an error will be thrown.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
preferred_dtype: Optional element type for the returned tensor, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
dtype_hint: same meaning as preferred_dtype, and overrides it.
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
"""
return convert_to_tensor_v1(value, dtype=dtype, name=name,
preferred_dtype=preferred_dtype,
dtype_hint=dtype_hint)
def convert_to_tensor_v1(value,
dtype=None,
name=None,
preferred_dtype=None,
dtype_hint=None):
"""Converts the given `value` to a `Tensor` (with the TF1 API)."""
preferred_dtype = deprecation.deprecated_argument_lookup(
"dtype_hint", dtype_hint, "preferred_dtype", preferred_dtype)
return convert_to_tensor_v2(value, dtype, preferred_dtype, name)
@tf_export("convert_to_tensor", v1=[])
@dispatch.add_dispatch_support
def convert_to_tensor_v2_with_dispatch(
value, dtype=None, dtype_hint=None, name=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars.
For example:
>>> import numpy as np
>>> def my_func(arg):
... arg = tf.convert_to_tensor(arg, dtype=tf.float32)
... return arg
>>> # The following calls are equivalent.
...
>>> value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
>>> print(value_1)
tf.Tensor(
[[1. 2.]
[3. 4.]], shape=(2, 2), dtype=float32)
>>> value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
>>> print(value_2)
tf.Tensor(
[[1. 2.]
[3. 4.]], shape=(2, 2), dtype=float32)
>>> value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
>>> print(value_3)
tf.Tensor(
[[1. 2.]
[3. 4.]], shape=(2, 2), dtype=float32)
This function can be useful when composing a new operation in Python
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
and scalars in addition to `Tensor` objects.
Note: This function diverges from default Numpy behavior for `float` and
`string` types when `None` is present in a Python list or scalar. Rather
than silently converting `None` values, an error will be thrown.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of `value`.
dtype_hint: Optional element type for the returned tensor, used when dtype
is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so dtype_hint can be used as a soft preference.
If the conversion to `dtype_hint` is not possible, this argument has no
effect.
name: Optional name to use if a new `Tensor` is created.
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
"""
return convert_to_tensor_v2(
value, dtype=dtype, dtype_hint=dtype_hint, name=name)
def convert_to_tensor_v2(value, dtype=None, dtype_hint=None, name=None):
"""Converts the given `value` to a `Tensor`."""
return convert_to_tensor(
value=value,
dtype=dtype,
name=name,
preferred_dtype=dtype_hint,
as_ref=False)
def _error_prefix(name):
return "" if name is None else "%s: " % name
def pack_eager_tensors(tensors, ctx=None):
"""Pack multiple `EagerTensor`s of the same dtype and shape.
Args:
tensors: a list of EagerTensors to pack.
ctx: context.context().
Returns:
A packed EagerTensor.
"""
if not isinstance(tensors, list):
raise TypeError("tensors must be a list or a tuple: %s" % tensors)
if not tensors:
raise ValueError("Empty tensors is unexpected for packing.")
dtype = tensors[0].dtype
shape = tensors[0].shape
handle_data = tensors[0]._handle_data # pylint: disable=protected-access
is_resource = dtype == dtypes.resource
for i in range(len(tensors)):
t = tensors[i]
if not isinstance(t, EagerTensor):
raise TypeError("tensors must be a list of EagerTensors: %s" % t)
if t.dtype != dtype:
raise ValueError(
"All tensors being packed should have the same dtype %s, "
"but the %d-th tensor is of dtype %s" % (dtype, i, t.dtype))
if t.shape != shape:
raise ValueError(
"All tensors being packed should have the same shape %s, "
"but the %d-th tensor is of shape %s" % (shape, i, t.shape))
# pylint: disable=protected-access
if is_resource and t._handle_data != handle_data:
raise ValueError(
"All tensors being packed should have the same handle data %s, "
"but the %d-th tensor is of handle data %s" %
(handle_data, i, t._handle_data))
# pylint: enable=protected-access
if ctx is None:
ctx = context.context()
  # Propagate handle data for resource variables
packed_tensor = ctx.pack_eager_tensors(tensors)
if handle_data is not None:
packed_tensor._handle_data = handle_data # pylint: disable=protected-access
def grad_fun(_):
raise ValueError(
"Gradients through pack_eager_tensors are not supported yet.")
tape.record_operation("pack_eager_tensors", [packed_tensor], tensors,
grad_fun)
return packed_tensor
@trace.trace_wrapper("convert_to_tensor")
def convert_to_tensor(value,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None,
dtype_hint=None,
ctx=None,
accepted_result_types=(Tensor,)):
"""Implementation of the public convert_to_tensor."""
# TODO(b/142518781): Fix all call-sites and remove redundant arg
preferred_dtype = preferred_dtype or dtype_hint
if isinstance(value, EagerTensor):
if ctx is None:
ctx = context.context()
if not ctx.executing_eagerly():
graph = get_default_graph()
if not graph.building_function:
raise RuntimeError("Attempting to capture an EagerTensor without "
"building a function.")
return graph.capture(value, name=name)
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
if isinstance(value, Tensor):
if dtype is not None and not dtype.is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtype.name, value.dtype.name, value))
return value
if preferred_dtype is not None:
preferred_dtype = dtypes.as_dtype(preferred_dtype)
# See below for the reason why it's `type(value)` and not just `value`.
# https://docs.python.org/3.8/reference/datamodel.html#special-lookup
overload = getattr(type(value), "__tf_tensor__", None)
if overload is not None:
return overload(value, dtype, name)
for base_type, conversion_func in tensor_conversion_registry.get(type(value)):
# If dtype is None but preferred_dtype is not None, we try to
# cast to preferred_dtype first.
ret = None
if dtype is None and preferred_dtype is not None:
try:
ret = conversion_func(
value, dtype=preferred_dtype, name=name, as_ref=as_ref)
except (TypeError, ValueError):
# Could not coerce the conversion to use the preferred dtype.
pass
else:
if (ret is not NotImplemented and
ret.dtype.base_dtype != preferred_dtype.base_dtype):
raise TypeError("convert_to_tensor did not convert to "
"the preferred dtype: %s vs %s " %
(ret.dtype.base_dtype, preferred_dtype.base_dtype))
if ret is None:
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
if ret is NotImplemented:
continue
if not isinstance(ret, accepted_result_types):
raise RuntimeError(
"%sConversion function %r for type %s returned non-Tensor: %r" %
(_error_prefix(name), conversion_func, base_type, ret))
if dtype and not dtype.is_compatible_with(ret.dtype):
raise RuntimeError(
"%sConversion function %r for type %s returned incompatible "
"dtype: requested = %s, actual = %s" %
(_error_prefix(name), conversion_func, base_type, dtype.name,
ret.dtype.name))
return ret
raise TypeError("%sCannot convert %r with type %s to Tensor: "
"no conversion function registered." %
(_error_prefix(name), value, type(value)))
internal_convert_to_tensor = convert_to_tensor
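# A sketch of the `__tf_tensor__` hook consulted in `convert_to_tensor` above:
# a type can opt into tensor conversion by defining
# `__tf_tensor__(self, dtype, name)`. `_ExampleAngle` is a hypothetical class
# used purely for illustration; nothing in this module references it.
class _ExampleAngle(object):
  """Wraps a float that converts to a float32 Tensor on demand."""
  def __init__(self, degrees):
    self.degrees = degrees
  def __tf_tensor__(self, dtype=None, name=None):
    return convert_to_tensor(
        self.degrees, dtype=dtype or dtypes.float32, name=name)
# e.g. convert_to_tensor(_ExampleAngle(90.0)) yields a float32 scalar Tensor.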
def internal_convert_n_to_tensor(values,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None,
ctx=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
preferred_dtype: Optional element type for the returned tensors, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
ctx: The value of context.context().
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections_abc.Sequence):
raise TypeError("values must be a sequence.")
ret = []
if ctx is None:
ctx = context.context()
for i, value in enumerate(values):
n = None if name is None else "%s_%d" % (name, i)
ret.append(
convert_to_tensor(
value,
dtype=dtype,
name=n,
as_ref=as_ref,
preferred_dtype=preferred_dtype,
ctx=ctx))
return ret
def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
preferred_dtype: Optional element type for the returned tensors, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor(
values=values,
dtype=dtype,
name=name,
preferred_dtype=preferred_dtype,
as_ref=False)
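# A tiny sketch of the batch conversion helpers above: each element of
# `values` becomes its own Tensor, and in graph mode element `i` is named with
# the "<name>_<i>" prefix scheme. Illustrative only; never called here.
def _example_convert_n_to_tensor():
  tensors = convert_n_to_tensor([1, 2.0, [3, 4]], name="x")
  # dtypes are inferred per element: int32, float32, int32.
  return tensors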
def convert_to_tensor_or_composite(value, dtype=None, name=None):
"""Converts the given object to a `Tensor` or `CompositeTensor`.
If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it
is converted to a `Tensor` using `convert_to_tensor()`.
Args:
value: A `CompositeTensor` or an object that can be consumed by
`convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`CompositeTensor`.
name: (Optional.) A name to use if a new `Tensor` is created.
Returns:
A `Tensor` or `CompositeTensor`, based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
return internal_convert_to_tensor_or_composite(
value=value, dtype=dtype, name=name, as_ref=False)
def internal_convert_to_tensor_or_composite(value,
dtype=None,
name=None,
as_ref=False):
"""Converts the given object to a `Tensor` or `CompositeTensor`.
If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it
is converted to a `Tensor` using `convert_to_tensor()`.
Args:
value: A `CompositeTensor`, or an object that can be consumed by
`convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`CompositeTensor`.
name: (Optional.) A name to use if a new `Tensor` is created.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A `Tensor` or `CompositeTensor`, based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
if isinstance(value, composite_tensor.CompositeTensor):
value_dtype = getattr(value, "dtype", None)
if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value_dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
return value
else:
return convert_to_tensor(
value,
dtype=dtype,
name=name,
as_ref=as_ref,
accepted_result_types=(Tensor, composite_tensor.CompositeTensor))
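# A sketch of the composite-aware conversion above: `CompositeTensor`s such as
# `tf.sparse.SparseTensor` pass through unmodified, while anything else goes
# through `convert_to_tensor()`. Assumes the public `tf` API is importable;
# illustrative only and never called in this module.
def _example_convert_composite():
  import tensorflow as tf
  sp = tf.sparse.SparseTensor(
      indices=[[0, 0]], values=[1.0], dense_shape=[2, 2])
  same = convert_to_tensor_or_composite(sp)           # `sp`, unmodified
  dense = convert_to_tensor_or_composite([1.0, 2.0])  # a plain float32 Tensor
  return same is sp, dense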
def internal_convert_n_to_tensor_or_composite(values,
dtype=None,
name=None,
as_ref=False):
"""Converts `values` to a list of `Tensor` or `CompositeTensor` objects.
Any `CompositeTensor` objects in `values` are returned unmodified.
Args:
values: A list of `None`, `CompositeTensor`, or objects that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor`s or
`CompositeTensor`s.
    name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A list of `Tensor`, `CompositeTensor`, and/or `None` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections_abc.Sequence):
raise TypeError("values must be a sequence.")
ret = []
for i, value in enumerate(values):
if value is None:
ret.append(value)
else:
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor_or_composite(
value, dtype=dtype, name=n, as_ref=as_ref))
return ret
def convert_n_to_tensor_or_composite(values, dtype=None, name=None):
"""Converts `values` to a list of `Output` or `CompositeTensor` objects.
Any `CompositeTensor` objects in `values` are returned unmodified.
Args:
    values: A list of `None`, `CompositeTensor`, or objects that can be
consumed by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor`s or
`CompositeTensor`s.
    name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
Returns:
A list of `Tensor` and/or `CompositeTensor` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor_or_composite(
values=values, dtype=dtype, name=name, as_ref=False)
def _device_string(dev_spec):
if pydev.is_device_spec(dev_spec):
return dev_spec.to_string()
else:
return dev_spec
def _NodeDef(op_type, name, attrs=None):
"""Create a NodeDef proto.
Args:
op_type: Value for the "op" attribute of the NodeDef proto.
name: Value for the "name" attribute of the NodeDef proto.
attrs: Dictionary where the key is the attribute name (a string)
and the value is the respective "attr" attribute of the NodeDef proto (an
AttrValue).
Returns:
A node_def_pb2.NodeDef protocol buffer.
"""
node_def = node_def_pb2.NodeDef(op=compat.as_bytes(op_type),
name=compat.as_bytes(name))
if attrs:
for k, v in six.iteritems(attrs):
node_def.attr[k].CopyFrom(v)
return node_def
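# A minimal sketch of `_NodeDef` usage. The attr protos below come from
# `tensorflow.core.framework`, which is assumed to be available alongside this
# module; illustrative only and never called here.
def _example_node_def():
  from tensorflow.core.framework import attr_value_pb2
  from tensorflow.core.framework import types_pb2
  return _NodeDef(
      "Const", "example_const",
      attrs={"dtype": attr_value_pb2.AttrValue(type=types_pb2.DT_FLOAT)})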
# Copied from core/framework/node_def_util.cc
# TODO(mrry,josh11b): Consolidate this validation in C++ code.
_VALID_OP_NAME_REGEX = re.compile(r"^[A-Za-z0-9.][A-Za-z0-9_.\\/>-]*$")
_VALID_SCOPE_NAME_REGEX = re.compile(r"^[A-Za-z0-9_.\\/>-]*$")
def _create_c_op(graph, node_def, inputs, control_inputs, op_def=None):
"""Creates a TF_Operation.
Args:
graph: a `Graph`.
node_def: `node_def_pb2.NodeDef` for the operation to create.
inputs: A flattened list of `Tensor`s. This function handles grouping
tensors into lists as per attributes in the `node_def`.
control_inputs: A list of `Operation`s to set as control dependencies.
op_def: Optional. `op_def_pb2.OpDef` for the operation to create. If not
specified, is looked up from the `graph` using `node_def.op`.
Returns:
A wrapped TF_Operation*.
"""
if op_def is None:
op_def = graph._get_op_def(node_def.op) # pylint: disable=protected-access
# TODO(skyewm): op_def_library.apply_op() flattens the incoming inputs.
# Refactor so we don't have to do this here.
inputs = _reconstruct_sequence_inputs(op_def, inputs, node_def.attr)
# pylint: disable=protected-access
op_desc = pywrap_tf_session.TF_NewOperation(graph._c_graph,
compat.as_str(node_def.op),
compat.as_str(node_def.name))
if node_def.device:
pywrap_tf_session.TF_SetDevice(op_desc, compat.as_str(node_def.device))
# Add inputs
for op_input in inputs:
if isinstance(op_input, (list, tuple)):
pywrap_tf_session.TF_AddInputList(op_desc,
[t._as_tf_output() for t in op_input])
else:
pywrap_tf_session.TF_AddInput(op_desc, op_input._as_tf_output())
# Add control inputs
for control_input in control_inputs:
pywrap_tf_session.TF_AddControlInput(op_desc, control_input._c_op)
# pylint: enable=protected-access
# Add attrs
for name, attr_value in node_def.attr.items():
serialized = attr_value.SerializeToString()
# TODO(skyewm): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use the same status.
pywrap_tf_session.TF_SetAttrValueProto(op_desc, compat.as_str(name),
serialized)
try:
c_op = pywrap_tf_session.TF_FinishOperation(op_desc)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
return c_op
@tf_export("Operation")
class Operation(object):
"""Represents a graph node that performs computation on tensors.
An `Operation` is a node in a `tf.Graph` that takes zero or more `Tensor`
objects as input, and produces zero or more `Tensor` objects as output.
Objects of type `Operation` are created by calling a Python op constructor
(such as `tf.matmul`) within a `tf.function` or under a `tf.Graph.as_default`
context manager.
For example, within a `tf.function`, `c = tf.matmul(a, b)` creates an
`Operation` of type "MatMul" that takes tensors `a` and `b` as input, and
produces `c` as output.
If a `tf.compat.v1.Session` is used, an `Operation` of a `tf.Graph` can be
executed by passing it to `tf.Session.run`. `op.run()` is a shortcut for
calling `tf.compat.v1.get_default_session().run(op)`.
"""
def __init__(self,
node_def,
g,
inputs=None,
output_types=None,
control_inputs=None,
input_types=None,
original_op=None,
op_def=None):
r"""Creates an `Operation`.
NOTE: This constructor validates the name of the `Operation` (passed
as `node_def.name`). Valid `Operation` names match the following
regular expression:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]*
Args:
node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`. Used for
attributes of `node_def_pb2.NodeDef`, typically `name`, `op`, and
`device`. The `input` attribute is irrelevant here as it will be
computed when generating the model.
g: `Graph`. The parent graph.
inputs: list of `Tensor` objects. The inputs to this `Operation`.
output_types: list of `DType` objects. List of the types of the `Tensors`
computed by this operation. The length of this list indicates the
number of output endpoints of the `Operation`.
control_inputs: list of operations or tensors from which to have a control
dependency.
input_types: List of `DType` objects representing the types of the tensors
accepted by the `Operation`. By default uses `[x.dtype.base_dtype for x
in inputs]`. Operations that expect reference-typed inputs must specify
these explicitly.
original_op: Optional. Used to associate the new `Operation` with an
existing `Operation` (for example, a replica with the op that was
replicated).
op_def: Optional. The `op_def_pb2.OpDef` proto that describes the op type
that this `Operation` represents.
Raises:
TypeError: if control inputs are not Operations or Tensors,
or if `node_def` is not a `NodeDef`,
or if `g` is not a `Graph`,
or if `inputs` are not tensors,
or if `inputs` and `input_types` are incompatible.
ValueError: if the `node_def` name is not valid.
"""
# For internal use only: `node_def` can be set to a TF_Operation to create
# an Operation for that op. This is useful for creating Operations for ops
# indirectly created by C API methods, e.g. the ops created by
# TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields
# should be None.
if isinstance(node_def, node_def_pb2.NodeDef):
if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:
raise ValueError(
"Cannot create a tensor proto whose content is larger than 2GB.")
if not _VALID_OP_NAME_REGEX.match(node_def.name):
raise ValueError("'%s' is not a valid node name" % node_def.name)
c_op = None
elif type(node_def).__name__ == "TF_Operation":
assert inputs is None
assert output_types is None
assert control_inputs is None
assert input_types is None
assert original_op is None
assert op_def is None
c_op = node_def
else:
raise TypeError("node_def needs to be a NodeDef: %s" % node_def)
if not isinstance(g, Graph):
raise TypeError("g needs to be a Graph: %s" % g)
self._graph = g
if inputs is None:
inputs = []
elif not isinstance(inputs, list):
raise TypeError("inputs needs to be a list of Tensors: %s" % inputs)
for a in inputs:
if not isinstance(a, Tensor):
raise TypeError("input needs to be a Tensor: %s" % a)
if input_types is None:
input_types = [i.dtype.base_dtype for i in inputs]
else:
if not all(
x.is_compatible_with(i.dtype) for i, x in zip(inputs, input_types)):
raise TypeError("In op '%s', input types (%s) are not compatible "
"with expected types (%s)" %
(node_def.name, [i.dtype for i in inputs], input_types))
# Build the list of control inputs.
control_input_ops = []
if control_inputs:
for c in control_inputs:
control_op = None
if isinstance(c, Operation):
control_op = c
elif isinstance(c, (Tensor, IndexedSlices)):
control_op = c.op
else:
raise TypeError("Control input must be an Operation, "
"a Tensor, or IndexedSlices: %s" % c)
control_input_ops.append(control_op)
# This will be set by self.inputs.
self._inputs_val = None
# pylint: disable=protected-access
self._original_op = original_op
self._traceback = tf_stack.extract_stack()
# List of _UserDevSpecs holding code location of device context manager
# invocations and the users original argument to them.
self._device_code_locations = None
# Dict mapping op name to file and line information for op colocation
# context managers.
self._colocation_code_locations = None
self._control_flow_context = self.graph._get_control_flow_context()
# Gradient function for this op. There are three ways to specify gradient
# function, and first available gradient gets used, in the following order.
# 1. self._gradient_function
# 2. Gradient name registered by "_gradient_op_type" attribute.
# 3. Gradient name registered by op.type.
self._gradient_function = None
# Initialize self._c_op.
if c_op:
self._c_op = c_op
op_def = g._get_op_def(pywrap_tf_session.TF_OperationOpType(c_op))
name = self.name
else:
if op_def is None:
op_def = self._graph._get_op_def(node_def.op)
self._c_op = _create_c_op(self._graph, node_def, inputs,
control_input_ops, op_def)
name = compat.as_str(node_def.name)
# pylint: enable=protected-access
self._is_stateful = op_def.is_stateful
# Initialize self._outputs.
num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op)
self._outputs = []
for i in range(num_outputs):
tf_output = c_api_util.tf_output(self._c_op, i)
output_type = pywrap_tf_session.TF_OperationOutputType(tf_output)
tensor = Tensor._create_with_tf_output(self, i, output_type, tf_output) # pylint: disable=protected-access
self._outputs.append(tensor)
self._id_value = self._graph._add_op(self, name) # pylint: disable=protected-access
if not c_op:
self._control_flow_post_processing(input_tensors=inputs)
def _control_flow_post_processing(self, input_tensors=None):
"""Add this op to its control flow context.
This may add new ops and change this op's inputs. self.inputs must be
available before calling this method.
Args:
input_tensors: (Optional.) A list of `Tensors` corresponding to the inputs
of this op, which should be equivalent to `self.inputs`. Pass this
argument to avoid evaluating `self.inputs` unnecessarily.
"""
if input_tensors is None:
input_tensors = self.inputs
for input_tensor in input_tensors:
control_flow_util.CheckInputFromValidContext(self, input_tensor.op)
if self._control_flow_context is not None:
self._control_flow_context.AddOp(self)
def colocation_groups(self):
"""Returns the list of colocation groups of the op."""
default_colocation_group = [compat.as_bytes("loc:@%s" % self.name)]
try:
class_attr = self.get_attr("_class")
except ValueError:
# This op has no explicit colocation group, so it is itself its
# own root of a colocation group.
return default_colocation_group
attr_groups = [
class_name for class_name in class_attr
if class_name.startswith(b"loc:@")
]
# If there are no colocation groups in the explicit _class field,
# return the default colocation group.
return attr_groups if attr_groups else default_colocation_group
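  # Illustrative sketch (editor's addition, not part of the original source):
  # colocation groups are byte strings of the form b"loc:@<op_name>". Under
  # graph mode one might observe them roughly like this; the names `a` and
  # `b` are hypothetical.
  #
  #   with tf.Graph().as_default():
  #     a = tf.constant(1.0, name="a")
  #     with tf.compat.v1.colocate_with(a.op):
  #       b = tf.constant(2.0, name="b")
  #     assert b.op.colocation_groups() == [b"loc:@a"]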
def values(self):
"""DEPRECATED: Use outputs."""
return tuple(self.outputs)
def _get_control_flow_context(self):
"""Returns the control flow context of this op.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context of this op.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
@property
def name(self):
"""The full name of this operation."""
return pywrap_tf_session.TF_OperationName(self._c_op)
@property
def _id(self):
"""The unique integer id of this operation."""
return self._id_value
@property
def device(self):
"""The name of the device to which this op has been assigned, if any.
Returns:
The string name of the device to which this op has been
assigned, or an empty string if it has not been assigned to a
device.
"""
return pywrap_tf_session.TF_OperationDevice(self._c_op)
@property
def _device_assignments(self):
"""Code locations for device context managers active at op creation.
This property will return a list of traceable_stack.TraceableObject
instances where .obj is a string representing the assigned device
(or information about the function that would be applied to this op
to compute the desired device) and the filename and lineno members
record the location of the relevant device context manager.
For example, suppose file_a contained these lines:
file_a.py:
15: with tf.device('/gpu:0'):
16: node_b = tf.constant(4, name='NODE_B')
Then a TraceableObject t_obj representing the device context manager
would have these member values:
t_obj.obj -> '/gpu:0'
t_obj.filename = 'file_a.py'
t_obj.lineno = 15
and node_b.op._device_assignments would return the list [t_obj].
Returns:
[str: traceable_stack.TraceableObject, ...] as per this method's
description, above.
"""
return self._device_code_locations or []
@property
def _colocation_dict(self):
"""Code locations for colocation context managers active at op creation.
This property will return a dictionary for which the keys are nodes with
which this Operation is colocated, and for which the values are
traceable_stack.TraceableObject instances. The TraceableObject instances
record the location of the relevant colocation context manager but have the
"obj" field set to None to prevent leaking private data.
For example, suppose file_a contained these lines:
file_a.py:
14: node_a = tf.constant(3, name='NODE_A')
15: with tf.compat.v1.colocate_with(node_a):
16: node_b = tf.constant(4, name='NODE_B')
Then a TraceableObject t_obj representing the colocation context manager
would have these member values:
t_obj.obj -> None
t_obj.filename = 'file_a.py'
t_obj.lineno = 15
and node_b.op._colocation_dict would return the dictionary
{ 'NODE_A': t_obj }
Returns:
{str: traceable_stack.TraceableObject} as per this method's description,
above.
"""
locations_dict = self._colocation_code_locations or {}
return locations_dict.copy()
@property
def _output_types(self):
"""List this operation's output types.
Returns:
List of the types of the Tensors computed by this operation.
Each element in the list is an integer whose value is one of
the TF_DataType enums defined in pywrap_tf_session.h
The length of this list indicates the number of output endpoints
of the operation.
"""
num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op)
output_types = [
int(pywrap_tf_session.TF_OperationOutputType(self._tf_output(i)))
for i in xrange(num_outputs)
]
return output_types
def _tf_output(self, output_idx):
"""Create and return a new TF_Output for output_idx'th output of this op."""
tf_output = pywrap_tf_session.TF_Output()
tf_output.oper = self._c_op
tf_output.index = output_idx
return tf_output
def _tf_input(self, input_idx):
"""Create and return a new TF_Input for input_idx'th input of this op."""
tf_input = pywrap_tf_session.TF_Input()
tf_input.oper = self._c_op
tf_input.index = input_idx
return tf_input
def _set_device(self, device): # pylint: disable=redefined-outer-name
"""Set the device of this operation.
Args:
      device: string or device. The device to set.
"""
self._set_device_from_string(compat.as_str(_device_string(device)))
def _set_device_from_string(self, device_str):
"""Fast path to set device if the type is known to be a string.
This function is called frequently enough during graph construction that
there are non-trivial performance gains if the caller can guarantee that
the specified device is already a string.
Args:
device_str: A string specifying where to place this op.
"""
pywrap_tf_session.SetRequestedDevice(
self._graph._c_graph, # pylint: disable=protected-access
self._c_op, # pylint: disable=protected-access
device_str)
def _update_input(self, index, tensor):
"""Update the input to this operation at the given index.
NOTE: This is for TF internal use only. Please don't use it.
Args:
index: the index of the input to update.
tensor: the Tensor to be used as the input at the given index.
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
# Reset cached inputs.
self._inputs_val = None
pywrap_tf_session.UpdateEdge(
self._graph._c_graph, # pylint: disable=protected-access
tensor._as_tf_output(), # pylint: disable=protected-access
self._tf_input(index))
def _add_while_inputs(self, tensors):
"""See AddWhileInputHack in python_api.h.
NOTE: This is for TF internal use only. Please don't use it.
Args:
tensors: list of Tensors
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
for tensor in tensors:
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
# Reset cached inputs.
self._inputs_val = None
pywrap_tf_session.AddWhileInputHack(
self._graph._c_graph, # pylint: disable=protected-access
tensor._as_tf_output(), # pylint: disable=protected-access
self._c_op)
def _add_control_inputs(self, ops):
"""Add a list of new control inputs to this operation.
Args:
ops: the list of Operations to add as control input.
Raises:
TypeError: if ops is not a list of Operations.
ValueError: if any op in ops is from a different graph.
"""
for op in ops:
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
pywrap_tf_session.AddControlInput(
self._graph._c_graph, # pylint: disable=protected-access
self._c_op, # pylint: disable=protected-access
op._c_op) # pylint: disable=protected-access
def _add_control_input(self, op):
"""Add a new control input to this operation.
Args:
op: the Operation to add as control input.
Raises:
TypeError: if op is not an Operation.
ValueError: if op is from a different graph.
"""
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
pywrap_tf_session.AddControlInput(
self._graph._c_graph, # pylint: disable=protected-access
self._c_op, # pylint: disable=protected-access
op._c_op) # pylint: disable=protected-access
def _remove_all_control_inputs(self):
"""Removes any control inputs to this operation."""
pywrap_tf_session.RemoveAllControlInputs(self._graph._c_graph, self._c_op) # pylint: disable=protected-access
def _add_outputs(self, types, shapes):
"""Adds new Tensors to self.outputs.
Note: this is generally unsafe to use. This is used in certain situations in
conjunction with _set_type_list_attr.
    Args:
types: list of DTypes
shapes: list of TensorShapes
"""
assert len(types) == len(shapes)
orig_num_outputs = len(self.outputs)
for i in range(len(types)):
t = Tensor(self, orig_num_outputs + i, types[i])
self._outputs.append(t)
t.set_shape(shapes[i])
def __str__(self):
return str(self.node_def)
def __repr__(self):
return "<tf.Operation '%s' type=%s>" % (self.name, self.type)
def __tf_tensor__(self, dtype=None, name=None):
"""Raises a helpful error."""
raise TypeError("can't convert Operation '{}' to Tensor".format(self.name))
@property
def outputs(self):
"""The list of `Tensor` objects representing the outputs of this op."""
return self._outputs
@property
def inputs(self):
"""The sequence of `Tensor` objects representing the data inputs of this op."""
if self._inputs_val is None:
# pylint: disable=protected-access
self._inputs_val = tuple(
map(self.graph._get_tensor_by_tf_output,
pywrap_tf_session.GetOperationInputs(self._c_op)))
# pylint: enable=protected-access
return self._inputs_val
@property
def _input_types(self):
num_inputs = pywrap_tf_session.TF_OperationNumInputs(self._c_op)
input_types = [
dtypes.as_dtype(
pywrap_tf_session.TF_OperationInputType(self._tf_input(i)))
for i in xrange(num_inputs)
]
return input_types
@property
def control_inputs(self):
"""The `Operation` objects on which this op has a control dependency.
Before this op is executed, TensorFlow will ensure that the
operations in `self.control_inputs` have finished executing. This
mechanism can be used to run ops sequentially for performance
reasons, or to ensure that the side effects of an op are observed
in the correct order.
Returns:
A list of `Operation` objects.
"""
control_c_ops = pywrap_tf_session.TF_OperationGetControlInputs_wrapper(
self._c_op)
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(
pywrap_tf_session.TF_OperationName(c_op)) for c_op in control_c_ops
]
# pylint: enable=protected-access
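  # Illustrative sketch (editor's addition, not part of the original source):
  # control inputs are typically populated via a control_dependencies context.
  #
  #   with tf.Graph().as_default():
  #     init = tf.constant(0.0, name="init")
  #     with tf.control_dependencies([init]):
  #       out = tf.constant(1.0, name="out")
  #     assert init.op in out.op.control_inputs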
@property
def _control_outputs(self):
"""The `Operation` objects which have a control dependency on this op.
    Before any of the ops in `self._control_outputs` can execute, TensorFlow
    will ensure that `self` has finished executing.
Returns:
A list of `Operation` objects.
"""
control_c_ops = pywrap_tf_session.TF_OperationGetControlOutputs_wrapper(
self._c_op)
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(
pywrap_tf_session.TF_OperationName(c_op)) for c_op in control_c_ops
]
# pylint: enable=protected-access
@property
def type(self):
"""The type of the op (e.g. `"MatMul"`)."""
return pywrap_tf_session.TF_OperationOpType(self._c_op)
@property
def graph(self):
"""The `Graph` that contains this operation."""
return self._graph
@property
def node_def(self):
# pylint: disable=line-too-long
"""Returns the `NodeDef` representation of this operation.
Returns:
A
[`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)
protocol buffer.
"""
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
pywrap_tf_session.TF_OperationToNodeDef(self._c_op, buf)
data = pywrap_tf_session.TF_GetBuffer(buf)
node_def = node_def_pb2.NodeDef()
node_def.ParseFromString(compat.as_bytes(data))
return node_def
@property
def op_def(self):
# pylint: disable=line-too-long
"""Returns the `OpDef` proto that represents the type of this op.
Returns:
An
[`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)
protocol buffer.
"""
# pylint: enable=line-too-long
return self._graph._get_op_def(self.type)
@property
def traceback(self):
"""Returns the call stack from when this operation was constructed."""
return self._traceback
def _set_attr(self, attr_name, attr_value):
"""Private method used to set an attribute in the node_def."""
buf = pywrap_tf_session.TF_NewBufferFromString(
compat.as_bytes(attr_value.SerializeToString()))
try:
self._set_attr_with_buf(attr_name, buf)
finally:
pywrap_tf_session.TF_DeleteBuffer(buf)
def _set_attr_with_buf(self, attr_name, attr_buf):
"""Set an attr in the node_def with a pre-allocated buffer."""
# pylint: disable=protected-access
pywrap_tf_session.SetAttr(self._graph._c_graph, self._c_op, attr_name,
attr_buf)
# pylint: enable=protected-access
def _set_func_attr(self, attr_name, func_name):
"""Private method used to set a function attribute in the node_def."""
func = attr_value_pb2.NameAttrList(name=func_name)
self._set_attr(attr_name, attr_value_pb2.AttrValue(func=func))
def _set_func_list_attr(self, attr_name, func_names):
"""Private method used to set a list(function) attribute in the node_def."""
funcs = [attr_value_pb2.NameAttrList(name=func_name)
for func_name in func_names]
funcs_list = attr_value_pb2.AttrValue.ListValue(func=funcs)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=funcs_list))
def _set_type_list_attr(self, attr_name, types):
"""Private method used to set a list(type) attribute in the node_def."""
if not types:
return
if isinstance(types[0], dtypes.DType):
types = [dt.as_datatype_enum for dt in types]
types_list = attr_value_pb2.AttrValue.ListValue(type=types)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=types_list))
def _set_shape_list_attr(self, attr_name, shapes):
"""Private method used to set a list(shape) attribute in the node_def."""
shapes = [s.as_proto() for s in shapes]
shapes_list = attr_value_pb2.AttrValue.ListValue(shape=shapes)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=shapes_list))
def _clear_attr(self, attr_name):
"""Private method used to clear an attribute in the node_def."""
# pylint: disable=protected-access
pywrap_tf_session.ClearAttr(self._graph._c_graph, self._c_op, attr_name)
# pylint: enable=protected-access
def get_attr(self, name):
"""Returns the value of the attr of this op with the given `name`.
Args:
name: The name of the attr to fetch.
Returns:
The value of the attr, as a Python object.
Raises:
ValueError: If this op does not have an attr with the given `name`.
"""
fields = ("s", "i", "f", "b", "type", "shape", "tensor", "func")
try:
with c_api_util.tf_buffer() as buf:
pywrap_tf_session.TF_OperationGetAttrValueProto(self._c_op, name, buf)
data = pywrap_tf_session.TF_GetBuffer(buf)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
x = attr_value_pb2.AttrValue()
x.ParseFromString(data)
oneof_value = x.WhichOneof("value")
if oneof_value is None:
return []
if oneof_value == "list":
for f in fields:
if getattr(x.list, f):
if f == "type":
return [dtypes.as_dtype(t) for t in x.list.type]
else:
return list(getattr(x.list, f))
return []
if oneof_value == "type":
return dtypes.as_dtype(x.type)
assert oneof_value in fields, "Unsupported field type in " + str(x)
return getattr(x, oneof_value)
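  # Illustrative sketch (editor's addition): `get_attr` returns plain Python
  # values, e.g. a `DType` for type attrs or a list for list-valued attrs.
  #
  #   with tf.Graph().as_default():
  #     c = tf.constant(1, dtype=tf.int32, name="c")
  #     assert c.op.get_attr("dtype") == tf.int32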
def _get_attr_type(self, name):
"""Returns the `DType` value of the attr of this op with the given `name`."""
try:
dtype_enum = pywrap_tf_session.TF_OperationGetAttrType(self._c_op, name)
return _DTYPES_INTERN_TABLE[dtype_enum]
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
def _get_attr_bool(self, name):
"""Returns the `bool` value of the attr of this op with the given `name`."""
try:
return pywrap_tf_session.TF_OperationGetAttrBool(self._c_op, name)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
def _get_attr_int(self, name):
"""Returns the `int` value of the attr of this op with the given `name`."""
try:
return pywrap_tf_session.TF_OperationGetAttrInt(self._c_op, name)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
def run(self, feed_dict=None, session=None):
"""Runs this operation in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for this operation.
*N.B.* Before invoking `Operation.run()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to run to this operation. If
none, the default session will be used.
"""
_run_using_default_session(self, feed_dict, self.graph, session)
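# Illustrative sketch (editor's addition, not part of the original source):
# `Operation.run()` is a thin wrapper around `Session.run` for ops whose
# outputs are not needed, e.g. initializers in TF1-style code.
#
#   g = tf.Graph()
#   with g.as_default():
#     v = tf.compat.v1.get_variable("v", shape=[],
#                                   initializer=tf.zeros_initializer())
#     init_op = tf.compat.v1.global_variables_initializer()
#   with tf.compat.v1.Session(graph=g) as sess:
#     init_op.run(session=sess)  # equivalent to sess.run(init_op)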
_gradient_registry = registry.Registry("gradient")
@tf_export("RegisterGradient")
class RegisterGradient(object):
"""A decorator for registering the gradient function for an op type.
This decorator is only used when defining a new op type. For an op
with `m` inputs and `n` outputs, the gradient function is a function
that takes the original `Operation` and `n` `Tensor` objects
(representing the gradients with respect to each output of the op),
and returns `m` `Tensor` objects (representing the partial gradients
with respect to each input of the op).
For example, assuming that operations of type `"Sub"` take two
inputs `x` and `y`, and return a single output `x - y`, the
following gradient function would be registered:
```python
@tf.RegisterGradient("Sub")
def _sub_grad(unused_op, grad):
return grad, tf.negative(grad)
```
The decorator argument `op_type` is the string type of an
operation. This corresponds to the `OpDef.name` field for the proto
that defines the operation.
"""
__slots__ = ["_op_type"]
def __init__(self, op_type):
"""Creates a new decorator with `op_type` as the Operation type.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers the function `f` as gradient function for `op_type`."""
_gradient_registry.register(f, self._op_type)
return f
@deprecation.deprecated_endpoints("NotDifferentiable", "NoGradient")
@tf_export("no_gradient", v1=["no_gradient", "NotDifferentiable", "NoGradient"])
def no_gradient(op_type):
"""Specifies that ops of type `op_type` is not differentiable.
This function should *not* be used for operations that have a
well-defined gradient that is not yet implemented.
This function is only used when defining a new op type. It may be
used for ops such as `tf.size()` that are not differentiable. For
example:
```python
tf.no_gradient("Size")
```
The gradient computed for 'op_type' will then propagate zeros.
  For ops that have a well-defined gradient that is not yet implemented,
  no declaration should be made, and an error *must* be thrown if
  an attempt to request their gradient is made.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not a string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
_gradient_registry.register(None, op_type)
# Aliases for the old names, will be eventually removed.
NoGradient = no_gradient
NotDifferentiable = no_gradient
def get_gradient_function(op):
"""Returns the function that computes gradients for "op"."""
if not op.inputs:
return None
gradient_function = op._gradient_function # pylint: disable=protected-access
if gradient_function:
return gradient_function
try:
op_type = op.get_attr("_gradient_op_type")
except ValueError:
op_type = op.type
return _gradient_registry.lookup(op_type)
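# Illustrative sketch (editor's addition): the lookup order described above
# means a per-op `_gradient_function` wins over a "_gradient_op_type" attr,
# which in turn wins over the gradient registered for `op.type`.
#
#   @RegisterGradient("MyCustomSquare")  # hypothetical op type
#   def _my_square_grad(op, grad):
#     return 2.0 * op.inputs[0] * grad
#
#   # get_gradient_function(some_op) would return _my_square_grad for ops
#   # whose type (or "_gradient_op_type" attr) is "MyCustomSquare".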
def set_shape_and_handle_data_for_outputs(_):
"""No op. TODO(b/74620627): Remove this."""
pass
class OpStats(object):
"""A holder for statistics about an operator.
This class holds information about the resource requirements for an op,
including the size of its weight parameters on-disk and how many FLOPS it
requires to execute forward inference.
If you define a new operation, you can create a function that will return a
set of information about its usage of the CPU and disk space when serialized.
The function itself takes a Graph object that's been set up so you can call
methods like get_tensor_by_name to help calculate the results, and a NodeDef
argument.
"""
__slots__ = ["_statistic_type", "_value"]
def __init__(self, statistic_type, value=None):
"""Sets up the initial placeholders for the statistics."""
self.statistic_type = statistic_type
self.value = value
@property
def statistic_type(self):
return self._statistic_type
@statistic_type.setter
def statistic_type(self, statistic_type):
self._statistic_type = statistic_type
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def __iadd__(self, other):
if other.statistic_type != self.statistic_type:
raise ValueError("Can't add an OpStat of type %s to one of %s." %
(self.statistic_type, other.statistic_type))
if self.value is None:
self.value = other.value
elif other.value is not None:
self._value += other.value
return self
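# Illustrative sketch (editor's addition): OpStats objects of the same
# statistic type can be accumulated with `+=`, e.g. to total FLOPs.
#
#   total = OpStats("flops", 0)
#   total += OpStats("flops", 1024)
#   total += OpStats("flops", None)  # None values are ignored
#   assert total.value == 1024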
_stats_registry = registry.Registry("statistical functions")
class RegisterStatistics(object):
"""A decorator for registering the statistics function for an op type.
This decorator can be defined for an op type so that it gives a
report on the resources used by an instance of an operator, in the
form of an OpStats object.
Well-known types of statistics include these so far:
- flops: When running a graph, the bulk of the computation happens doing
numerical calculations like matrix multiplications. This type allows a node
to return how many floating-point operations it takes to complete. The
total number of FLOPs for a graph is a good guide to its expected latency.
You can add your own statistics just by picking a new type string, registering
functions for the ops you care about, and then calling get_stats_for_node_def.
If a statistic for an op is registered multiple times, a KeyError will be
raised.
  Since the statistics are counted on a per-op basis, they are not suitable for
  model parameters (capacity), which are expected to be counted only once even
  if they are shared by multiple ops (e.g. RNNs).
For example, you can define a new metric called doohickey for a Foo operation
by placing this in your code:
```python
@ops.RegisterStatistics("Foo", "doohickey")
def _calc_foo_bojangles(unused_graph, unused_node_def):
return ops.OpStats("doohickey", 20)
```
Then in client code you can retrieve the value by making this call:
```python
doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey")
```
If the NodeDef is for an op with a registered doohickey function, you'll get
back the calculated amount in doohickey.value, or None if it's not defined.
"""
__slots__ = ["_op_type", "_statistic_type"]
def __init__(self, op_type, statistic_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string.")
if "," in op_type:
raise TypeError("op_type must not contain a comma.")
self._op_type = op_type
if not isinstance(statistic_type, six.string_types):
raise TypeError("statistic_type must be a string.")
if "," in statistic_type:
raise TypeError("statistic_type must not contain a comma.")
self._statistic_type = statistic_type
def __call__(self, f):
"""Registers "f" as the statistics function for "op_type"."""
_stats_registry.register(f, self._op_type + "," + self._statistic_type)
return f
def get_stats_for_node_def(graph, node, statistic_type):
"""Looks up the node's statistics function in the registry and calls it.
This function takes a Graph object and a NodeDef from a GraphDef, and if
there's an associated statistics method, calls it and returns a result. If no
function has been registered for the particular node type, it returns an empty
statistics object.
Args:
graph: A Graph object that's been set up with the node's graph.
node: A NodeDef describing the operator.
statistic_type: A string identifying the statistic we're interested in.
Returns:
An OpStats object containing information about resource usage.
"""
try:
stats_func = _stats_registry.lookup(node.op + "," + statistic_type)
result = stats_func(graph, node)
except LookupError:
result = OpStats(statistic_type)
return result
def name_from_scope_name(name):
"""Returns the name of an op given the name of its scope.
Args:
name: the name of the scope.
Returns:
the name of the op (equal to scope name minus any trailing slash).
"""
return name[:-1] if (name and name[-1] == "/") else name
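# Illustrative examples (editor's addition):
#   name_from_scope_name("foo/bar/")  -> "foo/bar"
#   name_from_scope_name("foo/bar")   -> "foo/bar"
#   name_from_scope_name("")          -> ""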
_MUTATION_LOCK_GROUP = 0
_SESSION_RUN_LOCK_GROUP = 1
@tf_export("Graph")
class Graph(object):
"""A TensorFlow computation, represented as a dataflow graph.
Graphs are used by `tf.function`s to represent the function's computations.
Each graph contains a set of `tf.Operation` objects, which represent units of
computation; and `tf.Tensor` objects, which represent the units of data that
flow between operations.
### Using graphs directly (deprecated)
A `tf.Graph` can be constructed and used directly without a `tf.function`, as
was required in TensorFlow 1, but this is deprecated and it is recommended to
use a `tf.function` instead. If a graph is directly used, other deprecated
TensorFlow 1 classes are also required to execute the graph, such as a
`tf.compat.v1.Session`.
A default graph can be registered with the `tf.Graph.as_default` context
manager. Then, operations will be added to the graph instead of being executed
eagerly. For example:
```python
g = tf.Graph()
with g.as_default():
# Define operations and tensors in `g`.
c = tf.constant(30.0)
assert c.graph is g
```
`tf.compat.v1.get_default_graph()` can be used to obtain the default graph.
Important note: This class *is not* thread-safe for graph construction. All
operations should be created from a single thread, or external
synchronization must be provided. Unless otherwise specified, all methods
are not thread-safe.
A `Graph` instance supports an arbitrary number of "collections"
that are identified by name. For convenience when building a large
graph, collections can store groups of related objects: for
example, the `tf.Variable` uses a collection (named
`tf.GraphKeys.GLOBAL_VARIABLES`) for
all variables that are created during the construction of a graph. The caller
may define additional collections by specifying a new name.
"""
def __init__(self):
"""Creates a new, empty Graph."""
# Protects core state that can be returned via public accessors.
# Thread-safety is provided on a best-effort basis to support buggy
# programs, and is not guaranteed by the public `tf.Graph` API.
#
# NOTE(mrry): This does not protect the various stacks. A warning will
# be reported if these are used from multiple threads
self._lock = threading.RLock()
# The group lock synchronizes Session.run calls with methods that create
# and mutate ops (e.g. Graph.create_op()). This synchronization is
# necessary because it's illegal to modify an operation after it's been run.
# The group lock allows any number of threads to mutate ops at the same time
# but if any modification is going on, all Session.run calls have to wait.
# Similarly, if one or more Session.run calls are going on, all mutate ops
# have to wait until all Session.run calls have finished.
self._group_lock = lock_util.GroupLock(num_groups=2)
self._nodes_by_id = {} # GUARDED_BY(self._lock)
self._next_id_counter = 0 # GUARDED_BY(self._lock)
self._nodes_by_name = {} # GUARDED_BY(self._lock)
self._version = 0 # GUARDED_BY(self._lock)
# Maps a name used in the graph to the next id to use for that name.
self._names_in_use = {}
self._stack_state_is_thread_local = False
self._thread_local = threading.local()
# Functions that will be applied to choose a device if none is specified.
# In TF2.x or after switch_to_thread_local(),
# self._thread_local._device_function_stack is used instead.
self._graph_device_function_stack = traceable_stack.TraceableStack()
# Default original_op applied to new ops.
self._default_original_op = None
# Current control flow context. It could be either CondContext or
# WhileContext defined in ops/control_flow_ops.py
self._control_flow_context = None
    # A new node will depend on the union of all of the nodes in the stack.
# In TF2.x or after switch_to_thread_local(),
# self._thread_local._control_dependencies_stack is used instead.
self._graph_control_dependencies_stack = []
# Arbitrary collections of objects.
self._collections = {}
# The graph-level random seed
self._seed = None
# A dictionary of attributes that should be applied to all ops.
self._attr_scope_map = {}
# A map from op type to the kernel label that should be used.
self._op_to_kernel_label_map = {}
# A map from op type to an alternative op type that should be used when
# computing gradients.
self._gradient_override_map = {}
# A map from op type to a gradient function that should be used instead.
self._gradient_function_map = {}
# True if the graph is considered "finalized". In that case no
# new operations can be added.
self._finalized = False
# Functions defined in the graph
self._functions = collections.OrderedDict()
# Default GraphDef versions
self._graph_def_versions = versions_pb2.VersionDef(
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)
self._building_function = False
# Stack of colocate_with ops. In TF2.x or after switch_to_thread_local(),
# self._thread_local._colocation_stack is used instead.
self._graph_colocation_stack = traceable_stack.TraceableStack()
# Set of tensors that are dangerous to feed!
self._unfeedable_tensors = object_identity.ObjectIdentitySet()
# Set of operations that are dangerous to fetch!
self._unfetchable_ops = set()
# A map of tensor handle placeholder to tensor dtype.
self._handle_feeders = {}
# A map from tensor handle to its read op.
self._handle_readers = {}
# A map from tensor handle to its move op.
self._handle_movers = {}
# A map from tensor handle to its delete op.
self._handle_deleters = {}
# Allow optimizers and other objects to pseudo-uniquely key graphs (this key
# will be shared when defining function graphs, for example, so optimizers
# being called inside function definitions behave as if they were seeing the
# actual outside graph).
self._graph_key = "grap-key-%d/" % (uid(),)
# A string with the last reduction method passed to
# losses.compute_weighted_loss(), or None. This is required only for
# backward compatibility with Estimator and optimizer V1 use cases.
self._last_loss_reduction = None
# Flag that is used to indicate whether loss has been scaled by optimizer.
    # If this flag has been set, then estimator uses it to scale loss back
# before reporting. This is required only for backward compatibility with
# Estimator and optimizer V1 use cases.
self._is_loss_scaled_by_optimizer = False
self._container = ""
# Set to True if this graph is being built in an
# AutomaticControlDependencies context.
self._add_control_dependencies = False
# Cache for OpDef protobufs retrieved via the C API.
self._op_def_cache = {}
# Cache for constant results of `broadcast_gradient_args()`. The keys are
# tuples of fully-defined shapes: (x_shape_tuple, y_shape_tuple), and the
# values are tuples of reduction indices: (rx, ry).
self._bcast_grad_args_cache = {}
# Cache for constant results of `reduced_shape()`. The keys are pairs of
# tuples: (input_shape_tuple, reduction_indices_tuple), and the values
# are pairs of tuples: (output_shape_kept_dims, tile_scaling).
self._reduced_shape_cache = {}
# TODO(skyewm): fold as much of the above as possible into the C
# implementation
self._scoped_c_graph = c_api_util.ScopedTFGraph()
# The C API requires all ops to have shape functions. Disable this
# requirement (many custom ops do not have shape functions, and we don't
# want to break these existing cases).
pywrap_tf_session.SetRequireShapeInferenceFns(self._c_graph, False)
if tf2.enabled():
self.switch_to_thread_local()
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@tf_contextlib.contextmanager
def _variable_creator_scope(self, creator, priority=100):
"""Scope which defines a variable creation function.
Args:
creator: A callable taking `next_creator` and `kwargs`. See the
`tf.variable_creator_scope` docstring.
priority: Creators with a higher `priority` are called first. Within the
same priority, creators are called inner-to-outer.
Yields:
`_variable_creator_scope` is a context manager with a side effect, but
doesn't return a value.
Raises:
RuntimeError: If variable creator scopes are not properly nested.
"""
# This step keeps a reference to the existing stack, and it also initializes
# self._thread_local._variable_creator_stack if it doesn't exist yet.
old = self._variable_creator_stack
new = list(old)
new.append((priority, creator))
# Sorting is stable, so we'll put higher-priority creators later in the list
# but otherwise maintain registration order.
new.sort(key=lambda item: item[0])
self._thread_local._variable_creator_stack = new # pylint: disable=protected-access
try:
yield
finally:
if self._thread_local._variable_creator_stack is not new: # pylint: disable=protected-access
raise RuntimeError(
"Exiting variable_creator_scope without proper nesting.")
self._thread_local._variable_creator_stack = old # pylint: disable=protected-access
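  # Illustrative sketch (editor's addition, not part of the original source):
  # a creator receives the next creator in the stack plus the keyword
  # arguments, and may modify them before delegating. `my_creator` below is a
  # hypothetical example used with this private scope.
  #
  #   def my_creator(next_creator, **kwargs):
  #     kwargs["trainable"] = False  # e.g. force variables to be non-trainable
  #     return next_creator(**kwargs)
  #
  #   with graph._variable_creator_scope(my_creator):
  #     ...  # variables created here go through my_creator first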
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@property
def _variable_creator_stack(self):
if not hasattr(self._thread_local, "_variable_creator_stack"):
self._thread_local._variable_creator_stack = [] # pylint: disable=protected-access
# This previously returned a copy of the stack instead of the stack itself,
# to guard against accidental mutation. Consider, however, code that wants
# to save and restore the variable creator stack:
# def f():
# original_stack = graph._variable_creator_stack
# graph._variable_creator_stack = new_stack
# ... # Some code
# graph._variable_creator_stack = original_stack
#
    # And let's say you have some code that calls this function with some
# variable_creator:
# def g():
# with variable_scope.variable_creator_scope(creator):
# f()
    # When exiting the variable creator scope, it would see a different stack
    # object than it expected, leading to an "Exiting variable_creator_scope
    # without proper nesting" error.
return self._thread_local._variable_creator_stack # pylint: disable=protected-access
@_variable_creator_stack.setter
def _variable_creator_stack(self, variable_creator_stack):
self._thread_local._variable_creator_stack = variable_creator_stack # pylint: disable=protected-access
def _check_not_finalized(self):
"""Check if the graph is finalized.
Raises:
      RuntimeError: If the graph is finalized.
"""
if self._finalized:
raise RuntimeError("Graph is finalized and cannot be modified.")
def _add_op(self, op, op_name):
"""Adds 'op' to the graph and returns the unique ID for the added Operation.
Args:
op: the Operation to add.
op_name: the name of the Operation.
Returns:
An integer that is a unique ID for the added Operation.
"""
self._check_not_finalized()
with self._lock:
self._next_id_counter += 1
op_id = self._next_id_counter
self._nodes_by_id[op_id] = op
self._nodes_by_name[op_name] = op
self._version = max(self._version, op_id)
return op_id
@property
def _c_graph(self):
if self._scoped_c_graph:
return self._scoped_c_graph.graph
return None
@property
def version(self):
"""Returns a version number that increases as ops are added to the graph.
Note that this is unrelated to the
`tf.Graph.graph_def_versions`.
Returns:
An integer version that increases as ops are added to the graph.
"""
if self._finalized:
return self._version
with self._lock:
return self._version
@property
def graph_def_versions(self):
# pylint: disable=line-too-long
"""The GraphDef version information of this graph.
For details on the meaning of each version, see
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).
Returns:
A `VersionDef`.
"""
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
pywrap_tf_session.TF_GraphVersions(self._c_graph, buf)
data = pywrap_tf_session.TF_GetBuffer(buf)
version_def = versions_pb2.VersionDef()
version_def.ParseFromString(compat.as_bytes(data))
return version_def
@property
def seed(self):
"""The graph-level random seed of this graph."""
return self._seed
@seed.setter
def seed(self, seed):
self._seed = seed
@property
def finalized(self):
"""True if this graph has been finalized."""
return self._finalized
def finalize(self):
"""Finalizes this graph, making it read-only.
After calling `g.finalize()`, no new operations can be added to
`g`. This method is used to ensure that no operations are added
to a graph when it is shared between multiple threads, for example
when using a `tf.compat.v1.train.QueueRunner`.
"""
self._finalized = True
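  # Illustrative sketch (editor's addition): once finalized, any attempt to
  # add an op raises RuntimeError.
  #
  #   g = tf.Graph()
  #   g.finalize()
  #   with g.as_default():
  #     tf.constant(1.0)  # raises RuntimeError: "Graph is finalized ..."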
def _unsafe_unfinalize(self):
"""Opposite of `finalize`.
Internal interface.
NOTE: Unfinalizing a graph could have negative impact on performance,
especially in a multi-threaded environment. Unfinalizing a graph
when it is in use by a Session may lead to undefined behavior. Ensure
that all sessions using a graph are closed before calling this method.
"""
self._finalized = False
def _get_control_flow_context(self):
"""Returns the current control flow context.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
def _copy_functions_to_graph_def(self, graph_def, starting_bytesize):
"""If this graph contains functions, copy them to `graph_def`."""
bytesize = starting_bytesize
for f in self._functions.values():
bytesize += f.definition.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
graph_def.library.function.extend([f.definition])
if f.grad_func_name:
grad_def = function_pb2.GradientDef()
grad_def.function_name = f.name
grad_def.gradient_func = f.grad_func_name
graph_def.library.gradient.extend([grad_def])
def _as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using `tf.import_graph_def`) or used with the
[C++ Session API](../../../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef` containing
only the nodes that were added to this graph since its `version`
property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each node with
the inferred shapes of each of its outputs.
Returns:
A tuple containing a
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer, and the version of the graph to which that
`GraphDef` corresponds.
Raises:
ValueError: If the `graph_def` would be too large.
"""
# pylint: enable=line-too-long
with self._lock:
with c_api_util.tf_buffer() as buf:
pywrap_tf_session.TF_GraphToGraphDef(self._c_graph, buf)
data = pywrap_tf_session.TF_GetBuffer(buf)
graph = graph_pb2.GraphDef()
graph.ParseFromString(compat.as_bytes(data))
# Strip the experimental library field iff it's empty.
if not graph.library.function:
graph.ClearField("library")
if add_shapes:
for node in graph.node:
op = self._nodes_by_name[node.name]
if op.outputs:
node.attr["_output_shapes"].list.shape.extend(
[output.get_shape().as_proto() for output in op.outputs])
for function_def in graph.library.function:
defined_function = self._functions[function_def.signature.name]
try:
func_graph = defined_function.graph
except AttributeError:
# _DefinedFunction doesn't have a graph, _EagerDefinedFunction
# does. Both rely on ops.py, so we can't really isinstance check
# them.
continue
input_shapes = function_def.attr["_input_shapes"]
try:
func_graph_inputs = func_graph.inputs
except AttributeError:
continue
# TODO(b/141471245): Fix the inconsistency when inputs of func graph
# are appended during gradient computation of while/cond.
for input_tensor, _ in zip(func_graph_inputs,
function_def.signature.input_arg):
if input_tensor.dtype == dtypes.resource:
# TODO(allenl): Save and restore handle data, then save the
# resource placeholder's shape. Right now some shape functions get
# confused if we set the shape of the resource placeholder (to a
# scalar of course) and there isn't any handle data.
input_shapes.list.shape.add().CopyFrom(
tensor_shape.TensorShape(None).as_proto())
else:
input_shapes.list.shape.add().CopyFrom(
input_tensor.get_shape().as_proto())
for node in function_def.node_def:
try:
op = func_graph.get_operation_by_name(node.name)
except KeyError:
continue
outputs = op.outputs
if op.type == "StatefulPartitionedCall":
# Filter out any extra outputs (possibly added by function
# backpropagation rewriting).
num_outputs = len(node.attr["Tout"].list.type)
outputs = outputs[:num_outputs]
node.attr["_output_shapes"].list.shape.extend(
[output.get_shape().as_proto() for output in outputs])
return graph, self._version
def as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using `tf.import_graph_def`) or used with the
[C++ Session API](../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef` containing
only the nodes that were added to this graph since its `version`
property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each node with
the inferred shapes of each of its outputs.
Returns:
A
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer.
Raises:
ValueError: If the `graph_def` would be too large.
"""
# pylint: enable=line-too-long
result, _ = self._as_graph_def(from_version, add_shapes)
return result
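  # Illustrative sketch (editor's addition): serializing a graph and
  # re-importing its GraphDef into a fresh graph.
  #
  #   g = tf.Graph()
  #   with g.as_default():
  #     tf.constant(1.0, name="one")
  #   gd = g.as_graph_def(add_shapes=True)
  #   g2 = tf.Graph()
  #   with g2.as_default():
  #     tf.import_graph_def(gd, name="")
  #   assert g2.get_tensor_by_name("one:0") is not None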
def _is_function(self, name):
"""Tests whether 'name' is registered in this graph's function library.
Args:
name: string op name.
Returns:
bool indicating whether or not 'name' is registered in function library.
"""
return compat.as_str(name) in self._functions
def _get_function(self, name):
"""Returns the function definition for 'name'.
Args:
name: string function name.
Returns:
The function def proto.
"""
return self._functions.get(compat.as_str(name), None)
def _add_function(self, function):
"""Adds a function to the graph.
    After the function has been added, you can call the function by
    passing the function name in place of an op name to
`Graph.create_op()`.
Args:
function: A `_DefinedFunction` object.
Raises:
ValueError: if another function is defined with the same name.
"""
self._check_not_finalized()
name = function.name
# Sanity checks on gradient definition.
if (function.grad_func_name is not None) and (function.python_grad_func is
not None):
raise ValueError("Gradient defined twice for function %s" % name)
# Add function to graph
# pylint: disable=protected-access
gradient = (
function._grad_func._c_func.func if function._grad_func else None)
pywrap_tf_session.TF_GraphCopyFunction(self._c_graph, function._c_func.func,
gradient)
# pylint: enable=protected-access
self._functions[compat.as_str(name)] = function
# Need a new-enough consumer to support the functions we add to the graph.
if self._graph_def_versions.min_consumer < 12:
self._graph_def_versions.min_consumer = 12
@property
def building_function(self):
"""Returns True iff this graph represents a function."""
return self._building_function
# Helper functions to create operations.
@deprecated_args(None,
"Shapes are always computed; don't use the compute_shapes "
"as it has no effect.", "compute_shapes")
def create_op(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
"""Creates an `Operation` in this graph.
This is a low-level interface for creating an `Operation`. Most
programs will not call this method directly, and instead use the
Python op constructors, such as `tf.constant()`, which add ops to
the default graph.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: (Optional) A list of `DType` objects that will be the types of the
tensors that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of the
tensors that the operation consumes. By default, uses the base `DType`
of each input in `inputs`. Operations that expect reference-typed inputs
must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
computed).
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Raises:
TypeError: if any of the inputs is not a `Tensor`.
ValueError: if colocation conflicts with existing device assignment.
Returns:
An `Operation` object.
"""
del compute_shapes
for idx, a in enumerate(inputs):
if not isinstance(a, Tensor):
raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
return self._create_op_internal(op_type, inputs, dtypes, input_types, name,
attrs, op_def, compute_device)
def _create_op_internal(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_device=True):
"""Creates an `Operation` in this graph.
Implements `Graph.create_op()` without the overhead of the deprecation
wrapper.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: (Optional) A list of `DType` objects that will be the types of the
tensors that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of the
tensors that the operation consumes. By default, uses the base `DType`
of each input in `inputs`. Operations that expect reference-typed inputs
must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Raises:
ValueError: if colocation conflicts with existing device assignment.
Returns:
An `Operation` object.
"""
self._check_not_finalized()
if name is None:
name = op_type
    # If a name ends with a '/' it is a "name scope", and we use it as-is,
# after removing the trailing '/'.
if name and name[-1] == "/":
name = name_from_scope_name(name)
else:
name = self.unique_name(name)
node_def = _NodeDef(op_type, name, attrs)
input_ops = set(t.op for t in inputs)
control_inputs = self._control_dependencies_for_inputs(input_ops)
# _create_op_helper mutates the new Operation. `_mutation_lock` ensures a
# Session.run call cannot occur between creating and mutating the op.
with self._mutation_lock():
ret = Operation(
node_def,
self,
inputs=inputs,
output_types=dtypes,
control_inputs=control_inputs,
input_types=input_types,
original_op=self._default_original_op,
op_def=op_def)
self._create_op_helper(ret, compute_device=compute_device)
return ret
def _create_op_from_tf_operation(self, c_op, compute_device=True):
"""Creates an `Operation` in this graph from the supplied TF_Operation.
This method is like create_op() except the new Operation is constructed
using `c_op`. The returned Operation will have `c_op` as its _c_op
field. This is used to create Operation objects around TF_Operations created
indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile).
This function does not call Operation._control_flow_post_processing or
Graph._control_dependencies_for_inputs (since the inputs may not be
available yet). The caller is responsible for calling these methods.
Args:
c_op: a wrapped TF_Operation
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Returns:
An `Operation` object.
"""
self._check_not_finalized()
ret = Operation(c_op, self)
# If a name_scope was created with ret.name but no nodes were created in it,
# the name will still appear in _names_in_use even though the name hasn't
# been used. This is ok, just leave _names_in_use as-is in this case.
# TODO(skyewm): make the C API guarantee no name conflicts.
name_key = ret.name.lower()
if name_key not in self._names_in_use:
self._names_in_use[name_key] = 1
self._create_op_helper(ret, compute_device=compute_device)
return ret
def _create_op_helper(self, op, compute_device=True):
"""Common logic for creating an op in this graph."""
# Apply any additional attributes requested. Do not overwrite any existing
# attributes.
for key, value in self._attr_scope_map.items():
try:
op.get_attr(key)
except ValueError:
if callable(value):
value = value(op.node_def)
if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):
raise TypeError(
"Callable for scope map key '%s' must return either None or "
"an AttrValue protocol buffer; but it returned: %s" %
(key, value))
if value:
op._set_attr(key, value) # pylint: disable=protected-access
# Apply a kernel label if one has been specified for this op type.
try:
kernel_label = self._op_to_kernel_label_map[op.type]
op._set_attr("_kernel", # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))
except KeyError:
pass
op._gradient_function = self._gradient_function_map.get(op.type) # pylint: disable=protected-access
# Apply the overriding op type for gradients if one has been specified for
# this op type.
try:
mapped_op_type = self._gradient_override_map[op.type]
op._set_attr("_gradient_op_type", # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))
except KeyError:
pass
self._record_op_seen_by_control_dependencies(op)
if compute_device:
self._apply_device_functions(op)
# Snapshot the colocation stack metadata before we might generate error
# messages using it. Note that this snapshot depends on the actual stack
# and is independent of the op's _class attribute.
# pylint: disable=protected-access
op._colocation_code_locations = self._snapshot_colocation_stack_metadata()
# pylint: enable=protected-access
if self._colocation_stack:
all_colocation_groups = []
is_device_set = False
for colocation_op in self._colocation_stack.peek_objs():
try:
all_colocation_groups.extend(colocation_op.colocation_groups())
except AttributeError:
pass
if colocation_op.device and not is_device_set:
# pylint: disable=protected-access
op._set_device(colocation_op.device)
# pylint: enable=protected-access
is_device_set = True
all_colocation_groups = sorted(set(all_colocation_groups))
# pylint: disable=protected-access
op._set_attr(
"_class",
attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))
# pylint: enable=protected-access
# Sets "container" attribute if
# (1) self._container is not None
# (2) "is_stateful" is set in OpDef
# (3) "container" attribute is in OpDef
# (4) "container" attribute is None
if self._container and op._is_stateful: # pylint: disable=protected-access
try:
container_attr = op.get_attr("container")
except ValueError:
# "container" attribute is not in OpDef
pass
else:
if not container_attr:
op._set_attr("container", attr_value_pb2.AttrValue( # pylint: disable=protected-access
s=compat.as_bytes(self._container)))
def _add_new_tf_operations(self, compute_devices=True):
"""Creates `Operations` in this graph for any new TF_Operations.
This is useful for when TF_Operations are indirectly created by the C API
outside of the Operation constructor (e.g. by TF_ImportGraphDef,
TF_FinishWhile). This ensures there are corresponding Operations for all
TF_Operations in the underlying TF_Graph.
Args:
compute_devices: (Optional.) If True, device functions will be executed to
compute the device properties of each new Operation.
Returns:
A list of the new `Operation` objects.
"""
self._check_not_finalized()
# Create all Operation objects before accessing their inputs since an op may
# be created before its inputs.
new_ops = [
self._create_op_from_tf_operation(c_op, compute_device=compute_devices)
for c_op in c_api_util.new_tf_operations(self)
]
# pylint: disable=protected-access
for op in new_ops:
new_control_inputs = self._control_dependencies_for_inputs(op.inputs)
op._add_control_inputs(new_control_inputs)
op._control_flow_post_processing()
# pylint: enable=protected-access
return new_ops
def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
"""Returns the object referred to by `obj`, as an `Operation` or `Tensor`.
This function validates that `obj` represents an element of this
graph, and gives an informative error message if it is not.
This function is the canonical way to get/validate an object of
one of the allowed types from an external argument reference in the
Session API.
This method may be called concurrently from multiple threads.
Args:
obj: A `Tensor`, an `Operation`, or the name of a tensor or operation. Can
also be any object with an `_as_graph_element()` method that returns a
value of one of these types. Note: `_as_graph_element` will be called
inside the graph's lock and so may not modify the graph.
allow_tensor: If true, `obj` may refer to a `Tensor`.
allow_operation: If true, `obj` may refer to an `Operation`.
Returns:
The `Tensor` or `Operation` in the Graph corresponding to `obj`.
Raises:
TypeError: If `obj` is not a type we support attempting to convert
to types.
ValueError: If `obj` is of an appropriate type but invalid. For
example, an invalid string.
KeyError: If `obj` is not an object in the graph.
"""
if self._finalized:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
with self._lock:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
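  # Illustrative sketch (editor's addition): resolving names to graph elements.
  #
  #   g = tf.Graph()
  #   with g.as_default():
  #     c = tf.constant(1.0, name="c")
  #   assert g.as_graph_element("c:0") is c   # tensor name
  #   assert g.as_graph_element("c") is c.op  # operation name
  #   assert g.as_graph_element(c) is c       # pass-through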
def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):
"""See `Graph.as_graph_element()` for details."""
# The vast majority of this function is figuring
# out what an API user might be doing wrong, so
# that we can give helpful error messages.
#
# Ideally, it would be nice to split it up, but we
# need context to generate nice error messages.
if allow_tensor and allow_operation:
types_str = "Tensor or Operation"
elif allow_tensor:
types_str = "Tensor"
elif allow_operation:
types_str = "Operation"
else:
raise ValueError("allow_tensor and allow_operation can't both be False.")
temp_obj = _as_graph_element(obj)
if temp_obj is not None:
obj = temp_obj
# If obj appears to be a name...
if isinstance(obj, compat.bytes_or_text_types):
name = compat.as_str(obj)
if ":" in name and allow_tensor:
# Looks like a Tensor name and can be a Tensor.
try:
op_name, out_n = name.split(":")
out_n = int(out_n)
except:
raise ValueError("The name %s looks a like a Tensor name, but is "
"not a valid one. Tensor names must be of the "
"form \"<op_name>:<output_index>\"." % repr(name))
if op_name in self._nodes_by_name:
op = self._nodes_by_name[op_name]
else:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, does not exist in the "
"graph." % (repr(name), repr(op_name)))
try:
return op.outputs[out_n]
except:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, exists but only has "
"%s outputs." %
(repr(name), repr(op_name), len(op.outputs)))
elif ":" in name and not allow_tensor:
# Looks like a Tensor name but can't be a Tensor.
raise ValueError("Name %s appears to refer to a Tensor, not a %s." %
(repr(name), types_str))
elif ":" not in name and allow_operation:
# Looks like an Operation name and can be an Operation.
if name not in self._nodes_by_name:
raise KeyError("The name %s refers to an Operation not in the "
"graph." % repr(name))
return self._nodes_by_name[name]
elif ":" not in name and not allow_operation:
# Looks like an Operation name but can't be an Operation.
if name in self._nodes_by_name:
# Yep, it's an Operation name
err_msg = ("The name %s refers to an Operation, not a %s." %
(repr(name), types_str))
else:
err_msg = ("The name %s looks like an (invalid) Operation name, "
"not a %s." % (repr(name), types_str))
err_msg += (" Tensor names must be of the form "
"\"<op_name>:<output_index>\".")
raise ValueError(err_msg)
elif isinstance(obj, Tensor) and allow_tensor:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Tensor %s is not an element of this graph." % obj)
return obj
elif isinstance(obj, Operation) and allow_operation:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Operation %s is not an element of this graph." % obj)
return obj
else:
# We give up!
raise TypeError("Can not convert a %s into a %s." %
(type(obj).__name__, types_str))
def get_operations(self):
"""Return the list of operations in the graph.
You can modify the operations in place, but modifications
    to the list such as inserts/deletes have no effect on the
list of operations known to the graph.
This method may be called concurrently from multiple threads.
Returns:
A list of Operations.
"""
if self._finalized:
return list(self._nodes_by_id.values())
with self._lock:
return list(self._nodes_by_id.values())
def get_operation_by_name(self, name):
"""Returns the `Operation` with the given `name`.
This method may be called concurrently from multiple threads.
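    For example, a minimal illustrative sketch (assuming TensorFlow is imported
    as `tf`):
    ```python
    with tf.Graph().as_default() as g:
      tf.constant(1.0, name="c")
      op = g.get_operation_by_name("c")
      assert op.type == "Const"
    ```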
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to an operation in this graph.
"""
if not isinstance(name, six.string_types):
raise TypeError("Operation names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=False, allow_operation=True)
def _get_operation_by_name_unsafe(self, name):
"""Returns the `Operation` with the given `name`.
    This is an internal unsafe version of get_operation_by_name. It skips many
    checks and does not have user-friendly error messages but runs considerably
faster. This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
KeyError: If `name` does not correspond to an operation in this graph.
"""
if self._finalized:
return self._nodes_by_name[name]
with self._lock:
return self._nodes_by_name[name]
def _get_operation_by_tf_operation(self, tf_oper):
op_name = pywrap_tf_session.TF_OperationName(tf_oper)
return self._get_operation_by_name_unsafe(op_name)
def get_tensor_by_name(self, name):
"""Returns the `Tensor` with the given `name`.
This method may be called concurrently from multiple threads.
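    For example, a minimal illustrative sketch (assuming TensorFlow is imported
    as `tf`):
    ```python
    with tf.Graph().as_default() as g:
      c = tf.constant(1.0, name="c")
      assert g.get_tensor_by_name("c:0") is c
    ```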
Args:
name: The name of the `Tensor` to return.
Returns:
The `Tensor` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to a tensor in this graph.
"""
# Names should be strings.
if not isinstance(name, six.string_types):
raise TypeError("Tensor names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=True, allow_operation=False)
def _get_tensor_by_tf_output(self, tf_output):
"""Returns the `Tensor` representing `tf_output`.
Note that there is only one such `Tensor`, i.e. multiple calls to this
function with the same TF_Output value will always return the same `Tensor`
object.
Args:
tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`).
Returns:
The `Tensor` that represents `tf_output`.
"""
op = self._get_operation_by_tf_operation(tf_output.oper)
return op.outputs[tf_output.index]
@property
def _last_id(self):
return self._next_id_counter
def _get_op_def(self, type): # pylint: disable=redefined-builtin
"""Returns the `OpDef` proto for `type`. `type` is a string."""
# NOTE: No locking is required because the lookup and insertion operations
# on Python dictionaries are atomic.
try:
return self._op_def_cache[type]
except KeyError:
with c_api_util.tf_buffer() as buf:
# pylint: disable=protected-access
pywrap_tf_session.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type),
buf)
# pylint: enable=protected-access
data = pywrap_tf_session.TF_GetBuffer(buf)
op_def = op_def_pb2.OpDef()
op_def.ParseFromString(compat.as_bytes(data))
self._op_def_cache[type] = op_def
return op_def
def as_default(self):
"""Returns a context manager that makes this `Graph` the default graph.
This method should be used if you want to create multiple graphs
in the same process. For convenience, a global default graph is
provided, and all ops will be added to this graph if you do not
create a new graph explicitly.
Use this method with the `with` keyword to specify that ops created within
the scope of a block should be added to this graph. In this case, once
the scope of the `with` is exited, the previous default graph is set again
as default. There is a stack, so it's ok to have multiple nested levels
of `as_default` calls.
The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
The following code examples are equivalent:
```python
# 1. Using Graph.as_default():
g = tf.Graph()
with g.as_default():
c = tf.constant(5.0)
assert c.graph is g
# 2. Constructing and making default:
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
assert c.graph is g
```
If eager execution is enabled ops created under this context manager will be
added to the graph instead of executed eagerly.
Returns:
A context manager for using this graph as the default graph.
"""
return _default_graph_stack.get_controller(self)
@property
def collections(self):
"""Returns the names of the collections known to this graph."""
return list(self._collections)
def add_to_collection(self, name, value):
"""Stores `value` in the collection with the given `name`.
Note that collections are not sets, so it is possible to add a value to
a collection several times.
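    For example, a minimal illustrative sketch (assuming TensorFlow is imported
    as `tf`):
    ```python
    g = tf.Graph()
    g.add_to_collection("my_collection", 1)
    g.add_to_collection("my_collection", 1)  # duplicates are kept
    assert g.get_collection("my_collection") == [1, 1]
    ```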
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
value: The value to add to the collection.
""" # pylint: disable=g-doc-exception
self._check_not_finalized()
with self._lock:
if name not in self._collections:
self._collections[name] = [value]
else:
self._collections[name].append(value)
def add_to_collections(self, names, value):
"""Stores `value` in the collections given by `names`.
Note that collections are not sets, so it is possible to add a value to
a collection several times. This function makes sure that duplicates in
`names` are ignored, but it will not check for pre-existing membership of
`value` in any of the collections in `names`.
`names` can be any iterable, but if `names` is a string, it is treated as a
single collection name.
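    For example, a minimal illustrative sketch (assuming TensorFlow is imported
    as `tf`):
    ```python
    g = tf.Graph()
    g.add_to_collections(["a", "b", "a"], 1)  # duplicate names are ignored
    assert g.get_collection("a") == [1]
    assert g.get_collection("b") == [1]
    g.add_to_collections("c", 2)  # a string is treated as a single name
    assert g.get_collection("c") == [2]
    ```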
Args:
names: The keys for the collections to add to. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collections.
"""
# Make sure names are unique, but treat strings as a single collection name
names = (names,) if isinstance(names, six.string_types) else set(names)
for name in names:
self.add_to_collection(name, value)
def get_collection_ref(self, name):
"""Returns a list of values in the collection with the given `name`.
If the collection exists, this returns the list itself, which can
be modified in place to change the collection. If the collection does
not exist, it is created as an empty list and the list is returned.
This is different from `get_collection()` which always returns a copy of
the collection list if it exists and never creates an empty collection.
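    For example, a minimal illustrative sketch (assuming TensorFlow is imported
    as `tf`):
    ```python
    g = tf.Graph()
    ref = g.get_collection_ref("my_collection")  # created empty on first access
    ref.append("x")                              # mutates the collection in place
    assert g.get_collection("my_collection") == ["x"]
    ```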
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection.
""" # pylint: disable=g-doc-exception
with self._lock:
coll_list = self._collections.get(name, None)
if coll_list is None:
coll_list = []
self._collections[name] = coll_list
return coll_list
def get_collection(self, name, scope=None):
"""Returns a list of values in the collection with the given `name`.
    This is different from `get_collection_ref()`, which always returns the
    actual collection list if it exists: this method returns a new copy of
    the list each time it is called.
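    For example, a minimal illustrative sketch of scope filtering (assuming
    TensorFlow is imported as `tf`):
    ```python
    with tf.Graph().as_default() as g:
      a = tf.constant(1.0, name="scope_a/c")
      b = tf.constant(2.0, name="scope_b/c")
      g.add_to_collection("my_collection", a)
      g.add_to_collection("my_collection", b)
      filtered = g.get_collection("my_collection", scope="scope_a")
      assert len(filtered) == 1 and filtered[0] is a
    ```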
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
scope: (Optional.) A string. If supplied, the resulting list is filtered
to include only items whose `name` attribute matches `scope` using
`re.match`. Items without a `name` attribute are never returned if a
scope is supplied. The choice of `re.match` means that a `scope` without
special tokens filters by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
""" # pylint: disable=g-doc-exception
with self._lock:
collection = self._collections.get(name, None)
if collection is None:
return []
if scope is None:
return list(collection)
else:
c = []
regex = re.compile(scope)
for item in collection:
try:
if regex.match(item.name):
c.append(item)
except AttributeError:
# Collection items with no name are ignored.
pass
return c
def get_all_collection_keys(self):
"""Returns a list of collections used in this graph."""
with self._lock:
return [x for x in self._collections if isinstance(x, six.string_types)]
def clear_collection(self, name):
"""Clears all values in a collection.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
"""
self._check_not_finalized()
with self._lock:
if name in self._collections:
del self._collections[name]
@tf_contextlib.contextmanager
def _original_op(self, op):
"""Python 'with' handler to help annotate ops with their originator.
An op may have an 'original_op' property that indicates the op on which
it was based. For example a replica op is based on the op that was
replicated and a gradient op is based on the op that was differentiated.
All ops created in the scope of this 'with' handler will have
the given 'op' as their original op.
Args:
op: The Operation that all ops created in this scope will have as their
original op.
Yields:
Nothing.
"""
old_original_op = self._default_original_op
self._default_original_op = op
try:
yield
finally:
self._default_original_op = old_original_op
@property
def _name_stack(self):
# This may be called from a thread where name_stack doesn't yet exist.
if not hasattr(self._thread_local, "_name_stack"):
self._thread_local._name_stack = ""
return self._thread_local._name_stack
@_name_stack.setter
def _name_stack(self, name_stack):
self._thread_local._name_stack = name_stack
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_contextlib.contextmanager
def name_scope(self, name):
"""Returns a context manager that creates hierarchical names for operations.
A graph maintains a stack of name scopes. A `with name_scope(...):`
statement pushes a new name onto the stack for the lifetime of the context.
The `name` argument will be interpreted as follows:
* A string (not ending with '/') will create a new name scope, in which
`name` is appended to the prefix of all operations created in the
context. If `name` has been used before, it will be made unique by
calling `self.unique_name(name)`.
* A scope previously captured from a `with g.name_scope(...) as
scope:` statement will be treated as an "absolute" name scope, which
makes it possible to re-enter existing scopes.
* A value of `None` or the empty string will reset the current name scope
to the top-level (empty) name scope.
For example:
```python
with tf.Graph().as_default() as g:
c = tf.constant(5.0, name="c")
assert c.op.name == "c"
c_1 = tf.constant(6.0, name="c")
assert c_1.op.name == "c_1"
# Creates a scope called "nested"
with g.name_scope("nested") as scope:
nested_c = tf.constant(10.0, name="c")
assert nested_c.op.name == "nested/c"
# Creates a nested scope called "inner".
with g.name_scope("inner"):
nested_inner_c = tf.constant(20.0, name="c")
assert nested_inner_c.op.name == "nested/inner/c"
# Create a nested scope called "inner_1".
with g.name_scope("inner"):
nested_inner_1_c = tf.constant(30.0, name="c")
assert nested_inner_1_c.op.name == "nested/inner_1/c"
# Treats `scope` as an absolute name scope, and
# switches to the "nested/" scope.
with g.name_scope(scope):
nested_d = tf.constant(40.0, name="d")
assert nested_d.op.name == "nested/d"
with g.name_scope(""):
e = tf.constant(50.0, name="e")
assert e.op.name == "e"
```
The name of the scope itself can be captured by `with
g.name_scope(...) as scope:`, which stores the name of the scope
in the variable `scope`. This value can be used to name an
operation that represents the overall result of executing the ops
in a scope. For example:
```python
inputs = tf.constant(...)
with g.name_scope('my_layer') as scope:
weights = tf.Variable(..., name="weights")
biases = tf.Variable(..., name="biases")
affine = tf.matmul(inputs, weights) + biases
output = tf.nn.relu(affine, name=scope)
```
NOTE: This constructor validates the given `name`. Valid scope
names match one of the following regular expressions:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root)
[A-Za-z0-9_.\\-/]* (for other scopes)
Args:
name: A name for the scope.
Returns:
A context manager that installs `name` as a new name scope.
Raises:
ValueError: If `name` is not a valid scope name, according to the rules
above.
"""
if name:
if isinstance(name, compat.bytes_or_text_types):
name = compat.as_str(name)
if self._name_stack:
# Scopes created in a nested scope may have initial characters
# that are illegal as the initial character of an op name
# (viz. '-', '\', '/', and '_').
if not _VALID_SCOPE_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
else:
# Scopes created in the root must match the more restrictive
# op name regex, which constrains the initial character.
if not _VALID_OP_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
old_stack = self._name_stack
if not name: # Both for name=None and name="" we re-set to empty scope.
new_stack = None
elif name[-1] == "/":
new_stack = name_from_scope_name(name)
else:
new_stack = self.unique_name(name)
self._name_stack = new_stack
try:
yield "" if new_stack is None else new_stack + "/"
finally:
self._name_stack = old_stack
# pylint: enable=g-doc-return-or-yield,line-too-long
def unique_name(self, name, mark_as_used=True):
"""Return a unique operation name for `name`.
Note: You rarely need to call `unique_name()` directly. Most of
the time you just need to create `with g.name_scope()` blocks to
generate structured names.
`unique_name` is used to generate structured names, separated by
`"/"`, to help identify operations when debugging a graph.
Operation names are displayed in error messages reported by the
TensorFlow runtime, and in various visualization tools such as
TensorBoard.
If `mark_as_used` is set to `True`, which is the default, a new
unique name is created and marked as in use. If it's set to `False`,
the unique name is returned without actually being marked as used.
This is useful when the caller simply wants to know what the name
to be created will be.
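    For example, a minimal illustrative sketch (assuming TensorFlow is imported
    as `tf`):
    ```python
    g = tf.Graph()
    assert g.unique_name("foo") == "foo"
    assert g.unique_name("foo") == "foo_1"
    assert g.unique_name("foo", mark_as_used=False) == "foo_2"
    assert g.unique_name("foo") == "foo_2"  # "foo_2" was not reserved above
    ```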
Args:
name: The name for an operation.
mark_as_used: Whether to mark this name as being used.
Returns:
A string to be passed to `create_op()` that will be used
to name the operation being created.
"""
if self._name_stack:
name = self._name_stack + "/" + name
# For the sake of checking for names in use, we treat names as case
# insensitive (e.g. foo = Foo).
name_key = name.lower()
i = self._names_in_use.get(name_key, 0)
# Increment the number for "name_key".
if mark_as_used:
self._names_in_use[name_key] = i + 1
if i > 0:
base_name_key = name_key
# Make sure the composed name key is not already used.
while name_key in self._names_in_use:
name_key = "%s_%d" % (base_name_key, i)
i += 1
# Mark the composed name_key as used in case someone wants
# to call unique_name("name_1").
if mark_as_used:
self._names_in_use[name_key] = 1
# Return the new name with the original capitalization of the given name.
name = "%s_%d" % (name, i - 1)
return name
def get_name_scope(self):
"""Returns the current name scope.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.compat.v1.get_default_graph().get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
return self._name_stack
@tf_contextlib.contextmanager
def _colocate_with_for_gradient(self, op, gradient_uid,
ignore_existing=False):
with self.colocate_with(op, ignore_existing):
if gradient_uid is not None and self._control_flow_context is not None:
self._control_flow_context.EnterGradientColocation(op, gradient_uid)
try:
yield
finally:
self._control_flow_context.ExitGradientColocation(op, gradient_uid)
else:
yield
@tf_contextlib.contextmanager
def colocate_with(self, op, ignore_existing=False):
"""Returns a context manager that specifies an op to colocate with.
Note: this function is not for public use, only for internal libraries.
For example:
```python
a = tf.Variable([1.0])
with g.colocate_with(a):
b = tf.constant(1.0)
c = tf.add(a, b)
```
`b` and `c` will always be colocated with `a`, no matter where `a`
is eventually placed.
**NOTE** Using a colocation scope resets any existing device constraints.
If `op` is `None` then `ignore_existing` must be `True` and the new
scope resets all colocation and device constraints.
Args:
op: The op to colocate all created ops with, or `None`.
ignore_existing: If true, only applies colocation of this op within the
context, rather than applying all colocation properties on the stack.
If `op` is `None`, this value must be `True`.
Raises:
ValueError: if op is None but ignore_existing is False.
Yields:
A context manager that specifies the op with which to colocate
newly created ops.
"""
if op is None and not ignore_existing:
raise ValueError("Trying to reset colocation (op is None) but "
"ignore_existing is not True")
op, device_only_candidate = _op_to_colocate_with(op, self)
# By default, colocate_with resets the device function stack,
# since colocate_with is typically used in specific internal
# library functions where colocation is intended to be "stronger"
# than device functions.
#
# In the future, a caller may specify that device_functions win
# over colocation, in which case we can add support.
device_fn_tmp = self._device_function_stack
self._device_function_stack = traceable_stack.TraceableStack()
if ignore_existing:
current_stack = self._colocation_stack
self._colocation_stack = traceable_stack.TraceableStack()
if op is not None:
# offset refers to the stack frame used for storing code location.
# We use 4, the sum of 1 to use our caller's stack frame and 3
# to jump over layers of context managers above us.
if device_only_candidate is not None:
self._colocation_stack.push_obj(device_only_candidate, offset=4)
self._colocation_stack.push_obj(op, offset=4)
elif not ignore_existing:
raise ValueError("Trying to reset colocation (op is None) but "
"ignore_existing is not True")
try:
yield
finally:
# Restore device function stack
self._device_function_stack = device_fn_tmp
if op is not None:
self._colocation_stack.pop_obj()
if device_only_candidate is not None:
self._colocation_stack.pop_obj()
# Reset the colocation stack if requested.
if ignore_existing:
self._colocation_stack = current_stack
def _add_device_to_stack(self, device_name_or_function, offset=0):
"""Add device to stack manually, separate from a context manager."""
total_offset = 1 + offset
spec = _UserDeviceSpec(device_name_or_function)
self._device_function_stack.push_obj(spec, offset=total_offset)
return spec
@tf_contextlib.contextmanager
def device(self, device_name_or_function):
# pylint: disable=line-too-long
"""Returns a context manager that specifies the default device to use.
The `device_name_or_function` argument may either be a device name
string, a device function, or None:
* If it is a device name string, all operations constructed in
this context will be assigned to the device with that name, unless
overridden by a nested `device()` context.
* If it is a function, it will be treated as a function from
Operation objects to device name strings, and invoked each time
a new Operation is created. The Operation will be assigned to
the device with the returned name.
* If it is None, all `device()` invocations from the enclosing context
will be ignored.
For information about the valid syntax of device name strings, see
the documentation in
[`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).
For example:
```python
with g.device('/device:GPU:0'):
# All operations constructed in this context will be placed
# on GPU 0.
with g.device(None):
# All operations constructed in this context will have no
# assigned device.
# Defines a function from `Operation` to device string.
def matmul_on_gpu(n):
if n.type == "MatMul":
return "/device:GPU:0"
else:
return "/cpu:0"
with g.device(matmul_on_gpu):
# All operations of type "MatMul" constructed in this context
# will be placed on GPU 0; all other operations will be placed
# on CPU 0.
```
**N.B.** The device scope may be overridden by op wrappers or
other library code. For example, a variable assignment op
`v.assign()` must be colocated with the `tf.Variable` `v`, and
incompatible device scopes will be ignored.
Args:
device_name_or_function: The device name or function to use in the
context.
Yields:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If device scopes are not properly nested.
"""
self._add_device_to_stack(device_name_or_function, offset=2)
old_top_of_stack = self._device_function_stack.peek_top_obj()
try:
yield
finally:
new_top_of_stack = self._device_function_stack.peek_top_obj()
if old_top_of_stack is not new_top_of_stack:
raise RuntimeError("Exiting device scope without proper scope nesting.")
self._device_function_stack.pop_obj()
def _apply_device_functions(self, op):
"""Applies the current device function stack to the given operation."""
# Apply any device functions in LIFO order, so that the most recently
# pushed function has the first chance to apply a device to the op.
# We apply here because the result can depend on the Operation's
# signature, which is computed in the Operation constructor.
# pylint: disable=protected-access
prior_device_string = None
for device_spec in self._device_function_stack.peek_objs():
if device_spec.is_null_merge:
continue
if device_spec.function is None:
break
device_string = device_spec.string_merge(op)
# Take advantage of the fact that None is a singleton and Python interns
# strings, since identity checks are faster than equality checks.
if device_string is not prior_device_string:
op._set_device_from_string(device_string)
prior_device_string = device_string
op._device_code_locations = self._snapshot_device_function_stack_metadata()
# pylint: enable=protected-access
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def container(self, container_name):
"""Returns a context manager that specifies the resource container to use.
Stateful operations, such as variables and queues, can maintain their
states on devices so that they can be shared by multiple processes.
A resource container is a string name under which these stateful
operations are tracked. These resources can be released or cleared
with `tf.Session.reset()`.
For example:
```python
with g.container('experiment0'):
# All stateful Operations constructed in this context will be placed
# in resource container "experiment0".
v1 = tf.Variable([1.0])
v2 = tf.Variable([2.0])
with g.container("experiment1"):
# All stateful Operations constructed in this context will be
# placed in resource container "experiment1".
v3 = tf.Variable([3.0])
q1 = tf.queue.FIFOQueue(10, tf.float32)
      # All stateful Operations constructed in this context will
      # be created in resource container "experiment0".
v4 = tf.Variable([4.0])
q1 = tf.queue.FIFOQueue(20, tf.float32)
with g.container(""):
        # All stateful Operations constructed in this context will
        # be placed in the default resource container.
v5 = tf.Variable([5.0])
q3 = tf.queue.FIFOQueue(30, tf.float32)
# Resets container "experiment0", after which the state of v1, v2, v4, q1
# will become undefined (such as uninitialized).
tf.Session.reset(target, ["experiment0"])
```
Args:
container_name: container name string.
Returns:
A context manager for defining resource containers for stateful ops,
yields the container name.
"""
original_container = self._container
self._container = container_name
try:
yield self._container
finally:
self._container = original_container
# pylint: enable=g-doc-return-or-yield
class _ControlDependenciesController(object):
"""Context manager for `control_dependencies()`."""
def __init__(self, graph, control_inputs):
"""Create a new `_ControlDependenciesController`.
A `_ControlDependenciesController` is the context manager for
`with tf.control_dependencies()` blocks. These normally nest,
as described in the documentation for `control_dependencies()`.
The `control_inputs` argument list control dependencies that must be
added to the current set of control dependencies. Because of
uniquification the set can be empty even if the caller passed a list of
ops. The special value `None` indicates that we want to start a new
empty set of control dependencies instead of extending the current set.
In that case we also clear the current control flow context, which is an
additional mechanism to add control dependencies.
Args:
graph: The graph that this controller is managing.
control_inputs: List of ops to use as control inputs in addition to the
current control dependencies. None to indicate that the dependencies
should be cleared.
"""
self._graph = graph
if control_inputs is None:
self._control_inputs_val = []
self._new_stack = True
else:
self._control_inputs_val = control_inputs
self._new_stack = False
self._seen_nodes = set()
self._old_stack = None
self._old_control_flow_context = None
# pylint: disable=protected-access
def __enter__(self):
if self._new_stack:
# Clear the control_dependencies graph.
self._old_stack = self._graph._control_dependencies_stack
self._graph._control_dependencies_stack = []
# Clear the control_flow_context too.
self._old_control_flow_context = self._graph._get_control_flow_context()
self._graph._set_control_flow_context(None)
self._graph._push_control_dependencies_controller(self)
def __exit__(self, unused_type, unused_value, unused_traceback):
self._graph._pop_control_dependencies_controller(self)
if self._new_stack:
self._graph._control_dependencies_stack = self._old_stack
self._graph._set_control_flow_context(self._old_control_flow_context)
# pylint: enable=protected-access
@property
def control_inputs(self):
return self._control_inputs_val
def add_op(self, op):
if isinstance(op, Tensor):
op = op.ref()
self._seen_nodes.add(op)
def op_in_group(self, op):
if isinstance(op, Tensor):
op = op.ref()
return op in self._seen_nodes
def _push_control_dependencies_controller(self, controller):
self._control_dependencies_stack.append(controller)
def _pop_control_dependencies_controller(self, controller):
assert self._control_dependencies_stack[-1] is controller
self._control_dependencies_stack.pop()
def _current_control_dependencies(self):
ret = set()
for controller in self._control_dependencies_stack:
for op in controller.control_inputs:
ret.add(op)
return ret
def _control_dependencies_for_inputs(self, input_ops):
"""For an op that takes `input_ops` as inputs, compute control inputs.
The returned control dependencies should yield an execution that
is equivalent to adding all control inputs in
self._control_dependencies_stack to a newly created op. However,
this function attempts to prune the returned control dependencies
by observing that nodes created within the same `with
control_dependencies(...):` block may have data dependencies that make
the explicit approach redundant.
Args:
input_ops: The data input ops for an op to be created.
Returns:
A list of control inputs for the op to be created.
"""
ret = []
for controller in self._control_dependencies_stack:
# If any of the input_ops already depends on the inputs from controller,
# we say that the new op is dominated (by that input), and we therefore
# do not need to add control dependencies for this controller's inputs.
dominated = False
for op in input_ops:
if controller.op_in_group(op):
dominated = True
break
if not dominated:
        # Don't add a control input if we already have a data dependency on it.
# NOTE(mrry): We do not currently track transitive data dependencies,
# so we may add redundant control inputs.
ret.extend(c for c in controller.control_inputs if c not in input_ops)
return ret
def _record_op_seen_by_control_dependencies(self, op):
"""Record that the given op depends on all registered control dependencies.
Args:
op: An Operation.
"""
for controller in self._control_dependencies_stack:
controller.add_op(op)
def control_dependencies(self, control_inputs):
"""Returns a context manager that specifies control dependencies.
Use with the `with` keyword to specify that all operations constructed
within the context should have control dependencies on
`control_inputs`. For example:
```python
with g.control_dependencies([a, b, c]):
# `d` and `e` will only run after `a`, `b`, and `c` have executed.
d = ...
e = ...
```
Multiple calls to `control_dependencies()` can be nested, and in
that case a new `Operation` will have control dependencies on the union
of `control_inputs` from all active contexts.
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `a`, `b`, `c`, and `d`.
```
You can pass None to clear the control dependencies:
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies(None):
# Ops constructed here run normally, not waiting for either `a` or `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `c` and `d`, also not waiting
# for either `a` or `b`.
```
*N.B.* The control dependencies context applies *only* to ops that
are constructed within the context. Merely using an op or tensor
in the context does not add a control dependency. The following
example illustrates this point:
```python
# WRONG
def my_func(pred, tensor):
t = tf.matmul(tensor, tensor)
with tf.control_dependencies([pred]):
# The matmul op is created outside the context, so no control
# dependency will be added.
return t
# RIGHT
def my_func(pred, tensor):
with tf.control_dependencies([pred]):
# The matmul op is created in the context, so a control dependency
# will be added.
return tf.matmul(tensor, tensor)
```
Also note that though execution of ops created under this scope will trigger
execution of the dependencies, the ops created under this scope might still
be pruned from a normal tensorflow graph. For example, in the following
snippet of code the dependencies are never executed:
```python
loss = model.loss()
with tf.control_dependencies(dependencies):
loss = loss + tf.constant(1) # note: dependencies ignored in the
# backward pass
return tf.gradients(loss, model.variables)
```
This is because evaluating the gradient graph does not require evaluating
the constant(1) op created in the forward pass.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which must be
executed or computed before running the operations defined in the
context. Can also be `None` to clear the control dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
Raises:
TypeError: If `control_inputs` is not a list of `Operation` or
`Tensor` objects.
"""
if control_inputs is None:
return self._ControlDependenciesController(self, None)
# First convert the inputs to ops, and deduplicate them.
# NOTE(mrry): Other than deduplication, we do not currently track direct
# or indirect dependencies between control_inputs, which may result in
# redundant control inputs.
control_ops = []
current = self._current_control_dependencies()
for c in control_inputs:
      # The hasattr(c, "_handle") check is designed to match ResourceVariables.
      # This is so control dependencies on a variable or on an unread variable
      # don't trigger reads.
if (isinstance(c, IndexedSlices) or
(hasattr(c, "_handle") and hasattr(c, "op"))):
c = c.op
c = self.as_graph_element(c)
if isinstance(c, Tensor):
c = c.op
elif not isinstance(c, Operation):
raise TypeError("Control input must be Operation or Tensor: %s" % c)
if c not in current:
control_ops.append(c)
current.add(c)
return self._ControlDependenciesController(self, control_ops)
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _attr_scope(self, attr_map):
"""EXPERIMENTAL: A context manager for setting attributes on operators.
This context manager can be used to add additional
attributes to operators within the scope of the context.
For example:
       with ops.Graph().as_default() as g:
         f_1 = Foo()  # No extra attributes
         with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=False)}):
           f_2 = Foo()  # Additional attribute _a=False
           with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=True)}):
             f_3 = Foo()  # Additional attribute _a=True
             with g._attr_scope({"_a": None}):
               f_4 = Foo()  # No additional attributes.
Args:
attr_map: A dictionary mapping attr name strings to AttrValue protocol
buffers or None.
Returns:
      A context manager that sets the additional attributes for one or more
      ops created in that context.
Raises:
TypeError: If attr_map is not a dictionary mapping
strings to AttrValue protobufs.
"""
if not isinstance(attr_map, dict):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers")
# The saved_attrs dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_attrs = {}
# Install the given attribute
for name, attr in attr_map.items():
if not (isinstance(name, six.string_types) and
(isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or
callable(attr))):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers or "
"callables that emit AttrValue protocol buffers")
try:
saved_attrs[name] = self._attr_scope_map[name]
except KeyError:
pass
if attr is None:
del self._attr_scope_map[name]
else:
self._attr_scope_map[name] = attr
try:
yield # The code within the context runs here.
finally:
# Remove the attributes set for this context, and restore any saved
# attributes.
for name, attr in attr_map.items():
try:
self._attr_scope_map[name] = saved_attrs[name]
except KeyError:
del self._attr_scope_map[name]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _kernel_label_map(self, op_to_kernel_label_map):
"""EXPERIMENTAL: A context manager for setting kernel labels.
This context manager can be used to select particular
implementations of kernels within the scope of the context.
For example:
        with ops.Graph().as_default() as g:
          f_1 = Foo()  # Uses the default registered kernel for the Foo op.
          with g._kernel_label_map({"Foo": "v_2"}):
            f_2 = Foo()  # Uses the registered kernel with label "v_2"
                         # for the Foo op.
            with g._kernel_label_map({"Foo": "v_3"}):
              f_3 = Foo()  # Uses the registered kernel with label "v_3"
                           # for the Foo op.
              with g._kernel_label_map({"Foo": ""}):
                f_4 = Foo()  # Uses the default registered kernel
                             # for the Foo op.
Args:
op_to_kernel_label_map: A dictionary mapping op type strings to kernel
label strings.
Returns:
A context manager that sets the kernel label to be used for one or more
ops created in that context.
Raises:
TypeError: If op_to_kernel_label_map is not a dictionary mapping
strings to strings.
"""
if not isinstance(op_to_kernel_label_map, dict):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
# The saved_labels dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_labels = {}
# Install the given label
for op_type, label in op_to_kernel_label_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(label, six.string_types)):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
try:
saved_labels[op_type] = self._op_to_kernel_label_map[op_type]
except KeyError:
pass
self._op_to_kernel_label_map[op_type] = label
try:
yield # The code within the context runs here.
finally:
# Remove the labels set for this context, and restore any saved labels.
for op_type, label in op_to_kernel_label_map.items():
try:
self._op_to_kernel_label_map[op_type] = saved_labels[op_type]
except KeyError:
del self._op_to_kernel_label_map[op_type]
# pylint: enable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _override_gradient_function(self, gradient_function_map):
"""Specify gradient function for the given op type."""
# This is an internal API and we don't need nested context for this.
assert not self._gradient_function_map
self._gradient_function_map = gradient_function_map
yield
self._gradient_function_map = {}
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def gradient_override_map(self, op_type_map):
"""EXPERIMENTAL: A context manager for overriding gradient functions.
This context manager can be used to override the gradient function
that will be used for ops within the scope of the context.
For example:
```python
@tf.RegisterGradient("CustomSquare")
def _custom_square_grad(op, grad):
# ...
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
s_1 = tf.square(c) # Uses the default gradient for tf.square.
with g.gradient_override_map({"Square": "CustomSquare"}):
        s_2 = tf.square(c)  # Uses _custom_square_grad to compute the
                            # gradient of s_2.
```
Args:
op_type_map: A dictionary mapping op type strings to alternative op type
strings.
Returns:
A context manager that sets the alternative op type to be used for one
or more ops created in that context.
Raises:
TypeError: If `op_type_map` is not a dictionary mapping strings to
strings.
"""
if not isinstance(op_type_map, dict):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
# The saved_mappings dictionary stores any currently-set mappings that
# will be overridden by this context manager.
saved_mappings = {}
# Install the given label
for op_type, mapped_op_type in op_type_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(mapped_op_type, six.string_types)):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
try:
saved_mappings[op_type] = self._gradient_override_map[op_type]
except KeyError:
pass
self._gradient_override_map[op_type] = mapped_op_type
try:
yield # The code within the context runs here.
finally:
# Remove the labels set for this context, and restore any saved labels.
for op_type, mapped_op_type in op_type_map.items():
try:
self._gradient_override_map[op_type] = saved_mappings[op_type]
except KeyError:
del self._gradient_override_map[op_type]
# pylint: enable=g-doc-return-or-yield
def prevent_feeding(self, tensor):
"""Marks the given `tensor` as unfeedable in this graph."""
self._unfeedable_tensors.add(tensor)
def is_feedable(self, tensor):
"""Returns `True` if and only if `tensor` is feedable."""
return tensor not in self._unfeedable_tensors
def prevent_fetching(self, op):
"""Marks the given `op` as unfetchable in this graph."""
self._unfetchable_ops.add(op)
def is_fetchable(self, tensor_or_op):
"""Returns `True` if and only if `tensor_or_op` is fetchable."""
if isinstance(tensor_or_op, Tensor):
return tensor_or_op.op not in self._unfetchable_ops
else:
return tensor_or_op not in self._unfetchable_ops
def switch_to_thread_local(self):
"""Make device, colocation and dependencies stacks thread-local.
    Device, colocation and dependencies stacks are not thread-local by default.
If multiple threads access them, then the state is shared. This means that
one thread may affect the behavior of another thread.
After this method is called, the stacks become thread-local. If multiple
threads access them, then the state is not shared. Each thread uses its own
value; a thread doesn't affect other threads by mutating such a stack.
The initial value for every thread's stack is set to the current value
of the stack when `switch_to_thread_local()` was first called.
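    For example, a minimal illustrative sketch (assuming TensorFlow is imported
    as `tf`):
    ```python
    g = tf.Graph()
    g.switch_to_thread_local()
    # From this point on, each thread that builds ops in `g` sees its own
    # device, colocation and control-dependency stacks, seeded from the values
    # that were current when switch_to_thread_local() was called.
    ```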
"""
if not self._stack_state_is_thread_local:
self._stack_state_is_thread_local = True
@property
def _device_function_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where device_function_stack doesn't yet
# exist.
# pylint: disable=protected-access
if not hasattr(self._thread_local, "_device_function_stack"):
stack_copy_for_this_thread = self._graph_device_function_stack.copy()
self._thread_local._device_function_stack = stack_copy_for_this_thread
return self._thread_local._device_function_stack
# pylint: enable=protected-access
else:
return self._graph_device_function_stack
@property
def _device_functions_outer_to_inner(self):
user_device_specs = self._device_function_stack.peek_objs()
device_functions = [spec.function for spec in user_device_specs]
device_functions_outer_to_inner = list(reversed(device_functions))
return device_functions_outer_to_inner
def _snapshot_device_function_stack_metadata(self):
"""Return device function stack as a list of TraceableObjects.
Returns:
[traceable_stack.TraceableObject, ...] where each TraceableObject's .obj
member is a displayable name for the user's argument to Graph.device, and
the filename and lineno members point to the code location where
Graph.device was called directly or indirectly by the user.
"""
snapshot = []
for obj in self._device_function_stack.peek_traceable_objs():
obj_copy = obj.copy_metadata()
obj_copy.obj = obj.obj.display_name
snapshot.append(obj_copy)
return snapshot
@_device_function_stack.setter
def _device_function_stack(self, device_function_stack):
if self._stack_state_is_thread_local:
# pylint: disable=protected-access
self._thread_local._device_function_stack = device_function_stack
# pylint: enable=protected-access
else:
self._graph_device_function_stack = device_function_stack
@property
def _colocation_stack(self):
"""Return thread-local copy of colocation stack."""
if self._stack_state_is_thread_local:
# This may be called from a thread where colocation_stack doesn't yet
# exist.
# pylint: disable=protected-access
if not hasattr(self._thread_local, "_colocation_stack"):
stack_copy_for_this_thread = self._graph_colocation_stack.copy()
self._thread_local._colocation_stack = stack_copy_for_this_thread
return self._thread_local._colocation_stack
# pylint: enable=protected-access
else:
return self._graph_colocation_stack
def _snapshot_colocation_stack_metadata(self):
"""Return colocation stack metadata as a dictionary."""
return {
traceable_obj.obj.name: traceable_obj.copy_metadata()
for traceable_obj in self._colocation_stack.peek_traceable_objs()
}
@_colocation_stack.setter
def _colocation_stack(self, colocation_stack):
if self._stack_state_is_thread_local:
# pylint: disable=protected-access
self._thread_local._colocation_stack = colocation_stack
# pylint: enable=protected-access
else:
self._graph_colocation_stack = colocation_stack
@property
def _control_dependencies_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where control_dependencies_stack
# doesn't yet exist.
if not hasattr(self._thread_local, "_control_dependencies_stack"):
self._thread_local._control_dependencies_stack = (
self._graph_control_dependencies_stack[:])
return self._thread_local._control_dependencies_stack
else:
return self._graph_control_dependencies_stack
@_control_dependencies_stack.setter
def _control_dependencies_stack(self, control_dependencies):
if self._stack_state_is_thread_local:
self._thread_local._control_dependencies_stack = control_dependencies
else:
self._graph_control_dependencies_stack = control_dependencies
@property
def _distribution_strategy_stack(self):
"""A stack to maintain distribution strategy context for each thread."""
if not hasattr(self._thread_local, "_distribution_strategy_stack"):
self._thread_local._distribution_strategy_stack = [] # pylint: disable=protected-access
return self._thread_local._distribution_strategy_stack # pylint: disable=protected-access
@_distribution_strategy_stack.setter
def _distribution_strategy_stack(self, _distribution_strategy_stack):
self._thread_local._distribution_strategy_stack = ( # pylint: disable=protected-access
_distribution_strategy_stack)
@property
def _global_distribute_strategy_scope(self):
"""For implementing `tf.distribute.set_strategy()`."""
if not hasattr(self._thread_local, "distribute_strategy_scope"):
self._thread_local.distribute_strategy_scope = None
return self._thread_local.distribute_strategy_scope
@_global_distribute_strategy_scope.setter
def _global_distribute_strategy_scope(self, distribute_strategy_scope):
self._thread_local.distribute_strategy_scope = (distribute_strategy_scope)
def _mutation_lock(self):
"""Returns a lock to guard code that creates & mutates ops.
See the comment for self._group_lock for more info.
"""
return self._group_lock.group(_MUTATION_LOCK_GROUP)
def _session_run_lock(self):
"""Returns a lock to guard code for Session.run.
See the comment for self._group_lock for more info.
"""
return self._group_lock.group(_SESSION_RUN_LOCK_GROUP)
# TODO(agarwal): currently device directives in an outer eager scope will not
# apply to inner graph mode code. Fix that.
@tf_export(v1=["device"])
def device(device_name_or_function):
"""Wrapper for `Graph.device()` using the default graph.
See `tf.Graph.device` for more details.
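  For example, a minimal illustrative sketch (assuming graph mode and the
  `tf.compat.v1` endpoint):
  ```python
  with tf.Graph().as_default():
    with tf.compat.v1.device("/cpu:0"):
      c = tf.constant(1.0)  # placed on the CPU device of the default graph
  ```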
Args:
device_name_or_function: The device name or function to use in the context.
Returns:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If eager execution is enabled and a function is passed in.
"""
if context.executing_eagerly():
if callable(device_name_or_function):
raise RuntimeError(
"tf.device does not support functions when eager execution "
"is enabled.")
return context.device(device_name_or_function)
elif executing_eagerly_outside_functions():
@tf_contextlib.contextmanager
def combined(device_name_or_function):
with get_default_graph().device(device_name_or_function):
if not callable(device_name_or_function):
with context.device(device_name_or_function):
yield
else:
yield
return combined(device_name_or_function)
else:
return get_default_graph().device(device_name_or_function)
@tf_export("device", v1=[])
def device_v2(device_name):
"""Specifies the device for ops created/executed in this context.
This function specifies the device to be used for ops created/executed in a
particular context. Nested contexts will inherit and also create/execute
their ops on the specified device. If a specific device is not required,
consider not using this function so that a device can be automatically
assigned. In general the use of this function is optional. `device_name` can
be fully specified, as in "/job:worker/task:1/device:cpu:0", or partially
specified, containing only a subset of the "/"-separated fields. Any fields
which are specified will override device annotations from outer scopes.
For example:
```python
with tf.device('/job:foo'):
# ops created here have devices with /job:foo
with tf.device('/job:bar/task:0/device:gpu:2'):
# ops created here have the fully specified device above
with tf.device('/device:gpu:1'):
# ops created here have the device '/job:foo/device:gpu:1'
```
Args:
device_name: The device name to use in the context.
Returns:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If a function is passed in.
"""
if callable(device_name):
raise RuntimeError("tf.device does not support functions.")
return device(device_name)
@tf_export(v1=["container"])
def container(container_name):
"""Wrapper for `Graph.container()` using the default graph.
Args:
container_name: The container string to use in the context.
Returns:
A context manager that specifies the default container to use for newly
created stateful ops.
"""
return get_default_graph().container(container_name)
def _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False):
if context.executing_eagerly():
if op is not None:
if not hasattr(op, "device"):
op = internal_convert_to_tensor_or_indexed_slices(op)
return device(op.device)
else:
return NullContextmanager()
else:
default_graph = get_default_graph()
if isinstance(op, EagerTensor):
if default_graph.building_function:
return default_graph.device(op.device)
else:
raise ValueError("Encountered an Eager-defined Tensor during graph "
"construction, but a function was not being built.")
return default_graph._colocate_with_for_gradient(
op, gradient_uid=gradient_uid, ignore_existing=ignore_existing)
# Internal interface to colocate_with. colocate_with has been deprecated from
# public API. There are still a few internal uses of colocate_with. Add internal
# only API for those uses to avoid deprecation warning.
def colocate_with(op, ignore_existing=False):
return _colocate_with_for_gradient(op, None, ignore_existing=ignore_existing)
@deprecation.deprecated(
date=None, instructions="Colocations handled automatically by placer.")
@tf_export(v1=["colocate_with"])
def _colocate_with(op, ignore_existing=False):
return colocate_with(op, ignore_existing)
@tf_export("control_dependencies")
def control_dependencies(control_inputs):
"""Wrapper for `Graph.control_dependencies()` using the default graph.
See `tf.Graph.control_dependencies`
for more details.
Note: *In TensorFlow 2 with eager and/or Autograph, you should not require
this method, as code executes in the expected order.* Only use
`tf.control_dependencies` when working with v1-style code or in a graph
context such as inside `Dataset.map`.
When eager execution is enabled, any callable object in the `control_inputs`
list will be called.
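  For example, a minimal illustrative sketch of the eager behavior described
  above (callables are simply invoked):
  ```python
  calls = []
  with tf.control_dependencies([lambda: calls.append("ran")]):
    pass
  assert calls == ["ran"]
  ```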
Args:
control_inputs: A list of `Operation` or `Tensor` objects which must be
executed or computed before running the operations defined in the context.
Can also be `None` to clear the control dependencies. If eager execution
is enabled, any callable object in the `control_inputs` list will be
called.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
"""
if context.executing_eagerly():
if control_inputs:
# Execute any pending callables.
for control in control_inputs:
if callable(control):
control()
return NullContextmanager()
else:
return get_default_graph().control_dependencies(control_inputs)
class _DefaultStack(threading.local):
"""A thread-local stack of objects for providing implicit defaults."""
def __init__(self):
super(_DefaultStack, self).__init__()
self._enforce_nesting = True
self.stack = []
def get_default(self):
return self.stack[-1] if self.stack else None
def reset(self):
self.stack = []
def is_cleared(self):
return not self.stack
@property
def enforce_nesting(self):
return self._enforce_nesting
@enforce_nesting.setter
def enforce_nesting(self, value):
self._enforce_nesting = value
@tf_contextlib.contextmanager
def get_controller(self, default):
"""A context manager for manipulating a default stack."""
self.stack.append(default)
try:
yield default
finally:
# stack may be empty if reset() was called
if self.stack:
if self._enforce_nesting:
if self.stack[-1] is not default:
raise AssertionError(
"Nesting violated for default stack of %s objects" %
type(default))
self.stack.pop()
else:
self.stack.remove(default)
_default_session_stack = _DefaultStack() # pylint: disable=protected-access
def default_session(session):
"""Python "with" handler for defining a default session.
This function provides a means of registering a session for handling
Tensor.eval() and Operation.run() calls. It is primarily intended for use
by session.Session, but can be used with any object that implements
the Session.run() interface.
Use with the "with" keyword to specify that Tensor.eval() and Operation.run()
invocations within the scope of a block should be executed by a particular
session.
The default session applies to the current thread only, so it is always
possible to inspect the call stack and determine the scope of a default
session. If you create a new thread, and wish to use the default session
in that thread, you must explicitly add a "with ops.default_session(sess):"
block in that thread's function.
Example:
The following code examples are equivalent:
# 1. Using the Session object directly:
sess = ...
c = tf.constant(5.0)
sess.run(c)
# 2. Using default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
result = c.eval()
# 3. Overriding default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
with ops.default_session(...):
c.eval(session=sess)
Args:
session: The session to be installed as the default session.
Returns:
A context manager for the default session.
"""
return _default_session_stack.get_controller(session)
@tf_export(v1=["get_default_session"])
def get_default_session():
"""Returns the default session for the current thread.
The returned `Session` will be the innermost session on which a
`Session` or `Session.as_default()` context has been entered.
NOTE: The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
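  For example, a minimal illustrative sketch (assuming no enclosing default
  session):
  ```python
  with tf.compat.v1.Session() as sess:
    assert tf.compat.v1.get_default_session() is sess
  assert tf.compat.v1.get_default_session() is None
  ```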
Returns:
The default `Session` being used in the current thread.
"""
return _default_session_stack.get_default()
def _eval_using_default_session(tensors, feed_dict, graph, session=None):
"""Uses the default session to evaluate one or more tensors.
Args:
tensors: A single Tensor, or a list of Tensor objects.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which the tensors are defined.
session: (Optional) A different session to use to evaluate "tensors".
Returns:
Either a single numpy ndarray if "tensors" is a single tensor; or a list
of numpy ndarrays that each correspond to the respective element in
"tensors".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot evaluate tensor using `eval()`: No default "
"session is registered. Use `with "
"sess.as_default()` or pass an explicit session to "
"`eval(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph. Pass an explicit session to "
"`eval(session=sess)`.")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph.")
return session.run(tensors, feed_dict)
def _run_using_default_session(operation, feed_dict, graph, session=None):
"""Uses the default session to run "operation".
Args:
operation: The Operation to be run.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which "operation" is defined.
session: (Optional) A different session to use to run "operation".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot execute operation using `run()`: No default "
"session is registered. Use `with "
"sess.as_default():` or pass an explicit session to "
"`run(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to execute operation: "
"the operation's graph is different from the "
"session's graph. Pass an explicit session to "
"run(session=sess).")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to execute operation: "
"the operation's graph is different from the session's "
"graph.")
session.run(operation, feed_dict)
class _DefaultGraphStack(_DefaultStack): # pylint: disable=protected-access
"""A thread-local stack of objects for providing an implicit default graph."""
def __init__(self):
super(_DefaultGraphStack, self).__init__()
self._global_default_graph = None
def get_default(self):
"""Override that returns a global default if the stack is empty."""
if self.stack:
return self.stack[-1]
elif self._global_default_graph:
return self._global_default_graph
else:
self._global_default_graph = Graph()
return self._global_default_graph
def _GetGlobalDefaultGraph(self):
if self._global_default_graph is None:
      # TODO(mrry): Perhaps log that the default graph is being used, or
# provide some other feedback to prevent confusion when a mixture of
# the global default graph and an explicit graph are combined in the
# same process.
self._global_default_graph = Graph()
return self._global_default_graph
def reset(self):
super(_DefaultGraphStack, self).reset()
self._global_default_graph = None
@tf_contextlib.contextmanager
def get_controller(self, default):
context.context().context_switches.push(default.building_function,
default.as_default,
default._device_function_stack)
try:
with super(_DefaultGraphStack,
self).get_controller(default) as g, context.graph_mode():
yield g
finally:
# If an exception is raised here it may be hiding a related exception in
# the try-block (just above).
context.context().context_switches.pop()
_default_graph_stack = _DefaultGraphStack()
# Shared helper used in init_scope and executing_eagerly_outside_functions
# to obtain the outermost context that is not building a function, and the
# innermost non empty device stack.
def _get_outer_context_and_inner_device_stack():
"""Get the outermost context not building a function."""
default_graph = get_default_graph()
outer_context = None
innermost_nonempty_device_stack = default_graph._device_function_stack # pylint: disable=protected-access
if not _default_graph_stack.stack:
# If the default graph stack is empty, then we cannot be building a
# function. Install the global graph (which, in this case, is also the
# default graph) as the outer context.
if default_graph.building_function:
raise RuntimeError("The global graph is building a function.")
outer_context = default_graph.as_default
else:
# Find a context that is not building a function.
for stack_entry in reversed(context.context().context_switches.stack):
if not innermost_nonempty_device_stack:
innermost_nonempty_device_stack = stack_entry.device_stack
if not stack_entry.is_building_function:
outer_context = stack_entry.enter_context_fn
break
if outer_context is None:
# As a last resort, obtain the global default graph; this graph doesn't
# necessarily live on the graph stack (and hence it doesn't necessarily
# live on the context stack), but it is stored in the graph stack's
# encapsulating object.
outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default # pylint: disable=protected-access
if outer_context is None:
# Sanity check; this shouldn't be triggered.
raise RuntimeError("All graphs are building functions, and no "
"eager context was previously active.")
return outer_context, innermost_nonempty_device_stack
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_export("init_scope")
@tf_contextlib.contextmanager
def init_scope():
"""A context manager that lifts ops out of control-flow scopes and function-building graphs.
There is often a need to lift variable initialization ops out of control-flow
scopes, function-building graphs, and gradient tapes. Entering an
`init_scope` is a mechanism for satisfying these desiderata. In particular,
entering an `init_scope` has three effects:
(1) All control dependencies are cleared the moment the scope is entered;
this is equivalent to entering the context manager returned from
`control_dependencies(None)`, which has the side-effect of exiting
control-flow scopes like `tf.cond` and `tf.while_loop`.
(2) All operations that are created while the scope is active are lifted
into the lowest context on the `context_stack` that is not building a
graph function. Here, a context is defined as either a graph or an eager
context. Every context switch, i.e., every installation of a graph as
the default graph and every switch into eager mode, is logged in a
thread-local stack called `context_switches`; the log entry for a
context switch is popped from the stack when the context is exited.
Entering an `init_scope` is equivalent to crawling up
`context_switches`, finding the first context that is not building a
graph function, and entering it. A caveat is that if graph mode is
enabled but the default graph stack is empty, then entering an
`init_scope` will simply install a fresh graph as the default one.
(3) The gradient tape is paused while the scope is active.
When eager execution is enabled, code inside an init_scope block runs with
eager execution enabled even when tracing a `tf.function`. For example:
```python
tf.compat.v1.enable_eager_execution()
@tf.function
def func():
# A function constructs TensorFlow graphs,
# it does not execute eagerly.
assert not tf.executing_eagerly()
with tf.init_scope():
# Initialization runs with eager execution enabled
assert tf.executing_eagerly()
```
Raises:
RuntimeError: if graph state is incompatible with this initialization.
"""
# pylint: enable=g-doc-return-or-yield,line-too-long
if context.executing_eagerly():
# Fastpath.
with tape.stop_recording():
yield
else:
# Retrieve the active name scope: entering an `init_scope` preserves
# the name scope of the current context.
scope = get_default_graph().get_name_scope()
if scope and scope[-1] != "/":
# Names that end with trailing slashes are treated by `name_scope` as
# absolute.
scope = scope + "/"
outer_context, innermost_nonempty_device_stack = (
_get_outer_context_and_inner_device_stack())
outer_graph = None
outer_device_stack = None
try:
with outer_context(), name_scope(
scope, skip_on_eager=False), control_dependencies(
None), tape.stop_recording():
context_manager = NullContextmanager
context_manager_input = None
if not context.executing_eagerly():
# The device stack is preserved when lifting into a graph. Eager
# execution doesn't implement device stacks and in particular it
# doesn't support device functions, so in general it's not possible
# to do the same when lifting into the eager context.
outer_graph = get_default_graph()
outer_device_stack = outer_graph._device_function_stack # pylint: disable=protected-access
outer_graph._device_function_stack = innermost_nonempty_device_stack # pylint: disable=protected-access
elif innermost_nonempty_device_stack is not None:
for device_spec in innermost_nonempty_device_stack.peek_objs():
if device_spec.function is None:
break
if device_spec.raw_string:
context_manager = context.device
context_manager_input = device_spec.raw_string
break
# It is currently not possible to have a device function in V2,
# but in V1 we are unable to apply device functions in eager mode.
# This means that we will silently skip some of the entries on the
# device stack in V1 + eager mode.
with context_manager(context_manager_input):
yield
finally:
# If an exception is raised here it may be hiding a related exception in
# try-block (just above).
if outer_graph is not None:
outer_graph._device_function_stack = outer_device_stack # pylint: disable=protected-access
@tf_export(v1=["executing_eagerly_outside_functions"])
def executing_eagerly_outside_functions():
"""Returns True if executing eagerly, even if inside a graph function.
This function will check the outermost context for the program and see if
  it is in eager mode. It is useful for comparing with `tf.executing_eagerly()`,
  which checks the current context and will return `False` within a
  `tf.function` body. It can be used to build libraries that behave differently
in eager runtime and v1 session runtime (deprecated).
Example:
>>> tf.compat.v1.enable_eager_execution()
>>> @tf.function
... def func():
... # A function constructs TensorFlow graphs, it does not execute eagerly,
... # but the outer most context is still eager.
... assert not tf.executing_eagerly()
... return tf.compat.v1.executing_eagerly_outside_functions()
>>> func()
<tf.Tensor: shape=(), dtype=bool, numpy=True>
Returns:
boolean, whether the outermost context is in eager mode.
"""
if context.executing_eagerly():
return True
else:
outer_context, _ = _get_outer_context_and_inner_device_stack()
with outer_context():
return context.executing_eagerly()
@tf_export("inside_function", v1=[])
def inside_function():
"""Indicates whether the caller code is executing inside a `tf.function`.
Returns:
Boolean, True if the caller code is executing inside a `tf.function`
rather than eagerly.
Example:
>>> tf.inside_function()
False
>>> @tf.function
... def f():
... print(tf.inside_function())
>>> f()
True
"""
return get_default_graph().building_function
@tf_export(v1=["enable_eager_execution"])
def enable_eager_execution(config=None, device_policy=None,
execution_mode=None):
"""Enables eager execution for the lifetime of this program.
Eager execution provides an imperative interface to TensorFlow. With eager
execution enabled, TensorFlow functions execute operations immediately (as
opposed to adding to a graph to be executed later in a `tf.compat.v1.Session`)
and
return concrete values (as opposed to symbolic references to a node in a
computational graph).
For example:
```python
tf.compat.v1.enable_eager_execution()
# After eager execution is enabled, operations are executed as they are
# defined and Tensor objects hold concrete values, which can be accessed as
# numpy.ndarray`s through the numpy() method.
assert tf.multiply(6, 7).numpy() == 42
```
Eager execution cannot be enabled after TensorFlow APIs have been used to
create or execute graphs. It is typically recommended to invoke this function
at program startup and not in a library (as most libraries should be usable
both with and without eager execution).
Args:
config: (Optional.) A `tf.compat.v1.ConfigProto` to use to configure the
environment in which operations are executed. Note that
`tf.compat.v1.ConfigProto` is also used to configure graph execution (via
`tf.compat.v1.Session`) and many options within `tf.compat.v1.ConfigProto`
are not implemented (or are irrelevant) when eager execution is enabled.
device_policy: (Optional.) Policy controlling how operations requiring
inputs on a specific device (e.g., a GPU 0) handle inputs on a different
device (e.g. GPU 1 or CPU). When set to None, an appropriate value will
be picked automatically. The value picked may change between TensorFlow
releases.
Valid values:
- tf.contrib.eager.DEVICE_PLACEMENT_EXPLICIT: raises an error if the
placement is not correct.
- tf.contrib.eager.DEVICE_PLACEMENT_WARN: copies the tensors which are not
on the right device but logs a warning.
- tf.contrib.eager.DEVICE_PLACEMENT_SILENT: silently copies the tensors.
Note that this may hide performance problems as there is no notification
provided when operations are blocked on the tensor being copied between
devices.
- tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies
int32 tensors, raising errors on the other ones.
execution_mode: (Optional.) Policy controlling how operations dispatched are
actually executed. When set to None, an appropriate value will be picked
automatically. The value picked may change between TensorFlow releases.
Valid values:
- tf.contrib.eager.SYNC: executes each operation synchronously.
- tf.contrib.eager.ASYNC: executes each operation asynchronously. These
operations may return "non-ready" handles.
Raises:
ValueError: If eager execution is enabled after creating/executing a
TensorFlow graph, or if options provided conflict with a previous call
to this function.
"""
_api_usage_gauge.get_cell().set(True)
if context.default_execution_mode != context.EAGER_MODE:
return enable_eager_execution_internal(
config=config,
device_policy=device_policy,
execution_mode=execution_mode,
server_def=None)
@tf_export(v1=["disable_eager_execution"])
def disable_eager_execution():
"""Disables eager execution.
This function can only be called before any Graphs, Ops, or Tensors have been
created. It can be used at the beginning of the program for complex migration
projects from TensorFlow 1.x to 2.x.
"""
_api_usage_gauge.get_cell().set(False)
context.default_execution_mode = context.GRAPH_MODE
c = context.context_safe()
if c is not None:
c._thread_local_data.is_eager = False # pylint: disable=protected-access
def enable_eager_execution_internal(config=None,
device_policy=None,
execution_mode=None,
server_def=None):
"""Enables eager execution for the lifetime of this program.
Most of the doc string for enable_eager_execution is relevant here as well.
Args:
config: See enable_eager_execution doc string
device_policy: See enable_eager_execution doc string
execution_mode: See enable_eager_execution doc string
server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution on
remote devices. GrpcServers need to be started by creating an identical
server_def to this, and setting the appropriate task_indexes, so that the
servers can communicate. It will then be possible to execute operations on
remote devices.
Raises:
ValueError
"""
if config is not None and not isinstance(config, config_pb2.ConfigProto):
raise TypeError("config must be a tf.ConfigProto, but got %s" %
type(config))
if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT,
context.DEVICE_PLACEMENT_WARN,
context.DEVICE_PLACEMENT_SILENT,
context.DEVICE_PLACEMENT_SILENT_FOR_INT32):
raise ValueError(
"device_policy must be one of None, tf.contrib.eager.DEVICE_PLACEMENT_*"
)
if execution_mode not in (None, context.SYNC, context.ASYNC):
raise ValueError(
"execution_mode must be one of None, tf.contrib.eager.SYNC, "
"tf.contrib.eager.ASYNC")
if context.default_execution_mode == context.GRAPH_MODE:
graph_mode_has_been_used = (
_default_graph_stack._global_default_graph is not None) # pylint: disable=protected-access
if graph_mode_has_been_used:
raise ValueError(
"tf.enable_eager_execution must be called at program startup.")
context.default_execution_mode = context.EAGER_MODE
# pylint: disable=protected-access
with context._context_lock:
if context._context is None:
context._set_context_locked(context.Context(
config=config,
device_policy=device_policy,
execution_mode=execution_mode,
server_def=server_def))
elif ((config is not None and config is not context._context._config) or
(device_policy is not None and
device_policy is not context._context._device_policy) or
(execution_mode is not None and
execution_mode is not context._context._execution_mode)):
raise ValueError(
"Trying to change the options of an active eager"
" execution. Context config: %s, specified config:"
" %s. Context device policy: %s, specified device"
" policy: %s. Context execution mode: %s, "
" specified execution mode %s." %
(context._context._config, config, context._context._device_policy,
device_policy, context._context._execution_mode, execution_mode))
else:
# We already created everything, so update the thread local data.
context._context._thread_local_data.is_eager = True
# Monkey patch to get rid of an unnecessary conditional since the context is
# now initialized.
context.context = context.context_safe
def eager_run(main=None, argv=None):
"""Runs the program with an optional main function and argv list.
The program will run with eager execution enabled.
Example:
```python
import tensorflow as tf
# Import subject to future changes:
from tensorflow.contrib.eager.python import tfe
def main(_):
u = tf.constant(6.0)
v = tf.constant(7.0)
print(u * v)
if __name__ == "__main__":
tfe.run()
```
Args:
main: the main function to run.
argv: the arguments to pass to it.
"""
enable_eager_execution()
app.run(main, argv)
@tf_export(v1=["reset_default_graph"])
def reset_default_graph():
"""Clears the default graph stack and resets the global default graph.
NOTE: The default graph is a property of the current thread. This
function applies only to the current thread. Calling this function while
a `tf.compat.v1.Session` or `tf.compat.v1.InteractiveSession` is active will
result in undefined
behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects
after calling this function will result in undefined behavior.
Raises:
AssertionError: If this function is called within a nested graph.
"""
if not _default_graph_stack.is_cleared():
raise AssertionError("Do not use tf.reset_default_graph() to clear "
"nested graphs. If you need a cleared graph, "
"exit the nesting and create a new graph.")
_default_graph_stack.reset()
@tf_export(v1=["get_default_graph"])
def get_default_graph():
"""Returns the default graph for the current thread.
The returned graph will be the innermost graph on which a
`Graph.as_default()` context has been entered, or a global default
graph if none has been explicitly created.
NOTE: The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
Returns:
The default `Graph` being used in the current thread.
"""
return _default_graph_stack.get_default()
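# Illustrative sketch of the default-graph helpers above (TF1-style graph mode
# is assumed; `Graph` is the class defined earlier in this module).
#
#   g = get_default_graph()            # global default graph, created lazily
#   with Graph().as_default() as g2:
#     assert get_default_graph() is g2
#   assert get_default_graph() is g    # back to the global default
#   reset_default_graph()              # discards the global default graph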
def has_default_graph():
"""Returns True if there is a default graph."""
return len(_default_graph_stack.stack) >= 1
def get_name_scope():
"""Returns the current name scope in the default_graph.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
if context.executing_eagerly():
return context.context().scope_name.rstrip("/")
return get_default_graph().get_name_scope()
def _assert_same_graph(original_item, item):
"""Fail if the 2 items are from different graphs.
Args:
original_item: Original item to check against.
item: Item to check.
Raises:
ValueError: if graphs do not match.
"""
original_graph = getattr(original_item, "graph", None)
graph = getattr(item, "graph", None)
if original_graph and graph and original_graph is not graph:
raise ValueError(
"%s must be from the same graph as %s (graphs are %s and %s)." %
(item, original_item, graph, original_graph))
def _get_graph_from_inputs(op_input_list, graph=None):
"""Returns the appropriate graph to use for the given inputs.
This library method provides a consistent algorithm for choosing the graph
in which an Operation should be constructed:
1. If the default graph is being used to construct a function, we
use the default graph.
2. If the "graph" is specified explicitly, we validate that all of the inputs
in "op_input_list" are compatible with that graph.
3. Otherwise, we attempt to select a graph from the first Operation-
or Tensor-valued input in "op_input_list", and validate that all other
such inputs are in the same graph.
4. If the graph was not specified and it could not be inferred from
"op_input_list", we attempt to use the default graph.
Args:
op_input_list: A list of inputs to an operation, which may include `Tensor`,
`Operation`, and other objects that may be converted to a graph element.
graph: (Optional) The explicit graph to use.
Raises:
TypeError: If op_input_list is not a list or tuple, or if graph is not a
Graph.
ValueError: If a graph is explicitly passed and not all inputs are from it,
or if the inputs are from multiple graphs, or we could not find a graph
and there was no default graph.
Returns:
The appropriate graph to use for the given inputs.
"""
current_default_graph = get_default_graph()
if current_default_graph.building_function:
return current_default_graph
op_input_list = tuple(op_input_list) # Handle generators correctly
if graph and not isinstance(graph, Graph):
raise TypeError("Input graph needs to be a Graph: %s" % graph)
# 1. We validate that all of the inputs are from the same graph. This is
  #    either the supplied graph parameter, or the first one selected from
  #    one of the graph-element-valued inputs. In the latter case, we hold onto
# that input in original_graph_element so we can provide a more
# informative error if a mismatch is found.
original_graph_element = None
for op_input in op_input_list:
# Determine if this is a valid graph_element.
# TODO(josh11b): Note that we exclude subclasses of Tensor. Need to clean this
# up.
graph_element = None
if (isinstance(op_input, (Operation, internal.NativeObject)) and
((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)): # pylint: disable=unidiomatic-typecheck
graph_element = op_input
else:
graph_element = _as_graph_element(op_input)
if graph_element is not None:
if not graph:
original_graph_element = graph_element
graph = getattr(graph_element, "graph", None)
elif original_graph_element is not None:
_assert_same_graph(original_graph_element, graph_element)
elif graph_element.graph is not graph:
raise ValueError("%s is not from the passed-in graph." % graph_element)
# 2. If all else fails, we use the default graph, which is always there.
return graph or current_default_graph
@tf_export(v1=["GraphKeys"])
class GraphKeys(object):
"""Standard names to use for graph collections.
The standard library uses various well-known names to collect and
retrieve values associated with a graph. For example, the
`tf.Optimizer` subclasses default to optimizing the variables
collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is
specified, but it is also possible to pass an explicit list of
variables.
The following standard keys are defined:
* `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
across distributed environment (model variables are subset of these). See
`tf.compat.v1.global_variables`
for more details.
Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
* `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each
    machine. Usually used for temporary variables, like counters.
Note: use `tf.contrib.framework.local_variable` to add to this collection.
* `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the
model for inference (feed forward). Note: use
`tf.contrib.framework.model_variable` to add to this collection.
* `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
be trained by an optimizer. See
`tf.compat.v1.trainable_variables`
for more details.
* `SUMMARIES`: the summary `Tensor` objects that have been created in the
graph. See
`tf.compat.v1.summary.merge_all`
for more details.
* `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to
produce input for a computation. See
`tf.compat.v1.train.start_queue_runners`
for more details.
* `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also
keep moving averages. See
`tf.compat.v1.moving_average_variables`
for more details.
* `REGULARIZATION_LOSSES`: regularization losses collected during graph
construction.
The following standard keys are _defined_, but their collections are **not**
automatically populated as many of the others are:
* `WEIGHTS`
* `BIASES`
* `ACTIVATIONS`
"""
# Key to collect Variable objects that are global (shared across machines).
# Default collection for all variables, except local ones.
GLOBAL_VARIABLES = "variables"
# Key to collect local variables that are local to the machine and are not
# saved/restored.
LOCAL_VARIABLES = "local_variables"
  # Key to collect local variables which are used to accumulate internal state
# to be used in tf.metrics.*.
METRIC_VARIABLES = "metric_variables"
# Key to collect model variables defined by layers.
MODEL_VARIABLES = "model_variables"
# Key to collect Variable objects that will be trained by the
# optimizers.
TRAINABLE_VARIABLES = "trainable_variables"
# Key to collect summaries.
SUMMARIES = "summaries"
# Key to collect QueueRunners.
QUEUE_RUNNERS = "queue_runners"
# Key to collect table initializers.
TABLE_INITIALIZERS = "table_initializer"
# Key to collect asset filepaths. An asset represents an external resource
# like a vocabulary file.
ASSET_FILEPATHS = "asset_filepaths"
# Key to collect Variable objects that keep moving averages.
MOVING_AVERAGE_VARIABLES = "moving_average_variables"
# Key to collect regularization losses at graph construction.
REGULARIZATION_LOSSES = "regularization_losses"
# Key to collect concatenated sharded variables.
CONCATENATED_VARIABLES = "concatenated_variables"
# Key to collect savers.
SAVERS = "savers"
# Key to collect weights
WEIGHTS = "weights"
# Key to collect biases
BIASES = "biases"
# Key to collect activations
ACTIVATIONS = "activations"
# Key to collect update_ops
UPDATE_OPS = "update_ops"
# Key to collect losses
LOSSES = "losses"
# Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.
SAVEABLE_OBJECTS = "saveable_objects"
# Key to collect all shared resources used by the graph which need to be
# initialized once per cluster.
RESOURCES = "resources"
# Key to collect all shared resources used in this graph which need to be
# initialized once per session.
LOCAL_RESOURCES = "local_resources"
# Trainable resource-style variables.
TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables"
# Key to indicate various ops.
INIT_OP = "init_op"
LOCAL_INIT_OP = "local_init_op"
READY_OP = "ready_op"
READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op"
SUMMARY_OP = "summary_op"
GLOBAL_STEP = "global_step"
# Used to count the number of evaluations performed during a single evaluation
# run.
EVAL_STEP = "eval_step"
TRAIN_OP = "train_op"
# Key for control flow context.
COND_CONTEXT = "cond_context"
WHILE_CONTEXT = "while_context"
# Used to store v2 summary names.
_SUMMARY_COLLECTION = "_SUMMARY_V2"
# List of all collections that keep track of variables.
_VARIABLE_COLLECTIONS = [
GLOBAL_VARIABLES,
LOCAL_VARIABLES,
METRIC_VARIABLES,
MODEL_VARIABLES,
TRAINABLE_VARIABLES,
MOVING_AVERAGE_VARIABLES,
CONCATENATED_VARIABLES,
TRAINABLE_RESOURCE_VARIABLES,
]
# Key for streaming model ports.
# NOTE(yuanbyu): internal and experimental.
_STREAMING_MODEL_PORTS = "streaming_model_ports"
@decorator_utils.classproperty
@deprecation.deprecated(None, "Use `tf.GraphKeys.GLOBAL_VARIABLES` instead.")
def VARIABLES(cls): # pylint: disable=no-self-argument
return cls.GLOBAL_VARIABLES
def dismantle_graph(graph):
"""Cleans up reference cycles from a `Graph`.
Helpful for making sure the garbage collector doesn't need to run after a
temporary `Graph` is no longer needed.
Args:
graph: A `Graph` object to destroy. Neither it nor any of its ops are usable
after this function runs.
"""
memory.dismantle_ordered_dict(graph._functions) # pylint: disable=protected-access
# Now clean up Operation<->Graph reference cycles by clearing all of the
# attributes for the Graph and its ops.
graph_operations = graph.get_operations()
for op in graph_operations:
op.__dict__ = {}
graph.__dict__ = {}
@tf_export(v1=["add_to_collection"])
def add_to_collection(name, value):
"""Wrapper for `Graph.add_to_collection()` using the default graph.
See `tf.Graph.add_to_collection`
for more details.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collection.
@compatibility(eager)
Collections are only supported in eager when variables are created inside
an EagerVariableStore (e.g. as part of a layer or template).
@end_compatibility
"""
get_default_graph().add_to_collection(name, value)
@tf_export(v1=["add_to_collections"])
def add_to_collections(names, value):
"""Wrapper for `Graph.add_to_collections()` using the default graph.
See `tf.Graph.add_to_collections`
for more details.
Args:
names: The key for the collections. The `GraphKeys` class contains many
standard names for collections.
value: The value to add to the collections.
@compatibility(eager)
Collections are only supported in eager when variables are created inside
an EagerVariableStore (e.g. as part of a layer or template).
@end_compatibility
"""
get_default_graph().add_to_collections(names, value)
@tf_export(v1=["get_collection_ref"])
def get_collection_ref(key):
"""Wrapper for `Graph.get_collection_ref()` using the default graph.
See `tf.Graph.get_collection_ref`
for more details.
Args:
key: The key for the collection. For example, the `GraphKeys` class contains
many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection. Note that this returns
the collection list itself, which can be modified in place to change the
collection.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
return get_default_graph().get_collection_ref(key)
@tf_export(v1=["get_collection"])
def get_collection(key, scope=None):
"""Wrapper for `Graph.get_collection()` using the default graph.
See `tf.Graph.get_collection`
for more details.
Args:
key: The key for the collection. For example, the `GraphKeys` class contains
many standard names for collections.
scope: (Optional.) If supplied, the resulting list is filtered to include
only items whose `name` attribute matches using `re.match`. Items without
a `name` attribute are never returned if a scope is supplied and the
      choice of `re.match` means that a `scope` without special tokens filters
by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
return get_default_graph().get_collection(key, scope)
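# Illustrative sketch of the collection wrappers above, which all delegate to
# the current default graph; `some_tensor` is a hypothetical graph element.
#
#   add_to_collection("my_collection", some_tensor)
#   get_collection("my_collection")        # -> [some_tensor] (a copy)
#   get_collection_ref("my_collection")    # -> the underlying mutable list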
def get_all_collection_keys():
"""Returns a list of collections used in the default graph."""
return get_default_graph().get_all_collection_keys()
def name_scope(name, default_name=None, values=None, skip_on_eager=True):
"""Internal-only entry point for `name_scope*`.
Internal ops do not use the public API and instead rely on
`ops.name_scope` regardless of the execution mode. This function
dispatches to the correct `name_scope*` implementation based on
the arguments provided and the current mode. Specifically,
* if `values` contains a graph tensor `Graph.name_scope` is used;
* `name_scope_v1` is used in graph mode;
* `name_scope_v2` -- in eager mode.
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
values: The list of `Tensor` arguments that are passed to the op function.
skip_on_eager: Indicates to return NullContextmanager if executing eagerly.
      By default this is True since naming tensors and operations in eager mode
      has little use and causes unnecessary performance overhead. However, it is
important to preserve variable names since they are often useful for
debugging and saved models.
Returns:
`name_scope*` context manager.
"""
if not context.executing_eagerly():
return internal_name_scope_v1(name, default_name, values)
if skip_on_eager:
return NullContextmanager()
name = default_name if name is None else name
if values:
# The presence of a graph tensor in `values` overrides the context.
# TODO(slebedev): this is Keras-specific and should be removed.
# pylint: disable=unidiomatic-typecheck
graph_value = next((value for value in values if type(value) == Tensor),
None)
# pylint: enable=unidiomatic-typecheck
if graph_value is not None:
return graph_value.graph.name_scope(name)
return name_scope_v2(name or "")
class internal_name_scope_v1(object): # pylint: disable=invalid-name
"""Graph-only version of `name_scope_v1`."""
@property
def name(self):
return self._name
def __init__(self, name, default_name=None, values=None):
"""Initialize the context manager.
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
values: The list of `Tensor` arguments that are passed to the op function.
Raises:
TypeError: if `default_name` is passed in but not a string.
"""
if not (default_name is None or isinstance(default_name, six.string_types)):
raise TypeError(
"`default_name` type (%s) is not a string type. You likely meant to "
"pass this into the `values` kwarg." % type(default_name))
self._name = default_name if name is None else name
self._default_name = default_name
self._values = values
def __enter__(self):
"""Start the scope block.
Returns:
The scope name.
Raises:
ValueError: if neither `name` nor `default_name` is provided
but `values` are.
"""
if self._name is None and self._values is not None:
# We only raise an error if values is not None (provided) because
# currently tf.name_scope(None) (values=None then) is sometimes used as
# an idiom to reset to top scope.
raise ValueError(
"At least one of name (%s) and default_name (%s) must be provided."
% (self._name, self._default_name))
g = get_default_graph()
if self._values and not g.building_function:
# Specialize based on the knowledge that `_get_graph_from_inputs()`
# ignores `inputs` when building a function.
g_from_inputs = _get_graph_from_inputs(self._values)
if g_from_inputs is not g:
g = g_from_inputs
self._g_manager = g.as_default()
self._g_manager.__enter__()
else:
self._g_manager = None
else:
self._g_manager = None
try:
self._name_scope = g.name_scope(self._name)
return self._name_scope.__enter__()
except:
if self._g_manager is not None:
self._g_manager.__exit__(*sys.exc_info())
raise
def __exit__(self, *exc_info):
self._name_scope.__exit__(*exc_info)
if self._g_manager is not None:
self._g_manager.__exit__(*exc_info)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["name_scope"])
class name_scope_v1(object): # pylint: disable=invalid-name
"""A context manager for use when defining a Python op.
This context manager validates that the given `values` are from the
same graph, makes that graph the default graph, and pushes a
name scope in that graph (see
`tf.Graph.name_scope`
for more details on that).
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope(name, "MyOp", [a, b, c]) as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
"""
__slots__ = ["_name", "_name_scope"]
@property
def name(self):
return self._name
def __init__(self, name, default_name=None, values=None):
"""Initialize the context manager.
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
values: The list of `Tensor` arguments that are passed to the op function.
Raises:
TypeError: if `default_name` is passed in but not a string.
"""
self._name_scope = name_scope(
name, default_name, values, skip_on_eager=False)
self._name = default_name if name is None else name
def __enter__(self):
return self._name_scope.__enter__()
def __exit__(self, *exc_info):
return self._name_scope.__exit__(*exc_info)
@tf_export("name_scope", v1=[])
class name_scope_v2(object):
"""A context manager for use when defining a Python op.
This context manager pushes a name scope, which will make the name of all
operations added within it have a prefix.
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope("MyOp") as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
When executed, the Tensors `a`, `b`, `c`, will have names `MyOp/a`, `MyOp/b`,
and `MyOp/c`.
Inside a `tf.function`, if the scope name already exists, the name will be
made unique by appending `_n`. For example, calling `my_op` the second time
will generate `MyOp_1/a`, etc.
"""
__slots__ = ["_name", "_exit_fns"]
def __init__(self, name):
"""Initialize the context manager.
Args:
name: The prefix to use on all names created within the name scope.
Raises:
ValueError: If name is not a string.
"""
if not isinstance(name, six.string_types):
raise ValueError("name for name_scope must be a string.")
self._name = name
self._exit_fns = []
@property
def name(self):
return self._name
def __enter__(self):
"""Start the scope block.
Returns:
The scope name.
"""
ctx = context.context()
if ctx.executing_eagerly():
# Names are not auto-incremented in eager mode.
# A trailing slash breaks out of nested name scopes, indicating a
# fully specified scope name, for compatibility with Graph.name_scope.
# This also prevents auto-incrementing.
old_name = ctx.scope_name
name = self._name
if not name:
scope_name = ""
elif name[-1] == "/":
scope_name = name
elif old_name:
scope_name = old_name + name + "/"
else:
scope_name = name + "/"
ctx.scope_name = scope_name
def _restore_name_scope(*_):
ctx.scope_name = old_name
self._exit_fns.append(_restore_name_scope)
else:
scope = get_default_graph().name_scope(self._name)
scope_name = scope.__enter__()
self._exit_fns.append(scope.__exit__)
return scope_name
def __exit__(self, type_arg, value_arg, traceback_arg):
self._exit_fns.pop()(type_arg, value_arg, traceback_arg)
return False # False values do not suppress exceptions
def __getstate__(self):
return self._name, self._exit_fns
def __setstate__(self, state):
self._name = state[0]
self._exit_fns = state[1]
def strip_name_scope(name, export_scope):
"""Removes name scope from a name.
Args:
name: A `string` name.
export_scope: Optional `string`. Name scope to remove.
Returns:
Name with name scope removed, or the original name if export_scope
is None.
"""
if export_scope:
if export_scope[-1] == "/":
export_scope = export_scope[:-1]
try:
# Strips export_scope/, export_scope///,
# ^export_scope/, loc:@export_scope/.
str_to_replace = r"([\^]|loc:@|^)" + export_scope + r"[\/]+(.*)"
return re.sub(str_to_replace, r"\1\2", compat.as_str(name), count=1)
except TypeError as e:
# If the name is not of a type we can process, simply return it.
logging.warning(e)
return name
else:
return name
def prepend_name_scope(name, import_scope):
"""Prepends name scope to a name.
Args:
name: A `string` name.
import_scope: Optional `string`. Name scope to add.
Returns:
Name with name scope added, or the original name if import_scope
is None.
"""
if import_scope:
if import_scope[-1] == "/":
import_scope = import_scope[:-1]
try:
str_to_replace = r"([\^]|loc:@|^)(.*)"
return re.sub(str_to_replace, r"\1" + import_scope + r"/\2",
compat.as_str(name))
except TypeError as e:
# If the name is not of a type we can process, simply return it.
logging.warning(e)
return name
else:
return name
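# Illustrative examples of the two scope helpers above (behaviour follows the
# regular expressions used in their bodies):
#
#   strip_name_scope("export/foo/bar", "export")   # -> "foo/bar"
#   strip_name_scope("^export/foo", "export")      # -> "^foo"
#   prepend_name_scope("foo/bar", "import")        # -> "import/foo/bar"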
# pylint: disable=g-doc-return-or-yield
# pylint: disable=not-context-manager
@tf_export(v1=["op_scope"])
@tf_contextlib.contextmanager
def op_scope(values, name, default_name=None):
"""DEPRECATED. Same as name_scope above, just different argument order."""
logging.warn("tf.op_scope(values, name, default_name) is deprecated,"
" use tf.name_scope(name, default_name, values)")
with name_scope(name, default_name=default_name, values=values) as scope:
yield scope
_proto_function_registry = registry.Registry("proto functions")
def register_proto_function(collection_name,
proto_type=None,
to_proto=None,
from_proto=None):
"""Registers `to_proto` and `from_proto` functions for collection_name.
`to_proto` function converts a Python object to the corresponding protocol
buffer, and returns the protocol buffer.
`from_proto` function converts protocol buffer into a Python object, and
  returns the object.
Args:
collection_name: Name of the collection.
proto_type: Protobuf type, such as `saver_pb2.SaverDef`,
      `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`.
to_proto: Function that implements Python object to protobuf conversion.
from_proto: Function that implements protobuf to Python object conversion.
"""
if to_proto and not callable(to_proto):
raise TypeError("to_proto must be callable.")
if from_proto and not callable(from_proto):
raise TypeError("from_proto must be callable.")
_proto_function_registry.register((proto_type, to_proto, from_proto),
collection_name)
def get_collection_proto_type(collection_name):
"""Returns the proto_type for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[0]
except LookupError:
return None
def get_to_proto_function(collection_name):
"""Returns the to_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[1]
except LookupError:
return None
def get_from_proto_function(collection_name):
"""Returns the from_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[2]
except LookupError:
return None
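# Illustrative sketch of wiring a collection into the proto registry above;
# `my_to_proto` and `my_from_proto` are hypothetical conversion callables.
#
#   register_proto_function("my_collection",
#                           to_proto=my_to_proto,
#                           from_proto=my_from_proto)
#   get_to_proto_function("my_collection")     # -> my_to_proto
#   get_from_proto_function("my_collection")   # -> my_from_proto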
def _op_to_colocate_with(v, graph):
"""Operation object corresponding to v to use for colocation constraints."""
if v is None:
return None, None
if isinstance(v, Operation):
return v, None
# We always want to colocate with the reference op.
# When 'v' is a ResourceVariable, the reference op is the handle creating op.
#
# What this should be is:
# if isinstance(v, ResourceVariable):
# return v.handle.op, v
# However, that would require a circular import dependency.
# As of October 2018, there were attempts underway to remove
# colocation constraints altogether. Assuming that will
# happen soon, perhaps this hack to work around the circular
# import dependency is acceptable.
if hasattr(v, "handle") and isinstance(v.handle, Tensor):
device_only_candidate = lambda: None
device_only_candidate.device = v.device
device_only_candidate.name = v.name
if graph.building_function:
return graph.capture(v.handle).op, device_only_candidate
else:
return v.handle.op, device_only_candidate
return internal_convert_to_tensor_or_indexed_slices(v, as_ref=True).op, None
def _is_keras_symbolic_tensor(x):
return hasattr(x, "graph") and getattr(x.graph, "name", None) == "keras_graph"
# These symbols were originally defined in this module; import them for
# backwards compatibility until all references have been updated to access
# them from the indexed_slices.py module.
IndexedSlices = indexed_slices.IndexedSlices
IndexedSlicesValue = indexed_slices.IndexedSlicesValue
convert_to_tensor_or_indexed_slices = \
indexed_slices.convert_to_tensor_or_indexed_slices
convert_n_to_tensor_or_indexed_slices = \
indexed_slices.convert_n_to_tensor_or_indexed_slices
internal_convert_to_tensor_or_indexed_slices = \
indexed_slices.internal_convert_to_tensor_or_indexed_slices
internal_convert_n_to_tensor_or_indexed_slices = \
indexed_slices.internal_convert_n_to_tensor_or_indexed_slices
register_tensor_conversion_function = \
tensor_conversion_registry.register_tensor_conversion_function
# Helper functions for op wrapper modules generated by `python_op_gen`.
def to_raw_op(f):
"""Make a given op wrapper function `f` raw.
Raw op wrappers can only be called with keyword arguments.
Args:
f: An op wrapper function to make raw.
Returns:
Raw `f`.
"""
# Copy `f` to get a new `__dict__`, otherwise `tf_export` will fail
# due to double-registration.
f = types.FunctionType(f.__code__, f.__globals__, f.__name__, f.__defaults__,
f.__closure__)
return kwarg_only(f)
def raise_from_not_ok_status(e, name):
message = e.message + (" name: " + name if name is not None else "")
# pylint: disable=protected-access
six.raise_from(core._status_to_exception(e.code, message), None)
# pylint: enable=protected-access
def add_exit_callback_to_default_func_graph(fn):
"""Add a callback to run when the default function graph goes out of scope.
Usage:
```python
@tf.function
def fn(x, v):
expensive = expensive_object(v)
add_exit_callback_to_default_func_graph(lambda: expensive.release())
return g(x, expensive)
fn(x=tf.constant(...), v=...)
# `expensive` has been released.
```
Args:
fn: A callable that takes no arguments and whose output is ignored.
To be executed when exiting func graph scope.
Raises:
RuntimeError: If executed when the current default graph is not a FuncGraph,
or not currently executing in function creation mode (e.g., if inside
an init_scope).
"""
default_graph = get_default_graph()
if not default_graph._building_function: # pylint: disable=protected-access
raise RuntimeError(
"Cannot add scope exit callbacks when not building a function. "
"Default graph: {}".format(default_graph))
default_graph._add_scope_exit_callback(fn) # pylint: disable=protected-access
def _reconstruct_sequence_inputs(op_def, inputs, attrs):
"""Regroups a flat list of input tensors into scalar and sequence inputs.
Args:
op_def: The `op_def_pb2.OpDef` (for knowing the input types)
inputs: a list of input `Tensor`s to the op.
attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define
how long each sequence is)
Returns:
A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs).
"""
grouped_inputs = []
i = 0
for input_arg in op_def.input_arg:
if input_arg.number_attr:
input_len = attrs[input_arg.number_attr].i
is_sequence = True
elif input_arg.type_list_attr:
input_len = len(attrs[input_arg.type_list_attr].list.type)
is_sequence = True
else:
input_len = 1
is_sequence = False
if is_sequence:
grouped_inputs.append(inputs[i:i + input_len])
else:
grouped_inputs.append(inputs[i])
i += input_len
assert i == len(inputs)
return grouped_inputs
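# Illustrative example for the helper above: for an op_def declaring a scalar
# input followed by a sequence input of length 2, inputs [a, b, c] would be
# regrouped as [a, [b, c]].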
class _TensorIterator(object):
"""Iterates over the leading dim of a Tensor. Performs no error checks."""
__slots__ = ["_tensor", "_index", "_limit"]
def __init__(self, tensor, dim0):
self._tensor = tensor
self._index = 0
self._limit = dim0
def __iter__(self):
return self
def __next__(self):
if self._index == self._limit:
raise StopIteration
result = self._tensor[self._index]
self._index += 1
return result
next = __next__ # python2.x compatibility.
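# Illustrative example for the iterator above: list(_TensorIterator(t, 3))
# yields t[0], t[1], t[2]; no bounds checking is performed on `t`.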
def set_int_list_attr(op, attr_name, ints):
"""TF internal method used to set a list(int) attribute in the node_def."""
ints_list = attr_value_pb2.AttrValue.ListValue(i=ints)
op._set_attr(attr_name, attr_value_pb2.AttrValue(list=ints_list)) # pylint:disable=protected-access
| karllessard/tensorflow | tensorflow/python/framework/ops.py | Python | apache-2.0 | 254,309 |
from __future__ import print_function, unicode_literals
from . logEnvironmentModule import *
from . errorObjs import *
from heapq import nlargest
import random
from collections import deque
class BFS_improved(LogAgent):
"""BFS_improved LogAgent by Robert Parcus, 2014
State space blind search using BFS. It is very fast
for small problems, but impractical for realistic
situations.
    This version uses a special trick to avoid saving
    the fringe as a huge list of statuses.
    It saves a list of movements, which are smaller objects
    than statuses.
    A "state map" is kept, but it is actually a graph
    of hashes of the statuses.
    It is used to avoid loops.
"""
def __init__(self):
super(BFS_improved, self).__init__()
class NotFound(Exception): pass
def discovery_forwards(self, status, stateMap):
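        # Breadth-first discovery over move sequences: each fringe entry is the
        # list of moves needed to rebuild a status from the initial `status`,
        # so full statuses never have to be stored in the fringe. `stateMap`
        # is a graph of status hashes recording transitions that were already
        # generated, which avoids re-enqueueing the same transition in a loop.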
print("\n"*2)
print("*"*60)
print("\t", self.__doc__)
print("\n"*2)
fringe = deque()
stat = status.clone
start_hash = hash(repr(stat))
current_moves = []
if start_hash not in stateMap:
stateMap[start_hash] = []
while not stat.check_goal():
curr_hash = hash(repr(stat))
for move in stat.moves:
#child = stat.clone
child = status.clone
#print("funziono?")
new_move = current_moves + [move]
#print("current_moves", current_moves)
#print("move", move)
#print("new_move", new_move)
child.execute(new_move)
child_hash = hash(repr(child))
if child_hash not in stateMap[curr_hash]:
stateMap[curr_hash].append(child_hash)
if child_hash not in stateMap:
stateMap[child_hash] = []
stateMap[child_hash].append(curr_hash)
#print(new_move)
fringe.append(new_move)
#print("fringe", fringe)
stat = status.clone
current_moves = fringe.popleft()
#print("popped current_moves", current_moves)
stat.execute(current_moves)
goal_state = stat.clone
goal_hash = hash(repr(stat))
return {'stateMap': stateMap, 'goal_hash': goal_hash, 'goal_state': goal_state, 'start_hash': start_hash, "final_moves": current_moves}
def itr_solve(self, status):
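        # Runs the forward BFS discovery from `status` and returns the move
        # sequence that reached the first goal state found.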
start_hash = hash(repr(status))
stateMap = {start_hash: []}
goal_hash = []
goal_state = []
tmp = self.discovery_forwards(status.clone, stateMap)
stateMap = tmp["stateMap"]
goal_state.append(tmp["goal_state"])
start_hash = tmp["start_hash"]
final_moves = tmp["final_moves"]
goal_hash.append(tmp["goal_hash"])
return final_moves
def search_shortest_path(self, graph, start, goal):
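        # Standard BFS shortest path over the hash graph built during
        # discovery: `visited` maps each node to its predecessor so the path
        # can be reconstructed once `goal` is reached.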
visited = {start: None}
queue = deque([start])
while queue:
node = queue.popleft()
if node == goal:
path = []
while node is not None:
path.append(node)
node = visited[node]
return path[::-1]
for neighbour in graph[node]:
if neighbour not in visited:
visited[neighbour] = node
queue.append(neighbour)
raise self.NotFound('No path from {} to {}'.format(start, goal))
def get_solution_cost(self, status, moves):
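        # Cost model used for reporting: "load"/"unload" actions cost a flat
        # 10, while any other move costs 10 times the distance recorded
        # between the two airports named in the move.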
cost = 0
for move in moves:
if move[0] in ["unload", "load"]:
cost += 10
else:
cost += 10*status.airports[move[2]].neighbors[move[3]]
return cost
def solve(self, status, goal):
return self.itr_solve(status) | MircoT/AI-Project-PlannerEnvironment | agents_dir/BFS_improved.py | Python | mit | 3,874 |
"""Functions to plot epochs data
"""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Jaakko Leppakangas <[email protected]>
#
# License: Simplified BSD
from collections import Counter
from functools import partial
import copy
import numpy as np
from ..utils import verbose, get_config, set_config, logger, warn
from ..io.pick import pick_types, channel_type
from ..io.proj import setup_proj
from ..time_frequency import psd_multitaper
from .utils import (tight_layout, figure_nobar, _toggle_proj, _toggle_options,
_layout_figure, _setup_vmin_vmax, _channels_changed,
_plot_raw_onscroll, _onclick_help, plt_show,
_compute_scalings, DraggableColorbar)
from ..defaults import _handle_default
def plot_epochs_image(epochs, picks=None, sigma=0., vmin=None,
vmax=None, colorbar=True, order=None, show=True,
units=None, scalings=None, cmap='RdBu_r',
fig=None, axes=None, overlay_times=None):
"""Plot Event Related Potential / Fields image
Parameters
----------
epochs : instance of Epochs
The epochs.
picks : int | array-like of int | None
The indices of the channels to consider. If None, the first
five good channels are plotted.
sigma : float
The standard deviation of the Gaussian smoothing to apply along
the epoch axis to apply in the image. If 0., no smoothing is applied.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
colorbar : bool
Display or not a colorbar.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int it should be of length
the number of good epochs. If it's a callable the arguments
passed are the times vector and the data as 2d array
        (data.shape[1] == len(times)).
show : bool
Show figure if True.
units : dict | None
        The units of the channel types used for axes labels. If None,
defaults to `units=dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting.
If None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15,
eog=1e6)`.
cmap : matplotlib colormap | (colormap, bool) | 'interactive'
Colormap. If tuple, the first value indicates the colormap to use and
the second value is a boolean defining interactivity. In interactive
mode the colors are adjustable by clicking and dragging the colorbar
with left and right mouse button. Left mouse button moves the scale up
and down and right mouse button adjusts the range. Hitting space bar
resets the scale. Up and down arrows can be used to change the
colormap. If 'interactive', translates to ('RdBu_r', True). Defaults to
'RdBu_r'.
fig : matplotlib figure | None
Figure instance to draw the image to. Figure must contain two axes for
drawing the single trials and evoked responses. If None a new figure is
created. Defaults to None.
axes : list of matplotlib axes | None
List of axes instances to draw the image, erp and colorbar to.
Must be of length three if colorbar is True (with the last list element
being the colorbar axes) or two if colorbar is False. If both fig and
axes are passed an error is raised. Defaults to None.
overlay_times : array-like, shape (n_epochs,) | None
If not None the parameter is interpreted as time instants in seconds
and is added to the image. It is typically useful to display reaction
times. Note that it is defined with respect to the order
of epochs such that overlay_times[0] corresponds to epochs[0].
Returns
-------
figs : lists of matplotlib figures
One figure per channel displayed.
"""
from scipy import ndimage
units = _handle_default('units', units)
scalings = _handle_default('scalings', scalings)
import matplotlib.pyplot as plt
if picks is None:
picks = pick_types(epochs.info, meg=True, eeg=True, ref_meg=False,
exclude='bads')[:5]
if set(units.keys()) != set(scalings.keys()):
raise ValueError('Scalings and units must have the same keys.')
picks = np.atleast_1d(picks)
if (fig is not None or axes is not None) and len(picks) > 1:
raise ValueError('Only single pick can be drawn to a figure.')
if axes is not None:
if fig is not None:
raise ValueError('Both figure and axes were passed, please'
'decide between the two.')
from .utils import _validate_if_list_of_axes
oblig_len = 3 if colorbar else 2
_validate_if_list_of_axes(axes, obligatory_len=oblig_len)
ax1, ax2 = axes[:2]
# if axes were passed - we ignore fig param and get figure from axes
fig = ax1.get_figure()
if colorbar:
ax3 = axes[-1]
evoked = epochs.average(picks)
data = epochs.get_data()[:, picks, :]
n_epochs = len(data)
data = np.swapaxes(data, 0, 1)
if sigma > 0.:
for k in range(len(picks)):
data[k, :] = ndimage.gaussian_filter1d(
data[k, :], sigma=sigma, axis=0)
    scale_vmin = vmin is None
    scale_vmax = vmax is None
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
if overlay_times is not None and len(overlay_times) != n_epochs:
        raise ValueError('size of overlay_times parameter (%s) does not '
'match the number of epochs (%s).'
% (len(overlay_times), n_epochs))
if overlay_times is not None:
overlay_times = np.array(overlay_times)
times_min = np.min(overlay_times)
times_max = np.max(overlay_times)
if ((times_min < epochs.tmin) or (times_max > epochs.tmax)):
warn('Some values in overlay_times fall outside of the epochs '
'time interval (between %s s and %s s)'
% (epochs.tmin, epochs.tmax))
figs = list()
for i, (this_data, idx) in enumerate(zip(data, picks)):
if fig is None:
this_fig = plt.figure()
else:
this_fig = fig
figs.append(this_fig)
ch_type = channel_type(epochs.info, idx)
if ch_type not in scalings:
# We know it's not in either scalings or units since keys match
raise KeyError('%s type not in scalings and units' % ch_type)
this_data *= scalings[ch_type]
this_order = order
if callable(order):
this_order = order(epochs.times, this_data)
if this_order is not None and (len(this_order) != len(this_data)):
raise ValueError('size of order parameter (%s) does not '
'match the number of epochs (%s).'
% (len(this_order), len(this_data)))
this_overlay_times = None
if overlay_times is not None:
this_overlay_times = overlay_times
if this_order is not None:
this_order = np.asarray(this_order)
this_data = this_data[this_order]
if this_overlay_times is not None:
this_overlay_times = this_overlay_times[this_order]
plt.figure(this_fig.number)
if axes is None:
ax1 = plt.subplot2grid((3, 10), (0, 0), colspan=9, rowspan=2)
ax2 = plt.subplot2grid((3, 10), (2, 0), colspan=9, rowspan=1)
if colorbar:
ax3 = plt.subplot2grid((3, 10), (0, 9), colspan=1, rowspan=3)
this_vmin = vmin * scalings[ch_type] if scale_vmin else vmin
this_vmax = vmax * scalings[ch_type] if scale_vmax else vmax
if cmap == 'interactive':
cmap = ('RdBu_r', True)
elif not isinstance(cmap, tuple):
cmap = (cmap, True)
im = ax1.imshow(this_data,
extent=[1e3 * epochs.times[0], 1e3 * epochs.times[-1],
0, n_epochs],
aspect='auto', origin='lower', interpolation='nearest',
vmin=this_vmin, vmax=this_vmax, cmap=cmap[0])
if this_overlay_times is not None:
plt.plot(1e3 * this_overlay_times, 0.5 + np.arange(len(this_data)),
'k', linewidth=2)
ax1.set_title(epochs.ch_names[idx])
ax1.set_ylabel('Epochs')
ax1.axis('auto')
ax1.axis('tight')
ax1.axvline(0, color='m', linewidth=3, linestyle='--')
evoked_data = scalings[ch_type] * evoked.data[i]
ax2.plot(1e3 * evoked.times, evoked_data)
ax2.set_xlabel('Time (ms)')
ax2.set_xlim([1e3 * evoked.times[0], 1e3 * evoked.times[-1]])
ax2.set_ylabel(units[ch_type])
evoked_vmin = min(evoked_data) * 1.1 if scale_vmin else vmin
evoked_vmax = max(evoked_data) * 1.1 if scale_vmax else vmax
if scale_vmin or scale_vmax:
evoked_vmax = max(np.abs([evoked_vmax, evoked_vmin]))
evoked_vmin = -evoked_vmax
ax2.set_ylim([evoked_vmin, evoked_vmax])
ax2.axvline(0, color='m', linewidth=3, linestyle='--')
if colorbar:
cbar = plt.colorbar(im, cax=ax3)
if cmap[1]:
ax1.CB = DraggableColorbar(cbar, im)
tight_layout(fig=this_fig)
plt_show(show)
return figs
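# Illustrative usage sketch for ``plot_epochs_image`` (the image-plotting
# function defined above, referenced by that name elsewhere in this module).
# ``epochs`` is assumed to be an already-constructed Epochs instance and
# ``reaction_times`` is a hypothetical array with one value per epoch; only
# keyword arguments documented in the docstring above are used.
def _example_plot_epochs_image(epochs, reaction_times=None):
    """Plot single-trial images for the first good EEG channel."""
    picks = pick_types(epochs.info, meg=False, eeg=True, exclude='bads')[:1]
    return plot_epochs_image(epochs, picks=picks, sigma=0., colorbar=True,
                             cmap='interactive', show=False,
                             overlay_times=reaction_times)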
def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown',
color=(0.9, 0.9, 0.9), width=0.8, ignore=('IGNORED',),
show=True):
"""Show the channel stats based on a drop_log from Epochs
Parameters
----------
drop_log : list of lists
Epoch drop log from Epochs.drop_log.
threshold : float
The percentage threshold to use to decide whether or not to
plot. Default is zero (always plot).
n_max_plot : int
Maximum number of channels to show stats for.
subject : str
The subject name to use in the title of the plot.
color : tuple | str
Color to use for the bars.
width : float
Width of the bars.
ignore : list
The drop reasons to ignore.
show : bool
Show figure if True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
"""
import matplotlib.pyplot as plt
from ..epochs import _drop_log_stats
perc = _drop_log_stats(drop_log, ignore)
scores = Counter([ch for d in drop_log for ch in d if ch not in ignore])
ch_names = np.array(list(scores.keys()))
fig = plt.figure()
if perc < threshold or len(ch_names) == 0:
plt.text(0, 0, 'No drops')
return fig
n_used = 0
for d in drop_log: # "d" is the list of drop reasons for each epoch
if len(d) == 0 or any(ch not in ignore for ch in d):
n_used += 1 # number of epochs not ignored
counts = 100 * np.array(list(scores.values()), dtype=float) / n_used
n_plot = min(n_max_plot, len(ch_names))
order = np.flipud(np.argsort(counts))
plt.title('%s: %0.1f%%' % (subject, perc))
x = np.arange(n_plot)
plt.bar(x, counts[order[:n_plot]], color=color, width=width)
plt.xticks(x + width / 2.0, ch_names[order[:n_plot]], rotation=45,
horizontalalignment='right')
plt.tick_params(axis='x', which='major', labelsize=10)
plt.ylabel('% of epochs rejected')
plt.xlim((-width / 2.0, (n_plot - 1) + width * 3 / 2))
plt.grid(True, axis='y')
tight_layout(pad=1, fig=fig)
plt_show(show)
return fig
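# Illustrative usage sketch for ``plot_drop_log``. It assumes ``epochs`` is an
# Epochs instance whose bad epochs have already been dropped, so that
# ``epochs.drop_log`` is populated; the subject name is a placeholder.
def _example_plot_drop_log(epochs):
    """Show channels responsible for at least 5% of the dropped epochs."""
    return plot_drop_log(epochs.drop_log, threshold=5., n_max_plot=10,
                         subject='sample subject', show=False)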
def _draw_epochs_axes(epoch_idx, good_ch_idx, bad_ch_idx, data, times, axes,
title_str, axes_handler):
"""Aux functioin"""
this = axes_handler[0]
for ii, data_, ax in zip(epoch_idx, data, axes):
for l, d in zip(ax.lines, data_[good_ch_idx]):
l.set_data(times, d)
if bad_ch_idx is not None:
bad_lines = [ax.lines[k] for k in bad_ch_idx]
for l, d in zip(bad_lines, data_[bad_ch_idx]):
l.set_data(times, d)
if title_str is not None:
ax.set_title(title_str % ii, fontsize=12)
ax.set_ylim(data.min(), data.max())
ax.set_yticks(list())
ax.set_xticks(list())
if vars(ax)[this]['reject'] is True:
# memorizing reject
for l in ax.lines:
l.set_color((0.8, 0.8, 0.8))
ax.get_figure().canvas.draw()
else:
# forgetting previous reject
for k in axes_handler:
if k == this:
continue
if vars(ax).get(k, {}).get('reject', None) is True:
for l in ax.lines[:len(good_ch_idx)]:
l.set_color('k')
if bad_ch_idx is not None:
for l in ax.lines[-len(bad_ch_idx):]:
l.set_color('r')
ax.get_figure().canvas.draw()
break
def _epochs_navigation_onclick(event, params):
"""Aux function"""
import matplotlib.pyplot as plt
p = params
here = None
if event.inaxes == p['back'].ax:
here = 1
elif event.inaxes == p['next'].ax:
here = -1
elif event.inaxes == p['reject-quit'].ax:
if p['reject_idx']:
p['epochs'].drop(p['reject_idx'])
plt.close(p['fig'])
plt.close(event.inaxes.get_figure())
if here is not None:
p['idx_handler'].rotate(here)
p['axes_handler'].rotate(here)
this_idx = p['idx_handler'][0]
_draw_epochs_axes(this_idx, p['good_ch_idx'], p['bad_ch_idx'],
p['data'][this_idx],
p['times'], p['axes'], p['title_str'],
p['axes_handler'])
# XXX don't ask me why
p['axes'][0].get_figure().canvas.draw()
def _epochs_axes_onclick(event, params):
"""Aux function"""
reject_color = (0.8, 0.8, 0.8)
ax = event.inaxes
if event.inaxes is None:
return
p = params
here = vars(ax)[p['axes_handler'][0]]
if here.get('reject', None) is False:
idx = here['idx']
if idx not in p['reject_idx']:
p['reject_idx'].append(idx)
for l in ax.lines:
l.set_color(reject_color)
here['reject'] = True
elif here.get('reject', None) is True:
idx = here['idx']
if idx in p['reject_idx']:
p['reject_idx'].pop(p['reject_idx'].index(idx))
good_lines = [ax.lines[k] for k in p['good_ch_idx']]
for l in good_lines:
l.set_color('k')
if p['bad_ch_idx'] is not None:
bad_lines = ax.lines[-len(p['bad_ch_idx']):]
for l in bad_lines:
l.set_color('r')
here['reject'] = False
ax.get_figure().canvas.draw()
def plot_epochs(epochs, picks=None, scalings=None, n_epochs=20,
n_channels=20, title=None, show=True, block=False):
""" Visualize epochs
Bad epochs can be marked with a left click on top of the epoch. Bad
channels can be selected by clicking the channel name on the left side of
the main axes. Calling this function drops all the selected bad epochs as
well as bad epochs marked beforehand with rejection parameters.
Parameters
----------
epochs : instance of Epochs
The epochs object
picks : array-like of int | None
Channels to be included. If None only good data channels are used.
Defaults to None
scalings : dict | 'auto' | None
Scaling factors for the traces. If any fields in scalings are 'auto',
the scaling factor is set to match the 99.5th percentile of a subset of
the corresponding data. If scalings == 'auto', all scalings fields are
set to 'auto'. If any fields are 'auto' and data is not preloaded,
        a subset of epochs up to 100 MB will be loaded. If None, defaults to::
dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)
n_epochs : int
The number of epochs per view. Defaults to 20.
n_channels : int
The number of channels per view. Defaults to 20.
title : str | None
The title of the window. If None, epochs name will be displayed.
Defaults to None.
show : bool
Show figure if True. Defaults to True
block : bool
Whether to halt program execution until the figure is closed.
Useful for rejecting bad trials on the fly by clicking on an epoch.
Defaults to False.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
Notes
-----
The arrow keys (up/down/left/right) can be used to navigate between
channels and epochs and the scaling can be adjusted with - and + (or =)
keys, but this depends on the backend matplotlib is configured to use
(e.g., mpl.use(``TkAgg``) should work). Full screen mode can be toggled
    with the f11 key. The number of epochs and channels per view can be adjusted
with home/end and page down/page up keys. Butterfly plot can be toggled
with ``b`` key. Right mouse click adds a vertical line to the plot.
.. versionadded:: 0.10.0
"""
epochs.drop_bad()
scalings = _compute_scalings(scalings, epochs)
scalings = _handle_default('scalings_plot_raw', scalings)
projs = epochs.info['projs']
params = {'epochs': epochs,
'info': copy.deepcopy(epochs.info),
'bad_color': (0.8, 0.8, 0.8),
't_start': 0,
'histogram': None}
params['label_click_fun'] = partial(_pick_bad_channels, params=params)
_prepare_mne_browse_epochs(params, projs, n_channels, n_epochs, scalings,
title, picks)
_prepare_projectors(params)
_layout_figure(params)
callback_close = partial(_close_event, params=params)
params['fig'].canvas.mpl_connect('close_event', callback_close)
try:
plt_show(show, block=block)
except TypeError: # not all versions have this
plt_show(show)
return params['fig']
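# Illustrative usage sketch for the interactive browser above. ``epochs`` is
# an assumed Epochs instance; the keyword values simply exercise parameters
# documented in the ``plot_epochs`` docstring.
def _example_plot_epochs(epochs):
    """Open the interactive epochs browser with automatic trace scalings."""
    return plot_epochs(epochs, scalings='auto', n_epochs=10, n_channels=15,
                       title='example browser', block=False)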
@verbose
def plot_epochs_psd(epochs, fmin=0, fmax=np.inf, tmin=None, tmax=None,
proj=False, bandwidth=None, adaptive=False, low_bias=True,
normalization='length', picks=None, ax=None, color='black',
area_mode='std', area_alpha=0.33, dB=True, n_jobs=1,
show=True, verbose=None):
"""Plot the power spectral density across epochs
Parameters
----------
epochs : instance of Epochs
The epochs object
fmin : float
Start frequency to consider.
fmax : float
End frequency to consider.
tmin : float | None
Start time to consider.
tmax : float | None
End time to consider.
proj : bool
Apply projection.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz. The default
value is a window half-bandwidth of 4.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
picks : array-like of int | None
List of channels to use.
ax : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
color : str | tuple
A matplotlib-compatible color to use.
area_mode : str | None
Mode for plotting area. If 'std', the mean +/- 1 STD (across channels)
will be plotted. If 'range', the min and max (across channels) will be
plotted. Bad channels will be excluded from these calculations.
If None, no area will be plotted.
area_alpha : float
Alpha for the area.
dB : bool
If True, transform data to decibels.
n_jobs : int
Number of jobs to run in parallel.
show : bool
Show figure if True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
from .raw import _set_psd_plot_params
fig, picks_list, titles_list, ax_list, make_label = _set_psd_plot_params(
epochs.info, proj, picks, ax, area_mode)
for ii, (picks, title, ax) in enumerate(zip(picks_list, titles_list,
ax_list)):
psds, freqs = psd_multitaper(epochs, picks=picks, fmin=fmin,
fmax=fmax, tmin=tmin, tmax=tmax,
bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias,
normalization=normalization, proj=proj,
n_jobs=n_jobs)
# Convert PSDs to dB
if dB:
psds = 10 * np.log10(psds)
unit = 'dB'
else:
unit = 'power'
# mean across epochs and channels
psd_mean = np.mean(psds, axis=0).mean(axis=0)
if area_mode == 'std':
# std across channels
psd_std = np.std(np.mean(psds, axis=0), axis=0)
hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
elif area_mode == 'range':
hyp_limits = (np.min(np.mean(psds, axis=0), axis=0),
np.max(np.mean(psds, axis=0), axis=0))
else: # area_mode is None
hyp_limits = None
ax.plot(freqs, psd_mean, color=color)
if hyp_limits is not None:
ax.fill_between(freqs, hyp_limits[0], y2=hyp_limits[1],
color=color, alpha=area_alpha)
if make_label:
if ii == len(picks_list) - 1:
ax.set_xlabel('Freq (Hz)')
if ii == len(picks_list) // 2:
ax.set_ylabel('Power Spectral Density (%s/Hz)' % unit)
ax.set_title(title)
ax.set_xlim(freqs[0], freqs[-1])
if make_label:
tight_layout(pad=0.1, h_pad=0.1, w_pad=0.1, fig=fig)
plt_show(show)
return fig
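# Illustrative usage sketch for ``plot_epochs_psd``. ``epochs`` is an assumed
# Epochs instance; the frequency band and multitaper settings are arbitrary
# example values, not recommendations.
def _example_plot_epochs_psd(epochs):
    """Plot the average multitaper PSD between 2 and 40 Hz in dB."""
    return plot_epochs_psd(epochs, fmin=2., fmax=40., bandwidth=4.,
                           dB=True, area_mode='std', show=False)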
def _prepare_mne_browse_epochs(params, projs, n_channels, n_epochs, scalings,
title, picks, order=None):
"""Helper for setting up the mne_browse_epochs window."""
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.collections import LineCollection
from matplotlib.colors import colorConverter
epochs = params['epochs']
if picks is None:
picks = _handle_picks(epochs)
if len(picks) < 1:
raise RuntimeError('No appropriate channels found. Please'
' check your picks')
picks = sorted(picks)
# Reorganize channels
inds = list()
types = list()
for t in ['grad', 'mag']:
idxs = pick_types(params['info'], meg=t, ref_meg=False, exclude=[])
if len(idxs) < 1:
continue
mask = np.in1d(idxs, picks, assume_unique=True)
inds.append(idxs[mask])
types += [t] * len(inds[-1])
for t in ['hbo', 'hbr']:
idxs = pick_types(params['info'], meg=False, ref_meg=False, fnirs=t,
exclude=[])
if len(idxs) < 1:
continue
mask = np.in1d(idxs, picks, assume_unique=True)
inds.append(idxs[mask])
types += [t] * len(inds[-1])
pick_kwargs = dict(meg=False, ref_meg=False, exclude=[])
if order is None:
order = ['eeg', 'seeg', 'ecog', 'eog', 'ecg', 'emg', 'ref_meg', 'stim',
'resp', 'misc', 'chpi', 'syst', 'ias', 'exci']
for ch_type in order:
pick_kwargs[ch_type] = True
idxs = pick_types(params['info'], **pick_kwargs)
if len(idxs) < 1:
continue
mask = np.in1d(idxs, picks, assume_unique=True)
inds.append(idxs[mask])
types += [ch_type] * len(inds[-1])
pick_kwargs[ch_type] = False
inds = np.concatenate(inds).astype(int)
if not len(inds) == len(picks):
raise RuntimeError('Some channels not classified. Please'
' check your picks')
ch_names = [params['info']['ch_names'][x] for x in inds]
# set up plotting
size = get_config('MNE_BROWSE_RAW_SIZE')
n_epochs = min(n_epochs, len(epochs.events))
duration = len(epochs.times) * n_epochs
n_channels = min(n_channels, len(picks))
if size is not None:
size = size.split(',')
size = tuple(float(s) for s in size)
if title is None:
title = epochs.name
if epochs.name is None or len(title) == 0:
title = ''
fig = figure_nobar(facecolor='w', figsize=size, dpi=80)
fig.canvas.set_window_title('mne_browse_epochs')
ax = plt.subplot2grid((10, 15), (0, 1), colspan=13, rowspan=9)
ax.annotate(title, xy=(0.5, 1), xytext=(0, ax.get_ylim()[1] + 15),
ha='center', va='bottom', size=12, xycoords='axes fraction',
textcoords='offset points')
color = _handle_default('color', None)
ax.axis([0, duration, 0, 200])
ax2 = ax.twiny()
ax2.set_zorder(-1)
ax2.axis([0, duration, 0, 200])
ax_hscroll = plt.subplot2grid((10, 15), (9, 1), colspan=13)
ax_hscroll.get_yaxis().set_visible(False)
ax_hscroll.set_xlabel('Epochs')
ax_vscroll = plt.subplot2grid((10, 15), (0, 14), rowspan=9)
ax_vscroll.set_axis_off()
ax_vscroll.add_patch(mpl.patches.Rectangle((0, 0), 1, len(picks),
facecolor='w', zorder=3))
ax_help_button = plt.subplot2grid((10, 15), (9, 0), colspan=1)
help_button = mpl.widgets.Button(ax_help_button, 'Help')
help_button.on_clicked(partial(_onclick_help, params=params))
# populate vertical and horizontal scrollbars
for ci in range(len(picks)):
if ch_names[ci] in params['info']['bads']:
this_color = params['bad_color']
else:
this_color = color[types[ci]]
ax_vscroll.add_patch(mpl.patches.Rectangle((0, ci), 1, 1,
facecolor=this_color,
edgecolor=this_color,
zorder=4))
vsel_patch = mpl.patches.Rectangle((0, 0), 1, n_channels, alpha=0.5,
edgecolor='w', facecolor='w', zorder=5)
ax_vscroll.add_patch(vsel_patch)
ax_vscroll.set_ylim(len(types), 0)
ax_vscroll.set_title('Ch.')
# populate colors list
type_colors = [colorConverter.to_rgba(color[c]) for c in types]
colors = list()
for color_idx in range(len(type_colors)):
colors.append([type_colors[color_idx]] * len(epochs.events))
lines = list()
n_times = len(epochs.times)
for ch_idx in range(n_channels):
if len(colors) - 1 < ch_idx:
break
lc = LineCollection(list(), antialiased=False, linewidths=0.5,
zorder=3, picker=3.)
ax.add_collection(lc)
lines.append(lc)
times = epochs.times
data = np.zeros((params['info']['nchan'], len(times) * n_epochs))
ylim = (25., 0.) # Hardcoded 25 because butterfly has max 5 rows (5*5=25).
# make shells for plotting traces
offset = ylim[0] / n_channels
offsets = np.arange(n_channels) * offset + (offset / 2.)
times = np.arange(len(times) * len(epochs.events))
epoch_times = np.arange(0, len(times), n_times)
ax.set_yticks(offsets)
ax.set_ylim(ylim)
ticks = epoch_times + 0.5 * n_times
ax.set_xticks(ticks)
ax2.set_xticks(ticks[:n_epochs])
labels = list(range(1, len(ticks) + 1)) # epoch numbers
ax.set_xticklabels(labels)
ax2.set_xticklabels(labels)
xlim = epoch_times[-1] + len(epochs.times)
ax_hscroll.set_xlim(0, xlim)
vertline_t = ax_hscroll.text(0, 1, '', color='y', va='bottom', ha='right')
# fit horizontal scroll bar ticks
hscroll_ticks = np.arange(0, xlim, xlim / 7.0)
hscroll_ticks = np.append(hscroll_ticks, epoch_times[-1])
hticks = list()
for tick in hscroll_ticks:
hticks.append(epoch_times.flat[np.abs(epoch_times - tick).argmin()])
hlabels = [x / n_times + 1 for x in hticks]
ax_hscroll.set_xticks(hticks)
ax_hscroll.set_xticklabels(hlabels)
for epoch_idx in range(len(epoch_times)):
ax_hscroll.add_patch(mpl.patches.Rectangle((epoch_idx * n_times, 0),
n_times, 1, facecolor='w',
edgecolor='w', alpha=0.6))
hsel_patch = mpl.patches.Rectangle((0, 0), duration, 1,
edgecolor='k',
facecolor=(0.75, 0.75, 0.75),
alpha=0.25, linewidth=1, clip_on=False)
ax_hscroll.add_patch(hsel_patch)
text = ax.text(0, 0, 'blank', zorder=3, verticalalignment='baseline',
ha='left', fontweight='bold')
text.set_visible(False)
params.update({'fig': fig,
'ax': ax,
'ax2': ax2,
'ax_hscroll': ax_hscroll,
'ax_vscroll': ax_vscroll,
'vsel_patch': vsel_patch,
'hsel_patch': hsel_patch,
'lines': lines,
'projs': projs,
'ch_names': ch_names,
'n_channels': n_channels,
'n_epochs': n_epochs,
'scalings': scalings,
'duration': duration,
'ch_start': 0,
'colors': colors,
'def_colors': type_colors, # don't change at runtime
'picks': picks,
'bads': np.array(list(), dtype=int),
'data': data,
'times': times,
'epoch_times': epoch_times,
'offsets': offsets,
'labels': labels,
'scale_factor': 1.0,
'butterfly_scale': 1.0,
'fig_proj': None,
'types': np.array(types),
'inds': inds,
'vert_lines': list(),
'vertline_t': vertline_t,
'butterfly': False,
'text': text,
'ax_help_button': ax_help_button, # needed for positioning
'help_button': help_button, # reference needed for clicks
'fig_options': None,
'settings': [True, True, True, True],
'image_plot': None})
params['plot_fun'] = partial(_plot_traces, params=params)
# callbacks
callback_scroll = partial(_plot_onscroll, params=params)
fig.canvas.mpl_connect('scroll_event', callback_scroll)
callback_click = partial(_mouse_click, params=params)
fig.canvas.mpl_connect('button_press_event', callback_click)
callback_key = partial(_plot_onkey, params=params)
fig.canvas.mpl_connect('key_press_event', callback_key)
callback_resize = partial(_resize_event, params=params)
fig.canvas.mpl_connect('resize_event', callback_resize)
fig.canvas.mpl_connect('pick_event', partial(_onpick, params=params))
params['callback_key'] = callback_key
# Draw event lines for the first time.
_plot_vert_lines(params)
def _prepare_projectors(params):
""" Helper for setting up the projectors for epochs browser """
import matplotlib.pyplot as plt
import matplotlib as mpl
epochs = params['epochs']
projs = params['projs']
if len(projs) > 0 and not epochs.proj:
ax_button = plt.subplot2grid((10, 15), (9, 14))
opt_button = mpl.widgets.Button(ax_button, 'Proj')
callback_option = partial(_toggle_options, params=params)
opt_button.on_clicked(callback_option)
params['opt_button'] = opt_button
params['ax_button'] = ax_button
# As here code is shared with plot_evoked, some extra steps:
# first the actual plot update function
params['plot_update_proj_callback'] = _plot_update_epochs_proj
# then the toggle handler
callback_proj = partial(_toggle_proj, params=params)
# store these for use by callbacks in the options figure
params['callback_proj'] = callback_proj
callback_proj('none')
def _plot_traces(params):
""" Helper for plotting concatenated epochs """
params['text'].set_visible(False)
ax = params['ax']
butterfly = params['butterfly']
if butterfly:
ch_start = 0
n_channels = len(params['picks'])
data = params['data'] * params['butterfly_scale']
else:
ch_start = params['ch_start']
n_channels = params['n_channels']
data = params['data'] * params['scale_factor']
offsets = params['offsets']
lines = params['lines']
epochs = params['epochs']
n_times = len(epochs.times)
tick_list = list()
start_idx = int(params['t_start'] / n_times)
end = params['t_start'] + params['duration']
end_idx = int(end / n_times)
xlabels = params['labels'][start_idx:]
event_ids = params['epochs'].events[:, 2]
params['ax2'].set_xticklabels(event_ids[start_idx:])
ax.set_xticklabels(xlabels)
ylabels = ax.yaxis.get_ticklabels()
# do the plotting
for line_idx in range(n_channels):
ch_idx = line_idx + ch_start
if line_idx >= len(lines):
break
elif ch_idx < len(params['ch_names']):
if butterfly:
ch_type = params['types'][ch_idx]
if ch_type == 'grad':
offset = offsets[0]
elif ch_type == 'mag':
offset = offsets[1]
elif ch_type == 'eeg':
offset = offsets[2]
elif ch_type == 'eog':
offset = offsets[3]
elif ch_type == 'ecg':
offset = offsets[4]
else:
lines[line_idx].set_segments(list())
else:
tick_list += [params['ch_names'][ch_idx]]
offset = offsets[line_idx]
this_data = data[ch_idx]
# subtraction here gets correct orientation for flipped ylim
ydata = offset - this_data
xdata = params['times'][:params['duration']]
num_epochs = np.min([params['n_epochs'],
len(epochs.events)])
segments = np.split(np.array((xdata, ydata)).T, num_epochs)
ch_name = params['ch_names'][ch_idx]
if ch_name in params['info']['bads']:
if not butterfly:
this_color = params['bad_color']
ylabels[line_idx].set_color(this_color)
this_color = np.tile((params['bad_color']), (num_epochs, 1))
for bad_idx in params['bads']:
if bad_idx < start_idx or bad_idx > end_idx:
continue
this_color[bad_idx - start_idx] = (1., 0., 0.)
lines[line_idx].set_zorder(2)
else:
this_color = params['colors'][ch_idx][start_idx:end_idx]
lines[line_idx].set_zorder(3)
if not butterfly:
ylabels[line_idx].set_color('black')
lines[line_idx].set_segments(segments)
lines[line_idx].set_color(this_color)
else:
lines[line_idx].set_segments(list())
# finalize plot
ax.set_xlim(params['times'][0], params['times'][0] + params['duration'],
False)
params['ax2'].set_xlim(params['times'][0],
params['times'][0] + params['duration'], False)
if butterfly:
factor = -1. / params['butterfly_scale']
labels = np.empty(20, dtype='S15')
labels.fill('')
ticks = ax.get_yticks()
idx_offset = 1
if 'grad' in params['types']:
labels[idx_offset + 1] = '0.00'
for idx in [idx_offset, idx_offset + 2]:
labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[0]) *
params['scalings']['grad'] *
1e13 * factor)
idx_offset += 4
if 'mag' in params['types']:
labels[idx_offset + 1] = '0.00'
for idx in [idx_offset, idx_offset + 2]:
labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[1]) *
params['scalings']['mag'] *
1e15 * factor)
idx_offset += 4
if 'eeg' in params['types']:
labels[idx_offset + 1] = '0.00'
for idx in [idx_offset, idx_offset + 2]:
labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[2]) *
params['scalings']['eeg'] *
1e6 * factor)
idx_offset += 4
if 'eog' in params['types']:
labels[idx_offset + 1] = '0.00'
for idx in [idx_offset, idx_offset + 2]:
labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[3]) *
params['scalings']['eog'] *
1e6 * factor)
idx_offset += 4
if 'ecg' in params['types']:
labels[idx_offset + 1] = '0.00'
for idx in [idx_offset, idx_offset + 2]:
labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[4]) *
params['scalings']['ecg'] *
1e6 * factor)
ax.set_yticklabels(labels, fontsize=12, color='black')
else:
ax.set_yticklabels(tick_list, fontsize=12)
params['vsel_patch'].set_y(ch_start)
params['fig'].canvas.draw()
# XXX This is a hack to make sure this figure gets drawn last
# so that when matplotlib goes to calculate bounds we don't get a
# CGContextRef error on the MacOSX backend :(
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
def _plot_update_epochs_proj(params, bools=None):
"""Helper only needs to be called when proj is changed"""
if bools is not None:
inds = np.where(bools)[0]
params['info']['projs'] = [copy.deepcopy(params['projs'][ii])
for ii in inds]
params['proj_bools'] = bools
params['projector'], _ = setup_proj(params['info'], add_eeg_ref=False,
verbose=False)
start = int(params['t_start'] / len(params['epochs'].times))
n_epochs = params['n_epochs']
end = start + n_epochs
data = np.concatenate(params['epochs'][start:end].get_data(), axis=1)
if params['projector'] is not None:
data = np.dot(params['projector'], data)
types = params['types']
for pick, ind in enumerate(params['inds']):
params['data'][pick] = data[ind] / params['scalings'][types[pick]]
params['plot_fun']()
def _handle_picks(epochs):
"""Aux function to handle picks."""
if any('ICA' in k for k in epochs.ch_names):
picks = pick_types(epochs.info, misc=True, ref_meg=False,
exclude=[])
else:
picks = pick_types(epochs.info, meg=True, eeg=True, eog=True, ecg=True,
seeg=True, ecog=True, ref_meg=False, fnirs=True,
exclude=[])
return picks
def _plot_window(value, params):
"""Deal with horizontal shift of the viewport."""
max_times = len(params['times']) - params['duration']
if value > max_times:
value = len(params['times']) - params['duration']
if value < 0:
value = 0
if params['t_start'] != value:
params['t_start'] = value
params['hsel_patch'].set_x(value)
params['plot_update_proj_callback'](params)
def _plot_vert_lines(params):
""" Helper function for plotting vertical lines."""
ax = params['ax']
while len(ax.lines) > 0:
ax.lines.pop()
params['vert_lines'] = list()
params['vertline_t'].set_text('')
epochs = params['epochs']
if params['settings'][3]: # if zeroline visible
t_zero = np.where(epochs.times == 0.)[0]
if len(t_zero) == 1:
for event_idx in range(len(epochs.events)):
pos = [event_idx * len(epochs.times) + t_zero[0],
event_idx * len(epochs.times) + t_zero[0]]
ax.plot(pos, ax.get_ylim(), 'g', zorder=4, alpha=0.4)
for epoch_idx in range(len(epochs.events)):
pos = [epoch_idx * len(epochs.times), epoch_idx * len(epochs.times)]
ax.plot(pos, ax.get_ylim(), color='black', linestyle='--', zorder=2)
def _pick_bad_epochs(event, params):
"""Helper for selecting / dropping bad epochs"""
if 'ica' in params:
pos = (event.xdata, event.ydata)
_pick_bad_channels(pos, params)
return
n_times = len(params['epochs'].times)
start_idx = int(params['t_start'] / n_times)
xdata = event.xdata
xlim = event.inaxes.get_xlim()
epoch_idx = start_idx + int(xdata / (xlim[1] / params['n_epochs']))
total_epochs = len(params['epochs'].events)
if epoch_idx > total_epochs - 1:
return
# remove bad epoch
if epoch_idx in params['bads']:
params['bads'] = params['bads'][(params['bads'] != epoch_idx)]
for ch_idx in range(len(params['ch_names'])):
params['colors'][ch_idx][epoch_idx] = params['def_colors'][ch_idx]
params['ax_hscroll'].patches[epoch_idx].set_color('w')
params['ax_hscroll'].patches[epoch_idx].set_zorder(2)
params['plot_fun']()
return
# add bad epoch
params['bads'] = np.append(params['bads'], epoch_idx)
params['ax_hscroll'].patches[epoch_idx].set_color((1., 0., 0., 1.))
params['ax_hscroll'].patches[epoch_idx].set_zorder(3)
params['ax_hscroll'].patches[epoch_idx].set_edgecolor('w')
for ch_idx in range(len(params['ch_names'])):
params['colors'][ch_idx][epoch_idx] = (1., 0., 0., 1.)
params['plot_fun']()
def _pick_bad_channels(pos, params):
"""Helper function for selecting bad channels."""
text, ch_idx = _label2idx(params, pos)
if text is None:
return
if text in params['info']['bads']:
while text in params['info']['bads']:
params['info']['bads'].remove(text)
color = params['def_colors'][ch_idx]
params['ax_vscroll'].patches[ch_idx + 1].set_color(color)
else:
params['info']['bads'].append(text)
color = params['bad_color']
params['ax_vscroll'].patches[ch_idx + 1].set_color(color)
if 'ica' in params:
params['plot_fun']()
else:
params['plot_update_proj_callback'](params)
def _plot_onscroll(event, params):
"""Function to handle scroll events."""
if event.key == 'control':
if event.step < 0:
event.key = '-'
else:
event.key = '+'
_plot_onkey(event, params)
return
if params['butterfly']:
return
_plot_raw_onscroll(event, params, len(params['ch_names']))
def _mouse_click(event, params):
"""Function to handle mouse click events."""
if event.inaxes is None:
if params['butterfly'] or not params['settings'][0]:
return
ax = params['ax']
ylim = ax.get_ylim()
pos = ax.transData.inverted().transform((event.x, event.y))
if pos[0] > 0 or pos[1] < 0 or pos[1] > ylim[0]:
return
if event.button == 1: # left click
params['label_click_fun'](pos)
elif event.button == 3: # right click
if 'ica' not in params:
_, ch_idx = _label2idx(params, pos)
if ch_idx is None:
return
if channel_type(params['info'], ch_idx) not in ['mag', 'grad',
'eeg', 'eog']:
logger.info('Event related fields / potentials only '
'available for MEG and EEG channels.')
return
fig = plot_epochs_image(params['epochs'],
picks=params['inds'][ch_idx],
fig=params['image_plot'])[0]
params['image_plot'] = fig
elif event.button == 1: # left click
# vertical scroll bar changed
if event.inaxes == params['ax_vscroll']:
if params['butterfly']:
return
# Don't let scrollbar go outside vertical scrollbar limits
# XXX: floating point exception on some machines if this happens.
ch_start = min(
max(int(event.ydata) - params['n_channels'] // 2, 0),
len(params['ch_names']) - params['n_channels'])
if params['ch_start'] != ch_start:
params['ch_start'] = ch_start
params['plot_fun']()
# horizontal scroll bar changed
elif event.inaxes == params['ax_hscroll']:
# find the closest epoch time
times = params['epoch_times']
offset = 0.5 * params['n_epochs'] * len(params['epochs'].times)
xdata = times.flat[np.abs(times - (event.xdata - offset)).argmin()]
_plot_window(xdata, params)
# main axes
elif event.inaxes == params['ax']:
_pick_bad_epochs(event, params)
elif event.inaxes == params['ax'] and event.button == 2: # middle click
params['fig'].canvas.draw()
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
elif event.inaxes == params['ax'] and event.button == 3: # right click
n_times = len(params['epochs'].times)
xdata = int(event.xdata % n_times)
prev_xdata = 0
if len(params['vert_lines']) > 0:
prev_xdata = params['vert_lines'][0][0].get_data()[0][0]
while len(params['vert_lines']) > 0:
params['ax'].lines.remove(params['vert_lines'][0][0])
params['vert_lines'].pop(0)
if prev_xdata == xdata: # lines removed
params['vertline_t'].set_text('')
params['plot_fun']()
return
ylim = params['ax'].get_ylim()
for epoch_idx in range(params['n_epochs']): # plot lines
pos = [epoch_idx * n_times + xdata, epoch_idx * n_times + xdata]
params['vert_lines'].append(params['ax'].plot(pos, ylim, 'y',
zorder=5))
params['vertline_t'].set_text('%0.3f' % params['epochs'].times[xdata])
params['plot_fun']()
def _plot_onkey(event, params):
"""Function to handle key presses."""
import matplotlib.pyplot as plt
if event.key == 'down':
if params['butterfly']:
return
params['ch_start'] += params['n_channels']
_channels_changed(params, len(params['ch_names']))
elif event.key == 'up':
if params['butterfly']:
return
params['ch_start'] -= params['n_channels']
_channels_changed(params, len(params['ch_names']))
elif event.key == 'left':
sample = params['t_start'] - params['duration']
sample = np.max([0, sample])
_plot_window(sample, params)
elif event.key == 'right':
sample = params['t_start'] + params['duration']
sample = np.min([sample, params['times'][-1] - params['duration']])
times = params['epoch_times']
xdata = times.flat[np.abs(times - sample).argmin()]
_plot_window(xdata, params)
elif event.key == '-':
if params['butterfly']:
params['butterfly_scale'] /= 1.1
else:
params['scale_factor'] /= 1.1
params['plot_fun']()
elif event.key in ['+', '=']:
if params['butterfly']:
params['butterfly_scale'] *= 1.1
else:
params['scale_factor'] *= 1.1
params['plot_fun']()
elif event.key == 'f11':
mng = plt.get_current_fig_manager()
mng.full_screen_toggle()
elif event.key == 'pagedown':
if params['n_channels'] == 1 or params['butterfly']:
return
n_channels = params['n_channels'] - 1
ylim = params['ax'].get_ylim()
offset = ylim[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
params['n_channels'] = n_channels
params['ax'].collections.pop()
params['ax'].set_yticks(params['offsets'])
params['lines'].pop()
params['vsel_patch'].set_height(n_channels)
params['plot_fun']()
elif event.key == 'pageup':
if params['butterfly']:
return
from matplotlib.collections import LineCollection
n_channels = params['n_channels'] + 1
ylim = params['ax'].get_ylim()
offset = ylim[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
params['n_channels'] = n_channels
lc = LineCollection(list(), antialiased=False, linewidths=0.5,
zorder=3, picker=3.)
params['ax'].add_collection(lc)
params['ax'].set_yticks(params['offsets'])
params['lines'].append(lc)
params['vsel_patch'].set_height(n_channels)
params['plot_fun']()
elif event.key == 'home':
n_epochs = params['n_epochs'] - 1
if n_epochs <= 0:
return
n_times = len(params['epochs'].times)
ticks = params['epoch_times'] + 0.5 * n_times
params['ax2'].set_xticks(ticks[:n_epochs])
params['n_epochs'] = n_epochs
params['duration'] -= n_times
params['hsel_patch'].set_width(params['duration'])
params['data'] = params['data'][:, :-n_times]
params['plot_update_proj_callback'](params)
elif event.key == 'end':
n_epochs = params['n_epochs'] + 1
n_times = len(params['epochs'].times)
if n_times * n_epochs > len(params['times']):
return
ticks = params['epoch_times'] + 0.5 * n_times
params['ax2'].set_xticks(ticks[:n_epochs])
params['n_epochs'] = n_epochs
if len(params['vert_lines']) > 0:
ax = params['ax']
pos = params['vert_lines'][0][0].get_data()[0] + params['duration']
params['vert_lines'].append(ax.plot(pos, ax.get_ylim(), 'y',
zorder=4))
params['duration'] += n_times
if params['t_start'] + params['duration'] > len(params['times']):
params['t_start'] -= n_times
params['hsel_patch'].set_x(params['t_start'])
params['hsel_patch'].set_width(params['duration'])
params['data'] = np.zeros((len(params['data']), params['duration']))
params['plot_update_proj_callback'](params)
elif event.key == 'b':
if params['fig_options'] is not None:
plt.close(params['fig_options'])
params['fig_options'] = None
_prepare_butterfly(params)
_plot_traces(params)
elif event.key == 'o':
if not params['butterfly']:
_open_options(params)
elif event.key == 'h':
_plot_histogram(params)
elif event.key == '?':
_onclick_help(event, params)
elif event.key == 'escape':
plt.close(params['fig'])
def _prepare_butterfly(params):
"""Helper function for setting up butterfly plot."""
from matplotlib.collections import LineCollection
butterfly = not params['butterfly']
if butterfly:
types = set(['grad', 'mag', 'eeg', 'eog',
'ecg']) & set(params['types'])
if len(types) < 1:
return
params['ax_vscroll'].set_visible(False)
ax = params['ax']
labels = ax.yaxis.get_ticklabels()
for label in labels:
label.set_visible(True)
ylim = (5. * len(types), 0.)
ax.set_ylim(ylim)
offset = ylim[0] / (4. * len(types))
ticks = np.arange(0, ylim[0], offset)
ticks = [ticks[x] if x < len(ticks) else 0 for x in range(20)]
ax.set_yticks(ticks)
used_types = 0
params['offsets'] = [ticks[2]]
if 'grad' in types:
pos = (0, 1 - (ticks[2] / ylim[0]))
params['ax2'].annotate('Grad (fT/cm)', xy=pos, xytext=(-70, 0),
ha='left', size=12, va='center',
xycoords='axes fraction', rotation=90,
textcoords='offset points')
used_types += 1
params['offsets'].append(ticks[2 + used_types * 4])
if 'mag' in types:
pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
params['ax2'].annotate('Mag (fT)', xy=pos, xytext=(-70, 0),
ha='left', size=12, va='center',
xycoords='axes fraction', rotation=90,
textcoords='offset points')
used_types += 1
params['offsets'].append(ticks[2 + used_types * 4])
if 'eeg' in types:
pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
params['ax2'].annotate('EEG (uV)', xy=pos, xytext=(-70, 0),
ha='left', size=12, va='center',
xycoords='axes fraction', rotation=90,
textcoords='offset points')
used_types += 1
params['offsets'].append(ticks[2 + used_types * 4])
if 'eog' in types:
pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
params['ax2'].annotate('EOG (uV)', xy=pos, xytext=(-70, 0),
ha='left', size=12, va='center',
xycoords='axes fraction', rotation=90,
textcoords='offset points')
used_types += 1
params['offsets'].append(ticks[2 + used_types * 4])
if 'ecg' in types:
pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
params['ax2'].annotate('ECG (uV)', xy=pos, xytext=(-70, 0),
ha='left', size=12, va='center',
xycoords='axes fraction', rotation=90,
textcoords='offset points')
used_types += 1
while len(params['lines']) < len(params['picks']):
lc = LineCollection(list(), antialiased=False, linewidths=0.5,
zorder=3, picker=3.)
ax.add_collection(lc)
params['lines'].append(lc)
else: # change back to default view
labels = params['ax'].yaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][0])
params['ax_vscroll'].set_visible(True)
while len(params['ax2'].texts) > 0:
params['ax2'].texts.pop()
n_channels = params['n_channels']
while len(params['lines']) > n_channels:
params['ax'].collections.pop()
params['lines'].pop()
ylim = (25., 0.)
params['ax'].set_ylim(ylim)
offset = ylim[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
params['ax'].set_yticks(params['offsets'])
params['butterfly'] = butterfly
def _onpick(event, params):
"""Helper to add a channel name on click"""
if event.mouseevent.button != 2 or not params['butterfly']:
return # text label added with a middle mouse button
lidx = np.where([l is event.artist for l in params['lines']])[0][0]
text = params['text']
text.set_x(event.mouseevent.xdata)
text.set_y(event.mouseevent.ydata)
text.set_text(params['ch_names'][lidx])
text.set_visible(True)
# do NOT redraw here, since for butterfly plots hundreds of lines could
# potentially be picked -- use _mouse_click (happens once per click)
# to do the drawing
def _close_event(event, params):
"""Function to drop selected bad epochs. Called on closing of the plot."""
params['epochs'].drop(params['bads'])
params['epochs'].info['bads'] = params['info']['bads']
logger.info('Channels marked as bad: %s' % params['epochs'].info['bads'])
def _resize_event(event, params):
"""Function to handle resize event"""
size = ','.join([str(s) for s in params['fig'].get_size_inches()])
set_config('MNE_BROWSE_RAW_SIZE', size, set_env=False)
_layout_figure(params)
def _update_channels_epochs(event, params):
"""Function for changing the amount of channels and epochs per view."""
from matplotlib.collections import LineCollection
# Channels
n_channels = int(np.around(params['channel_slider'].val))
offset = params['ax'].get_ylim()[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
while len(params['lines']) > n_channels:
params['ax'].collections.pop()
params['lines'].pop()
while len(params['lines']) < n_channels:
lc = LineCollection(list(), linewidths=0.5, antialiased=False,
zorder=3, picker=3.)
params['ax'].add_collection(lc)
params['lines'].append(lc)
params['ax'].set_yticks(params['offsets'])
params['vsel_patch'].set_height(n_channels)
params['n_channels'] = n_channels
# Epochs
n_epochs = int(np.around(params['epoch_slider'].val))
n_times = len(params['epochs'].times)
ticks = params['epoch_times'] + 0.5 * n_times
params['ax2'].set_xticks(ticks[:n_epochs])
params['n_epochs'] = n_epochs
params['duration'] = n_times * n_epochs
params['hsel_patch'].set_width(params['duration'])
params['data'] = np.zeros((len(params['data']), params['duration']))
if params['t_start'] + n_times * n_epochs > len(params['times']):
params['t_start'] = len(params['times']) - n_times * n_epochs
params['hsel_patch'].set_x(params['t_start'])
params['plot_update_proj_callback'](params)
def _toggle_labels(label, params):
"""Function for toggling axis labels on/off."""
if label == 'Channel names visible':
params['settings'][0] = not params['settings'][0]
labels = params['ax'].yaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][0])
elif label == 'Event-id visible':
params['settings'][1] = not params['settings'][1]
labels = params['ax2'].xaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][1])
elif label == 'Epoch-id visible':
params['settings'][2] = not params['settings'][2]
labels = params['ax'].xaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][2])
elif label == 'Zeroline visible':
params['settings'][3] = not params['settings'][3]
_plot_vert_lines(params)
params['fig'].canvas.draw()
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
def _open_options(params):
"""Function for opening the option window."""
import matplotlib.pyplot as plt
import matplotlib as mpl
if params['fig_options'] is not None:
# turn off options dialog
plt.close(params['fig_options'])
params['fig_options'] = None
return
width = 10
height = 3
fig_options = figure_nobar(figsize=(width, height), dpi=80)
fig_options.canvas.set_window_title('View settings')
params['fig_options'] = fig_options
ax_channels = plt.axes([0.15, 0.1, 0.65, 0.1])
ax_epochs = plt.axes([0.15, 0.25, 0.65, 0.1])
ax_button = plt.axes([0.85, 0.1, 0.1, 0.25])
ax_check = plt.axes([0.15, 0.4, 0.4, 0.55])
plt.axis('off')
params['update_button'] = mpl.widgets.Button(ax_button, 'Update')
params['channel_slider'] = mpl.widgets.Slider(ax_channels, 'Channels', 1,
len(params['ch_names']),
valfmt='%0.0f',
valinit=params['n_channels'])
params['epoch_slider'] = mpl.widgets.Slider(ax_epochs, 'Epochs', 1,
len(params['epoch_times']),
valfmt='%0.0f',
valinit=params['n_epochs'])
params['checkbox'] = mpl.widgets.CheckButtons(ax_check,
['Channel names visible',
'Event-id visible',
'Epoch-id visible',
'Zeroline visible'],
actives=params['settings'])
update = partial(_update_channels_epochs, params=params)
params['update_button'].on_clicked(update)
labels_callback = partial(_toggle_labels, params=params)
params['checkbox'].on_clicked(labels_callback)
close_callback = partial(_settings_closed, params=params)
params['fig_options'].canvas.mpl_connect('close_event', close_callback)
try:
params['fig_options'].canvas.draw()
params['fig_options'].show(warn=False)
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
except Exception:
pass
def _settings_closed(events, params):
"""Function to handle close event from settings dialog."""
params['fig_options'] = None
def _plot_histogram(params):
"""Function for plotting histogram of peak-to-peak values."""
import matplotlib.pyplot as plt
epochs = params['epochs']
p2p = np.ptp(epochs.get_data(), axis=2)
types = list()
data = list()
if 'eeg' in params['types']:
eegs = np.array([p2p.T[i] for i,
x in enumerate(params['types']) if x == 'eeg'])
data.append(eegs.ravel())
types.append('eeg')
if 'mag' in params['types']:
mags = np.array([p2p.T[i] for i,
x in enumerate(params['types']) if x == 'mag'])
data.append(mags.ravel())
types.append('mag')
if 'grad' in params['types']:
grads = np.array([p2p.T[i] for i,
x in enumerate(params['types']) if x == 'grad'])
data.append(grads.ravel())
types.append('grad')
params['histogram'] = plt.figure()
scalings = _handle_default('scalings')
units = _handle_default('units')
titles = _handle_default('titles')
colors = _handle_default('color')
for idx in range(len(types)):
ax = plt.subplot(len(types), 1, idx + 1)
plt.xlabel(units[types[idx]])
plt.ylabel('count')
color = colors[types[idx]]
rej = None
if epochs.reject is not None and types[idx] in epochs.reject.keys():
rej = epochs.reject[types[idx]] * scalings[types[idx]]
rng = [0., rej * 1.1]
else:
rng = None
plt.hist(data[idx] * scalings[types[idx]], bins=100, color=color,
range=rng)
if rej is not None:
ax.plot((rej, rej), (0, ax.get_ylim()[1]), color='r')
plt.title(titles[types[idx]])
params['histogram'].suptitle('Peak-to-peak histogram', y=0.99)
params['histogram'].subplots_adjust(hspace=0.6)
try:
params['histogram'].show(warn=False)
except:
pass
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
def _label2idx(params, pos):
"""Aux function for click on labels. Returns channel name and idx."""
labels = params['ax'].yaxis.get_ticklabels()
offsets = np.array(params['offsets']) + params['offsets'][0]
line_idx = np.searchsorted(offsets, pos[1])
text = labels[line_idx].get_text()
if len(text) == 0:
return None, None
ch_idx = params['ch_start'] + line_idx
return text, ch_idx
| jmontoyam/mne-python | mne/viz/epochs.py | Python | bsd-3-clause | 65,600 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from struct import *
import numpy, sys
# http://stackoverflow.com/questions/25019287/how-to-convert-grayscale-values-in-matrix-to-an-image-in-python
imagesFile = 'data/brt-train-images.data'
w, h = 70, 74
chessmenTypesCount = 7
xMatrix = [[0 for x in range(70*74)] for y in range(chessmenTypesCount)]
# The digits represented by the first 10 images are: 05 00 04 01 09 02 01 03 01 04
def getImagesData():
with open(imagesFile, mode='rb') as file: # b is important -> binary
fileContent = file.read()
# 5180 = 70*74
for nthImage in range(1, chessmenTypesCount + 1):
imageBytes = unpack("5180c", fileContent[16+w*h*(nthImage-1):16+w*h*nthImage])
for i in range(70*74):
xMatrix[nthImage-1][i] = ord(imageBytes[i])
# end for
# end for
return xMatrix
def getLabelsData():
    # NOTE: this function currently mirrors getImagesData() and reads the
    # image file; presumably the label data should be loaded here instead.
with open(imagesFile, mode='rb') as file: # b is important -> binary
fileContent = file.read()
# 5180 = 70*74
for nthImage in range(1, chessmenTypesCount + 1):
imageBytes = unpack("5180c", fileContent[16+w*h*(nthImage-1):16+w*h*nthImage])
for i in range(70*74):
xMatrix[nthImage-1][i] = ord(imageBytes[i])
# end for
# end for
return xMatrix
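# Illustrative sketch following the Stack Overflow link above: reshape one
# grayscale pixel vector from getImagesData() and save it as an image file.
# Pillow (PIL) is an assumed dependency, the row-major h-by-w pixel layout is
# an assumption, and the output path is hypothetical.
def saveNthImageAsPng(nthImage, outputPath='data/image.png'):
    from PIL import Image
    pixels = numpy.array(getImagesData()[nthImage], dtype=numpy.uint8)
    # Assumes the 5180 values are stored as h (=74) rows of w (=70) columns.
    Image.fromarray(pixels.reshape(h, w)).save(outputPath)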
if __name__ == '__main__':
data = getImagesData()
print (data)
| archcra/brt | train/loadTrainData.py | Python | gpl-3.0 | 1,508 |
# Copyright (C) 2010-2011 Mathijs de Bruin <[email protected]>
#
# This file is part of django-shopkit.
#
# django-shopkit is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
logger = logging.getLogger(__name__)
from decimal import Decimal
from django.db import models
from django.utils.translation import ugettext_lazy as _
# Get the currently configured currency field, whatever it is
from shopkit.currency.utils import get_currency_field
PriceField = get_currency_field()
from shopkit.core.basemodels import AbstractPricedItemBase
from shopkit.shipping.settings import ADDRESS_MODEL
class ShippedItemBase(AbstractPricedItemBase):
""" Base class for shippable items. """
class Meta:
abstract = True
def get_shipping_costs(self, **kwargs):
"""
Return the most sensible shipping cost associated with this item.
By default, it returns the total shipping cost as yielded by
`get_total_shipping_costs`.
"""
shipping_costs = self.get_total_shipping_costs(**kwargs)
logger.debug(u'Total shipping costs for %s: %s',
self, shipping_costs)
return shipping_costs
def get_total_shipping_costs(self, **kwargs):
"""
Return the total shipping applicable for this item. Must be
implemented in subclasses.
"""
raise NotImplementedError
def get_price_without_shipping(self, **kwargs):
""" Get the price without shipping costs. """
return super(ShippedItemBase, self).get_price(**kwargs)
def get_price(self, **kwargs):
""" Get the price with shipping costs applied. """
without = self.get_price_without_shipping(**kwargs)
shipping_costs = self.get_shipping_costs(**kwargs)
return without + shipping_costs
class ShippedCartBase(ShippedItemBase):
"""
Mixin class for shopping carts with shipping costs associated with them.
"""
class Meta:
abstract = True
def get_total_shipping_costs(self, **kwargs):
"""
Get the total shipping cost for this `Cart`, summing up the shipping
costs for the whole order and those for individual items (where
applicable).
"""
cost = self.get_order_shipping_costs(**kwargs)
for item in self.get_items():
cost += item.get_shipping_costs(**kwargs)
assert cost < self.get_price(**kwargs), \
'Shipping costs should not be higher than price of Cart.'
return cost
def get_order_shipping_costs(self, **kwargs):
"""
Get the shipping costs for this order. Must be implemented in
subclasses.
"""
raise NotImplementedError
class ShippedCartItemBase(ShippedItemBase):
"""
    Mixin class for `CartItem`'s with a function `get_shipping_costs()`.
"""
class Meta:
abstract = True
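# Illustrative sketch: a minimal cart item built on the mixin above. The flat
# shipping fee is a hypothetical value and the class is kept abstract so this
# sketch does not register a database table; real projects would implement
# `get_total_shipping_costs` with their own pricing logic.
class FlatRateShippedCartItemSketch(ShippedCartItemBase):
    """ Example cart item charging a fixed shipping fee per item. """

    class Meta:
        abstract = True

    def get_total_shipping_costs(self, **kwargs):
        """ Return a flat, hypothetical shipping fee for this item. """
        # A real implementation would derive this from weight, carrier, etc.
        return Decimal('2.50')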
class ShippedOrderBase(ShippedItemBase):
"""
Mixin class for orders with shipping costs associated with them.
"""
class Meta:
abstract = True
order_shipping_costs = PriceField(default=Decimal('0.00'),
verbose_name=_('order shipping costs'))
"""
Shipping costs relating to the whole order and not individual items.
"""
def get_order_shipping_costs(self, **kwargs):
"""
Get the shipping costs for this order.
"""
return self.order_shipping_costs
def get_total_shipping_costs(self, **kwargs):
"""
Get the total shipping cost for this `Cart`, summing up the shipping
costs for the whole order and those for individual items (where
applicable).
"""
costs = self.get_order_shipping_costs()
for item in self.get_items():
item_costs = item.get_shipping_costs()
assert isinstance(item_costs, Decimal)
costs += item_costs
return costs
def update_shipping(self):
""" Update the shipping costs for order and order items. """
# Make sure we call the superclass here
superclass = super(ShippedOrderBase, self)
self.order_shipping_costs = superclass.get_order_shipping_costs()
logger.debug(u'Updating order shipping costs for %s to %s',
self, self.order_shipping_costs)
for item in self.get_items():
item.update_shipping()
if ADDRESS_MODEL:
class AddressedShippedItemBase(models.Model):
class Meta:
abstract = True
shipping_address = models.ForeignKey(ADDRESS_MODEL, null=True, blank=True,
related_name='shippable%(class)s_set')
""" Shipping address for this order"""
else:
logger.warn('ADDRESS_MODEL not defined, not using shipping_address in shipping module.')
class ShippedOrderItemBase(ShippedItemBase):
"""
Mixin class for `OrderItem`'s with shipping costs associated with them.
"""
class Meta:
abstract = True
shipping_costs = PriceField(default=Decimal('0.00'),
verbose_name=_('shipping cost'))
""" Shipping costs for this item. """
def get_shipping_costs(self, **kwargs):
""" Return the shipping costs for this item. """
return self.shipping_costs
def update_shipping(self):
""" Update shipping costs - does *not* save the object. """
# Make sure we call the superclass here
superclass = super(ShippedOrderItemBase, self)
self.shipping_costs = superclass.get_shipping_costs()
logger.debug(u'Updating order shipping costs for %s to %s',
self, self.shipping_costs)
| dokterbob/django-shopkit | shopkit/shipping/basemodels.py | Python | agpl-3.0 | 6,392 |
##@package isinterface
# Instance simulator interface.
#@author Sebastien MATHIEU
import time, datetime, re, sys, threading, queue, subprocess,traceback,os
import asyncio,websockets
import xml.etree.ElementTree as ElementTree
from .job import Job
from .log import log
# Static parameters
## Maximum number of iterations of a simulation.
MAX_ITERATIONS=5
## Numerical tolerance on the convergence.
CONVERGENCE_TOLERANCE=0.005
## Maximum threads.
MAX_THREADS=8
## Jobs to perform
jobs=queue.Queue()
## Computing status
computingLock=threading.Lock()
## List of available threads where each element is a number.
availableThreads=queue.Queue()
## Errors list where each error is a dictionary.
errors=[]
## Lock to print.
printLock=threading.Lock()
## Lock to add an error to the errors list.
errorsLock=threading.Lock()
## Running processes. The key is the thread id and the value is the process.
processList={}
## Simulator path
simulatorPath='./simulator'
# Progression
progression=0
# Computation routine
def compute():
global processList
while True:
# Get a job
job=jobs.get()
with computingLock:
# Computation process
jobThread=threading.Thread(target=computationThread,args=[job])
jobThread.start()
jobThread.join()
# Communicate that the task is finished
jobs.task_done()
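## Illustrative sketch of how the worker loop above could be wired up: start a
# daemon thread running compute() and enqueue a job for an instance. The
# Job(...) call uses a hypothetical constructor signature, inferred only from
# how job.args is read in computationThread() below.
def exampleStartWorker(instancesFolder, instanceHash):
    worker = threading.Thread(target=compute)
    worker.daemon = True
    worker.start()
    job = Job(args=[instancesFolder, instanceHash]) # hypothetical constructor
    jobs.put(job)
    return job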
## Simulate an instance
# @param threadId Id of the thread using this function.
# @param instanceDirectory Directory containing the instances to simulate (hash included).
# @param hash Hash of the instance.
# @param d Day to simulate.
# @param opfMethod Method to perform optimal power flows. Values: None or "linearOpf".
def simulateInstance(threadId,instanceDirectory,hash,d,opfMethod=None):
instanceDayDirectory="%s/%s"%(instanceDirectory,d)
resultFile='%s/result-%s-d%s.zip'%(instanceDayDirectory,hash,d)
if not os.path.isfile(resultFile):
# Prepare the command
cmd= ['python3', 'main.py', '--maxiterations', str(MAX_ITERATIONS), '-f', 'operationFolder-%s' % threadId, '-o',
'../%s' % resultFile, '-t', str(CONVERGENCE_TOLERANCE)]
if opfMethod is not None and opfMethod.lower()=='linearopf':
cmd.append('-l')
cmd.append('../%s'%instanceDayDirectory)
# Launch the simulation
process = subprocess.Popen(cmd, cwd=simulatorPath, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
processList[threadId]=process
log('Command launched with PID %s:\n\t%s'%(process.pid," ".join(cmd)))
returnCode=process.wait()
processList[threadId]=None
stdout, stderr = process.communicate()
if stdout is None:
stdout=""
else:
stdout=stdout.decode('latin-1').encode('ascii','ignore').decode('latin-1').strip()
if stderr is None:
stderr=""
else:
stderr=stderr.decode('latin-1').encode('ascii','ignore').decode('latin-1').strip()
# Check if the instance solved correctly
if returnCode != 0:
cmd=" ".join(cmd)
with errorsLock:
errors.append({'day':d,'cmd':cmd,'returncode':returnCode,'stdout':stdout,'stderr':stderr})
# Write the error to an error file
with open('%s/error-%s-d%s.log'%(instanceDayDirectory,hash,d),'w') as errorFile:
errorFile.write(cmd)
errorFile.write("\n\n")
errorFile.write(stdout)
errorFile.write("\n\n")
errorFile.write(stderr)
with printLock:
log("\terror with\n\t\t%s"%cmd)
# Add itself to the available thread list
availableThreads.task_done()
availableThreads.put(threadId)
## Computation thread of the instance generator.
# @param job Job to be done.
def computationThread(job):
global progression
progression=0
# Clear
global processList
processList={}
global errors
with errorsLock:
errors=[]
try:
job.result="0 0";
job.status=Job.RUNNING
# Get the instance directory
hash=job.args[1]
instancesFolder=job.args[0]
instanceDirectory='%s/%s'%(instancesFolder,hash)
if not os.path.exists(instanceDirectory):
raise Exception("Instance \"%s\" not found."%hash)
log("Instance %s launched" % hash)
# Get the days
xmlFilePath="%s/%s.xml"%(instanceDirectory,hash)
tree=ElementTree.parse(xmlFilePath)
root=tree.getroot()
tag=root.find('days')
days=list(range(int(tag.find('start').text), int(tag.find('end').text)))
# Obtain the opfMethod
opfMethod=None
tag=root.find('opfMethod')
if tag is not None:
opfMethod=tag.text.lower()
# Remove the error file if any exists
errorFile='%s/errors.xml'% instanceDirectory
if os.path.isfile(errorFile):
os.remove(errorFile)
# Ensure lock file are removed
toRemove=[]
for f in os.listdir(simulatorPath):
if f.endswith(".lock"):
                toRemove.append('%s/%s'%(simulatorPath,f))
for f in toRemove:
log("\tRemove %s"%f)
os.remove(f)
# Prepare the list of available threads
with availableThreads.mutex:
availableThreads.queue.clear()
for i in range(MAX_THREADS):
availableThreads.put(i)
# Simulate
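        # Start at -MAX_THREADS: the first MAX_THREADS acquisitions of availableThreads
        # succeed without any simulation having finished yet, so they must not be
        # counted in the progression.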
completedInstancesNumber=-MAX_THREADS
totalInstances=len(days)
for d in days:
if job.status==Job.ABORTED: # TODO abort also the running processes
                break
# Wait for an available thread and start it
threadNumber=availableThreads.get()
thread=threading.Thread(target=simulateInstance,args=[threadNumber,instanceDirectory,hash,d,opfMethod])
thread.daemon=True
thread.start()
# Progression
completedInstancesNumber+=1
if completedInstancesNumber>0:
progression=completedInstancesNumber/totalInstances
job.result="%s"%progression
with printLock:
log("\t%s progression: %.2f%%"%(hash, progression*100.0))
        # Wait for the remaining threads to finish
for i in range(MAX_THREADS):
if job.status==Job.ABORTED:
break
            # Wait for a thread to become available and remove it from the queue
            availableThreads.get()
            # Count the completed instance
completedInstancesNumber+=1
if completedInstancesNumber>0:
progression=completedInstancesNumber/totalInstances
job.result="%s"%progression
with printLock:
log("\t%s progression: %.2f%%"%(hash,progression*100.0))
log('%s completed with %s error(s).'%(hash,len(errors)))
# Generate the global results
if len(errors) == 0:
cmd=['python3','scripts/globalResults.py',hash]
process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
returnCode=process.wait()
# Check for error in the global results generation
if returnCode != 0:
# Obtain outputs
stdout, stderr = process.communicate()
if stdout is None:
stdout=""
else:
stdout=stdout.decode('latin-1').encode('ascii','ignore').decode('latin-1').strip()
if stderr is None:
stderr=""
else:
stderr=stderr.decode('latin-1').encode('ascii','ignore').decode('latin-1').strip()
# Write error
errors.append({'cmd':cmd,'returncode':returnCode,'stdout':stdout,'stderr':stderr})
if stderr != "":
raise Exception(stderr.decode('latin-1').strip())
elif stdout != "":
raise Exception(stdout.decode('latin-1').strip())
else:
raise Exception('Error code %s with command "%s"'%(returnCode," ".join(cmd)))
# Get the status tag of the xml document
statusTag=root.find('status')
if statusTag is None:
statusTag=ElementTree.Element('status')
root.append(statusTag)
# Change the status and manage errors
if len(errors) > 0:
if totalInstances == 1 and len(errors)==1:
                # If there is only one instance, report the full error details
job.result="error instance simulation 1/1 :\n\tcmd:\n%s\n\t\n\tstderr:\n%s\n"%(errors[0]['cmd'],errors[0]['stderr'])
else:
# Display the number of errors
job.result="%s/%s"%(len(errors),totalInstances)
statusTag.text="error instance simulation %s"%job.result
job.status=Job.ERROR
# Write errors in errors.csv
with open(errorFile, 'w') as file:
file.write('<?xml version="1.0" encoding="ISO-8859-1" ?>\n<xml>')
for e in errors:
file.write('<error>\n')
for k,v in e.items():
file.write('\t<%s>%s</%s>\n'%(k,v if v is not None else "",k))
file.write('</error>\n')
file.write('</xml>\n')
else:
statusTag.text="simulated"
job.status=Job.COMPLETED
if job.status==Job.ABORTED:
statusTag.text="aborted"
tree.write(xmlFilePath)
except Exception as e:
job.status=Job.ERROR
job.result=e
log(traceback.format_exc())
finally:
#TODO Clean the mess if aborted
job.finished.release()
# Terminate the processes launched by the computation threads.
def terminateProcesses():
for p in processList.values():
if p is not None:
# Try to terminate
pid = p.pid
p.terminate()
# Force kill if needed.
try:
os.kill(pid, 0)
p.kill()
except OSError:
pass # Terminated correctly
## Interact with the user and with the instance simulator module.
# @param client Client we are interacting with.
# @param message Client's message.
@asyncio.coroutine
def interact(client,message):
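    # Message exchange, as inferred from the code below (illustrative sketch):
    #   client -> request containing the instance hash   server -> "ok waiting"
    #   client -> "ready?"       server -> "ok waiting", "ok running <progression>",
    #                                      "ok instance simulated" or
    #                                      "error instance simulation <detail>"
    #   client -> "run disconnected"                     server -> "ok run disconnected"
    #   client -> "terminate" or disconnection           aborts the running job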
    hash=re.compile(r'"(\w{8})"').search(message).groups()[0]
# Add the simulating status
instanceDirectory='%s/%s'%(client.instancesFolder,hash)
if not os.path.exists(instanceDirectory):
raise Exception("Instance \"%s\" not found."%hash)
xmlFilePath="%s/%s.xml"%(instanceDirectory,hash)
tree=ElementTree.parse(xmlFilePath)
root=tree.getroot()
statusTag=root.find('status')
if statusTag is None:
statusTag=ElementTree.Element('status')
root.append(statusTag)
statusTag.text="simulating"
tree.write(xmlFilePath)
    # Acknowledge the request
yield from client.socket.send("ok waiting")
# Start the instance simulation thread
job=Job([client.instancesFolder,hash])
job.result=jobs.qsize() # Initial position
jobs.put(job)
client.log("Instance simulation "+hash+" added to the jobs.")
    # Poll the client until the job completes
message=""
runDisconnected=False
while not runDisconnected or message is not None:
message = yield from client.socket.recv()
if message is None or message.strip().lower()=="terminate":
# Kill thread if running
if not runDisconnected and (job.status==Job.RUNNING or job.status==Job.WAITING):
client.log("Terminate request")
job.status=Job.ABORTED
terminateProcesses()
break
elif message.strip().lower()=="run disconnected":
            runDisconnected=True
client.log("Run disconnected")
yield from client.socket.send("ok run disconnected")
elif message.strip().lower() == "ready?":
# Check status
if job.finished.locked():
if job.status==Job.RUNNING:
yield from client.socket.send("ok running %s"%job.result)
else:
yield from client.socket.send("ok waiting")
else:
if job.status==Job.COMPLETED:
yield from client.socket.send("ok instance simulated")
else:
yield from client.socket.send("error instance simulation %s"%job.result)
break
else:
yield from client.handleMessage(message)
if runDisconnected:
yield from job.finished.acquire() # Wait for the job to finish
## Send a message with the computing status.
@asyncio.coroutine
def isComputing(client,message):
if computingLock.locked() :
yield from client.socket.send('is computing simulation with progression %s and %s jobs'%(progression,jobs.qsize())) # TODO pile size
else:
yield from client.socket.send('is waiting for simulation')
| sebMathieu/dsima | server/isinterface.py | Python | bsd-3-clause | 11,595 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gcloud.py."""
from unittest import mock
from common import gcloud
from common import new_process
from test_libs import utils as test_utils
INSTANCE_NAME = 'instance-a'
ZONE = 'zone-a'
MACHINE_TYPE = 'my-machine-type'
CONFIG = {
'cloud_compute_zone': ZONE,
'service_account': 'blah',
'runner_machine_type': MACHINE_TYPE
}
def test_create_instance():
"""Tests create_instance creates an instance."""
with test_utils.mock_popen_ctx_mgr(returncode=1) as mocked_popen:
gcloud.create_instance(INSTANCE_NAME, gcloud.InstanceType.DISPATCHER,
CONFIG)
assert mocked_popen.commands == [[
'gcloud',
'compute',
'instances',
'create',
'instance-a',
'--image-family=cos-stable',
'--image-project=cos-cloud',
'--zone=zone-a',
'--scopes=cloud-platform',
'--machine-type=n1-highmem-96',
'--boot-disk-size=4TB',
'--boot-disk-type=pd-ssd',
]]
def _get_expected_create_runner_command(is_preemptible):
command = [
'gcloud',
'compute',
'instances',
'create',
'instance-a',
'--image-family=cos-stable',
'--image-project=cos-cloud',
'--zone=zone-a',
'--scopes=cloud-platform',
'--machine-type=my-machine-type',
'--no-address',
'--boot-disk-size=30GB',
]
if is_preemptible:
command.append('--preemptible')
return command
def test_create_instance_not_preemptible():
"""Tests create_instance doesn't specify preemptible when it isn't supposed
to."""
with test_utils.mock_popen_ctx_mgr(returncode=1) as mocked_popen:
gcloud.create_instance(INSTANCE_NAME, gcloud.InstanceType.RUNNER,
CONFIG)
assert mocked_popen.commands == [
_get_expected_create_runner_command(False)
]
def test_create_instance_preemptible():
"""Tests create_instance doesn't specify preemptible when it isn't supposed
to."""
with test_utils.mock_popen_ctx_mgr(returncode=1) as mocked_popen:
gcloud.create_instance(INSTANCE_NAME,
gcloud.InstanceType.RUNNER,
CONFIG,
preemptible=True)
assert mocked_popen.commands == [
_get_expected_create_runner_command(True)
]
@mock.patch('common.new_process.execute')
def test_create_instance_failed_create(mocked_execute):
"""Tests create_instance creates an instance if it doesn't already
exist."""
mocked_execute.return_value = new_process.ProcessResult(1, '', False)
    # This should not raise an exception.
assert not gcloud.create_instance(INSTANCE_NAME,
gcloud.InstanceType.DISPATCHER, CONFIG)
# Check that the first call is to create the instance.
assert 'create' in mocked_execute.call_args_list[0][0][0]
@mock.patch('common.new_process.execute')
def test_delete_instances_less_than_batch_size(mocked_execute):
"""Test that delete_instances works as intended when instance count is less
than batch size."""
instances = ['instance-%d' % i for i in range(5)]
mocked_execute.return_value = new_process.ProcessResult(0, '', False)
zone = 'us-central1-a'
expected_command = (['gcloud', 'compute', 'instances', 'delete', '-q'] +
instances + ['--zone', zone])
result = gcloud.delete_instances(instances, zone)
assert result
mocked_execute.assert_called_with(expected_command, expect_zero=False)
@mock.patch('common.new_process.execute')
def test_delete_instances_greater_than_batch_size(mocked_execute):
"""Test that delete_instances works as intended when instance count is more
than batch size."""
instances = ['instance-%d' % i for i in range(103)]
mocked_execute.return_value = new_process.ProcessResult(0, '', False)
zone = 'us-central1-a'
result = gcloud.delete_instances(instances, zone)
assert result
expected_command_1 = (['gcloud', 'compute', 'instances', 'delete', '-q'] +
['instance-%d' % i for i in range(100)] +
['--zone', zone])
expected_command_2 = (['gcloud', 'compute', 'instances', 'delete', '-q'] +
['instance-%d' % i for i in range(100, 103)] +
['--zone', zone])
mocked_execute.assert_has_calls([
mock.call(expected_command_1, expect_zero=False),
mock.call(expected_command_2, expect_zero=False)
])
@mock.patch('common.new_process.execute')
def test_delete_instances_fail(mocked_execute):
"""Test that delete_instances returns False when instance deletion fails."""
instances = ['instance-%d' % i for i in range(5)]
mocked_execute.return_value = new_process.ProcessResult(1, 'Error', False)
zone = 'us-central1-a'
expected_command = (['gcloud', 'compute', 'instances', 'delete', '-q'] +
instances + ['--zone', zone])
result = gcloud.delete_instances(instances, zone)
assert not result
mocked_execute.assert_called_with(expected_command, expect_zero=False)
@mock.patch('common.new_process.execute')
def test_create_instance_template(mocked_execute):
"""Tests that create_instance_template uses the correct gcloud command and
returns the correct instance template URL."""
template_name = 'my-template'
docker_image = 'docker_image'
env = {'ENV_VAR': 'value'}
project = 'fuzzbench'
result = gcloud.create_instance_template(template_name, docker_image, env,
project, ZONE)
expected_command = [
'gcloud', 'compute', '--project', project, 'instance-templates',
'create-with-container', template_name, '--no-address',
'--image-family=cos-stable', '--image-project=cos-cloud',
'--region=zone-a', '--scopes=cloud-platform',
'--machine-type=n1-standard-1', '--boot-disk-size=50GB',
'--preemptible', '--container-image', docker_image, '--container-env',
'ENV_VAR=value'
]
mocked_execute.assert_called_with(expected_command)
expected_result = (
'https://www.googleapis.com/compute/v1/projects/{project}'
'/global/instanceTemplates/{name}').format(project=project,
name=template_name)
assert result == expected_result
@mock.patch('common.new_process.execute')
def test_delete_instance_template(mocked_execute):
"""Tests that delete_instance_template uses the correct gcloud command to
delete an instance template."""
template_name = 'my-template'
gcloud.delete_instance_template(template_name)
expected_command = [
'gcloud', 'compute', 'instance-templates', 'delete', template_name
]
mocked_execute.assert_called_with(expected_command)
| google/fuzzbench | common/test_gcloud.py | Python | apache-2.0 | 7,576 |
class ApiVersionMismatchException(RuntimeError):
"""
Represents an error because a webhooks event has an API version that this version of the SDK does not support.
"""
def __init__(self, event_api_version, sdk_api_version):
super(ApiVersionMismatchException, self).__init__(
"event API version" + event_api_version +
" is not compatible with SDK API version" + sdk_api_version)
self.__event_api_version = event_api_version
self.__sdk_api_version = sdk_api_version
@property
def event_api_version(self):
"""
:return: The API version from the webhooks event.
"""
return self.__event_api_version
@property
def sdk_api_version(self):
"""
:return: The API version that this version of the SDK supports.
"""
return self.__sdk_api_version
| Ingenico-ePayments/connect-sdk-python2 | ingenico/connect/sdk/webhooks/api_version_mismatch_exception.py | Python | mit | 879 |
# -*- coding:utf-8 -*-
"""
(c) All rights reserved. ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE, Switzerland, VPSI, 2017
"""
import json
import ast
import locale
from collections import defaultdict, OrderedDict
from datetime import datetime, timedelta
from fpdf import FPDF
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django.db.models import Sum, Q
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.conf import settings
# Custom models
from uniflow.models import BudgettransactionsT
from .models import Section, Semester, SemesterSummary, Student, Transaction, UpdateStatus
##########################
#
# LOCAL FUNCTIONS
#
##########################
def __get_semesters():
return Semester.objects.order_by("end_date").values_list('name', flat=True)
def __get_faculties():
return SemesterSummary.objects.\
order_by("section__faculty__name").\
values_list('section__faculty__name', flat=True).\
distinct()
def __get_sections_by_faculty(faculty):
return SemesterSummary.objects.\
filter(section__faculty__name=faculty).\
order_by("section__acronym").\
values_list('section__acronym', flat=True).\
distinct()
def __get_current_faculty(faculties, post, arg):
return arg if arg else post['faculty'] if 'faculty' in post else faculties[0]
def __get_current_semester(post, arg=""):
if arg:
semester = arg
elif 'semester' in post:
semester = post['semester']
else:
now = datetime.now()
semesters = Semester.objects.\
filter(end_date__gt=now).\
order_by("end_date").\
values_list('name', flat=True)
semester = semesters[0]
return semester
def __get_number_of_students(semester, faculty="", section=""):
number_of_students = SemesterSummary.objects.filter(semester__name=semester)
if faculty:
number_of_students = number_of_students.filter(section__faculty__name=faculty)
if section:
number_of_students = number_of_students.filter(section__acronym=section)
return number_of_students.values('student').distinct().count()
def __set_pagination(objects, items_per_page, page):
paginator = Paginator(objects, items_per_page)
try:
pagin = paginator.page(page)
except PageNotAnInteger:
pagin = paginator.page(1)
except EmptyPage:
pagin = paginator.page(paginator.num_pages)
index = pagin.number - 1
max_index = len(paginator.page_range)
start_index = index - 3 if index >= 3 else 0
end_index = index + 3 if index <= max_index - 3 else max_index
page_range = list(paginator.page_range)[start_index:end_index]
return {'objects': pagin, 'page_range': page_range}
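# Split ``charges`` across the cumulative allowance tiers listed in ``floors``
# (pairs of [<faculty>:<section>, cumulative allowance]), starting from the amount
# already consumed. Each tier receives the part of ``charges`` that falls between
# its lower and upper bounds; any excess is carried over to the next tier.
# Returns a dict mapping <faculty>:<section> to its share of ``charges``.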
def __get_floored_faculties_allowance(floors, amount, charges):
result = defaultdict(float)
prev_floor = 0
for floor in floors:
if prev_floor <= amount <= floor[1]:
new_amount = min(floor[1], amount + charges)
if new_amount != amount:
result[floor[0]] += new_amount - amount
charges = max(0, amount + charges - floor[1])
amount = new_amount
prev_floor = floor[1]
return result
def __compute(dict):
return min(0, dict['vpsi'] + dict['added'] + dict['spent'] - dict['amount'])
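# Sum the parts of the stored ``billing_faculty`` dictionaries (saved as ``repr``
# strings on each SemesterSummary) that belong to the given faculty and, optionally,
# section, for the given semester.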
def __compute_bill(semester, faculty, section=""):
fac_sect = faculty + ":" + section
billing_faculty = SemesterSummary.objects.\
filter(semester__name=semester).\
filter(billing_faculty__contains=fac_sect).\
values_list('billing_faculty', flat=True)
sum_bill = 0.0
for bill in billing_faculty:
bill_dict = ast.literal_eval(bill)
for key, value in bill_dict.items():
if fac_sect in key:
sum_bill += value
return sum_bill
def __create_PDF(faculty, date_start, date_end, data, total):
title = u"Facturation myPrint étudiants " + faculty
subtitle1 = "Consommation des rallonges facultaires"
subtitle2 = u"période du " + date_start + " au " + date_end
class PDF(FPDF):
def __init__(self):
FPDF.__init__(self)
self.add_font('DejaVu-Bold', '', 'DejaVuSansCondensed-Bold.ttf', uni=True)
self.add_font('DejaVu', '', 'DejaVuSansCondensed.ttf', uni=True)
self.set_margins(20, 20, 20)
def header(self):
# Logo
# self.image('logo_pb.png', 10, 8, 33)
# title
self.set_font('DejaVu-Bold', '', 15)
w = self.get_string_width(title) + 6
self.set_x((210 - w) / 2)
self.cell(w, 9, title, 0, 0, 'C', 0)
self.ln(15)
# subtitle1
self.set_font('DejaVu', '', 13)
w = self.get_string_width(subtitle1) + 6
self.set_x((210 - w) / 2)
self.set_text_color(0, 128, 254)
self.cell(w, 9, subtitle1, 0, 0, 'C')
self.ln(7)
# subtitle2
w = self.get_string_width(subtitle2) + 6
self.set_x((210 - w) / 2)
self.set_text_color(0, 128, 254)
self.cell(w, 9, subtitle2, 0, 0, 'C')
self.ln(15)
# line
self.set_draw_color(0, 128, 254)
self.set_fill_color(0, 128, 254)
self.cell(0, 1, "", 1, 0, "", 1)
self.ln(15)
# Page footer
def footer(self):
pdf.set_font('DejaVu', '', 12)
pdf.set_text_color(0, 0, 0)
# Position at 1.5 cm from bottom
self.set_y(-15)
# Arial italic 8
self.set_font('Arial', 'I', 8)
# Page number
self.cell(0, 10, 'Page ' + str(self.page_no()) + '/{nb}', 0, 0, 'C')
    section_margin = 40
semester_margin = 90
amount_margin = 40
pdf = PDF()
pdf.alias_nb_pages()
pdf.add_page()
pdf.set_font('DejaVu-Bold', '', 12)
pdf.cell(section_margin, 0, "Section", 0, 0)
pdf.cell(semester_margin, 0, "Semestre", 0, 0)
pdf.cell(amount_margin, 0, "Consommation", 0, 1, 'R')
pdf.ln(10)
pdf.set_font('DejaVu', '', 12)
section = data[0]['section']
for datum in data:
if datum['section'] != section:
section = datum['section']
pdf.ln(15)
else:
pdf.ln(7)
pdf.cell(section_margin, 0, datum['section'], 0, 0)
pdf.cell(semester_margin, 0, datum['semester'], 0, 0)
pdf.cell(amount_margin, 0, locale.format('%.2f', datum['amount'], True), 0, 1, 'R')
pdf.ln(20)
pdf.set_font('DejaVu-Bold', '', 12)
pdf.cell(section_margin, 0, "Total", 0, 0)
pdf.cell(semester_margin, 0, "", 0, 0)
pdf.set_text_color(254, 0, 0)
pdf.cell(amount_margin, 0, locale.format('%.2f', total, True), 0, 1, 'R')
pdf.output(settings.MEDIA_ROOT + 'pdf/Facturation-' + faculty + '.pdf', 'F')
##########################
#
# FUNCTIONS FROM VIEWS
#
##########################
def compute(request, semester=""):
# Semesters must be ordered to compute billing historically
semesters = __get_semesters()
students = Student.objects.all()
if semester:
students = students.filter(semestersummary__semester__name=semester)
for student in students:
comp_dict = defaultdict(float)
floored_faculty_allowance = []
for t_semester in semesters:
semesters_datas = SemesterSummary.objects.\
filter(semester__name=t_semester).\
filter(student=student).\
order_by("-myprint_allowance", "-faculty_allowance")
for semesters_data in semesters_datas:
comp_dict['vpsi'] += semesters_data.myprint_allowance
comp_dict['faculty'] += semesters_data.faculty_allowance
comp_dict['added'] += semesters_data.total_charged
comp_dict['spent'] += semesters_data.total_spent
total_billing_faculties = __compute(comp_dict)
section = Section.objects.get(id=semesters_data.section_id)
floored_faculty_allowance.append([section.faculty.name + ":" + section.acronym, comp_dict['faculty']])
faculties_billing = __get_floored_faculties_allowance(
floored_faculty_allowance,
-comp_dict['amount'],
-total_billing_faculties
)
if not semester or t_semester == semester:
semesters_data.billing_faculty = repr(dict(faculties_billing))
semesters_data.save()
comp_dict['billing_faculty'] = -sum(faculties_billing.values())
comp_dict['amount'] += comp_dict['billing_faculty']
if not semester or t_semester == semester:
comp_dict['remain'] = comp_dict['vpsi'] + comp_dict['faculty'] + comp_dict['added'] + comp_dict['spent']
for semesters_data in semesters_datas:
semesters_data.remain = comp_dict['remain']
semesters_data.save()
if semester and t_semester == semester:
break
return HttpResponseRedirect(reverse('homepage'))
##########################
#
# VIEWS FUNCTIONS
#
##########################
def homepage(request):
locale.setlocale(locale.LC_NUMERIC, 'fr_CH.utf8')
semesters = __get_semesters()
current_semester = __get_current_semester(request.POST)
faculties = __get_faculties()
billing = dict()
sum_billing = 0.0
for faculty in faculties:
billing[faculty] = __compute_bill(semester=current_semester, faculty=faculty)
sum_billing += billing[faculty]
number_of_students = __get_number_of_students(semester=current_semester)
last_update = UpdateStatus.objects.latest(field_name="update_date")
return render(
request,
'bill2myprint/homepage.html',
{
'is_homepage': True,
'current_semester': current_semester,
'semesters': semesters,
'faculties': OrderedDict(sorted(billing.items())),
'sum_billing': sum_billing,
'last_update': last_update,
'number_of_students': number_of_students,
}
)
def faculties(request, faculty="", semester=""):
semesters = __get_semesters()
faculties = __get_faculties()
current_semester = __get_current_semester(post=request.POST, arg=semester)
current_faculty = __get_current_faculty(faculties=faculties, post=request.POST, arg=faculty)
sections = __get_sections_by_faculty(current_faculty)
if 'faculty' in request.POST:
kwargs = {'faculty': current_faculty, 'semester': current_semester}
return HttpResponseRedirect(reverse('faculties', kwargs=kwargs))
semesters_data = SemesterSummary.objects.filter(semester__name=current_semester)
sections_data = []
for section in sections:
section_data = semesters_data.filter(section__acronym=section)
dict = defaultdict(float)
dict['section'] = section
if section_data:
dict['vpsi'] = section_data.aggregate(Sum('myprint_allowance'))['myprint_allowance__sum']
dict['faculty'] = section_data.aggregate(Sum('faculty_allowance'))['faculty_allowance__sum']
dict['added'] = section_data.aggregate(Sum('total_charged'))['total_charged__sum']
dict['spent'] = section_data.aggregate(Sum('total_spent'))['total_spent__sum']
dict['amount'] = __compute_bill(semester=current_semester, faculty=current_faculty, section=section)
sections_data.append(dict)
number_of_students = __get_number_of_students(semester=current_semester, faculty=current_faculty)
return render(
request,
'bill2myprint/faculties.html',
{
'is_faculties': True,
'faculties': faculties,
'sections': sections,
'semesters': semesters,
'current_faculty': current_faculty,
'current_semester': current_semester,
'number_of_students': number_of_students,
'sections_data': sections_data,
}
)
def sections(request, faculty="", section="", semester=""):
semesters = __get_semesters()
faculties = __get_faculties()
current_semester = __get_current_semester(post=request.POST, arg=semester)
current_faculty = __get_current_faculty(faculties=faculties, post=request.POST, arg=faculty)
sections = __get_sections_by_faculty(current_faculty)
current_section = sections[0]
if section:
current_section = section
elif ('section' in request.POST) and (request.POST['section'] in sections):
current_section = request.POST['section']
if 'faculty' in request.POST:
faculty = request.POST['faculty']
kwargs = {'faculty': faculty, 'section': current_section, 'semester': current_semester}
return HttpResponseRedirect(reverse('sections', kwargs=kwargs))
students = SemesterSummary.objects.\
filter(semester__name=current_semester).\
filter(section__acronym=current_section).\
order_by("student__sciper").\
values('student__sciper',
'myprint_allowance',
'faculty_allowance',
'total_charged',
'total_spent',
'remain',
'billing_faculty')
pagination = __set_pagination(students, 50, request.GET.get('page'))
return render(
request,
'bill2myprint/sections.html',
{
'is_sections': True,
'faculties': faculties,
'sections': sections,
'semesters': semesters,
'current_faculty': current_faculty,
'current_section': current_section,
'current_semester': current_semester,
'number_of_students': len(students),
'students': pagination['objects'],
'page_range': pagination['page_range'],
}
)
def students(request, sciper=""):
if 'student' in request.POST:
if request.POST['student'].isdigit():
student_sciper = request.POST['student']
return HttpResponseRedirect(reverse('students', kwargs={'sciper': student_sciper}))
else:
student_sciper = None
student_name = request.POST['student']
elif sciper:
student_sciper = sciper
student_name = None
else:
student_sciper = None
student_name = None
if student_sciper:
try:
student = Student.objects.get(sciper=student_sciper)
except ObjectDoesNotExist:
student = None
transactions = Transaction.objects.filter(student__sciper=student_sciper)
elif student_name:
try:
student = Student.objects.get(name=student_name)
return HttpResponseRedirect(reverse('students', kwargs={'sciper': student.sciper}))
except ObjectDoesNotExist:
student = None
transactions = Transaction.objects.filter(student__name=student_name)
else:
student = None
transactions = []
if transactions:
cumulated = list(transactions.values('transaction_type').annotate(Sum('amount')))
transactions = transactions.order_by("-transaction_date")
else:
cumulated = None
t = defaultdict(float)
if cumulated:
for cumulus in cumulated:
if cumulus['transaction_type'] == 'MYPRINT_ALLOWANCE':
t['vpsi'] = cumulus['amount__sum']
if cumulus['transaction_type'] == 'FACULTY_ALLOWANCE':
t['faculty'] = cumulus['amount__sum']
if cumulus['transaction_type'] == 'ACCOUNT_CHARGING':
t['added'] = cumulus['amount__sum']
if cumulus['transaction_type'] == 'PRINT_JOB':
t['spent'] = t['spent'] + cumulus['amount__sum']
if cumulus['transaction_type'] == 'REFUND':
t['spent'] = t['spent'] + cumulus['amount__sum']
t['credit'] = t['vpsi'] + t['faculty'] + t['added'] + t['spent']
pagination = __set_pagination(transactions, 50, request.GET.get('page'))
return render(
request,
'bill2myprint/students.html',
{
'is_students': True,
'student': student,
'transactions': pagination['objects'],
'page_range': pagination['page_range'],
'cumulated': t,
}
)
def faculty_extension(request):
transactions = BudgettransactionsT.objects.\
filter(transactiondata__startswith='Rallonge').\
distinct().\
values_list('transactiondata')
faculties = __get_faculties()
faculties_extensions = []
for faculty in faculties:
sections = __get_sections_by_faculty(faculty)
for section in sections:
for transaction in transactions:
if len(transaction) == 0:
continue
keys = transaction[0].split(" ")
dict = {}
found = False
for key in keys:
if "_StudU" in key:
if key.startswith(section):
dict["faculty"] = faculty
dict["section"] = section
dict["entity"] = key.replace("_StudU", "")[len(section):]
dict["whole_entity"] = key
found = True
elif found and key.isdigit():
dict["amount"] = float(key)
if found:
faculties_extensions.append(dict)
faculties_extensions = sorted(faculties_extensions, key=lambda k: (k['faculty'], k['whole_entity']))
return render(
request,
'bill2myprint/faculty_extension.html',
{
'is_miscellaneous': True,
'faculties_extensions': faculties_extensions,
}
)
def status(request):
status_table = UpdateStatus.objects.order_by("-update_date")
return render(
request,
'bill2myprint/status.html',
{
'is_miscellaneous': True,
'status_table': status_table,
}
)
def student_billing(request):
message = ""
transactions = []
student = None
if "student" in request.POST:
sciper = request.POST['student']
students = Student.objects.filter(sciper=sciper)
if len(students) > 0:
student = students[0]
semesters = __get_semesters()
comp_dict = defaultdict(float)
floored_faculty_allowance = []
for semester in semesters:
semesters_datas = SemesterSummary.objects.\
filter(semester__name=semester).\
filter(student=student).\
order_by("-myprint_allowance", "-faculty_allowance").\
values()
for semesters_data in semesters_datas:
comp_dict['vpsi'] += semesters_data['myprint_allowance']
comp_dict['faculty'] += semesters_data['faculty_allowance']
comp_dict['added'] += semesters_data['total_charged']
comp_dict['spent'] += semesters_data['total_spent']
comp_dict['remain'] = semesters_data['myprint_allowance'] +\
semesters_data['faculty_allowance'] +\
semesters_data['total_charged'] +\
semesters_data['total_spent']
comp_dict['billing_faculty'] = __compute(comp_dict)
section = Section.objects.get(id=semesters_data['section_id'])
floored_faculty_allowance.append(
[section.faculty.name + ":" + section.acronym, comp_dict['faculty']]
)
facs_billing = __get_floored_faculties_allowance(
floored_faculty_allowance,
-comp_dict['amount'],
-comp_dict['billing_faculty']
)
comp_dict['billing_faculty'] = -sum(facs_billing.values())
comp_dict['amount'] += comp_dict['billing_faculty']
comp_dict['cum_remain'] += comp_dict['remain']
trans_dict = dict()
trans_dict['semester'] = semester
trans_dict['faculty_name'] = section.faculty.name
trans_dict['facs_billing'] = dict(facs_billing)
trans_dict['vpsi'] = semesters_data['myprint_allowance']
trans_dict['faculty'] = semesters_data['faculty_allowance']
trans_dict['added'] = semesters_data['total_charged']
trans_dict['spent'] = semesters_data['total_spent']
trans_dict['remain'] = comp_dict['remain']
trans_dict['cum_vpsi'] = comp_dict['vpsi']
trans_dict['cum_faculty'] = comp_dict['faculty']
trans_dict['cum_added'] = comp_dict['added']
trans_dict['cum_spent'] = comp_dict['spent']
trans_dict['cum_amount'] = comp_dict['amount']
trans_dict['cum_remain'] = comp_dict['cum_remain']
trans_dict['billing'] = comp_dict['billing_faculty']
transactions.append(trans_dict)
else:
message = "Numéro sciper invalide"
return render(
request,
'bill2myprint/student_billing.html',
{
'is_miscellaneous': True,
'student': student,
'transactions': transactions,
'message': message,
}
)
def download_faculty(request, faculty="", semester=""):
all_semesters = list(__get_semesters())
sections = __get_sections_by_faculty(faculty)
if 'semesters' in request.POST:
semesters_temp = request.POST.getlist('semesters')
else:
semesters_temp = []
semesters_temp.append(semester)
semesters = []
for sem in all_semesters:
if sem in semesters_temp:
semesters.append(sem)
date_start = datetime.strptime('01019999', '%d%m%Y').date()
date_end = datetime.strptime('01010001', '%d%m%Y').date()
for sem in semesters:
curr_sem = Semester.objects.get(name=sem)
if date_end < curr_sem.end_date.date():
date_end = curr_sem.end_date.date()
index = all_semesters.index(sem)
if index > 0:
curr_sem = Semester.objects.get(name=all_semesters[index-1])
if date_start > curr_sem.end_date.date():
date_start = curr_sem.end_date.date()
else:
date_start = datetime.strptime('15092008', '%d%m%Y').date()
date_start += timedelta(days=1)
total = 0.0
data = []
for section in sections:
for sem in semesters:
dict = {}
dict['section'] = section
dict['semester'] = sem
dict['amount'] = __compute_bill(semester=sem, faculty=faculty, section=section)
total += dict['amount']
data.append(dict)
__create_PDF(
faculty=faculty,
date_start=date_start.strftime("%d.%m.%y"),
date_end=date_end.strftime("%d.%m.%y"),
data=data,
total=total
)
return render(
request,
'bill2myprint/bill.html',
{
'all_semesters': all_semesters,
'semesters': semesters,
'faculty': faculty,
'date_start': date_start,
'date_end': date_end,
'data': data,
'total': total,
'semesters_length': str(len(semesters))
}
)
##########################
#
# AJAX FUNCTIONS
#
##########################
def sciper_list(request):
pattern = request.GET.get('term', None)
if pattern.isdigit():
students = Student.objects.\
filter(sciper__icontains=pattern). \
extra(select={'student': 'sciper'})
else:
students = Student.objects.\
filter(Q(**{"name__istartswith": pattern}) | Q(**{"name__icontains": ' ' + pattern})).\
extra(select={'student': 'name'})
return HttpResponse(json.dumps(list(students.order_by('student').values('student'))))
| epfl-sdf/bill2myprint | src/bill2myprint/views.py | Python | mit | 24,715 |
__author__ = 'dimd'
from NetCatKS.Components import IXMLResourceAPI, IXMLResource
from zope.interface import implementer
from zope.component import adapts
@implementer(IXMLResourceAPI)
class Convert(object):
adapts(IXMLResource)
def __init__(self, factory):
self.factory = factory
def process_factory(self):
self.factory.convert.id = 42
return self.factory
| dimddev/NetCatKS | examples/components/adapters/time/xml/__init__.py | Python | bsd-2-clause | 401 |
import machine, time, micropython
from machine import Timer
TIMEOUT_MS = 5000 #soft-reset will happen around 5 sec
class TimeoutException(Exception):
pass
def timeout_callback(t):
micropython.schedule_exc(TimeoutException())
def trial_function():
cnt = 0
while True:
print("%d..." % cnt)
time.sleep(1.0)
cnt += 1
try:
timer = Timer(0)
timer.init(period=TIMEOUT_MS, mode=Timer.ONE_SHOT, callback=timeout_callback)
trial_function()
timer.deinit()
except TimeoutException:
print("Function was forced to exit!")
except Exception as exc:
print("Caught exc: %s" % exc)
| open-eio/upython-poly | timeout.py | Python | mit | 642 |
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Boolean
from sqlalchemy.sql.schema import ForeignKey
from sqlalchemy.orm import relationship
from .serializable import Serializable
Base = declarative_base()
class Developer(Serializable, Base):
__tablename__ = 'developer'
id = Column(Integer, primary_key=True)
first_name = Column(String(50), nullable=False)
last_name = Column(String(50), nullable=False)
phone_number = Column(String(20), nullable=False)
experience = Column(String(255), nullable=False)
skills = Column(String(255), nullable=False)
email = Column(String(100), nullable=False)
enabled = Column(Boolean, nullable=False, default=True)
assignments = relationship('Assignment')
class Assignment(Serializable, Base):
__tablename__ = 'assignment'
fk_dev = Column(Integer, ForeignKey('developer.id'), primary_key=True)
fk_proj = Column(Integer, ForeignKey('project.id'), primary_key=True)
hours_worked = Column(Integer, nullable=False)
developer = relationship('Developer')
project = relationship('Project')
class Project(Serializable, Base):
__tablename__ = 'project'
id = Column(Integer, primary_key=True)
name = Column(String(50), nullable=False)
description = Column(String(255), nullable=False)
estimated_hours = Column(Integer, nullable=False)
enabled = Column(Boolean, nullable=False, default=True)
assignments = relationship(
'Assignment',
cascade="save-update, merge, delete, delete-orphan"
)
| GOFCROW/project_manager_server | server/data_layer/models.py | Python | isc | 1,590 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
# Copyright 2011 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for Instances and Volumes.
"""
import logging
from django.contrib import messages
from django.utils.translation import ugettext as _
from novaclient import exceptions as novaclient_exceptions
from horizon import api
from horizon import tables
from .keypairs.tables import KeypairsTable
from .floating_ips.tables import FloatingIPsTable
from .security_groups.tables import SecurityGroupsTable
LOG = logging.getLogger(__name__)
class IndexView(tables.MultiTableView):
table_classes = (KeypairsTable, SecurityGroupsTable, FloatingIPsTable)
template_name = 'nova/access_and_security/index.html'
def get_keypairs_data(self):
try:
keypairs = api.nova.keypair_list(self.request)
except Exception, e:
keypairs = []
LOG.exception("Exception in keypair index")
messages.error(self.request,
_('Keypair list is currently unavailable.'))
return keypairs
def get_security_groups_data(self):
try:
security_groups = api.security_group_list(self.request)
except novaclient_exceptions.ClientException, e:
security_groups = []
LOG.exception("ClientException in security_groups index")
messages.error(self.request,
_('Error fetching security_groups: %s') % e)
return security_groups
def get_floating_ips_data(self):
try:
floating_ips = api.tenant_floating_ip_list(self.request)
except novaclient_exceptions.ClientException, e:
floating_ips = []
LOG.exception("ClientException in floating ip index")
messages.error(self.request,
_('Error fetching floating ips: %s') % e)
return floating_ips
| citrix-openstack/horizon | horizon/horizon/dashboards/nova/access_and_security/views.py | Python | apache-2.0 | 2,643 |
from torch.autograd import Function
import torch
from torch.nn.modules.utils import _pair
from pyinn.utils import Dtype, Stream, load_kernel
CUDA_NUM_THREADS = 1024
def GET_BLOCKS(N):
return (N + CUDA_NUM_THREADS - 1) // CUDA_NUM_THREADS
_im2col_kernel = '''
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// Kernel for fast unfold+copy
// (borrowed from Caffe: https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu)
extern "C"
__global__ void im2col_kernel(const ${Dtype}* data_im, ${Dtype}* data_col) {
CUDA_KERNEL_LOOP(index, ${n}) {
int w_out = index % ${width_col};
index /= ${width_col};
int h_out = index % ${height_col};
int channel_in = index / ${height_col};
int channel_out = channel_in * ${ksize_h} * ${ksize_w};
int h_in = h_out * ${stride_h} - ${pad_h};
int w_in = w_out * ${stride_w} - ${pad_w};
data_col += (channel_out * ${height_col} + h_out) * ${width_col} + w_out;
data_im += (channel_in * ${height} + h_in) * ${width} + w_in;
#pragma unroll
for (int i = 0; i < ${ksize_h}; ++i) {
for (int j = 0; j < ${ksize_w}; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col = (h >= 0 && w >= 0 && h < ${height} && w < ${width}) ?
data_im[i * ${width} + j] : 0;
data_col += ${height_col} * ${width_col};
}
}
}
}
'''
_col2im_kernel = '''
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
extern "C"
__global__ void col2im_kernel(const ${Dtype}* data_col, ${Dtype}* data_im) {
CUDA_KERNEL_LOOP(index, ${n}) {
${Dtype} val = 0;
int w = index % ${width} + ${pad_w};
int h = (index / ${width}) % ${height} + ${pad_h};
int c = index / (${width} * ${height});
// compute the start and end of the output
int w_col_start = (w < ${ksize_w}) ? 0 : (w - ${ksize_w}) / ${stride_w} + 1;
int w_col_end = min(w / ${stride_w} + 1, ${width_col});
int h_col_start = (h < ${ksize_h}) ? 0 : (h - ${ksize_h}) / ${stride_h} + 1;
int h_col_end = min(h / ${stride_h} + 1, ${height_col});
// equivalent implementation
int offset = (c * ${ksize_h} * ${ksize_w} + h * ${ksize_w} + w) * ${height_col} * ${width_col};
int coeff_h_col = (1 - ${stride_h} * ${ksize_w} * ${height_col}) * ${width_col};
int coeff_w_col = (1 - ${stride_w} * ${height_col} * ${width_col});
#pragma unroll
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
data_im[index] = val;
}
}
'''
def im2col_shape(size, kernel_size, stride, padding):
ksize_h, ksize_w = _pair(kernel_size)
stride_h, stride_w = _pair(stride)
pad_h, pad_w = _pair(padding)
n_input_plane, height, width = size
height_col = (height + 2 * pad_h - ksize_h) // stride_h + 1
width_col = (width + 2 * pad_w - ksize_w) // stride_w + 1
return n_input_plane, ksize_h, ksize_w, height_col, width_col
def _im2col(data, kernel_size, stride, padding, out=None):
assert data.dim() == 3 and data.is_cuda
ksize_h, ksize_w = _pair(kernel_size)
stride_h, stride_w = _pair(stride)
pad_h, pad_w = _pair(padding)
n_input_plane, height, width = data.size()
height_col = (height + 2 * pad_h - ksize_h) // stride_h + 1
width_col = (width + 2 * pad_w - ksize_w) // stride_w + 1
n = n_input_plane * height_col * width_col
shape = torch.Size((n_input_plane, ksize_h, ksize_w, height_col, width_col))
if out is not None:
assert out.size() == shape
data_col = out
else:
data_col = data.new(*shape)
with torch.cuda.device_of(data):
f = load_kernel('im2col_kernel', _im2col_kernel, Dtype=Dtype(data), n=n,
height_col=height_col,
width_col=width_col,
height=height, width=width,
ksize_h=ksize_h, ksize_w=ksize_w,
pad_h=pad_h, pad_w=pad_w,
stride_h=stride_h, stride_w=stride_w,
channels=n_input_plane)
f(block=(CUDA_NUM_THREADS,1,1),
grid=(GET_BLOCKS(n),1,1),
args=[data.data_ptr(), data_col.data_ptr()],
stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))
return data_col
col2im_modules = {}
def col2im_shape(size, kernel_size, stride, padding, input_size=None):
ksize_h, ksize_w = _pair(kernel_size)
stride_h, stride_w = _pair(stride)
pad_h, pad_w = _pair(padding)
n_input_plane, ksize_h, ksize_w, height_col, width_col = size
if input_size is not None:
height, width = input_size
else:
height = (height_col - 1) * stride_h - 2 * pad_h + ksize_h
width = (width_col - 1) * stride_w - 2 * pad_w + ksize_w
return n_input_plane, height, width
def _col2im(data_col, kernel_size, stride, padding, out=None, input_size=None):
assert data_col.dim() == 5
ksize_h, ksize_w = _pair(kernel_size)
stride_h, stride_w = _pair(stride)
pad_h, pad_w = _pair(padding)
n_input_plane, ksize_h, ksize_w, height_col, width_col = data_col.size()
if input_size is not None:
height, width = input_size
else:
height = (height_col - 1) * stride_h - 2 * pad_h + ksize_h
width = (width_col - 1) * stride_w - 2 * pad_w + ksize_w
n = n_input_plane * height * width
if out is not None:
assert tuple(out.size()) == (n_input_plane, height, width)
data = out
else:
data = data_col.new(n_input_plane, height, width)
with torch.cuda.device_of(data_col):
f = load_kernel('col2im_kernel', _col2im_kernel, Dtype=Dtype(data), n=n,
height_col=height_col,
width_col=width_col,
height=height, width=width,
ksize_h=ksize_h, ksize_w=ksize_w,
pad_h=pad_h, pad_w=pad_w,
stride_h=stride_h, stride_w=stride_w,
channels=n_input_plane)
f(block=(CUDA_NUM_THREADS,1,1),
grid=(GET_BLOCKS(n),1,1),
args=[data_col.data_ptr(), data.data_ptr()],
stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))
return data
def im2col_batch(input, kernel_size, stride, padding):
if input.dim() == 3:
return _im2col(input, kernel_size, stride, padding)
elif input.dim() == 4:
shape = (input.size(0),) + im2col_shape(input.size()[1:], kernel_size, stride, padding)
out = input.new(*shape)
for x, o in zip(input, out):
_im2col(x, kernel_size, stride, padding, out=o)
return out
def col2im_batch(grad_output, kernel_size, stride, padding, input_size=None):
if grad_output.dim() == 5:
return _col2im(grad_output, kernel_size, stride, padding, out=None, input_size=input_size)
elif grad_output.dim() == 6:
shape = (grad_output.size(0),) + col2im_shape(grad_output.size()[1:], kernel_size, stride, padding, input_size)
grad_input = grad_output.new(*shape)
for go, gx in zip(grad_output, grad_input):
_col2im(go, kernel_size, stride, padding, out=gx, input_size=input_size)
return grad_input
class Im2Col(Function):
def __init__(self, kernel_size, stride, padding):
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
def forward(self, input):
assert(input.is_contiguous())
self.input_size = input.size()[-2:]
return im2col_batch(input, self.kernel_size, self.stride, self.padding)
def backward(self, grad_output):
if not grad_output.is_contiguous():
grad_output = grad_output.contiguous()
assert(grad_output.is_contiguous())
return col2im_batch(grad_output, self.kernel_size, self.stride, self.padding, self.input_size)
class Col2Im(Function):
def __init__(self, kernel_size, stride, padding, input_size=None):
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.input_size = input_size
def forward(self, input):
assert(input.is_contiguous())
return col2im_batch(input, self.kernel_size, self.stride, self.padding, self.input_size)
def backward(self, grad_output):
if not grad_output.is_contiguous():
grad_output = grad_output.contiguous()
assert(grad_output.is_contiguous())
return im2col_batch(grad_output, self.kernel_size, self.stride, self.padding)
def im2col(input, kernel_size, stride, padding):
"""Rearrange image blocks into columns
The representation is used in GEMM-based convolution.
Output is 5D (or 6D in case of minibatch) tensor.
Minibatch implementation is inefficient, and could be done in a single CUDA kernel.
TODO: add CPU version (via numpy?)
"""
return Im2Col(kernel_size, stride, padding)(input)
def col2im(input, kernel_size, stride, padding):
"""Converts columns back to NCHW format.
This is used in backward wrt inputs in GEMM-based convolution.
"""
return Col2Im(kernel_size, stride, padding)(input)
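if __name__ == '__main__':
    # Illustrative shape check (not part of the original module); assumes a CUDA
    # device and the legacy torch.autograd.Function API targeted by this file.
    if torch.cuda.is_available():
        from torch.autograd import Variable
        x = Variable(torch.randn(3, 8, 8).cuda())
        cols = im2col(x, kernel_size=3, stride=1, padding=1)
        print(cols.size())  # (C, k_h, k_w, H_out, W_out) == (3, 3, 3, 8, 8) here
        y = col2im(cols, kernel_size=3, stride=1, padding=1)
        print(y.size())  # back to (3, 8, 8)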
| szagoruyko/pyinn | pyinn/im2col.py | Python | mit | 9,567 |
#!/usr/bin/env python
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Bare-Metal Deployment Service."""
import os
import sys
import threading
import time
import cgi
import Queue
import re
import socket
import stat
from wsgiref import simple_server
from nova import config
from nova import context as nova_context
from nova import exception
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
QUEUE = Queue.Queue()
LOG = logging.getLogger(__name__)
# All functions are called from deploy() directly or indirectly.
# They are split for stub-out.
def discovery(portal_address, portal_port):
"""Do iSCSI discovery on portal."""
utils.execute('iscsiadm',
'-m', 'discovery',
'-t', 'st',
'-p', '%s:%s' % (portal_address, portal_port),
run_as_root=True,
check_exit_code=[0])
def login_iscsi(portal_address, portal_port, target_iqn):
"""Login to an iSCSI target."""
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (portal_address, portal_port),
'-T', target_iqn,
'--login',
run_as_root=True,
check_exit_code=[0])
    # Give the login time to complete
time.sleep(3)
def logout_iscsi(portal_address, portal_port, target_iqn):
"""Logout from an iSCSI target."""
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (portal_address, portal_port),
'-T', target_iqn,
'--logout',
run_as_root=True,
check_exit_code=[0])
def make_partitions(dev, root_mb, swap_mb):
"""Create partitions for root and swap on a disk device."""
# Lead in with 1MB to allow room for the partition table itself, otherwise
# the way sfdisk adjusts doesn't shift the partition up to compensate, and
# we lose the space.
# http://bazaar.launchpad.net/~ubuntu-branches/ubuntu/raring/util-linux/
# raring/view/head:/fdisk/sfdisk.c#L1940
stdin_command = ('1 %d 83;\n- %d 82;\n0 0;\n0 0;\n' % (root_mb, swap_mb))
utils.execute('sfdisk', '-uM', dev, process_input=stdin_command,
run_as_root=True,
check_exit_code=[0])
# avoid "device is busy"
time.sleep(3)
def is_block_device(dev):
"""Check whether a device is block or not."""
s = os.stat(dev)
return stat.S_ISBLK(s.st_mode)
def dd(src, dst):
"""Execute dd from src to dst."""
utils.execute('dd',
'if=%s' % src,
'of=%s' % dst,
'bs=1M',
'oflag=direct',
run_as_root=True,
check_exit_code=[0])
def mkswap(dev, label='swap1'):
"""Execute mkswap on a device."""
utils.execute('mkswap',
'-L', label,
dev,
run_as_root=True,
check_exit_code=[0])
def block_uuid(dev):
"""Get UUID of a block device."""
out, _ = utils.execute('blkid', '-s', 'UUID', '-o', 'value', dev,
run_as_root=True,
check_exit_code=[0])
return out.strip()
def switch_pxe_config(path, root_uuid):
"""Switch a pxe config from deployment mode to service mode."""
with open(path) as f:
lines = f.readlines()
root = 'UUID=%s' % root_uuid
rre = re.compile(r'\$\{ROOT\}')
dre = re.compile('^default .*$')
with open(path, 'w') as f:
for line in lines:
line = rre.sub(root, line)
line = dre.sub('default boot', line)
f.write(line)
def notify(address, port):
"""Notify a node that it becomes ready to reboot."""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((address, port))
s.send('done')
finally:
s.close()
def get_dev(address, port, iqn, lun):
"""Returns a device path for given parameters."""
dev = "/dev/disk/by-path/ip-%s:%s-iscsi-%s-lun-%s" \
% (address, port, iqn, lun)
return dev
def get_image_mb(image_path):
"""Get size of an image in Megabyte."""
mb = 1024 * 1024
image_byte = os.path.getsize(image_path)
# round up size to MB
image_mb = int((image_byte + mb - 1) / mb)
return image_mb
def work_on_disk(dev, root_mb, swap_mb, image_path):
"""Creates partitions and write an image to the root partition."""
root_part = "%s-part1" % dev
swap_part = "%s-part2" % dev
if not is_block_device(dev):
LOG.warn("parent device '%s' not found", dev)
return
make_partitions(dev, root_mb, swap_mb)
if not is_block_device(root_part):
LOG.warn("root device '%s' not found", root_part)
return
if not is_block_device(swap_part):
LOG.warn("swap device '%s' not found", swap_part)
return
dd(image_path, root_part)
mkswap(swap_part)
root_uuid = block_uuid(root_part)
return root_uuid
def deploy(address, port, iqn, lun, image_path, pxe_config_path,
root_mb, swap_mb):
"""All-in-one function to deploy a node."""
dev = get_dev(address, port, iqn, lun)
image_mb = get_image_mb(image_path)
if image_mb > root_mb:
root_mb = image_mb
discovery(address, port)
login_iscsi(address, port, iqn)
try:
root_uuid = work_on_disk(dev, root_mb, swap_mb, image_path)
except exception.ProcessExecutionError, err:
# Log output if there was a error
LOG.error("Cmd : %s" % err.cmd)
LOG.error("StdOut : %s" % err.stdout)
LOG.error("StdErr : %s" % err.stderr)
finally:
logout_iscsi(address, port, iqn)
switch_pxe_config(pxe_config_path, root_uuid)
    # Give the node time to start netcat on the port after it POSTed the request.
time.sleep(3)
notify(address, 10000)
class Worker(threading.Thread):
"""Thread that handles requests in queue."""
def __init__(self):
super(Worker, self).__init__()
self.setDaemon(True)
self.stop = False
self.queue_timeout = 1
def run(self):
while not self.stop:
try:
# Set timeout to check self.stop periodically
(node_id, params) = QUEUE.get(block=True,
timeout=self.queue_timeout)
except Queue.Empty:
pass
else:
# Requests comes here from BareMetalDeploy.post()
LOG.info(_('start deployment for node %(node_id)s, '
'params %(params)s') % locals())
context = nova_context.get_admin_context()
try:
db.bm_node_update(context, node_id,
{'task_state': baremetal_states.DEPLOYING})
deploy(**params)
except Exception:
LOG.error(_('deployment to node %s failed') % node_id)
db.bm_node_update(context, node_id,
{'task_state': baremetal_states.DEPLOYFAIL})
else:
LOG.info(_('deployment to node %s done') % node_id)
db.bm_node_update(context, node_id,
{'task_state': baremetal_states.DEPLOYDONE})
class BareMetalDeploy(object):
"""WSGI server for bare-metal deployment."""
def __init__(self):
self.worker = Worker()
self.worker.start()
def __call__(self, environ, start_response):
method = environ['REQUEST_METHOD']
if method == 'POST':
return self.post(environ, start_response)
else:
start_response('501 Not Implemented',
[('Content-type', 'text/plain')])
return 'Not Implemented'
def post(self, environ, start_response):
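        # Expected form-encoded body (sketch inferred from the parsing below):
        #   i=<node id>&k=<deploy key>&a=<iSCSI address>&p=<port, default 3260>
        #   &n=<target IQN>&l=<LUN, default 1>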
LOG.info("post: environ=%s", environ)
inpt = environ['wsgi.input']
length = int(environ.get('CONTENT_LENGTH', 0))
x = inpt.read(length)
q = dict(cgi.parse_qsl(x))
try:
node_id = q['i']
deploy_key = q['k']
address = q['a']
port = q.get('p', '3260')
iqn = q['n']
lun = q.get('l', '1')
except KeyError as e:
start_response('400 Bad Request', [('Content-type', 'text/plain')])
return "parameter '%s' is not defined" % e
context = nova_context.get_admin_context()
d = db.bm_node_get(context, node_id)
if d['deploy_key'] != deploy_key:
start_response('400 Bad Request', [('Content-type', 'text/plain')])
            return 'key does not match'
params = {'address': address,
'port': port,
'iqn': iqn,
'lun': lun,
'image_path': d['image_path'],
'pxe_config_path': d['pxe_config_path'],
'root_mb': int(d['root_mb']),
'swap_mb': int(d['swap_mb']),
}
# Restart worker, if needed
if not self.worker.isAlive():
self.worker = Worker()
self.worker.start()
LOG.info("request is queued: node %s, params %s", node_id, params)
QUEUE.put((node_id, params))
# Requests go to Worker.run()
start_response('200 OK', [('Content-type', 'text/plain')])
return ''
def main():
config.parse_args(sys.argv)
logging.setup("nova")
global LOG
LOG = logging.getLogger('nova.virt.baremetal.deploy_helper')
app = BareMetalDeploy()
srv = simple_server.make_server('', 10000, app)
srv.serve_forever()
| zestrada/nova-cs498cc | nova/cmd/baremetal_deploy_helper.py | Python | apache-2.0 | 10,439 |
"""Support for Lupusec Security System binary sensors."""
import logging
from datetime import timedelta
from homeassistant.components.lupusec import (LupusecDevice,
DOMAIN as LUPUSEC_DOMAIN)
from homeassistant.components.binary_sensor import (BinarySensorDevice,
DEVICE_CLASSES)
DEPENDENCIES = ['lupusec']
SCAN_INTERVAL = timedelta(seconds=2)
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up a sensor for an Lupusec device."""
if discovery_info is None:
return
import lupupy.constants as CONST
data = hass.data[LUPUSEC_DOMAIN]
device_types = [CONST.TYPE_OPENING]
devices = []
for device in data.lupusec.get_devices(generic_type=device_types):
devices.append(LupusecBinarySensor(data, device))
add_entities(devices)
class LupusecBinarySensor(LupusecDevice, BinarySensorDevice):
"""A binary sensor implementation for Lupusec device."""
@property
def is_on(self):
"""Return True if the binary sensor is on."""
return self._device.is_on
@property
def device_class(self):
"""Return the class of the binary sensor."""
if self._device.generic_type not in DEVICE_CLASSES:
return None
return self._device.generic_type
| HydrelioxGitHub/home-assistant | homeassistant/components/lupusec/binary_sensor.py | Python | apache-2.0 | 1,410 |
class Solution(object):
def maxProfit1(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
if prices is None or len(prices) == 0:
return 0
l = len(prices)
dp = [[None]*l for i in xrange(l)]
for offset in xrange(l):
for i in xrange(l):
j = i + offset
if offset == 0:
dp[i][j] = 0
elif offset == 1 and j < l:
dp[i][j] = max(prices[j] - prices[i], 0)
elif offset > 1 and j < l:
dp[i][j] = max(dp[i][j-1], dp[i+1][j], prices[j] - prices[i])
return dp[0][l-1]
def maxProfit2(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
if prices is None or len(prices) == 0:
return 0
dp = []
min_prices = prices[0]
for i,e in enumerate(prices):
min_prices = min(e, min_prices)
if i == 0:
dp.append(0)
else:
dp.append(max(e-min_prices, dp[i-1]))
return dp[-1]
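# Quick check (illustrative): Solution().maxProfit2([7, 1, 5, 3, 6, 4]) == 5
# (buy at 1, sell at 6); maxProfit1 returns the same value on this input.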
| shuquan/leetcode | easy/best-time-to-buy-and-sell-stock/best_time_to_buy_and_sell_stock.py | Python | apache-2.0 | 1,146 |
from datetime import datetime
from skylines.model import Club
def lva(**kwargs):
return Club(
name=u"LV Aachen",
website=u"http://www.lv-aachen.de",
time_created=datetime(2015, 12, 24, 12, 34, 56),
).apply_kwargs(kwargs)
def sfn(**kwargs):
return Club(
name=u"Sportflug Niederberg", time_created=datetime(2017, 1, 1, 12, 34, 56)
).apply_kwargs(kwargs)
| skylines-project/skylines | tests/data/clubs.py | Python | agpl-3.0 | 405 |
"""An NNTP client class based on RFC 977: Network News Transfer Protocol.
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print 'Group', name, 'has', count, 'articles, range', first, 'to', last
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', first + '-' + last)
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'r') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Imports
import re
import socket
__all__ = ["NNTP","NNTPReplyError","NNTPTemporaryError",
"NNTPPermanentError","NNTPProtocolError","NNTPDataError",
"error_reply","error_temp","error_perm","error_proto",
"error_data",]
# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
"""Base class for all nntplib exceptions"""
def __init__(self, *args):
apply(Exception.__init__, (self,)+args)
try:
self.response = args[0]
except IndexError:
self.response = 'No response given'
class NNTPReplyError(NNTPError):
"""Unexpected [123]xx reply"""
pass
class NNTPTemporaryError(NNTPError):
"""4xx errors"""
pass
class NNTPPermanentError(NNTPError):
"""5xx errors"""
pass
class NNTPProtocolError(NNTPError):
"""Response does not begin with [1-5]"""
pass
class NNTPDataError(NNTPError):
"""Error in response data"""
pass
# for backwards compatibility
error_reply = NNTPReplyError
error_temp = NNTPTemporaryError
error_perm = NNTPPermanentError
error_proto = NNTPProtocolError
error_data = NNTPDataError
# Standard port used by NNTP servers
NNTP_PORT = 119
# Response numbers that are followed by additional text (e.g. article)
LONGRESP = ['100', '215', '220', '221', '222', '224', '230', '231', '282']
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
CRLF = '\r\n'
# The class itself
class NNTP:
def __init__(self, host, port=NNTP_PORT, user=None, password=None,
readermode=None):
"""Initialize an instance. Arguments:
- host: hostname to connect to
- port: port to connect to (default the standard NNTP port)
- user: username to authenticate with
- password: password to use with username
- readermode: if true, send 'mode reader' command after
connecting.
readermode is sometimes necessary if you are connecting to an
NNTP server on the local machine and intend to call
        reader-specific commands, such as `group'. If you get
unexpected NNTPPermanentErrors, you might need to set
readermode.
"""
self.host = host
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.host, self.port))
self.file = self.sock.makefile('rb')
self.debugging = 0
self.welcome = self.getresp()
# 'mode reader' is sometimes necessary to enable 'reader' mode.
# However, the order in which 'mode reader' and 'authinfo' need to
# arrive differs between some NNTP servers. Try to send
# 'mode reader', and if it fails with an authorization failed
# error, try again after sending authinfo.
readermode_afterauth = 0
if readermode:
try:
self.welcome = self.shortcmd('mode reader')
except NNTPPermanentError:
# error 500, probably 'not implemented'
pass
except NNTPTemporaryError, e:
if user and e.response[:3] == '480':
# Need authorization before 'mode reader'
readermode_afterauth = 1
else:
raise
if user:
resp = self.shortcmd('authinfo user '+user)
if resp[:3] == '381':
if not password:
raise NNTPReplyError(resp)
else:
resp = self.shortcmd(
'authinfo pass '+password)
if resp[:3] != '281':
raise NNTPPermanentError(resp)
if readermode_afterauth:
try:
self.welcome = self.shortcmd('mode reader')
except NNTPPermanentError:
# error 500, probably 'not implemented'
pass
# Get the welcome message from the server
# (this is read and squirreled away by __init__()).
# If the response code is 200, posting is allowed;
    # if it is 201, posting is not allowed
def getwelcome(self):
"""Get the welcome message from the server
(this is read and squirreled away by __init__()).
If the response code is 200, posting is allowed;
        if it is 201, posting is not allowed."""
if self.debugging: print '*welcome*', `self.welcome`
return self.welcome
def set_debuglevel(self, level):
"""Set the debugging level. Argument 'level' means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
2: also print raw lines read and sent before stripping CR/LF"""
self.debugging = level
debug = set_debuglevel
def putline(self, line):
"""Internal: send one line to the server, appending CRLF."""
line = line + CRLF
if self.debugging > 1: print '*put*', `line`
self.sock.send(line)
def putcmd(self, line):
"""Internal: send one command to the server (through putline())."""
if self.debugging: print '*cmd*', `line`
self.putline(line)
def getline(self):
"""Internal: return one line from the server, stripping CRLF.
Raise EOFError if the connection is closed."""
line = self.file.readline()
if self.debugging > 1:
print '*get*', `line`
if not line: raise EOFError
if line[-2:] == CRLF: line = line[:-2]
elif line[-1:] in CRLF: line = line[:-1]
return line
def getresp(self):
"""Internal: get a response from the server.
Raise various errors if the response indicates an error."""
resp = self.getline()
if self.debugging: print '*resp*', `resp`
c = resp[:1]
if c == '4':
raise NNTPTemporaryError(resp)
if c == '5':
raise NNTPPermanentError(resp)
if c not in '123':
raise NNTPProtocolError(resp)
return resp
def getlongresp(self):
"""Internal: get a response plus following text from the server.
Raise various errors if the response indicates an error."""
resp = self.getresp()
if resp[:3] not in LONGRESP:
raise NNTPReplyError(resp)
list = []
while 1:
line = self.getline()
if line == '.':
break
if line[:2] == '..':
line = line[1:]
list.append(line)
return resp, list
def shortcmd(self, line):
"""Internal: send a command and get the response."""
self.putcmd(line)
return self.getresp()
def longcmd(self, line):
"""Internal: send a command and get the response plus following text."""
self.putcmd(line)
return self.getlongresp()
def newgroups(self, date, time):
"""Process a NEWGROUPS command. Arguments:
- date: string 'yymmdd' indicating the date
- time: string 'hhmmss' indicating the time
Return:
- resp: server response if successful
- list: list of newsgroup names"""
return self.longcmd('NEWGROUPS ' + date + ' ' + time)
def newnews(self, group, date, time):
"""Process a NEWNEWS command. Arguments:
- group: group name or '*'
- date: string 'yymmdd' indicating the date
- time: string 'hhmmss' indicating the time
Return:
- resp: server response if successful
- list: list of article ids"""
cmd = 'NEWNEWS ' + group + ' ' + date + ' ' + time
return self.longcmd(cmd)
def list(self):
"""Process a LIST command. Return:
- resp: server response if successful
- list: list of (group, last, first, flag) (strings)"""
resp, list = self.longcmd('LIST')
for i in range(len(list)):
# Parse lines into "group last first flag"
list[i] = tuple(list[i].split())
return resp, list
def group(self, name):
"""Process a GROUP command. Argument:
- group: the group name
Returns:
- resp: server response if successful
- count: number of articles (string)
- first: first article number (string)
- last: last article number (string)
- name: the group name"""
resp = self.shortcmd('GROUP ' + name)
if resp[:3] != '211':
raise NNTPReplyError(resp)
words = resp.split()
count = first = last = 0
n = len(words)
if n > 1:
count = words[1]
if n > 2:
first = words[2]
if n > 3:
last = words[3]
if n > 4:
name = words[4].lower()
return resp, count, first, last, name
def help(self):
"""Process a HELP command. Returns:
- resp: server response if successful
- list: list of strings"""
return self.longcmd('HELP')
def statparse(self, resp):
"""Internal: parse the response of a STAT, NEXT or LAST command."""
if resp[:2] != '22':
raise NNTPReplyError(resp)
words = resp.split()
nr = 0
id = ''
n = len(words)
if n > 1:
nr = words[1]
if n > 2:
id = words[2]
return resp, nr, id
def statcmd(self, line):
"""Internal: process a STAT, NEXT or LAST command."""
resp = self.shortcmd(line)
return self.statparse(resp)
def stat(self, id):
"""Process a STAT command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: the article number
- id: the article id"""
return self.statcmd('STAT ' + id)
def next(self):
"""Process a NEXT command. No arguments. Return as for STAT."""
return self.statcmd('NEXT')
def last(self):
"""Process a LAST command. No arguments. Return as for STAT."""
return self.statcmd('LAST')
def artcmd(self, line):
"""Internal: process a HEAD, BODY or ARTICLE command."""
resp, list = self.longcmd(line)
resp, nr, id = self.statparse(resp)
return resp, nr, id, list
def head(self, id):
"""Process a HEAD command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article's header"""
return self.artcmd('HEAD ' + id)
def body(self, id):
"""Process a BODY command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article's body"""
return self.artcmd('BODY ' + id)
def article(self, id):
"""Process an ARTICLE command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article"""
return self.artcmd('ARTICLE ' + id)
def slave(self):
"""Process a SLAVE command. Returns:
- resp: server response if successful"""
return self.shortcmd('SLAVE')
def xhdr(self, hdr, str):
"""Process an XHDR command (optional server extension). Arguments:
- hdr: the header type (e.g. 'subject')
- str: an article nr, a message id, or a range nr1-nr2
Returns:
- resp: server response if successful
- list: list of (nr, value) strings"""
pat = re.compile('^([0-9]+) ?(.*)\n?')
resp, lines = self.longcmd('XHDR ' + hdr + ' ' + str)
for i in range(len(lines)):
line = lines[i]
m = pat.match(line)
if m:
lines[i] = m.group(1, 2)
return resp, lines
def xover(self,start,end):
"""Process an XOVER command (optional server extension) Arguments:
- start: start of range
- end: end of range
Returns:
- resp: server response if successful
- list: list of (art-nr, subject, poster, date,
id, references, size, lines)"""
resp, lines = self.longcmd('XOVER ' + start + '-' + end)
xover_lines = []
for line in lines:
elem = line.split("\t")
try:
xover_lines.append((elem[0],
elem[1],
elem[2],
elem[3],
elem[4],
elem[5].split(),
elem[6],
elem[7]))
except IndexError:
raise NNTPDataError(line)
return resp,xover_lines
def xgtitle(self, group):
"""Process an XGTITLE command (optional server extension) Arguments:
- group: group name wildcard (i.e. news.*)
Returns:
- resp: server response if successful
- list: list of (name,title) strings"""
line_pat = re.compile("^([^ \t]+)[ \t]+(.*)$")
resp, raw_lines = self.longcmd('XGTITLE ' + group)
lines = []
for raw_line in raw_lines:
match = line_pat.search(raw_line.strip())
if match:
lines.append(match.group(1, 2))
return resp, lines
def xpath(self,id):
"""Process an XPATH command (optional server extension) Arguments:
- id: Message id of article
Returns:
resp: server response if successful
path: directory path to article"""
resp = self.shortcmd("XPATH " + id)
if resp[:3] != '223':
raise NNTPReplyError(resp)
try:
[resp_num, path] = resp.split()
except ValueError:
raise NNTPReplyError(resp)
else:
return resp, path
def date (self):
"""Process the DATE command. Arguments:
None
Returns:
resp: server response if successful
date: Date suitable for newnews/newgroups commands etc.
time: Time suitable for newnews/newgroups commands etc."""
resp = self.shortcmd("DATE")
if resp[:3] != '111':
raise NNTPReplyError(resp)
elem = resp.split()
if len(elem) != 2:
raise NNTPDataError(resp)
date = elem[1][2:8]
time = elem[1][-6:]
if len(date) != 6 or len(time) != 6:
raise NNTPDataError(resp)
return resp, date, time
def post(self, f):
"""Process a POST command. Arguments:
- f: file containing the article
Returns:
- resp: server response if successful"""
resp = self.shortcmd('POST')
# Raises error_??? if posting is not allowed
if resp[0] != '3':
raise NNTPReplyError(resp)
while 1:
line = f.readline()
if not line:
break
if line[-1] == '\n':
line = line[:-1]
if line[:1] == '.':
line = '.' + line
self.putline(line)
self.putline('.')
return self.getresp()
def ihave(self, id, f):
"""Process an IHAVE command. Arguments:
- id: message-id of the article
- f: file containing the article
Returns:
- resp: server response if successful
Note that if the server refuses the article an exception is raised."""
resp = self.shortcmd('IHAVE ' + id)
# Raises error_??? if the server already has it
if resp[0] != '3':
raise NNTPReplyError(resp)
while 1:
line = f.readline()
if not line:
break
if line[-1] == '\n':
line = line[:-1]
if line[:1] == '.':
line = '.' + line
self.putline(line)
self.putline('.')
return self.getresp()
def quit(self):
"""Process a QUIT command and close the socket. Returns:
- resp: server response if successful"""
resp = self.shortcmd('QUIT')
self.file.close()
self.sock.close()
del self.file, self.sock
return resp
def _test():
"""Minimal test function."""
s = NNTP('news', readermode='reader')
resp, count, first, last, name = s.group('comp.lang.python')
print resp
print 'Group', name, 'has', count, 'articles, range', first, 'to', last
resp, subs = s.xhdr('subject', first + '-' + last)
print resp
for item in subs:
print "%7s %s" % item
resp = s.quit()
print resp
# Run the test when run as a script
if __name__ == '__main__':
_test()
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.1/Lib/nntplib.py | Python | mit | 18,078 |
from measurements import parametric_sweep as ps
from numpy import arange  # arange is used below; assumed to be numpy's (the original session likely star-imported it)
try:
pna.set_average(True)
pna.set_bandwidth(100)
pna.set_averages(3)
pna.set_nop(1)
pna.set_power(-35)
pna.get_all()
pna.set_centerfreq(5.2682e9)
pna.set_span(1e6)
lo.set_power(-5)
lo.set_status(1)
#lo.set_frequency(13.8477e9)
#bias.set_current(-7231.25e-6)
Frequencies = arange (3e9, 10e9, 30e6)
currents = arange (-10000e-6, -4000e-6, 30e-6)
current.set_status(1)
measurement = ps.sweep2D(pna, Frequencies, lo.set_frequency, currents, current.set_current)
#measurement = ps.sweep1D(pna, Powers, lo.set_power)
finally:
current.set_current(0)
current.set_status(0)
lo.set_status(0)
| vdrhtc/Measurement-automation | lib/script_qubit_2tone.py | Python | gpl-3.0 | 725 |
import pytest
from distutils.version import LooseVersion
import pandas as pd
from pandas.core.computation.engines import _engines
import pandas.core.computation.expr as expr
from pandas.core.computation.check import _MIN_NUMEXPR_VERSION
def test_compat():
    # test we have compat with our version of numexpr
from pandas.core.computation.check import _NUMEXPR_INSTALLED
try:
import numexpr as ne
ver = ne.__version__
if ver < LooseVersion(_MIN_NUMEXPR_VERSION):
assert not _NUMEXPR_INSTALLED
else:
assert _NUMEXPR_INSTALLED
except ImportError:
pytest.skip("not testing numexpr version compat")
@pytest.mark.parametrize('engine', _engines)
@pytest.mark.parametrize('parser', expr._parsers)
def test_invalid_numexpr_version(engine, parser):
def testit():
a, b = 1, 2 # noqa
res = pd.eval('a + b', engine=engine, parser=parser)
assert res == 3
if engine == 'numexpr':
try:
import numexpr as ne
except ImportError:
pytest.skip("no numexpr")
else:
if ne.__version__ < LooseVersion(_MIN_NUMEXPR_VERSION):
with pytest.raises(ImportError):
testit()
else:
testit()
else:
testit()
| NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/computation/test_compat.py | Python | apache-2.0 | 1,320 |
#################################################################################
##____ ___ _ _ ____ ___ _____ ____ ___
#| _ \_ _| \ | | __ ) / _ \_ _| |___ \ / _ \
#| |_) | || \| | _ \| | | || | __) || | | |
#| __/| || |\ | |_) | |_| || | / __/ | |_| |
#|_|__|___|_|_\_|____/_\___/_|_| __|_____(_)___/_____ ___ ___ _ _
#| _ \ | _ \ / _ \ / ___| | ____| _ \_ _|_ _|_ _/ _ \| \ | |
#| |_) |____| |_) | | | | | | _| | | | | | | | | | | | | \| |
#| __/_____| _ <| |_| | |___ | |___| |_| | | | | | | |_| | |\ |
#|_| |_| \_\\___/ \____| |_____|____/___| |_| |___\___/|_| \_|
##
## A P-ROC Project by Dan Myers, Copyright 2013-2014
## Built on the PyProcGame Framework from Adam Preble and Gerry Stellenberg
## Thanks to Scott Danesi for his Earthshaker Project, which is my starting point
#################################################################################
#################################################################################
## __________ ____ __ __________ __
## /_ __/ __ \/ __ \/ / / / ____/ / / /
## / / / /_/ / / / / / / / / __/ /_/ /
## / / / _, _/ /_/ / /_/ / /_/ / __ /
## /_/ /_/ |_|\____/\____/\____/_/ /_/
##
#################################################################################
import procgame.game
import procgame.dmd
import logging
class Trough(procgame.game.Mode):
"""Manages trough by providing the following functionality:
- Keeps track of the number of balls in play
- Keeps track of the number of balls in the trough
- Launches one or more balls on request and calls a launch_callback when complete, if one exists.
- Auto-launches balls while ball save is active (if linked to a ball save object
- Identifies when balls drain and calls a registered drain_callback, if one exists.
- Maintains a count of balls locked in playfield lock features (if externally incremented) and adjusts the count of number of balls in play appropriately. This will help the drain_callback distinguish between a ball ending or simply a multiball ball draining.
Parameters:
'game': Parent game object.
'position_switchnames': List of switchnames for each ball position in the trough.
'eject_switchname': Name of switch in the ball position the feeds the shooter lane.
'eject_coilname': Name of coil used to put a ball into the shooter lane.
'early_save_switchnames': List of switches that will initiate a ball save before the draining ball reaches the trough (ie. Outlanes).
'shooter_lane_switchname': Name of the switch in the shooter lane. This is checked before a new ball is ejected.
'drain_callback': Optional - Name of method to be called when a ball drains (and isn't saved).
"""
def __init__(self, game, priority, position_switchnames=None, eject_switchname=None, eject_coilname=None, early_save_switchnames=None, shooter_lane_switchname=None, drain_callback=None):
super(Trough, self).__init__(game, priority)
#setup logging
self.log = logging.getLogger('pinbot.trough')
#setup vars
self.position_switchnames = [] #position_switchnames
self.early_save_switchnames = []
self.eject_switchname = None #eject_switchname
self.eject_coilname = None #eject_coilname
self.shooter_lane_switchname = None #shooter_lane_switchname
self.drain_callback = drain_callback
self.extra_ball = False # Is the ball sitting in the lane an extra ball awarded
#populate vars from yaml*
for switch in self.game.switches.items_tagged('trough'):
self.position_switchnames.append(switch.name)
self.log.info("Trough Switch is:"+switch.name)
for switch in self.game.switches.items_tagged('early_save'):
self.early_save_switchnames.append(switch.name)
self.log.info("Early Save Switch is:"+switch.name)
for switch in self.game.switches.items_tagged('outhole'):
self.outhole_switchname = switch.name
self.log.info("Outhole Switch is:"+self.outhole_switchname)
for switch in self.game.switches.items_tagged('trough_eject'):
self.eject_switchname = switch.name
self.log.info("Trough Eject Switch is:"+self.eject_switchname)
for switch in self.game.switches.items_tagged('shooter_lane'):
self.shooter_lane_switchname = switch.name
self.log.info("Shooter Lane Switch is:"+self.shooter_lane_switchname)
for coil in self.game.coils.items_tagged('outhole'):
self.outhole_coilname = coil.name
self.log.info("Outhole Coil is:"+self.outhole_coilname)
for coil in self.game.coils.items_tagged('trough_eject'):
self.eject_coilname = coil.name
self.log.info("Trough Eject Coil is:"+self.eject_coilname)
# Install switch handlers.
        # Use a delay of 1 second, which should ensure balls are settled.
for switch in self.position_switchnames:
self.add_switch_handler(name=switch, event_type='active', delay=1.0, handler=self.position_switch_handler)
for switch in self.position_switchnames:
self.add_switch_handler(name=switch, event_type='inactive', delay=1.0, handler=self.position_switch_handler)
# Install early ball_save switch handlers.
#for switch in self.early_save_switchnames:
#self.add_switch_handler(name=switch, event_type='active', delay=None, handler=self.early_save_switch_handler)
# Install outhole switch handler.
#self.add_switch_handler(name=self.outhole_switchname, event_type='active', delay=.20, handler=self.outhole_switch_handler)
# Install shooter lane handler
self.add_switch_handler(name=self.shooter_lane_switchname, event_type='active', delay=1.0, handler=self.shooter_lane_switch_handler)
# Reset variables
# This is the number of balls not in the trough or locks, so physically in play
self.num_balls_in_play = 0
# This is the number of balls physically sitting in locks, so not in play and not in the trough
self.num_balls_locked = 0
self.num_balls_to_launch = 0
self.num_balls_to_stealth_launch = 0
self.launch_in_progress = False
self.ball_save_active = False
""" Callback called when a ball is saved. Used optionally only when ball save is enabled (by a call to :meth:`Trough.enable_ball_save`). Set externally if a callback should be used. """
self.ball_save_callback = None
""" Method to get the number of balls to save. Set externally when using ball save logic."""
self.num_balls_to_save = None
self.launch_callback = None
self.debug()
#def mode_tick(self):
#self.debug()
def debug(self):
self.log.info("BALL" +str(self.game.ball) +"/" +str(self.game.balls_per_game) +",B-IN-PLY"+str(self.num_balls_in_play) + ", B-LCKD" + str(self.num_balls_locked)+ ", TRO" + str(self.num_balls())+", player locks "+str(self.game.utilities.get_player_stats('balls_locked')))
#self.game.utilities.arduino_write_number(number=self.num_balls())
self.delay(name='launch', event_type=None, delay=1.0, handler=self.debug)
def state_str(self):
return '%d/%d balls' % (self.num_balls(), self.game.num_balls_total)
def enable_ball_save(self, enable=True):
"""Used to enable/disable ball save logic."""
self.ball_save_active = enable
#def early_save_switch_handler(self, sw):
#if self.ball_save_active:
# Only do an early ball save if a ball is ready to be launched.
# Otherwise, let the trough switches take care of it.
#if self.game.switches[self.eject_switchname].is_active():
#self.launch_balls(1, self.ball_save_callback, stealth=True)
#add handler for outhole
#def sw_outhole_active_for_500ms(self, sw):
def sw_outhole_closed_for_1s(self, sw):
#def outhole_switch_handler(self,sw):
self.log.info('Outhole switch handler')
self.log.info("Balls in play before pulse = "+str(self.num_balls_in_play))
self.log.info("Balls in trough before pulse = "+str(self.num_balls()))
#self.delay(name='dummy', event_type=None, delay=1.0, handler=self.dummy)
# Kick the ball into the trough
self.game.utilities.acCoilPulse('outholeKicker_Knocker')
if self.num_balls_in_play > 0:
self.num_balls_in_play -= 1
self.delay(name='dummy', event_type=None, delay=1.0, handler=self.dummy)
self.log.info("Balls in play after pulse = "+str(self.num_balls_in_play))
self.log.info("Balls in trough after pulse = "+str(self.num_balls()))
# Schedule a call for one second from now to let things settle
self.delay('outhole_recheck',delay=1.0,handler=self.outhole_recheck)
# Called one second after the outhole is handled. Will call the handler again if there is still
# a ball in the outhole, otherwise will continue with rest of outhole processing
def outhole_recheck(self):
#self.delay(name='dummy', event_type=None, delay=1.0, handler=self.dummy)
if self.game.switches.outhole.is_closed() == True:
self.log.info('Calling outhole_recheck, is_closed is true')
self.sw_outhole_closed_for_1s('Dummy')
else:
self.log.info('Calling outhole_recheck, is_closed is false')
# If ball save is active, save it but wait first for one second for the trough to settle
if (self.game.utilities.get_player_stats('ballsave_active') == True):
self.game.ballsaver_mode.saveBall()
# If ball save isn't active, check for end of multiball
elif(self.game.utilities.get_player_stats('multiball_running') != 'None'):
self.checkForEndOfMultiball()
if self.num_balls_in_play == 0: #Last ball in play
self.game.utilities.setBallInPlay(False) # Will need to use the trough mode for this
self.game.base_mode.finish_ball()
def checkForEndOfMultiball(self):
if (self.num_balls() >= 3 and self.game.utilities.get_player_stats('multiball_running') == 'Standard' ):
self.game.multiball_mode.stopMultiball()
elif self.game.utilities.get_player_stats('multiball_running') == 'Quick':
self.game.quick_multiball_mode.stopMultiball()
# Switches will change states a lot as balls roll down the trough.
# So don't go through all of the logic every time. Keep resetting a
# delay function when switches change state. When they're all settled,
# the delay will call the real handler (check_switches).
def position_switch_handler(self, sw):
self.cancel_delayed('check_switches')
self.delay(name='check_switches', event_type=None, delay=0.50, handler=self.check_switches)
def check_switches(self):
self.log.info('Trough settled')
if self.num_balls_in_play > 0:
# Base future calculations on how many balls the machine
# thinks are currently installed.
num_current_machine_balls = self.game.num_balls_total
temp_num_balls = self.num_balls()
if self.ball_save_active:
if self.num_balls_to_save:
num_balls_to_save = self.num_balls_to_save()
else:
num_balls_to_save = 0
# Calculate how many balls shouldn't be in the
# trough assuming one just drained
num_balls_out = self.num_balls_locked + \
(num_balls_to_save - 1)
# Translate that to how many balls should be in
# the trough if one is being saved.
balls_in_trough = num_current_machine_balls - \
num_balls_out
if (temp_num_balls - \
self.num_balls_to_launch) >= balls_in_trough:
self.log.info("Trough thinks it needs another ball to launch")
self.launch_balls(1, self.ball_save_callback, \
stealth=True)
else:
# If there are too few balls in the trough.
# Ignore this one in an attempt to correct
# the tracking.
return 'ignore'
else:
# Calculate how many balls should be in the trough
# for various conditions.
num_trough_balls_if_ball_ending = \
num_current_machine_balls - self.num_balls_locked
num_trough_balls_if_multiball_ending = \
num_trough_balls_if_ball_ending - 1
num_trough_balls_if_multiball_drain = \
num_trough_balls_if_ball_ending - \
(self.num_balls_in_play - 1)
self.log.info("Ball ending = "+str(num_trough_balls_if_ball_ending)+ \
", Multiball ending = "+str(num_trough_balls_if_multiball_ending) + \
", Multiball drain = "+str(num_trough_balls_if_multiball_drain))
# The ball should end if all of the balls
# are in the trough.
if temp_num_balls == num_current_machine_balls or \
temp_num_balls == num_trough_balls_if_ball_ending:
self.num_balls_in_play = 0
if self.drain_callback:
self.drain_callback()
# Multiball is ending if all but 1 ball are in the trough.
# Shouldn't need this, but it fixes situations where
# num_balls_in_play tracking
                # fails, and those situations are still occurring.
elif temp_num_balls == \
num_trough_balls_if_multiball_ending:
self.num_balls_in_play = 1
if self.drain_callback:
self.drain_callback()
# Otherwise, another ball from multiball is draining
# if the trough gets one more than it would have if
# all num_balls_in_play are not in the trough.
elif temp_num_balls == \
num_trough_balls_if_multiball_drain:
# Fix num_balls_in_play if too low.
if self.num_balls_in_play < 3:
self.num_balls_in_play = 2
# otherwise subtract 1
else:
#else:
self.num_balls_in_play -= 1
# if self.drain_callback:
# self.drain_callback()
self.num_balls_in_play = 4 - self.num_balls() - self.num_balls_locked
# Count the number of balls in the trough by counting active trough switches.
def num_balls(self):
"""Returns the number of balls in the trough."""
ball_count = 0
for switch in self.position_switchnames:
if self.game.switches[switch].is_active():
ball_count += 1
return ball_count
# Perform physical lock count. Tracking this in software can be error prone if one scenario is missed
# and there is always the possibility that a ball might bounce somewhere unexpected. If we actually
# count the locks with a ball in by checking the switches then we should be good.
def lock_count(self):
lock_count = 0
for switch in ['singleEject','rightEyeball','leftEyeball']:
if self.game.switches[switch].is_active():
lock_count += 1
self.num_balls_locked = lock_count
def is_full(self):
return self.num_balls() == self.game.num_balls_total
# Either initiate a new launch or add another ball to the count of balls
# being launched. Make sure to keep a separate count for stealth launches
# that should not increase num_balls_in_play.
def launch_balls(self, num, callback=None, stealth=False):
self.log.info('Calling: launch_balls,' +str(num))
"""Launches balls into play.
'num': Number of balls to be launched.
If ball launches are still pending from a previous request,
this number will be added to the previously requested number.
'callback': If specified, the callback will be called once
all of the requested balls have been launched.
'stealth': Set to true if the balls being launched should NOT
be added to the number of balls in play. For instance, if
a ball is being locked on the playfield, and a new ball is
being launched to keep only 1 active ball in play,
stealth should be used.
"""
self.num_balls_to_launch += num
self.log.info('in launch_balls now self.num_balls_to_launch = ' +str(self.num_balls_to_launch))
#self.autolaunch = autolaunch
#if stealth:
#self.num_balls_to_stealth_launch += num
if not self.launch_in_progress:
self.launch_in_progress = True
if callback:
self.launch_callback = callback
self.common_launch_code()
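    # Illustrative calls (game wiring is assumed; not from the original code):
    #   trough.launch_balls(1, callback=game.your_ball_start_handler)  # serve one ball
    #   trough.launch_balls(1, stealth=True)  # replace a locked ball without raising num_balls_in_play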
# This is the part of the ball launch code that repeats for multiple launches.
def common_launch_code(self):
# Only kick out another ball if the last ball is gone from the
# shooter lane.
if self.game.switches[self.shooter_lane_switchname].is_inactive():
self.log.info('common_launch_code says... shooter is clear')
self.num_balls_to_launch -= 1
#self.log.info("Balls in play before pulse = "+str(self.num_balls_in_play))
#self.log.info("Balls in trough before pulse = "+str(self.num_balls()))
self.delay(name='dummy', event_type=None, delay=1.0, handler=self.dummy)
#pulse coil
self.game.utilities.acCoilPulse(coilname='feedShooter_UpperPFFLash',pulsetime=100)
self.delay(name='dummy', event_type=None, delay=1.0, handler=self.dummy)
#if self.game.switches[self.shooter_lane_switchname].is_active():
#self.num_balls_to_launch -= 1
#self.num_balls_in_play += 1
#self.log.info("Balls in play after pulse = "+str(self.num_balls_in_play))
#self.log.info("Balls in trough after pulse = "+str(self.num_balls()))
#If the ball in the shooter lane is an extra ball which a player has been awarded
#then decrement the number of extra balls available and flag the situation so the lamp
#handler can flash the lamp
if self.game.utilities.get_player_stats('extra_balls') > 0:
self.game.utilities.set_player_stats('extra_balls',-1,mode='add')
self.extra_ball = True
else:
self.extra_ball = False
self.update_lamps()
# Only increment num_balls_in_play if there are no more
# stealth launches to complete.
if self.num_balls_to_stealth_launch > 0:
self.num_balls_to_stealth_launch -= 1
else:
self.num_balls_in_play += 1
# If more balls need to be launched, delay 1 second
if self.num_balls_to_launch > 0:
self.delay(name='launch', event_type=None, delay=1.0, handler=self.common_launch_code)
else:
self.launch_in_progress = False
if self.launch_callback:
self.launch_callback()
# Otherwise, wait 1 second before trying again.
else:
self.log.info('common_launch_code says... shooter not clear')
self.delay(name='launch', event_type=None, delay=1.0, \
handler=self.common_launch_code)
def dummy(self):
self.log.info('Calling: dummy')
pass
def shooter_lane_switch_handler(self,sw):
self.log.info("Placeholder until autolauncher works")
#if self.autolaunch == True:
#self.log.info("Shooter lane autolaunch = "+str(self.autolaunch))
##self.game.coils.autoLaunch.pulse(100)
#self.autolaunch=False
# The trough mode can handle the extra ball lamp
def update_lamps(self):
if self.extra_ball == True:
self.game.lamps.shootAgain.schedule(schedule=0xFF00FF00)
elif self.game.utilities.get_player_stats('extra_balls') > 0:
self.game.lamps.shootAgain.enable()
else:
self.game.lamps.shootAgain.disable()
def mode_stopped(self):
self.cancel_delayed('check_switches')
def sw_debug_active(self,sw):
self.log.info("Balls in play = "+str(self.num_balls_in_play))
self.log.info("Balls in trough = "+str(self.num_balls()))
| Curbfeeler/PinbotFromES | trough.py | Python | mit | 19,435 |
#/usr/bin/python
def devidefile(trainFile,subFilePath,codeNum):
trainf=open(trainFile,'r');
lines=trainf.readlines();
totalLine=len(lines);
perfileLine=totalLine/int(codeNum);
for i in range(0,int(codeNum)+1):
subtrainf=open('%s/train%d.tmp'%(subFilePath,i),'wt');
if perfileLine*(i+1)<totalLine :
subtrainf.writelines(lines[perfileLine*i:perfileLine*(i+1)]);
else :
subtrainf.writelines(lines[perfileLine*i:totalLine])
subtrainf.close();
trainf.close();
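#Example invocation (argument values are assumed):
#  python devidetrainfile.py train.list ./subsets 8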
import sys
if __name__=='__main__':
devidefile(sys.argv[1],sys.argv[2],sys.argv[3]); | drawfish/VAD_HTK | pyscrp/devidetrainfile.py | Python | gpl-2.0 | 628 |
import json
import re
from aloisius import Stack, StackException
import mock
from moto import mock_cloudformation
import pytest
dummy_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack 3",
"Resources": {
"VPC": {
"Properties": {
"CidrBlock": "192.168.0.0/16",
},
"Type": "AWS::EC2::VPC"
}
},
"Outputs": {
"VPC": {
"Value": {"Ref": "VPC"},
"Description": "This is a description."
}
}
}
@mock_cloudformation
def test_stack_is_created():
stack = Stack(
StackName='dummy',
TargetState='present',
RegionName='eu-west-1',
TemplateBody=json.dumps(dummy_template)
)
assert re.match("vpc-[a-z0-9]+", stack.outputs['VPC'])
@mock_cloudformation
def test_stack_create_failed_raises_exception(monkeypatch):
def mock_return(_):
return mock.Mock(stack_status='CREATE_FAILED')
monkeypatch.setattr(Stack, '_describe_stack', mock_return)
with pytest.raises(StackException):
stack = Stack(
StackName='dummy_failed',
TargetState='present',
RegionName='eu-west-1',
TemplateBody=json.dumps(dummy_template)
)
stack.outputs['VPC'] # Wait for result
@mock_cloudformation
def test_stack_rollback(monkeypatch):
def mock_return(_):
return mock.Mock(stack_status='ROLLBACK_COMPLETE')
monkeypatch.setattr(Stack, '_describe_stack', mock_return)
with pytest.raises(StackException):
stack = Stack(
StackName='dummy_failed',
TargetState='present',
RegionName='eu-west-1',
TemplateBody=json.dumps(dummy_template)
)
stack.outputs['VPC'] # Wait for result
@mock_cloudformation
def test_stack_results():
Stack._futures = {} # Cleanup from other runs
Stack(
StackName='dummy',
TargetState='present',
RegionName='eu-west-1',
TemplateBody=json.dumps(dummy_template)
)
for name, result in Stack.results().items():
assert name == 'dummy'
assert list(result.keys()) == ['VPC']
| diasjorge/aloisius | tests/test_stack.py | Python | bsd-2-clause | 2,199 |
"""Preprocessing tools useful for building models."""
# Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import
from tensorflow.contrib.skflow.python.skflow.preprocessing.text import *
from tensorflow.contrib.skflow.python.skflow.preprocessing.categorical import *
| panmari/tensorflow | tensorflow/contrib/skflow/python/skflow/preprocessing/__init__.py | Python | apache-2.0 | 894 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
DsgTools
A QGIS plugin
Brazilian Army Cartographic Production Tools
-------------------
begin : 2019-04-26
git sha : $Format:%H$
copyright : (C) 2019 by Philipe Borba - Cartographic Engineer @ Brazilian Army
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt5.QtCore import QCoreApplication
from qgis.core import (QgsProcessing,
QgsFeatureSink,
QgsProcessingAlgorithm,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterFeatureSink,
QgsFeature,
QgsDataSourceUri,
QgsProcessingOutputVectorLayer,
QgsProcessingParameterVectorLayer,
QgsWkbTypes,
QgsProcessingParameterBoolean,
QgsProcessingParameterEnum,
QgsProcessingParameterNumber,
QgsProcessingParameterMultipleLayers,
QgsProcessingUtils,
QgsSpatialIndex,
QgsGeometry,
QgsProcessingParameterField,
QgsProcessingMultiStepFeedback,
QgsProcessingParameterFile,
QgsProcessingParameterExpression,
QgsProcessingException,
QgsProcessingParameterString,
QgsProcessingParameterDefinition,
QgsProcessingParameterType,
QgsProcessingParameterCrs,
QgsCoordinateTransform,
QgsProject,
QgsCoordinateReferenceSystem,
QgsField,
QgsFields,
                       QgsProcessingOutputMultipleLayers)
class AssignFilterToLayersAlgorithm(QgsProcessingAlgorithm):
INPUT_LAYERS = 'INPUT_LAYERS'
FILTER = 'FILTER'
BEHAVIOR = 'BEHAVIOR'
OUTPUT = 'OUTPUT'
AndMode, OrMode, ReplaceMode = list(range(3))
def initAlgorithm(self, config):
"""
Parameter setting.
"""
self.addParameter(
QgsProcessingParameterMultipleLayers(
self.INPUT_LAYERS,
self.tr('Input Layers'),
QgsProcessing.TypeVectorAnyGeometry
)
)
self.addParameter(
QgsProcessingParameterString(
self.FILTER,
self.tr('Filter')
)
)
self.modes = [self.tr('Append to existing filter with AND clause'),
self.tr('Append to existing filter with OR clause'),
self.tr('Replace filter')
]
self.addParameter(
QgsProcessingParameterEnum(
self.BEHAVIOR,
self.tr('Behavior'),
options=self.modes,
defaultValue=0
)
)
self.addOutput(
QgsProcessingOutputMultipleLayers(
self.OUTPUT,
                self.tr('Original layers with assigned filter')
)
)
def processAlgorithm(self, parameters, context, feedback):
"""
Here is where the processing itself takes place.
"""
inputLyrList = self.parameterAsLayerList(
parameters,
self.INPUT_LAYERS,
context
)
inputFilterExpression = self.parameterAsString(
parameters,
self.FILTER,
context
)
behavior = self.parameterAsEnum(
parameters,
self.BEHAVIOR,
context
)
listSize = len(inputLyrList)
stepSize = 100/listSize if listSize else 0
for current, lyr in enumerate(inputLyrList):
if feedback.isCanceled():
break
filterExpression = self.adaptFilter(lyr, inputFilterExpression, behavior)
lyr.setSubsetString(filterExpression)
feedback.setProgress(current * stepSize)
return {self.OUTPUT: inputLyrList}
def adaptFilter(self, lyr, inputFilter, behavior):
"""
Adapts filter according to the selected mode
"""
originalFilter = lyr.subsetString()
if behavior == AssignFilterToLayersAlgorithm.ReplaceMode or originalFilter == '':
return inputFilter
clause = ' AND ' if behavior == AssignFilterToLayersAlgorithm.AndMode else ' OR '
return clause.join([originalFilter, inputFilter])
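    # Illustrative behavior (filter strings assumed): with an existing subset string
    # "type = 2" and inputFilter "name IS NOT NULL":
    #   AndMode     -> "type = 2 AND name IS NOT NULL"
    #   OrMode      -> "type = 2 OR name IS NOT NULL"
    #   ReplaceMode -> "name IS NOT NULL"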
def name(self):
"""
Returns the algorithm name, used for identifying the algorithm. This
string should be fixed for the algorithm, and must not be localised.
The name should be unique within each provider. Names should contain
lowercase alphanumeric characters only and no spaces or other
formatting characters.
"""
return 'assignfiltertolayers'
def displayName(self):
"""
Returns the translated algorithm name, which should be used for any
user-visible display of the algorithm name.
"""
return self.tr('Assign Filter to Layers')
def group(self):
"""
Returns the name of the group this algorithm belongs to. This string
should be localised.
"""
return self.tr('Layer Management Algorithms')
def groupId(self):
"""
Returns the unique ID of the group this algorithm belongs to. This
string should be fixed for the algorithm, and must not be localised.
The group id should be unique within each provider. Group id should
contain lowercase alphanumeric characters only and no spaces or other
formatting characters.
"""
return 'DSGTools: Layer Management Algorithms'
def tr(self, string):
return QCoreApplication.translate('AssignFilterToLayersAlgorithm', string)
def createInstance(self):
return AssignFilterToLayersAlgorithm() | lcoandrade/DsgTools | core/DSGToolsProcessingAlgs/Algs/LayerManagementAlgs/assignFilterToLayersAlgorithm.py | Python | gpl-2.0 | 7,059 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_flaskpypi
----------------------------------
Tests for `flaskpypi` module.
"""
import pytest
from flaskpypi import flaskpypi
# Code from https://wiki.python.org/moin/PyPISimple
from xml.etree import ElementTree
from urllib.request import urlopen
def get_distributions(simple_index='https://pypi.python.org/simple/'):
with urlopen(simple_index) as f:
tree = ElementTree.parse(f)
return [a.text for a in tree.iter('a')]
def scrape_links(dist, simple_index='https://pypi.python.org/simple/'):
with urlopen(simple_index + dist + '/') as f:
tree = ElementTree.parse(f)
return [a.attrib['href'] for a in tree.iter('a')]
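# Illustrative use of the helpers above (requires network access; values assumed):
#   dists = get_distributions()    # e.g. ['flask', 'requests', ...]
#   links = scrape_links('flask')  # hrefs of the release files for 'flask'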
def test_this_is_a_test():
assert True
| waynew/flaskpypi | tests/test_flaskpypi.py | Python | bsd-3-clause | 752 |
# -*- coding: utf-8 -*-
"""Pickle format processing function."""
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2016-2021"
__license__ = "AGPL"
__email__ = "[email protected]"
from pickle import load, dump, UnpicklingError, HIGHEST_PROTOCOL
from qtpy.QtWidgets import QMessageBox
from .format_editor import FormatEditor
class PickleEditor(FormatEditor):
"""Pickle reader and writer."""
def __init__(self, *args):
super(PickleEditor, self).__init__(*args)
@staticmethod
def test(file_name: str) -> bool:
"""Test the file is valid."""
try:
with open(file_name, 'rb') as f:
load(f)
except (OSError, UnicodeError, UnpicklingError):
return False
else:
return True
def save(self, file_name: str) -> None:
"""Save to pickle file."""
data = self.save_data()
with open(file_name, 'wb') as f:
dump(data, f, HIGHEST_PROTOCOL)
    def load(self, file_name: str) -> None:
        """Load a pickle file."""
        try:
            with open(file_name, 'rb') as f:
                data = load(f)
        except (OSError, UnicodeError, UnpicklingError) as e:
            QMessageBox.warning(self._parent, "Loader Error", f"{e}")
            # Bail out on a failed load so 'data' is never used unbound
            return
        self.load_data(file_name, data)
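# Illustrative round trip (parent widget and file name are assumed):
#   editor = PickleEditor(parent)
#   if PickleEditor.test("project.pkl"):
#       editor.load("project.pkl")
#   editor.save("project.pkl")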
| 40323230/Pyslvs-PyQt5 | pyslvs_ui/io/project_pickle.py | Python | agpl-3.0 | 1,320 |
import time
from unittest import TestCase
from numpy.random import rand
from numpy.testing import assert_allclose
from span.utils.decorate import cached_property, thunkify
from span.testing import slow
class ThunkifyException(Exception):
pass
class TestCachedProperty(TestCase):
def test_cached_property(self):
class CachedPropertyClass(object):
def __init__(self, a, b):
self.a, self.b = a, b
@cached_property
def c(self):
return self.a + self.b
@cached_property
def d(self):
return self.c
a, b = rand(), rand()
cpc = CachedPropertyClass(a, b)
c = cpc.c
self.assert_(hasattr(cpc, '__property_cache'))
self.assertEqual(c, a + b)
self.assertEqual(cpc.d, c)
class TestThunkify(TestCase):
@slow
def test_thunkify(self):
@thunkify
def thunky(i):
time.sleep(0.1)
return i * 2
def call_thunky(x):
double_thunk = thunky(x)
time.sleep(0.4)
if x == 100:
res = double_thunk()
else:
res = None
return res
t0 = time.time()
call_thunky(10)
t1 = time.time() - t0
t0 = time.time()
call_thunky(100)
t2 = time.time() - t0
assert_allclose(t1, t2, atol=1e-3)
def test_thunkify_raise(self):
@thunkify
def type_error_thrower():
return None ** 2
@thunkify
def exception_thrower():
raise ThunkifyException('thunkify exception')
self.assertRaises(TypeError, type_error_thrower())
self.assertRaises(Exception, exception_thrower())
| cpcloud/span | span/utils/tests/test_decorate.py | Python | gpl-3.0 | 1,777 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque, OrderedDict
import numpy as np
from rlkit.core.eval_util import create_stats_ordered_dict
from rlkit.data_management.path_builder import PathBuilder
from rlkit.samplers.data_collector.base import StepCollector
class MdpStepCollector(StepCollector):
def __init__(
self,
env,
policy,
max_num_epoch_paths_saved=None,
render=False,
render_kwargs=None,
):
if render_kwargs is None:
render_kwargs = {}
self._env = env
self._policy = policy
self._max_num_epoch_paths_saved = max_num_epoch_paths_saved
self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
self._render = render
self._render_kwargs = render_kwargs
self._num_steps_total = 0
self._num_paths_total = 0
self._obs = None # cache variable
def get_epoch_paths(self):
return self._epoch_paths
def end_epoch(self, epoch):
self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
self._obs = None
def get_diagnostics(self):
path_lens = [len(path['actions']) for path in self._epoch_paths]
stats = OrderedDict([
('num steps total', self._num_steps_total),
('num paths total', self._num_paths_total),
])
stats.update(create_stats_ordered_dict(
"path length",
path_lens,
always_show_all_stats=True,
))
return stats
def get_snapshot(self):
return dict(
env=self._env,
policy=self._policy,
)
def collect_new_steps(
self,
max_path_length,
num_steps,
discard_incomplete_paths,
):
for _ in range(num_steps):
self.collect_one_step(max_path_length, discard_incomplete_paths)
def collect_one_step(
self,
max_path_length,
discard_incomplete_paths,
):
if self._obs is None:
self._start_new_rollout()
action, agent_info = self._policy.get_action(self._obs)
next_ob, reward, terminal, env_info = (
self._env.step(action)
)
if self._render:
self._env.render(**self._render_kwargs)
terminal = np.array([terminal])
reward = np.array([reward])
# store path obs
self._current_path_builder.add_all(
observations=self._obs,
actions=action,
rewards=reward,
next_observations=next_ob,
terminals=terminal,
agent_infos=agent_info,
env_infos=env_info,
)
if terminal or len(self._current_path_builder) >= max_path_length:
self._handle_rollout_ending(max_path_length,
discard_incomplete_paths)
self._start_new_rollout()
else:
self._obs = next_ob
def _start_new_rollout(self):
self._current_path_builder = PathBuilder()
self._obs = self._env.reset()
def _handle_rollout_ending(
self,
max_path_length,
discard_incomplete_paths
):
if len(self._current_path_builder) > 0:
path = self._current_path_builder.get_all_stacked()
path_len = len(path['actions'])
if (
path_len != max_path_length
and not path['terminals'][-1]
and discard_incomplete_paths
):
return
self._epoch_paths.append(path)
self._num_paths_total += 1
self._num_steps_total += path_len
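# Illustrative wiring (env and policy construction are assumed, not shown):
#   collector = MdpStepCollector(env, policy, max_num_epoch_paths_saved=10)
#   collector.collect_new_steps(max_path_length=100, num_steps=1000,
#                               discard_incomplete_paths=False)
#   paths = collector.get_epoch_paths()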
class GoalConditionedStepCollector(StepCollector):
def __init__(
self,
env,
policy,
max_num_epoch_paths_saved=None,
render=False,
render_kwargs=None,
observation_key='observation',
desired_goal_key='desired_goal',
):
if render_kwargs is None:
render_kwargs = {}
self._env = env
self._policy = policy
self._max_num_epoch_paths_saved = max_num_epoch_paths_saved
self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
self._render = render
self._render_kwargs = render_kwargs
self._observation_key = observation_key
self._desired_goal_key = desired_goal_key
self._num_steps_total = 0
self._num_paths_total = 0
self._obs = None # cache variable
def get_epoch_paths(self):
return self._epoch_paths
def end_epoch(self, epoch):
self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
self._obs = None
def get_diagnostics(self):
path_lens = [len(path['actions']) for path in self._epoch_paths]
stats = OrderedDict([
('num steps total', self._num_steps_total),
('num paths total', self._num_paths_total),
])
stats.update(create_stats_ordered_dict(
"path length",
path_lens,
always_show_all_stats=True,
))
return stats
def get_snapshot(self):
return dict(
env=self._env,
policy=self._policy,
observation_key=self._observation_key,
desired_goal_key=self._desired_goal_key,
)
def start_collection(self):
self._start_new_rollout()
def end_collection(self):
epoch_paths = self.get_epoch_paths()
return epoch_paths
def collect_new_steps(
self,
max_path_length,
num_steps,
discard_incomplete_paths,
):
for _ in range(num_steps):
self.collect_one_step(max_path_length, discard_incomplete_paths)
def collect_one_step(
self,
max_path_length,
discard_incomplete_paths,
):
if self._obs is None:
self._start_new_rollout()
new_obs = np.hstack((
self._obs[self._observation_key],
self._obs[self._desired_goal_key],
))
action, agent_info = self._policy.get_action(new_obs)
next_ob, reward, terminal, env_info = (
self._env.step(action)
)
if self._render:
self._env.render(**self._render_kwargs)
terminal = np.array([terminal])
reward = np.array([reward])
# store path obs
self._current_path_builder.add_all(
observations=self._obs,
actions=action,
rewards=reward,
next_observations=next_ob,
terminals=terminal,
agent_infos=agent_info,
env_infos=env_info,
)
if terminal or len(self._current_path_builder) >= max_path_length:
self._handle_rollout_ending(max_path_length,
discard_incomplete_paths)
self._start_new_rollout()
else:
self._obs = next_ob
def _start_new_rollout(self):
self._current_path_builder = PathBuilder()
self._obs = self._env.reset()
def _handle_rollout_ending(
self,
max_path_length,
discard_incomplete_paths
):
if len(self._current_path_builder) > 0:
path = self._current_path_builder.get_all_stacked()
path_len = len(path['actions'])
if (
path_len != max_path_length
and not path['terminals'][-1]
and discard_incomplete_paths
):
return
self._epoch_paths.append(path)
self._num_paths_total += 1
self._num_steps_total += path_len
class ObsDictStepCollector(StepCollector):
def __init__(
self,
env,
policy,
max_num_epoch_paths_saved=None,
render=False,
render_kwargs=None,
observation_key='observation',
):
if render_kwargs is None:
render_kwargs = {}
self._env = env
self._policy = policy
self._max_num_epoch_paths_saved = max_num_epoch_paths_saved
self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
self._render = render
self._render_kwargs = render_kwargs
self._observation_key = observation_key
self._num_steps_total = 0
self._num_paths_total = 0
self._obs = None # cache variable
def get_epoch_paths(self):
return self._epoch_paths
def end_epoch(self, epoch):
self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
self._obs = None
def get_diagnostics(self):
path_lens = [len(path['actions']) for path in self._epoch_paths]
stats = OrderedDict([
('num steps total', self._num_steps_total),
('num paths total', self._num_paths_total),
])
stats.update(create_stats_ordered_dict(
"path length",
path_lens,
always_show_all_stats=True,
))
return stats
def get_snapshot(self):
return dict(
env=self._env,
policy=self._policy,
observation_key=self._observation_key,
)
def start_collection(self):
self._start_new_rollout()
def end_collection(self):
epoch_paths = self.get_epoch_paths()
return epoch_paths
def collect_new_steps(
self,
max_path_length,
num_steps,
discard_incomplete_paths,
):
for _ in range(num_steps):
self.collect_one_step(max_path_length, discard_incomplete_paths)
def collect_one_step(
self,
max_path_length,
discard_incomplete_paths,
):
if self._obs is None:
self._start_new_rollout()
new_obs = self._obs[self._observation_key]
action, agent_info = self._policy.get_action(new_obs)
next_ob, reward, terminal, env_info = (
self._env.step(action)
)
if self._render:
self._env.render(**self._render_kwargs)
terminal = np.array([terminal])
reward = np.array([reward])
# store path obs
self._current_path_builder.add_all(
observations=self._obs,
actions=action,
rewards=reward,
next_observations=next_ob,
terminals=terminal,
agent_infos=agent_info,
env_infos=env_info,
)
if terminal or len(self._current_path_builder) >= max_path_length:
self._handle_rollout_ending(max_path_length,
discard_incomplete_paths)
self._start_new_rollout()
else:
self._obs = next_ob
def _start_new_rollout(self):
self._current_path_builder = PathBuilder()
self._obs = self._env.reset()
def _handle_rollout_ending(
self,
max_path_length,
discard_incomplete_paths
):
if len(self._current_path_builder) > 0:
path = self._current_path_builder.get_all_stacked()
path_len = len(path['actions'])
if (
path_len != max_path_length
and not path['terminals'][-1]
and discard_incomplete_paths
):
return
self._epoch_paths.append(path)
self._num_paths_total += 1
self._num_steps_total += path_len
| google-research/DBAP-algorithm | third_party/rlkit_library/rlkit/samplers/data_collector/step_collector.py | Python | apache-2.0 | 12,271 |
"""
POST-PROCESSORS
=============================================================================
Markdown also allows post-processors, which are similar to preprocessors in
that they need to implement a "run" method. However, they are run after core
processing.
"""
import markdown
class Processor:
def __init__(self, markdown_instance=None):
if markdown_instance:
self.markdown = markdown_instance
class Postprocessor(Processor):
"""
Postprocessors are run after the ElementTree it converted back into text.
Each Postprocessor implements a "run" method that takes a pointer to a
text string, modifies it as necessary and returns a text string.
Postprocessors must extend markdown.Postprocessor.
"""
def run(self, text):
"""
Subclasses of Postprocessor should implement a `run` method, which
takes the html document as a single text string and returns a
(possibly modified) string.
"""
pass
class RawHtmlPostprocessor(Postprocessor):
""" Restore raw html to the document. """
def run(self, text):
""" Iterate over html stash and restore "safe" html. """
for i in range(self.markdown.htmlStash.html_counter):
html, safe = self.markdown.htmlStash.rawHtmlBlocks[i]
if self.markdown.safeMode and not safe:
if str(self.markdown.safeMode).lower() == 'escape':
html = self.escape(html)
elif str(self.markdown.safeMode).lower() == 'remove':
html = ''
else:
html = markdown.HTML_REMOVED_TEXT
if safe or not self.markdown.safeMode:
text = text.replace("<p>%s</p>" %
(markdown.preprocessors.HTML_PLACEHOLDER % i),
html + "\n")
text = text.replace(markdown.preprocessors.HTML_PLACEHOLDER % i,
html)
return text
def escape(self, html):
""" Basic html escaping """
html = html.replace('&', '&')
html = html.replace('<', '<')
html = html.replace('>', '>')
return html.replace('"', '"')
class AndSubstitutePostprocessor(Postprocessor):
""" Restore valid entities """
def __init__(self):
pass
def run(self, text):
text = text.replace(markdown.AMP_SUBSTITUTE, "&")
return text
| bbondy/brianbondy.gae | libs/markdown/postprocessors.py | Python | mit | 2,548 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from typing import Any, Callable, Dict, Type, TYPE_CHECKING
from flask_babel import gettext as __
from sqlalchemy import types
from sqlalchemy.engine.interfaces import Dialect
if TYPE_CHECKING:
from superset.db_engine_specs.base import BaseEngineSpec
def literal_dttm_type_factory(
sqla_type: types.TypeEngine,
db_engine_spec: Type["BaseEngineSpec"],
col_type: str,
db_extra: Dict[str, Any],
) -> types.TypeEngine:
"""
Create a custom SQLAlchemy type that supports datetime literal binds.
:param sqla_type: Base type to extend
:param db_engine_spec: Database engine spec which supports `convert_dttm` method
:param col_type: native column type as defined in table metadata
:param db_extra: The database extra object
    :return: SQLAlchemy type that supports using datetime as literal bind
"""
# pylint: disable=too-few-public-methods
class TemporalWrapperType(type(sqla_type)): # type: ignore
# pylint: disable=unused-argument
def literal_processor(self, dialect: Dialect) -> Callable[[Any], Any]:
def process(value: Any) -> Any:
if isinstance(value, datetime):
ts_expression = db_engine_spec.convert_dttm(
col_type, value, db_extra=db_extra
)
if ts_expression is None:
raise NotImplementedError(
__(
"Temporal expression not supported for type: "
"%(col_type)s",
col_type=col_type,
)
)
return ts_expression
return super().process(value)
return process
return TemporalWrapperType()
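# Illustrative usage sketch (commented out; the engine spec, column type and
# dialect below are assumptions, not something this module requires):
#
#     from sqlalchemy import Date
#     from superset.db_engine_specs.sqlite import SqliteEngineSpec
#     wrapped = literal_dttm_type_factory(Date(), SqliteEngineSpec, "DATE", {})
#     process = wrapped.literal_processor(dialect)   # given a SQLAlchemy dialect
#     process(datetime(2021, 1, 1))   # -> engine-specific timestamp expression
#
# Any BaseEngineSpec subclass implementing ``convert_dttm`` can be used; if it
# returns None for the column type, a NotImplementedError is raised instead.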
| apache/incubator-superset | superset/models/sql_types/base.py | Python | apache-2.0 | 2,642 |
#!/usr/bin/env python3
# -*- coding:UTF-8 -*-
#Copyright (c) 1986 Nick Wong.
#Copyright (c) 2016-2026 TP-NEW Corp.
# License: TP-NEW (www.tp-new.com)
__author__ = "Nick Wong"
'''
fork(), child processes and inter-process communication via a Queue
'''
from multiprocessing import Process,Queue
import os,time,random
# Code executed by the writer process
def write(q):
    print('Writer process: %s' % os.getpid())
    for value in ['A', 'B', 'C']:
        print('Putting %s into the queue...' % value)
        q.put(value)
        time.sleep(random.random())
# Code executed by the reader process
def read(q):
    print('Reader process: %s' % os.getpid())
    while True:
        value = q.get(True)
        print('Got %s from the queue.' % value)
if __name__ == '__main__':
    # The parent process creates the Queue and passes it to each child process
    q = Queue()
    pw = Process(target=write, args=(q,))
    pr = Process(target=read, args=(q,))
    # Start child process pw (writer):
    pw.start()
    # Start child process pr (reader)
    pr.start()
    # Wait for pw to finish
    pw.join()
    # pr runs an infinite loop and can never be joined, so terminate it forcibly
    pr.terminate()
 | nick-huang-cc/GraffitiSpaceTT | UnderstandStudyPython/Queue_SH.py | Python | agpl-3.0 | 1096 |
from bottypes.command import Command
from bottypes.command_descriptor import CommandDesc
from bottypes.invalid_command import InvalidCommand
from handlers import handler_factory
from handlers.base_handler import BaseHandler
from util.githandler import GitHandler
from util.loghandler import log
import subprocess
import json
class PingCommand(Command):
"""Ping this server to check for uptime."""
@classmethod
def execute(cls, slack_wrapper, args, timestamp, channel_id, user_id, user_is_admin):
"""Announce the bot's presence in the channel."""
slack_wrapper.post_message(channel_id, "Pong!")
class IntroCommand(Command):
"""Show an introduction message for new members."""
@classmethod
def execute(cls, slack_wrapper, args, timestamp, channel_id, user_id, user_is_admin):
"""Execute the Intro command."""
with open("./config/config.json") as f:
message = json.load(f).get("intro_message")
slack_wrapper.post_message(channel_id, message)
class VersionCommand(Command):
"""Show git information about the current running version of the bot."""
@classmethod
def execute(cls, slack_wrapper, args, timestamp, channel_id, user_id, user_is_admin):
"""Execute the Version command."""
try:
message = GitHandler(".").get_version()
slack_wrapper.post_message(channel_id, message)
except:
log.exception("BotHandler::VersionCommand")
raise InvalidCommand("Sorry, couldn't retrieve the git information for the bot...")
class InviteCommand(Command):
"""
Invite a list of members to the current channel, ignores members already
present.
"""
@classmethod
def execute(cls, slack_wrapper, args, timestamp, channel_id, user_id, user_is_admin):
current_members = slack_wrapper.get_channel_members(channel_id)
# strip uid formatting
invited_users = [user.strip("<>@") for user in args]
# remove already present members
invited_users = [user for user in invited_users if user not in current_members]
failed_users = []
for member in invited_users:
if not slack_wrapper.invite_user(member, channel_id)["ok"]:
failed_users.append(member)
if failed_users:
log.exception("BotHandler::InviteCommand")
raise InvalidCommand("Sorry, couldn't invite the following members to the channel: " + ' '.join(failed_users))
class SysInfoCommand(Command):
"""
    Show information about system resources on the machine that otabot is running on.
"""
@classmethod
def execute(cls, slack_wrapper, args, timestamp, channel_id, user_id, user_is_admin):
result = b"```\n"
result += b'\n'.join(subprocess.check_output(['top', '-bn1']).split(b"\n")[:20])
result += b"\n\n"
result += subprocess.check_output(['df', '-h'])
result += b"```\n"
slack_wrapper.post_message(user_id, result)
class BotHandler(BaseHandler):
"""Handler for generic bot commands."""
def __init__(self):
self.commands = {
"ping": CommandDesc(PingCommand, "Ping the bot", None, None),
"intro": CommandDesc(IntroCommand, "Show an introduction message for new members", None, None),
"version": CommandDesc(VersionCommand, "Show git information about the running version of the bot", None, None),
"invite": CommandDesc(InviteCommand, "Invite a list of members (using @username) to the current channel (smarter than /invite)", ["user_list"], None),
"sysinfo": CommandDesc(SysInfoCommand, "Show system information", None, None, True)
}
handler_factory.register("bot", BotHandler())
| OpenToAllCTF/OTA-Challenge-Bot | handlers/bot_handler.py | Python | mit | 3,767 |
from game import Game
from player import Player
| asceth/devsyn | games/asteroids/__init__.py | Python | mit | 48 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 29 10:10:35 2017
@author: Vijayasai S
"""
"""
mercator_proj converts latitude and longitude to x and y (Mercator map)
coordinates.
lat and long should be given in degrees.
"""
import numpy as np
def mercator_proj(lat, long):
radius = 6371.0 # mean radius of the earth (in KM)
long_0 = 0 # central meridian (Greenwich with longitude zero degree)
X = (radius * (long - long_0) * np.pi) / 180.0
Y = radius * np.log(np.tan(np.radians(45 + (lat * 0.5))))
return X, Y
def reverse_mercator_proj(X, Y):
radius = 6371.0 # mean radius of the earth (in KM)
long_0 = 0 # central meridian (Greenwich with longitude zero degree)
long = np.degrees(long_0 + (X/radius))
lat = np.degrees(2 * np.arctan(np.exp(Y/radius))) - 90
return lat, long
# X, Y = mercator_proj(-23.53,74.85)
# lat, long = reverse_mercator_proj(X, Y)
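# Minimal self-check (assumption: the file is run directly as a script); the
# sample coordinates match the commented example above and are otherwise
# arbitrary.
if __name__ == "__main__":
    X, Y = mercator_proj(-23.53, 74.85)        # degrees -> map coordinates (km)
    lat, long = reverse_mercator_proj(X, Y)    # map coordinates -> degrees
    print(X, Y)
    print(lat, long)                           # ~(-23.53, 74.85) again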
| Vijaysai005/KProject | vijay/DBSCAN/temp/mercator_projection.py | Python | gpl-3.0 | 957 |
# Copyright 2017 IoT-Lab Team
# Contributor(s) : see AUTHORS file
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""CoAP gateway tornado application module."""
import logging
import time
import asyncio
import aiocoap.resource as resource
from tornado.ioloop import PeriodicCallback
from aiocoap import Context, Message, GET, PUT, CHANGED
from aiocoap.numbers.codes import Code
from pyaiot.gateway.common import GatewayBase, Node
logger = logging.getLogger("pyaiot.gw.coap")
COAP_PORT = 5683
MAX_TIME = 120
def _coap_endpoints(link_header):
link = link_header.replace(' ', '')
return link.split(',')
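# For example (illustrative value), a CoRE link-format header such as
#   '</temperature>;rt="temp", </led>'
# is returned as ['</temperature>;rt="temp"', '</led>'] (spaces stripped,
# one entry per linked resource).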
async def _coap_resource(url, method=GET, payload=b''):
protocol = await Context.create_client_context()
request = Message(code=method, payload=payload)
request.set_request_uri(url)
try:
response = await protocol.request(request).response
except Exception as exc:
code = "Failed to fetch resource"
payload = '{0}'.format(exc)
else:
code = response.code
payload = response.payload.decode('utf-8')
finally:
await protocol.shutdown()
logger.debug('Code: {0} - Payload: {1}'.format(code, payload))
return code, payload
class CoapAliveResource(resource.Resource):
"""CoAP server running within the tornado application."""
def __init__(self, gateway):
super(CoapAliveResource, self).__init__()
self._gateway = gateway
async def render_post(self, request):
"""Triggered when a node post an alive check to the gateway."""
payload = request.payload.decode('utf8')
try:
addr = request.remote[0]
except TypeError:
addr = request.remote.sockaddr[0]
logger.debug("CoAP Alive POST received from {}".format(addr))
# Let the controller handle this message
uid = payload.split(':')[-1]
await self._gateway.handle_coap_check(
uid, addr, reset=(payload.startswith('reset')))
        # Reply to confirm that the message has been processed
return Message(code=CHANGED,
payload="Received '{}'".format(payload).encode('utf-8'))
class CoapServerResource(resource.Resource):
"""CoAP server running within the tornado application."""
def __init__(self, gateway):
super(CoapServerResource, self).__init__()
self._gateway = gateway
async def render_post(self, request):
"""Triggered when a node post a new value to the gateway."""
payload = request.payload.decode('utf-8')
try:
remote = request.remote[0]
except TypeError:
remote = request.remote.sockaddr[0]
logger.debug("CoAP POST received from {} with payload: {}"
.format(remote, payload))
path, data = payload.split(":", 1)
self._gateway.handle_coap_post(remote, path, data)
return Message(code=CHANGED,
payload="Received '{}'".format(payload).encode('utf-8'))
class CoapGateway(GatewayBase):
"""Tornado based gateway application for managing CoAP nodes."""
PROTOCOL = 'CoAP'
def __init__(self, keys, options):
self.port = options.coap_port
self.max_time = options.max_time
self.interface = options.interface
self.node_mapping = {} # map node address to its uuid (TODO: FIXME)
super().__init__(keys, options)
# Configure the CoAP server
root_coap = resource.Site()
root_coap.add_resource(('server', ),
CoapServerResource(self))
root_coap.add_resource(('alive', ),
CoapAliveResource(self))
asyncio.ensure_future(
Context.create_server_context(root_coap, bind=('::', self.port)))
# Start the periodic node cleanup task
PeriodicCallback(self.check_dead_nodes, 1000).start()
logger.info('CoAP gateway application started')
async def discover_node(self, node):
"""Discover resources available on a node."""
address = node.resources['ip']
if self.interface is not None:
interface = '%{}'.format(self.interface)
else:
interface = ''
coap_node_url = 'coap://[{}{}]'.format(address, interface)
logger.debug("Discovering CoAP node {}".format(address))
_, payload = await _coap_resource('{0}/.well-known/core'
.format(coap_node_url),
method=GET)
endpoints = [endpoint
for endpoint in _coap_endpoints(payload)
if 'well-known/core' not in endpoint]
logger.debug("Fetching CoAP node resources: {}".format(endpoints))
for endpoint in endpoints:
elems = endpoint.split(';')
path = elems.pop(0).replace('<', '').replace('>', '')
try:
code, payload = await _coap_resource(
'{0}{1}'.format(coap_node_url, path), method=GET)
except Exception:
logger.debug("Cannot discover resource {} on node {}"
.format(endpoint, address))
return
# Remove '/' from path
self.forward_data_from_node(node, path[1:], payload)
logger.debug("CoAP node resources '{}' sent to broker"
.format(endpoints))
async def update_node_resource(self, node, endpoint, payload):
""""""
address = node.resources['ip']
logger.debug("Updating CoAP node '{}' resource '{}'"
.format(address, endpoint))
code, p = await _coap_resource(
'coap://[{0}]/{1}'.format(address, endpoint),
method=PUT,
payload=payload.encode('ascii'))
if code == Code.CHANGED:
self.forward_data_from_node(node, endpoint, payload)
def handle_coap_post(self, address, endpoint, value):
"""Handle CoAP post message sent from coap node."""
if address not in self.node_mapping:
logger.debug("Unknown CoAP node '{}'".format(address))
return
node = self.get_node(self.node_mapping[address])
self.forward_data_from_node(node, endpoint, value)
async def handle_coap_check(self, uid, address, reset=False):
"""Handle check message received from coap node."""
if uid not in self.node_mapping:
            # This is a totally new node: create uid, initialize cached node,
# send 'new' node notification, 'update' notification.
node = Node(uid, ip=address)
self.node_mapping.update({address: uid})
await self.add_node(node)
elif reset:
# The data of the node need to be reset without removing it. This
# is particularly the case after a reboot of the node or a
# firmware update of the node that triggered the reboot.
node = self.get_node(self.node_mapping[address])
self.reset_node(node, default_resources={'ip': address})
else:
# The node simply sent a check message to notify that it's still
# online.
node = self.get_node(self.node_mapping[address])
node.update_last_seen()
def check_dead_nodes(self):
"""Check and remove nodes that are not alive anymore."""
to_remove = [node for node in self.nodes.values()
if int(time.time()) > node.last_seen + self.max_time]
for node in to_remove:
logger.info("Removing inactive node {}".format(node.uid))
self.node_mapping.pop(node.resources['ip'])
self.remove_node(node)
| pyaiot/pyaiot | pyaiot/gateway/coap/gateway.py | Python | bsd-3-clause | 9,193 |
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import math
import requests
import scipy.io
import numpy as np
from mpi4py import MPI
from subprocess import call
from scipy.stats import stats
from sklearn import model_selection
from sklearn.metrics import mean_squared_error
def recon_err(data, F, W):
"""Calcuate reconstruction error
Parameters
----------
data : 2D array
True data to recover.
F : 2D array
HTFA factor matrix.
W : 2D array
HTFA weight matrix.
Returns
-------
float
Returns root mean squared reconstruction error.
"""
recon = F.dot(W).ravel()
err = mean_squared_error(
data.ravel(),
recon,
multioutput='uniform_average')
return math.sqrt(err)
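# Shape convention assumed by the helpers above and below: `data` is
# (n_voxel, n_tr), `F` is (n_voxel, K) and `W` is (K, n_tr), so `F.dot(W)`
# reconstructs the full data matrix before both sides are flattened for the
# root mean squared error.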
def get_train_err(htfa, data, F):
"""Calcuate training error
Parameters
----------
htfa : HTFA
An instance of HTFA, factor anaysis class in BrainIAK.
data : 2D array
Input data to HTFA.
F : 2D array
HTFA factor matrix.
Returns
-------
float
Returns root mean squared error on training.
"""
W = htfa.get_weights(data, F)
return recon_err(data, F, W)
def get_test_err(htfa, test_weight_data, test_recon_data,
test_weight_R, test_recon_R, centers, widths):
"""Calcuate test error
Parameters
----------
htfa : HTFA
An instance of HTFA, factor anaysis class in BrainIAK.
test_weigth_data : 2D array
Data used for testing weights.
test_recon_data : 2D array
Data used for testing reconstruction error.
test_weigth_R : 2D array
Coordinate matrix used for testing weights.
test_recon_R : 2D array
Coordinate matrix used for testing reconstruction error.
centers : 2D array
Center matrix of HTFA factors.
widths : 1D array
Width matrix of HTFA factors.
Returns
-------
float
Returns root mean squared error on test.
"""
# calculate F on test_weight_R, based on trained centers/widths
unique_R, inds = htfa.get_unique_R(test_weight_R)
F = htfa.get_factors(unique_R,
inds,
centers,
widths)
# calculate weights on test_weight_data
W = htfa.get_weights(test_weight_data, F)
# calculate F on final test_recon_data
unique_R, inds = htfa.get_unique_R(test_recon_R)
F = htfa.get_factors(unique_R,
inds,
centers,
widths)
return recon_err(test_recon_data, F, W)
n_subj = 2
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
group_id = int(rank/n_subj)
n_group = math.ceil(size/n_subj)
htfa_comm = comm.Split(group_id, rank)
htfa_rank = htfa_comm.Get_rank()
htfa_size = htfa_comm.Get_size()
if rank == 0:
import logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
data_dir = os.path.join(os.getcwd(), 'data')
if rank == 0 and not os.path.exists(data_dir):
os.makedirs(data_dir)
url = []
url.append(' https://www.dropbox.com/s/r5s9tg4ekxzbrco/s0.mat?dl=0')
url.append(' https://www.dropbox.com/s/39tr01m76vxwaqa/s1.mat?dl=0')
for idx in range(n_subj):
if idx % size == rank:
file_name = os.path.join(data_dir, 's' + str(idx) + '.mat')
#check if file has already been downloaded
if not os.path.exists(file_name):
#check if URL exists
ret = requests.head(url[idx])
if ret.status_code == 200:
#download data
cmd = 'curl --location -o ' + file_name + url[idx]
try:
retcode = call(cmd, shell=True)
if retcode < 0:
print("File download was terminated by signal", -retcode, file=sys.stderr)
else:
print("File download returned", retcode, file=sys.stderr)
except OSError as e:
print("File download failed:", e, file=sys.stderr)
else:
print("File s%d.mat does not exist!\n"%idx)
comm.Barrier()
#get fMRI data and scanner RAS coordinates
data = []
R = []
mapping = {}
n_local_subj = 0
for idx in range(n_subj):
if idx % htfa_size == htfa_rank:
file_name = os.path.join(data_dir, 's' + str(idx) + '.mat')
all_data = scipy.io.loadmat(file_name)
bold = all_data['data']
# z-score the data
bold = stats.zscore(bold, axis=1, ddof=1)
data.append(bold)
R.append(all_data['R'])
mapping[str(n_local_subj)] = idx
n_local_subj += 1
min_K = 3
max_K = 6
n_K = 2
Ks = np.linspace(min_K, max_K, n_K, endpoint=True).astype(int)
n_splits = 3
# recon_err in shape n_splits*n_K
test_recon_errs = np.zeros((n_subj, n_splits, n_K))
tmp_test_recon_errs = np.zeros((n_subj, n_splits, n_K))
train_recon_errs = np.zeros((n_subj, n_splits, n_K))
tmp_train_recon_errs = np.zeros((n_subj, n_splits, n_K))
local_size = math.ceil(n_subj/size)
if n_local_subj > 0:
from brainiak.factoranalysis.htfa import HTFA
n_voxel, n_tr = data[0].shape
n_dim = R[0].shape[1]
test_size = 0.3
rnd_seed_voxel = 30000
rnd_seed_tr = 3000
tr_solver = 'exact'
nlss_method = 'dogbox'
nlss_loss = 'linear'
upper_ratio = 1.8
lower_ratio = 0.1
voxel_ratio = 0.25
tr_ratio = 0.1
max_voxel = 2000
max_tr = 200
max_sample_voxel = min(max_voxel,
int(voxel_ratio * n_voxel))
max_sample_tr = min(max_tr, int(tr_ratio * n_tr))
#split voxel and TR for two-level cross validation
ss_voxel = model_selection.ShuffleSplit(
n_splits=n_splits,
test_size=test_size,
random_state=rnd_seed_voxel)
voxel_indices = np.arange(n_voxel)
ss_voxel.get_n_splits(voxel_indices)
ss_tr = model_selection.ShuffleSplit(
n_splits=n_splits,
test_size=test_size,
random_state=rnd_seed_tr)
tr_indices = np.arange(n_tr)
ss_tr.get_n_splits(tr_indices)
train_voxels = []
test_voxels = []
train_trs = []
test_trs = []
for train_index, test_index in ss_voxel.split(voxel_indices):
train_voxels.append(train_index)
test_voxels.append(test_index)
for train_index, test_index in ss_tr.split(tr_indices):
train_trs.append(train_index)
test_trs.append(test_index)
for p in range(n_splits):
for idx in range(n_K):
index = p*n_K + idx
if index % n_group == group_id:
#split data and R
train_voxel_indices = train_voxels[p]
test_voxel_indices = test_voxels[p]
train_tr_indices = train_trs[p]
test_tr_indices = test_trs[p]
train_data = []
total_test_data = []
test_weight_data = []
test_recon_data = []
test_weight_R = []
test_recon_R = []
for s in range(n_local_subj):
train_data.append(data[s][:, train_tr_indices])
total_test_data.append(data[s][:, test_tr_indices])
test_weight_data.append(
total_test_data[s][train_voxel_indices, :])
test_recon_data.append(
total_test_data[s][test_voxel_indices, :])
test_weight_R.append(R[s][train_voxel_indices])
test_recon_R.append(R[s][test_voxel_indices])
htfa = HTFA(K=Ks[idx],
max_global_iter=5,
max_local_iter=2,
n_subj=n_subj,
nlss_method=nlss_method,
nlss_loss=nlss_loss,
tr_solver=tr_solver,
upper_ratio=upper_ratio,
lower_ratio=lower_ratio,
max_tr=max_sample_tr,
max_voxel=max_sample_voxel,
comm=htfa_comm,
verbose=True)
htfa.fit(train_data, R)
for s in range(n_local_subj):
#get posterior for each subject
subj_idx = mapping[str(s)]
start_idx = s * htfa.prior_size
end_idx = (s + 1) * htfa.prior_size
local_posteiror = htfa.local_posterior_[start_idx:end_idx]
local_centers = htfa.get_centers(local_posteiror)
local_widths = htfa.get_widths(local_posteiror)
htfa.n_dim = n_dim
htfa.cov_vec_size = np.sum(np.arange(htfa.n_dim) + 1)
htfa.map_offset = htfa.get_map_offset()
#training happens on all voxels, but part of TRs
unique_R_all, inds_all = htfa.get_unique_R(R[s])
train_F = htfa.get_factors(unique_R_all,
inds_all,
local_centers,
local_widths)
#calculate train_recon_err
tmp_train_recon_errs[subj_idx, p,idx] = get_train_err(htfa,
train_data[s],
train_F)
#calculate weights on test_weight_data, test_recon_err on test_recon_data
tmp_test_recon_errs[subj_idx, p,idx] = get_test_err(htfa,
test_weight_data[s],
test_recon_data[s],
test_weight_R[s],
test_recon_R[s],
local_centers,
local_widths)
comm.Reduce(tmp_test_recon_errs, test_recon_errs, op=MPI.SUM)
comm.Reduce(tmp_train_recon_errs, train_recon_errs, op=MPI.SUM)
if rank == 0:
errs = train_recon_errs.reshape(n_subj * n_splits, n_K)
mean_errs = np.average(errs, axis=0)
print("train error on each K is\n")
print(mean_errs)
errs = test_recon_errs.reshape(n_subj * n_splits, n_K)
mean_errs = np.average(errs, axis=0)
print("test error on each K is\n")
print(mean_errs)
best_idx = np.argmin(mean_errs)
print("best K for test recon is %d " % (Ks[best_idx]))
| TuKo/brainiak | examples/factoranalysis/htfa_cv_example.py | Python | apache-2.0 | 11,484 |
# -*- coding: utf-8 -*-
#
# WsgiService documentation build configuration file, created by
# sphinx-quickstart on Fri May 1 16:34:26 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import wsgiservice
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'WsgiService'
copyright = u'2009-2014, Patrice Neff'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = wsgiservice.__version__
if 'pre' in version:
version = version[:version.index('pre')]
# The full version, including alpha/beta/rc tags.
release = wsgiservice.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'WsgiServicedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'WsgiService.tex', u'WsgiService Documentation',
u'Patrice Neff', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
todo_include_todos = True
| beekpr/wsgiservice | docs/conf.py | Python | bsd-2-clause | 6,526 |
import numpy as np
from .utils import trig_sum
def lombscargle_fast(t, y, dy, f0, df, Nf,
center_data=True, fit_mean=True,
normalization='standard',
use_fft=True, trig_sum_kwds=None):
"""Fast Lomb-Scargle Periodogram
This implements the Press & Rybicki method [1]_ for fast O[N log(N)]
Lomb-Scargle periodograms.
Parameters
----------
t, y, dy : array_like (NOT astropy.Quantities)
times, values, and errors of the data points. These should be
broadcastable to the same shape.
f0, df, Nf : (float, float, int)
parameters describing the frequency grid, f = f0 + df * arange(Nf).
center_data : bool (default=True)
Specify whether to subtract the mean of the data before the fit
fit_mean : bool (default=True)
If True, then compute the floating-mean periodogram; i.e. let the mean
vary with the fit.
normalization : string (optional, default='standard')
Normalization to use for the periodogram.
Options are 'standard', 'model', 'log', or 'psd'.
use_fft : bool (default=True)
If True, then use the Press & Rybicki O[NlogN] algorithm to compute
the result. Otherwise, use a slower O[N^2] algorithm
trig_sum_kwds : dict or None (optional)
extra keyword arguments to pass to the ``trig_sum`` utility.
Options are ``oversampling`` and ``Mfft``. See documentation
of ``trig_sum`` for details.
Returns
-------
power : ndarray
Lomb-Scargle power associated with each frequency.
Units of the result depend on the normalization.
Notes
-----
Note that the ``use_fft=True`` algorithm is an approximation to the true
Lomb-Scargle periodogram, and as the number of points grows this
approximation improves. On the other hand, for very small datasets
(<~50 points or so) this approximation may not be useful.
References
----------
.. [1] Press W.H. and Rybicki, G.B, "Fast algorithm for spectral analysis
of unevenly sampled data". ApJ 1:338, p277, 1989
.. [2] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
.. [3] W. Press et al, Numerical Recipes in C (2002)
"""
if dy is None:
dy = 1
# Validate and setup input data
t, y, dy = np.broadcast_arrays(t, y, dy)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
# Validate and setup frequency grid
if f0 < 0:
raise ValueError("Frequencies must be positive")
if df <= 0:
raise ValueError("Frequency steps must be positive")
if Nf <= 0:
raise ValueError("Number of frequencies must be positive")
w = dy ** -2.0
w /= w.sum()
# Center the data. Even if we're fitting the offset,
# this step makes the expressions below more succinct
if center_data or fit_mean:
y = y - np.dot(w, y)
# set up arguments to trig_sum
kwargs = dict.copy(trig_sum_kwds or {})
kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf)
# ----------------------------------------------------------------------
# 1. compute functions of the time-shift tau at each frequency
Sh, Ch = trig_sum(t, w * y, **kwargs)
S2, C2 = trig_sum(t, w, freq_factor=2, **kwargs)
if fit_mean:
S, C = trig_sum(t, w, **kwargs)
tan_2omega_tau = (S2 - 2 * S * C) / (C2 - (C * C - S * S))
else:
tan_2omega_tau = S2 / C2
# This is what we're computing below; the straightforward way is slower
# and less stable, so we use trig identities instead
#
# omega_tau = 0.5 * np.arctan(tan_2omega_tau)
# S2w, C2w = np.sin(2 * omega_tau), np.cos(2 * omega_tau)
# Sw, Cw = np.sin(omega_tau), np.cos(omega_tau)
S2w = tan_2omega_tau / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
C2w = 1 / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
Cw = np.sqrt(0.5) * np.sqrt(1 + C2w)
Sw = np.sqrt(0.5) * np.sign(S2w) * np.sqrt(1 - C2w)
# ----------------------------------------------------------------------
# 2. Compute the periodogram, following Zechmeister & Kurster
# and using tricks from Press & Rybicki.
YY = np.dot(w, y ** 2)
YC = Ch * Cw + Sh * Sw
YS = Sh * Cw - Ch * Sw
CC = 0.5 * (1 + C2 * C2w + S2 * S2w)
SS = 0.5 * (1 - C2 * C2w - S2 * S2w)
if fit_mean:
CC -= (C * Cw + S * Sw) ** 2
SS -= (S * Cw - C * Sw) ** 2
power = (YC * YC / CC + YS * YS / SS)
if normalization == 'standard':
power /= YY
elif normalization == 'model':
power /= YY - power
elif normalization == 'log':
power = -np.log(1 - power / YY)
elif normalization == 'psd':
power *= 0.5 * (dy ** -2.0).sum()
else:
raise ValueError("normalization='{}' "
"not recognized".format(normalization))
return power
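# Minimal usage sketch (assumption: the file is executed as a module inside
# its package, e.g. via ``python -m``, so the relative import of trig_sum
# resolves); the synthetic signal and frequency grid below are arbitrary.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    t = np.sort(100 * rng.rand(200))                         # irregular sampling
    y = np.sin(2 * np.pi * 0.17 * t) + 0.1 * rng.randn(200)  # 0.17 Hz signal + noise
    dy = 0.1 * np.ones_like(y)
    power = lombscargle_fast(t, y, dy, f0=0.01, df=0.001, Nf=500)
    print("peak frequency ~", 0.01 + 0.001 * np.argmax(power))  # expected near 0.17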
| bsipocz/astropy | astropy/timeseries/periodograms/lombscargle/implementations/fast_impl.py | Python | bsd-3-clause | 4,924 |
"""The ``celery upgrade`` command, used to upgrade from previous versions."""
from __future__ import absolute_import, print_function, unicode_literals
import codecs
from celery.app import defaults
from celery.bin.base import Command
from celery.utils.functional import pass1
class upgrade(Command):
"""Perform upgrade between versions."""
choices = {'settings'}
def add_arguments(self, parser):
group = parser.add_argument_group('Upgrading Options')
group.add_argument(
'--django', action='store_true', default=False,
help='Upgrade Django project',
)
group.add_argument(
'--compat', action='store_true', default=False,
help='Maintain backwards compatibility',
)
group.add_argument(
'--no-backup', action='store_true', default=False,
help='Dont backup original files',
)
def usage(self, command):
return '%(prog)s <command> settings [filename] [options]'
def run(self, *args, **kwargs):
try:
command = args[0]
except IndexError:
raise self.UsageError(
'missing upgrade type: try `celery upgrade settings` ?')
if command not in self.choices:
raise self.UsageError('unknown upgrade type: {0}'.format(command))
return getattr(self, command)(*args, **kwargs)
def settings(self, command, filename,
no_backup=False, django=False, compat=False, **kwargs):
lines = self._slurp(filename)
keyfilter = self._compat_key if django or compat else pass1
print('processing {0}...'.format(filename), file=self.stderr)
# gives list of tuples: ``(did_change, line_contents)``
new_lines = [
self._to_new_key(line, keyfilter) for line in lines
]
if any(n[0] for n in new_lines): # did have changes
if not no_backup:
self._backup(filename)
with codecs.open(filename, 'w', 'utf-8') as write_fh:
for _, line in new_lines:
write_fh.write(line)
            print('Changes to your settings have been made!',
file=self.stdout)
else:
print('Does not seem to require any changes :-)',
file=self.stdout)
def _slurp(self, filename):
with codecs.open(filename, 'r', 'utf-8') as read_fh:
return [line for line in read_fh]
def _backup(self, filename, suffix='.orig'):
lines = []
backup_filename = ''.join([filename, suffix])
print('writing backup to {0}...'.format(backup_filename),
file=self.stderr)
with codecs.open(filename, 'r', 'utf-8') as read_fh:
with codecs.open(backup_filename, 'w', 'utf-8') as backup_fh:
for line in read_fh:
backup_fh.write(line)
lines.append(line)
return lines
def _to_new_key(self, line, keyfilter=pass1, source=defaults._TO_NEW_KEY):
# sort by length to avoid, for example, broker_transport overriding
# broker_transport_options.
for old_key in reversed(sorted(source, key=lambda x: len(x))):
new_line = line.replace(old_key, keyfilter(source[old_key]))
if line != new_line and 'CELERY_CELERY' not in new_line:
return 1, new_line # only one match per line.
return 0, line
def _compat_key(self, key, namespace='CELERY'):
key = key.upper()
if not key.startswith(namespace):
key = '_'.join([namespace, key])
return key
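# Worked example (the concrete mapping lives in
# ``celery.app.defaults._TO_NEW_KEY`` and the names here are assumptions): a
# line such as ``BROKER_URL = 'amqp://'`` would normally be rewritten to its
# new-style name (e.g. ``broker_url``), while ``--django``/``--compat`` routes
# the new name through ``_compat_key`` and keeps an uppercase,
# ``CELERY_``-prefixed form such as ``CELERY_BROKER_URL`` instead.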
| ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/celery/bin/upgrade.py | Python | mit | 3,651 |
import os
from posixpath import basename
from urllib.parse import urlparse
from .common.spiders import BaseDocumentationSpider
from typing import Any, List, Set
def get_images_dir(images_path: str) -> str:
# Get index html file as start url and convert it to file uri
dir_path = os.path.dirname(os.path.realpath(__file__))
target_path = os.path.join(dir_path, os.path.join(*[os.pardir] * 4), images_path)
return os.path.realpath(target_path)
class UnusedImagesLinterSpider(BaseDocumentationSpider):
images_path = ""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.static_images = set() # type: Set[str]
self.images_static_dir = get_images_dir(self.images_path) # type: str
def _is_external_url(self, url: str) -> bool:
is_external = url.startswith('http') and self.start_urls[0] not in url
if self._has_extension(url) and 'localhost:9981/{}'.format(self.images_path) in url:
self.static_images.add(basename(urlparse(url).path))
return is_external or self._has_extension(url)
def closed(self, *args: Any, **kwargs: Any) -> None:
unused_images = set(os.listdir(self.images_static_dir)) - self.static_images
if unused_images:
exception_message = "The following images are not used in documentation " \
"and can be removed: {}"
self._set_error_state()
unused_images_relatedpath = [
os.path.join(self.images_path, img) for img in unused_images]
raise Exception(exception_message.format(', '.join(unused_images_relatedpath)))
class HelpDocumentationSpider(UnusedImagesLinterSpider):
name = "help_documentation_crawler"
start_urls = ['http://localhost:9981/help']
deny_domains = [] # type: List[str]
deny = ['/privacy']
images_path = "static/images/help"
class APIDocumentationSpider(UnusedImagesLinterSpider):
name = 'api_documentation_crawler'
start_urls = ['http://localhost:9981/api']
deny_domains = [] # type: List[str]
images_path = "static/images/api"
class PorticoDocumentationSpider(BaseDocumentationSpider):
name = 'portico_documentation_crawler'
start_urls = ['http://localhost:9981/hello',
'http://localhost:9981/history',
'http://localhost:9981/plans',
'http://localhost:9981/team',
'http://localhost:9981/apps',
'http://localhost:9981/integrations',
'http://localhost:9981/terms',
'http://localhost:9981/privacy',
'http://localhost:9981/features',
'http://localhost:9981/why-zulip',
'http://localhost:9981/for/open-source',
'http://localhost:9981/for/companies',
'http://localhost:9981/for/working-groups-and-communities',
'http://localhost:9981/for/mystery-hunt',
'http://localhost:9981/security']
deny_domains = [] # type: List[str]
| jackrzhang/zulip | tools/documentation_crawler/documentation_crawler/spiders/check_help_documentation.py | Python | apache-2.0 | 3,112 |
"""Helper script for generating compact docstring coverage report.
The ``docstr-coverage`` package has quite verbose output. This script
mimics the one-line-per-file output of ``coverage.py``.
It also sets some defaults and does more elaborate filtering/exclusion than
currently possible with the basic ``docstr-coverage`` package.
"""
from docstr_coverage import coverage
import glob
FORMAT = "%-74s %5s %5s %3d%%"
def main():
"""Call docstr-coverage's main method with our preferences.
- Custom filtering.
- Some defaults (like "don't worry about __init__() methods").
- Custom one-line-per file output.
"""
filenames = glob.glob("**/*.py", recursive=True)
filenames = [
filename
for filename in filenames
if not (
filename.startswith("external")
or filename.startswith("help")
or "/test" in filename
)
]
file_results, total_results = coverage.get_docstring_coverage(
filenames,
skip_magic=True,
skip_file_docstring=False,
skip_init=True,
skip_class_def=False,
verbose=0,
)
for filename in filenames:
result = file_results.get(filename)
print(
FORMAT
% (
filename,
result["needed_count"],
result["missing_count"],
result["coverage"],
)
)
print(
FORMAT
% (
"TOTAL",
total_results["needed_count"],
total_results["missing_count"],
total_results["coverage"],
)
)
if __name__ == "__main__":
main()
| nens/threedi-qgis-plugin | scripts/docstring-report.py | Python | gpl-3.0 | 1,684 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
The Refextract task tests suite for tasks
It requires a fully functional invenio installation.
"""
# Note: unit tests were moved to the regression test suite. Keeping
# this file here with empty test case set in order to overwrite any
# previously installed file. Also, keeping TEST_SUITE empty so that
# `inveniocfg --run-unit-tests' would not complain.
from invenio.testutils import make_test_suite, run_test_suite
TEST_SUITE = make_test_suite()
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| jmartinm/invenio | modules/docextract/lib/refextract_api_unit_tests.py | Python | gpl-2.0 | 1,319 |
import tkinter as tk
from tkinter import simpledialog, messagebox
import tkinter.filedialog as filedialog
import tkinter.ttk as ttk
import os as os
import json
from PIL import Image, ImageTk
import shutil
import re
PEOPLE_FILENAME = "settings.json"
def load_settings():
global people
global directory_variable
global tagged_members
people = []
directory_variable.set("")
try:
with open(PEOPLE_FILENAME, mode="r") as f:
settings = json.load(f)
try:
people = settings['people']
except Exception:
pass
try:
directory_variable.set(settings['directory'])
except Exception:
pass
try:
tagged_members = settings['tagged_members']
except Exception:
tagged_members = dict()
except Exception as ex:
pass
def draw_people():
global people_frame
global people
for child in people_frame.winfo_children():
child.destroy()
if len(people) == 0:
ttk.Label(people_frame, text="No people present").grid(row=0,column=0, sticky=[tk.E, tk.W])
else:
for i in range(0, len(people)):
p = people[i]
name = ttk.Label(people_frame, text=p)
name.grid(row=i,column=0, sticky=[tk.E, tk.W])
ttk.Button(people_frame, text="Export", command=lambda p=p: export_pressed(p)).grid(row=i, column=2, sticky=[tk.E])
ttk.Button(people_frame, text="Edit", command=lambda p=p, name=name: edit_person_pressed(p)).grid(row=i, column=3, sticky=[tk.E])
ttk.Button(people_frame, text="Delete", command=lambda p=p: delete_person_pressed(p)).grid(row=i, column=4, sticky=[tk.E])
people_frame.columnconfigure(0, weight=1)
def save_settings():
global people
global directory_variable
global tagged_members
try:
with open(PEOPLE_FILENAME, mode="w") as f:
settings = dict()
settings['people'] = people
settings['directory'] = directory_variable.get()
settings['tagged_members'] = tagged_members
json.dump(settings, f)
except Exception as ex:
messagebox.showerror("Error", ex)
def export_pressed(p):
global tagged_members
global directory_variable
output_directory = filedialog.askdirectory()
if not output_directory or len(output_directory) == 0:
messagebox.showerror("Error.", "Must enter a directory")
return
try:
for image in tagged_members.keys():
if p in tagged_members[image]:
input_file = os.path.join(directory_variable.get(), image)
output_file = os.path.join(output_directory, image)
dir = os.path.split(image)[0]
if len(dir) > 0:
try:
os.makedirs(os.path.join(output_directory, dir))
except FileExistsError:
pass
shutil.copyfile(input_file, output_file)
messagebox.showinfo("Done", "Copy complete.")
except Exception as ex:
messagebox.showerror("Error", str(ex))
def edit_person_pressed(p):
global people
global tagged_members
new_name = simpledialog.askstring("Name", "Enter new name", initialvalue=p)
if new_name == None:
return
new_name = new_name.strip()
if len(new_name) == 0:
messagebox.showerror("Error", "Must enter a name!")
return
try:
people[people.index(p)] = new_name
for k in tagged_members.keys():
tagged_members[k] = [new_name if x == p else x for x in tagged_members[k]]
draw_people()
draw_image()
save_settings()
except ValueError:
messagebox.showerror("Error", "%s not in list of people!" % p)
def delete_person_pressed(p):
global people
global tagged_members
if not messagebox.askyesno("Delete Person", "Are you sure you want to delete %s" % p):
return
try:
people.remove(p)
for i in tagged_members.keys():
try:
tagged_members[i].remove(p)
except ValueError:
pass
draw_people()
save_settings()
except ValueError:
messagebox.showerror("Error", "%s not in list of people!" % p)
def browse_button_pressed():
global directory_variable
if directory_variable.get() and len(directory_variable.get()) > 0:
if not messagebox.askyesno("Warning", "Changing the directory will erase all tagged photos.\nContinue?"):
return
dir_ = filedialog.askdirectory()
if dir_ and len(dir_) > 0:
directory_variable.set(dir_)
directory_changed()
def add_person_button_pressed():
global people
name = simpledialog.askstring("Name", "Enter name.")
if name == None:
return
name = name.strip()
if len(name) == 0:
messagebox.showerror("Error", "Must enter a name!")
return
if name in people:
messagebox.showerror("Error", "%s already in list!" % name)
return
people.append(name)
draw_people()
save_settings()
def directory_changed():
global tagged_members
tagged_members = dict()
save_settings()
build_image_list()
draw_image()
def build_image_list():
global directory_variable
global image_list_box
global tagged_members
image_list_box.delete(0, tk.END)
image_list = []
directory = directory_variable.get()
for root, dir, files in os.walk(directory):
relative_path = root.replace(directory, "")
for file in files:
if file.endswith( ('.png', '.jpg') ):
relative_filepath = os.path.join(relative_path, file)
relative_filepath = re.sub("^[\\\\/]", "", relative_filepath)
image_list_box.insert(tk.END, relative_filepath)
image_list.append(relative_filepath)
tagged_deleted = False
new_tagged_members = tagged_members.copy()
for k in tagged_members.keys():
if k not in image_list:
del new_tagged_members[k]
tagged_deleted = True
if tagged_deleted:
tagged_members = new_tagged_members
save_settings()
def draw_image():
global image_frame
global photo_image
global image_list_box
global directory_variable
global people
global tagged_members
global check_variable
for child in image_frame.winfo_children():
child.destroy()
button_frame = ttk.Frame(image_frame)
ttk.Button(button_frame, text="Previous", command=previous_button_pressed).grid(row=0,column=0)
ttk.Button(button_frame, text="Next", command=next_button_pressed).grid(row=0,column=1)
button_frame.grid(row=0,column=0,sticky=[tk.N])
directory = directory_variable.get()
try:
i = image_list_box.curselection()[0]
except IndexError:
i = 0
selected_image = image_list_box.get(i)
if directory_variable.get() and len(directory) > 0 and selected_image and len(selected_image) > 0:
path = os.path.join(directory, selected_image)
path = path.replace("\\", "/")
image = Image.open(path)
image.thumbnail((400, 300), Image.ANTIALIAS)
photo_image = ImageTk.PhotoImage(image)
photo_label = tk.Label(image_frame, text="Hello World!", image=photo_image)
photo_label.grid(row=1,column=0)
row = 2
check_variable = dict()
for p in people:
try:
tagged = p in tagged_members[selected_image]
except:
tagged = False
check_variable[p] = tk.BooleanVar()
check_variable[p].set(tagged)
check_button = ttk.Checkbutton(image_frame, text=p, variable=check_variable[p], onvalue=True, offvalue=False, command= lambda image=selected_image, person=p: tagged_members_changed(image, person))
check_button.grid(row=row, column=0, sticky=[tk.W])
row += 1
def tagged_members_changed(image, person):
global tagged_members
global check_variable
try:
tagged_members[image]
except KeyError:
tagged_members[image] = []
if check_variable[person].get() == True:
if not person in tagged_members[image]:
tagged_members[image].append(person)
else:
try:
tagged_members[image].remove(person)
except ValueError:
pass
save_settings()
def previous_button_pressed():
global image_list_box
try:
i = image_list_box.curselection()[0]
image_list_box.selection_clear(i)
if i > 0:
i -= 1
except IndexError:
i = 0
image_list_box.selection_set(i)
draw_image()
def next_button_pressed():
global image_list_box
try:
i = image_list_box.curselection()[0]
image_list_box.selection_clear(i)
if i < image_list_box.index(tk.END):
i += 1
except IndexError:
i = 0
image_list_box.selection_set(i)
draw_image()
def on_image_select(evt):
draw_image()
def refresh_pressed():
build_image_list()
draw_image()
if __name__ == "__main__":
global people_frame
global directory_variable
global image_list_box
global image_frame
global tagged_members
root = tk.Tk()
n = ttk.Notebook(root)
n.columnconfigure(0, weight=1)
n.grid(sticky=[tk.N, tk.S, tk.W, tk.E])
f1 = tk.Frame(n)
ttk.Label(f1, text="Root Directory").grid(row=0,column=0, sticky=tk.W)
f1.columnconfigure(0, weight=1)
directory_variable = tk.StringVar()
directory_variable.trace("u", directory_changed)
root_directory = ttk.Entry(f1, textvariable=directory_variable)
root_directory.grid(row=1, column=0, columnspan=2, sticky=[tk.E, tk.W])
browse_button = ttk.Button(f1, text="Browse", command=browse_button_pressed)
browse_button.grid(row=1, column=2, sticky=tk.E)
n.add(f1, text="Settings")
f2 = tk.Frame(n)
n.add(f2, text="People")
people_frame = tk.Frame(f2)
people_frame.grid(row=0, column=0, sticky=[tk.N, tk.S, tk.W, tk.E])
load_settings()
draw_people()
add_frame = ttk.Frame(f2)
add_frame.grid(row=1, column=0, sticky=[tk.W, tk.N])
add_people_button = ttk.Button(add_frame, text="Add", command=add_person_button_pressed)
add_people_button.pack()
f2.columnconfigure(0, weight=1)
f3 = tk.Frame(n)
image_list_box = tk.Listbox(f3, width=35)
image_list_box.grid(row=0, column=0, sticky=[tk.W, tk.N, tk.S])
image_list_box.bind('<<ListboxSelect>>', on_image_select)
scrollbar = tk.Scrollbar(f3, orient=tk.VERTICAL)
scrollbar.config(command=image_list_box.yview)
scrollbar.grid(row=0, column=1, sticky=[tk.W, tk.N, tk.S])
ttk.Button(f3, text="Refresh", command=refresh_pressed).grid(row=1,column=0, sticky=[tk.S])
f3.rowconfigure(0, weight=1)
build_image_list()
image_frame = ttk.Frame(f3)
image_frame.grid(row=0, column=2, sticky=[tk.N])
draw_image()
f3.columnconfigure(2, weight=1)
n.add(f3, text="Images")
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
root.wm_minsize(800, 600)
root.mainloop()
| ande3577/image_tagger | image_tagger.py | Python | gpl-3.0 | 11,353 |
# -*- coding: utf-8 -*-
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2019 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
import re
import operator
import os
import pytest
from xdist.dsession import LoadScopeScheduling
from dosagelib import scraper
def get_test_scrapers():
"""Return scrapers that should be tested."""
if "TESTALL" in os.environ:
# test all comics (this will take some time)
scrapers = scraper.get_scrapers()
else:
if 'TESTCOMICS' in os.environ:
scraper_pattern = re.compile(os.environ['TESTCOMICS'])
else:
# Get limited number of scraper tests on Travis builds to make it
# faster
testscrapernames = [
# "classic" _BasicScraper
'AbstruseGoose',
# complex _ParserScraper
'GoComics/CalvinAndHobbes',
# _WordPressScraper
'GrrlPower'
]
scraper_pattern = re.compile('^(' + '|'.join(testscrapernames) +
')$')
scrapers = [
scraperobj for scraperobj in scraper.get_scrapers()
if scraper_pattern.match(scraperobj.name)
]
return scrapers
def pytest_generate_tests(metafunc):
if 'scraperobj' in metafunc.fixturenames:
scrapers = get_test_scrapers()
scraperids = list(x.name for x in scrapers)
metafunc.parametrize('scraperobj', scrapers, ids=scraperids)
class LoadModScheduling(LoadScopeScheduling):
"""Implement load scheduling for comic modules. See xdist for details."""
def _split_scope(self, nodeid):
mod, test = nodeid.split("::", 1)
return mod + "::" + test.split("/", 1)[0]
@pytest.mark.trylast
def pytest_xdist_make_scheduler(config, log):
return LoadModScheduling(config, log)
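# Example of the scheduling scope (hypothetical test id): a node id like
# "tests/modules/check_comics.py::test_comicmodule[GoComics/CalvinAndHobbes]"
# is scoped as "tests/modules/check_comics.py::test_comicmodule[GoComics", so
# all strips sharing the same comic prefix run on one xdist worker.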
| peterjanes/dosage | tests/modules/conftest.py | Python | mit | 1,983 |
from xform import *
from dataflow import *
def apply(cfg):
# Various algos below don't work with no explicit entry in CFG
cfg_preheader(cfg)
# Also don't work with >1 entries
remove_unreachable_entries(cfg)
# Various algos below require single-exit CFG
cfg_single_exit(cfg)
foreach_inst(cfg, sub_const_to_add)
# Initial pass on simplifying expressions
foreach_inst(cfg, simplify_inst)
analyze_live_vars(cfg)
insert_initial_regs(cfg)
analyze_reach_defs(cfg)
make_du_chains(cfg)
#const_propagation(cfg)
#copy_propagation(cfg)
#mem_propagation(cfg)
expr_propagation(cfg)
analyze_live_vars(cfg)
estimate_params(cfg)
foreach_bblock(cfg, dead_code_elimination)
collect_calls(cfg)
| pfalcon/ScratchABlock | script_propagate_dce.py | Python | gpl-3.0 | 765 |
# -*- coding: utf-8 -*-
# Telegram
from telegram.ext import run_async, CallbackContext
from telegram import InlineKeyboardButton, InlineKeyboardMarkup, Update
#Modules
from module.shared import config_map, check_log
# System libraries
from urllib.parse import quote
import requests
import sqlite3
import logging
import base64
import gitlab
import time
import re
import os
# Logger
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
GITLAB_AUTH_TOKEN = config_map['gitlab']['token']
GITLAB_ROOT_GROUP = config_map['gitlab']['root']
session = None
api = None
db = None
# Formats
formats = {
**dict.fromkeys(["pdf", "epub"], "📕"),
**dict.fromkeys(["doc", "docx", "txt"], "📘"),
**dict.fromkeys(["jpg", "jpeg", "bmp", "png", "gif"], "📷"),
**dict.fromkeys(["rar", "zip"], "🗄"),
**dict.fromkeys(["out", "exe"], "⚙"),
**dict.fromkeys(["c", "cc", "cpp", "h", "py", "java", "js", "html", "php"], "💻")
}
def git(update: Update, context: CallbackContext):
check_log(update, "gitlab")
chat_id = update.message.chat_id
executed_command = update.message.text.split(' ')[0]
if chat_id < 0:
        context.bot.sendMessage(chat_id=chat_id, text="❗️ The %s function is not allowed in groups" % executed_command)
else:
gitlab_handler(update, context)
def new_session(token):
"""
Create a new session using the authentication token passed as argument.
Parameters:
token: Authentication Token for GitLab APIs
"""
global session
session = requests.Session()
session.headers.update({'Private-Token': token})
def init_api():
""" Initialize the GitLab APIs """
global api
global session
if not api:
logger.info(msg="API Initialized")
new_session(GITLAB_AUTH_TOKEN)
api = gitlab.Gitlab(url='https://gitlab.com', api_version=4, session=session)
def get_chat_id(update: Update):
"""
Return the chat ID from update object
Parameters:
update: "update" object of Telegram API
"""
chat_id = None
if hasattr(update, "callback_query") and hasattr(update.callback_query, "message"):
chat_id = update.callback_query.message.chat.id
if not chat_id:
chat_id = update.message.chat_id
return chat_id
def get_subgroups(group_id):
"""
Returns an array containing subgroups of a group
Paramaters:
group_id: Parent group ID
"""
global api
try:
return api.groups.get(group_id).subgroups.list()
except gitlab.GitlabGetError:
return []
def get_projects(group_id):
"""
Returns an array containing projects of a group
Paramaters:
group_id: Parent group ID
"""
global api
try:
return api.groups.get(group_id).projects.list()
except gitlab.GitlabGetError:
return []
def get_repository_tree(project_id, path='/', recursive=False):
"""
Return the repository tree
Parameters:
project_id: Project ID
path: Folder path of the project (default: '/')
recursive: If True return every file or directory of the repository (default: False)
"""
global api
try:
return api.projects.get(project_id).repository_tree(path=path, recursive=recursive)
except gitlab.GitlabGetError:
return None
def explore_repository_tree(origin_id, path='/', db=None):
"""
Explore a repository analyzing for files and directories
Parameters:
origin_id: Origin repository ID
path: (default: '/')
db: (default: None)
"""
buttons = []
repository_tree = get_repository_tree(origin_id, path)
for item in repository_tree:
if item['name'].startswith('.'):
continue
if db:
db.execute("INSERT OR REPLACE INTO gitlab (id, parent_id, name, pathname, type) VALUES (?, ?, ?, ?, ?)",
(item['id'], origin_id, item['name'], item['path'], item['type']))
if item['type'] == 'blob':
item_extension = os.path.splitext(item['name'])[1].replace('.', '')
format_icon = formats.get(item_extension, "📄")
buttons.append(InlineKeyboardButton("%s %s" % (format_icon, item['name']), callback_data='git_b_%s_%s' % (origin_id, item['id'])))
elif item['type'] == 'tree':
buttons.append(InlineKeyboardButton("🗂 %s" % item['name'], callback_data='git_t_%s_%s' % (origin_id, item['id'])))
return buttons
def get_blob_file(project_id, blob_id):
"""
Return blob object
Parameters:
project_id: Project ID
blob_id: Blob ID
"""
global api
try:
blob_file = api.projects.get(project_id).repository_blob(blob_id)
if type(blob_file['content']) == str:
blob_content = blob_file['content']
else:
blob_content = base64.b64decode(blob_file['content']).decode()
blob_size = blob_file['size']
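        # Git LFS keeps only a small pointer file in the repository; the pointer
        # text starts with the spec version line and records the real file size,
        # so take the size from the pointer instead of the pointer's own length.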
if blob_content.startswith('version https://git-lfs.github.com/spec/v1'):
            blob_size = re.findall(r'size (\d+)?', blob_content)[0]
return {'size': blob_size, 'content': blob_content}
except gitlab.GitlabGetError:
        logger.error('Problem while downloading the blob file from GitLab')
@run_async
def download_blob_file_async_internal(update: Update, context: CallbackContext, blob_id, blob_name, db_result):
"""
    Download a file asynchronously and send it if its size is below ~45 MB (a safety margin under Telegram's 50 MB limit), otherwise send the download link
Parameters:
        update: "update" object of Telegram API
        context: "context" object of Telegram API
blob_id: The id of file to download
blob_name: The name of file to download
        db_result: The result of the query that retrieves web_url, pathname and parent_id
"""
global session
chat_id = get_chat_id(update)
if chat_id:
web_url, pathname, parent_id = db_result
blob_info = get_blob_file(parent_id, blob_id)
download_url = "%s/raw/master/%s" % (web_url, quote(pathname))
if int(blob_info['size']) < 4.5e+7:
file_name = "%s_%s" % (time.time(), blob_name)
with open('file/%s' % file_name, 'wb') as file_handle:
with session.get(download_url, stream=True) as download:
file_handle.write(download.content)
with open('file/%s' % file_name, 'rb') as downloaded_file:
context.bot.sendChatAction(chat_id=chat_id, action="UPLOAD_DOCUMENT")
context.bot.sendDocument(chat_id=chat_id, document=downloaded_file)
os.remove('file/%s' % file_name)
else:
context.bot.sendMessage(chat_id=chat_id,
text="⚠️ Il file è troppo grande per il download diretto!\nScaricalo al seguente link:\n%s" % download_url)
def download_blob_file_async(update: Update, context: CallbackContext, blob=None):
"""
Return the handle to the file if below the maximum size otherwise the download link
Parameters:
        update: "update" object of Telegram API
        context: "context" object of Telegram API
blob: Object containing ID and name of a blob (default: None)
"""
global db
global api
global session
if blob:
blob_id, blob_name = blob['id'], blob['name']
query = "SELECT * FROM\
(SELECT web_url FROM gitlab WHERE id = (\
SELECT parent_id FROM gitlab WHERE id = '{0}'\
)),\
(SELECT pathname FROM gitlab WHERE id = '{0}'),\
(SELECT parent_id FROM gitlab WHERE id = '{0}')"
db_result = db.execute(query.format(blob_id)).fetchone()
download_blob_file_async_internal(update, context, blob_id, blob_name, db_result)
def format_keyboard_buttons(buttons=[]):
"""
Place the buttons on multiple lines if possible
Parameters:
buttons: Array containing the buttons to display (default: [])
"""
keyboard = [[]]
number_row = 0
number_array = 0
for button in buttons:
if isinstance(button, InlineKeyboardButton):
if number_row >= 1:
keyboard.append([button])
number_array += 1
number_row = 0
else:
keyboard[number_array].append(button)
number_row += 1
else:
keyboard.append([button[0]])
return keyboard
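# --- Illustrative sketch (editor's addition, not part of the original bot) ---
# format_keyboard_buttons lays plain buttons out two per row (the first row ends
# up holding a single button), while a button already wrapped in a list is
# placed on its own row.
def _example_keyboard_layout():
    buttons = [InlineKeyboardButton(str(i), callback_data='git_demo_%d' % i) for i in range(5)]
    return format_keyboard_buttons(buttons)  # -> rows of 1, 2 and 2 buttons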
def send_message(update: Update, context: CallbackContext, message, buttons=[[]], blob=None):
"""
Send a reply message with text and button or upload a document
Parameters:
        update: "update" object of Telegram API
        context: "context" object of Telegram API
message: Message text
buttons: Array of answer buttons (default: [[]])
blob: Object that specifies the blob file to download (default: None)
"""
chat_id = get_chat_id(update)
if chat_id:
if blob:
download_blob_file_async(update, context, blob)
else:
buttons = format_keyboard_buttons(buttons)
reply_markup = InlineKeyboardMarkup(buttons)
context.bot.sendMessage(chat_id=chat_id, text=message, reply_markup=reply_markup)
def gitlab_handler(update: Update, context: CallbackContext):
"""
Handle every action of /git and /gitlab command
Parameters:
        update: "update" object of Telegram API
        context: "context" object of Telegram API
"""
global db
init_api()
db = sqlite3.connect('data/DMI_DB.db')
blob = None
blob_id = None
origin_id = None
parent = (GITLAB_ROOT_GROUP, "DMI UNICT - Appunti & Risorse:")
buttons = []
data = None
query = update.callback_query
if query is not None and query.data is not None:
data = query.data.replace("git_", "")
if not data:
subgroups = get_subgroups(GITLAB_ROOT_GROUP)
for subgroup in subgroups:
db.execute("INSERT OR REPLACE INTO gitlab (id, parent_id, name, type) VALUES (?, ?, ?, ?)",
(subgroup.id, subgroup.parent_id, subgroup.name, 'subgroup'))
buttons.append(InlineKeyboardButton("🗂 %s" % subgroup.name, callback_data='git_s_%s' % subgroup.id))
else:
action, origin_id, blob_id = (data.split('_') + [None] * 3)[:3]
if blob_id:
query = "SELECT * FROM\
(SELECT parent_id, name FROM gitlab WHERE id = %s),\
(SELECT name FROM gitlab WHERE id = '%s')"
db_result = db.execute(query % (origin_id, blob_id)).fetchone()
else:
db_result = db.execute("SELECT parent_id, name FROM gitlab WHERE id = %s" % origin_id).fetchone()
if db_result:
parent = db_result
if action == 'x':
_type = db.execute('SELECT type FROM gitlab WHERE id = %s' % origin_id).fetchone()
action = (_type[0] if _type else 'subgroup')[0]
if action == 's':
subgroups = get_subgroups(origin_id)
if subgroups:
for subgroup in subgroups:
db.execute("INSERT OR REPLACE INTO gitlab (id, parent_id, name, type) VALUES (?, ?, ?, ?)",
(subgroup.id, subgroup.parent_id, subgroup.name, 'subgroup'))
buttons.append(InlineKeyboardButton("🗂 %s" % subgroup.name, callback_data='git_s_%s' % subgroup.id))
projects = get_projects(origin_id)
for project in projects:
db.execute("INSERT OR REPLACE INTO gitlab (id, parent_id, name, web_url, type) VALUES (?, ?, ?, ?, ?)",
(project.id, origin_id, project.name, project.web_url, 'project'))
buttons.append(InlineKeyboardButton("🗂 %s" % project.name, callback_data='git_p_%s' % project.id))
elif action == 'p':
buttons.extend(explore_repository_tree(origin_id, '/', db))
elif action == 't':
path = db.execute("SELECT pathname FROM gitlab WHERE id = '%s'" % blob_id).fetchone()
buttons.extend(explore_repository_tree(origin_id, path, db))
elif action == 'b':
blob = {'id': blob_id, 'name': parent[2]}
if origin_id != str(GITLAB_ROOT_GROUP):
buttons.append([InlineKeyboardButton("🔙", callback_data='git_x_%s' % parent[0])])
title = parent[2] if blob_id and len(parent) == 3 else parent[1]
send_message(update, context, title, buttons, blob)
db.commit()
db.close()
| UNICT-DMI/Telegram-DMI-Bot | module/gitlab.py | Python | gpl-3.0 | 12,856 |
"""
Hilbert projective metric (Basic conding !)
=========================
- boundary of Ω
- order of the points p, x, y, q along the chord through Ω
- || || Real norm
    d(x, y) = ln( (||py|| * ||xq||) / (||px|| * ||yq||) )
"""
from sympy import symbols
from sympy import log
from sympy.geometry import Point, Line
from sympy.geometry import convex_hull
class HilbertMetric():
def __init__(self):
"""
"""
pass
    def check_collinear(self, x, y, p, q) -> bool:
"""
Checking collinearity of points
"""
pass
    def birapport_calculs(self):
"""
        Cross-ratio (French: birapport) of the four collinear points
"""
pass
    def d_c(self, x, y):
"""
Hilbert metric
"""
| kiaderouiche/hilbmetrics | hilbert/mtchilbert.py | Python | apache-2.0 | 659 |
from xml.etree import cElementTree as ElementTree
SENSEVAL3_TEST_DATA_FILE = "english-all-words.xml"
SENSEVAL3_TEST_ANSWERS_FILE = "EnglishAW.test.key"
def senseval_data():
# TODO: Add part of speech of each word using WordNet
all_sentences = []
senseval_test = ElementTree.parse(SENSEVAL3_TEST_DATA_FILE)
texts = senseval_test.getroot().findall("text")
sentence = []
sats = []
test_words = {}
test_phrases = []
    macro_sentence = []  # Macro variables are for sentences with subclauses in brackets, to process the clause in
macro_test_words = {} # the brackets without losing the continuity of the sentence outside the brackets
macro_test_phrases = []
for text in texts:
elems = text.iter()
for elem in elems:
if elem.tag == "text":
tail_words = elem.text.lower().split()
elif elem.tag == "sat":
sentence.append(elem.text.lower())
tail_words = elem.tail.lower().split()
sats.append(elem)
elif elem.tag == "head":
if "sats" in elem.attrib:
test_phrases.append({"headword": (elem.attrib["id"], elem.text.lower()), "sats": elem.attrib["sats"].split()})
else:
test_words[elem.attrib["id"]] = elem.text.lower()
sentence.append(elem.text.lower())
tail_words = elem.tail.lower().split()
else:
raise ValueError("tag of unidentified kind: " + elem.tag)
for tail_word in tail_words:
# Ignore certain characters
                if not tail_word.isdigit() and tail_word[0] != "*" and tail_word != "," and tail_word != '"':
# if sentence over, run sentence through Lesk
if tail_word == "." or tail_word == "!" or tail_word == "?" or \
tail_word == "--" or tail_word == ":":
all_sentences.append({"sentence": sentence, "test_words": test_words, "test_phrases": test_phrases})
sentence = []
test_words = {}
test_phrases = []
# if left bracket
elif tail_word == "-LRB-":
macro_sentence = sentence
macro_test_words = test_words
macro_test_phrases = test_phrases
sentence = []
test_words = {}
test_phrases = []
# if right bracket
elif tail_word == "-RRB-":
all_sentences.append({"sentence": sentence, "test_words": test_words, "test_phrases": test_phrases})
sentence = macro_sentence
test_words = macro_test_words
test_phrases = macro_test_phrases
macro_sentence = []
macro_test_words = {}
macro_test_phrases = []
else:
sentence.append(tail_word.lower())
if sentence or test_words:
all_sentences.append({"sentence": sentence, "test_words": test_words, "test_phrases": test_phrases})
return all_sentences
def senseval_answers():
with open(SENSEVAL3_TEST_ANSWERS_FILE) as answer_file:
answers = answer_file.read().split('\n')[:-1]
answer_dicts = []
total_answers = 0
for answer in answers:
answer = answer.split()
answer_dicts.append({"id": answer[1], "lemmas": answer[2:]})
if answer[2] != "U": # catches the case where WordNet doesn't provide the proper sense.
total_answers += 1
    return answer_dicts, total_answers
| tgrant59/pydante | sensevalapi.py | Python | mit | 3821
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from urlparse import urlparse
import httplib2
import urllib
import logging
from datetime import datetime
from lxml import etree
from django.conf import settings
from django.db import models
from django.db.models import signals
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext, ugettext_lazy as _
from django.core.urlresolvers import reverse
from geonode import GeoNodeException
from geonode.base.models import ResourceBase, ResourceBaseManager, Link, \
resourcebase_post_save, resourcebase_post_delete
from geonode.utils import _user, _password, get_wms
from geonode.utils import http_client
from geonode.geoserver.helpers import cascading_delete
from geonode.people.models import Profile
from geonode.security.enumerations import AUTHENTICATED_USERS, ANONYMOUS_USERS
from geonode.layers.ows import wcs_links, wfs_links, wms_links, \
wps_execute_layer_attribute_statistics
from geonode.layers.enumerations import LAYER_ATTRIBUTE_NUMERIC_DATA_TYPES
from geonode.utils import ogc_server_settings
from geoserver.catalog import Catalog, FailedRequestError
from agon_ratings.models import OverallRating
logger = logging.getLogger("geonode.layers.models")
class Style(models.Model):
"""Model for storing styles.
"""
name = models.CharField(_('style name'), max_length=255, unique=True)
sld_title = models.CharField(max_length=255, null=True, blank=True)
sld_body = models.TextField(_('sld text'), null=True, blank=True)
sld_version = models.CharField(_('sld version'), max_length=12, null=True, blank=True)
sld_url = models.CharField(_('sld url'), null = True, max_length=1000)
workspace = models.CharField(max_length=255, null=True, blank=True)
def __str__(self):
return "%s" % self.name.encode('utf-8')
class LayerManager(ResourceBaseManager):
def __init__(self):
models.Manager.__init__(self)
url = ogc_server_settings.rest
self.gs_catalog = Catalog(url, _user, _password)
def add_bbox_query(q, bbox):
'''modify the queryset q to limit to the provided bbox
bbox - 4 tuple of floats representing x0,x1,y0,y1
returns the modified query
'''
bbox = map(str, bbox) # 2.6 compat - float to decimal conversion
q = q.filter(bbox_x0__gte=bbox[0])
q = q.filter(bbox_x1__lte=bbox[1])
q = q.filter(bbox_y0__gte=bbox[2])
return q.filter(bbox_y1__lte=bbox[3])
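# Illustrative usage (editor's addition): restrict a Layer queryset to layers
# whose bounding box lies inside a given extent; the tuple order is
# (x0, x1, y0, y1) as documented above. The extent values are made up.
def _example_bbox_filter():
    return add_bbox_query(Layer.objects.all(), (-10.0, 10.0, -5.0, 5.0))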
class Layer(ResourceBase):
"""
Layer (inherits ResourceBase fields)
"""
# internal fields
objects = LayerManager()
workspace = models.CharField(max_length=128)
store = models.CharField(max_length=128)
storeType = models.CharField(max_length=128)
name = models.CharField(max_length=128)
typename = models.CharField(max_length=128, unique=True)
popular_count = models.IntegerField(default=0)
share_count = models.IntegerField(default=0)
default_style = models.ForeignKey(Style, related_name='layer_default_style', null=True, blank=True)
styles = models.ManyToManyField(Style, related_name='layer_styles')
def update_thumbnail(self, save=True):
try:
self.save_thumbnail(self._thumbnail_url(width=200, height=150), save)
except RuntimeError, e:
            logger.warn('Could not create thumbnail for %s: %s' % (self, e))
def _render_thumbnail(self, spec):
resp, content = http_client.request(spec)
if 'ServiceException' in content or resp.status < 200 or resp.status > 299:
msg = 'Unable to obtain thumbnail: %s' % content
raise RuntimeError(msg)
return content
def _thumbnail_url(self, width=20, height=None):
""" Generate a URL representing thumbnail of the layer """
params = {
'layers': self.typename.encode('utf-8'),
'format': 'image/png8',
'width': width,
}
if height is not None:
params['height'] = height
# Avoid using urllib.urlencode here because it breaks the url.
# commas and slashes in values get encoded and then cause trouble
# with the WMS parser.
p = "&".join("%s=%s"%item for item in params.items())
return ogc_server_settings.LOCATION + "wms/reflect?" + p
def verify(self):
"""Makes sure the state of the layer is consistent in GeoServer and Catalogue.
"""
# Check the layer is in the wms get capabilities record
# FIXME: Implement caching of capabilities record site wide
_local_wms = get_wms()
record = _local_wms.contents.get(self.typename)
if record is None:
msg = "WMS Record missing for layer [%s]" % self.typename.encode('utf-8')
raise GeoNodeException(msg)
@property
def display_type(self):
return ({
"dataStore" : "Vector Data",
"coverageStore": "Raster Data",
}).get(self.storeType, "Data")
@property
def store_type(self):
cat = Layer.objects.gs_catalog
res = cat.get_resource(self.name)
res.store.fetch()
return res.store.dom.find('type').text
@property
def service_type(self):
if self.storeType == 'coverageStore':
return "WCS"
if self.storeType == 'dataStore':
return "WFS"
def get_absolute_url(self):
return reverse('layer_detail', args=(self.typename,))
def attribute_config(self):
#Get custom attribute sort order and labels if any
cfg = {}
visible_attributes = self.attribute_set.visible()
if (visible_attributes.count() > 0):
cfg["getFeatureInfo"] = {
"fields": [l.attribute for l in visible_attributes],
"propertyNames": dict([(l.attribute,l.attribute_label) for l in visible_attributes])
}
return cfg
def __str__(self):
return "%s Layer" % self.typename.encode('utf-8')
class Meta:
# custom permissions,
# change and delete are standard in django
permissions = (('view_layer', 'Can view'),
('change_layer_permissions', "Can change permissions"), )
# Permission Level Constants
# LEVEL_NONE inherited
LEVEL_READ = 'layer_readonly'
LEVEL_WRITE = 'layer_readwrite'
LEVEL_ADMIN = 'layer_admin'
def set_default_permissions(self):
self.set_gen_level(ANONYMOUS_USERS, self.LEVEL_READ)
self.set_gen_level(AUTHENTICATED_USERS, self.LEVEL_READ)
# remove specific user permissions
current_perms = self.get_all_level_info()
for username in current_perms['users'].keys():
user = User.objects.get(username=username)
self.set_user_level(user, self.LEVEL_NONE)
# assign owner admin privileges
if self.owner:
self.set_user_level(self.owner, self.LEVEL_ADMIN)
def tiles_url(self):
return self.link_set.get(name='Tiles').url
def maps(self):
from geonode.maps.models import MapLayer
return MapLayer.objects.filter(name=self.typename)
@property
def class_name(self):
return self.__class__.__name__
class Layer_Styles(models.Model):
layer = models.ForeignKey(Layer)
style = models.ForeignKey(Style)
class AttributeManager(models.Manager):
"""Helper class to access filtered attributes
"""
def visible(self):
return self.get_query_set().filter(visible=True).order_by('display_order')
class Attribute(models.Model):
"""
Auxiliary model for storing layer attributes.
This helps reduce the need for runtime lookups
to GeoServer, and lets users customize attribute titles,
sort order, and visibility.
"""
layer = models.ForeignKey(Layer, blank=False, null=False, unique=False, related_name='attribute_set')
attribute = models.CharField(_('attribute name'), help_text=_('name of attribute as stored in shapefile/spatial database'), max_length=255, blank=False, null=True, unique=False)
description = models.CharField(_('attribute description'), help_text=_('description of attribute to be used in metadata'), max_length=255, blank=True, null=True)
attribute_label = models.CharField(_('attribute label'), help_text=_('title of attribute as displayed in GeoNode'), max_length=255, blank=False, null=True, unique=False)
attribute_type = models.CharField(_('attribute type'), help_text=_('the data type of the attribute (integer, string, geometry, etc)'), max_length=50, blank=False, null=False, default='xsd:string', unique=False)
visible = models.BooleanField(_('visible?'), help_text=_('specifies if the attribute should be displayed in identify results'), default=True)
display_order = models.IntegerField(_('display order'), help_text=_('specifies the order in which attribute should be displayed in identify results'), default=1)
# statistical derivations
count = models.IntegerField(_('count'), help_text=_('count value for this field'), default=1)
min = models.CharField(_('min'), help_text=_('minimum value for this field'), max_length=255, blank=False, null=True, unique=False, default='NA')
max = models.CharField(_('max'), help_text=_('maximum value for this field'), max_length=255, blank=False, null=True, unique=False, default='NA')
average = models.CharField(_('average'), help_text=_('average value for this field'), max_length=255, blank=False, null=True, unique=False, default='NA')
median = models.CharField(_('median'), help_text=_('median value for this field'), max_length=255, blank=False, null=True, unique=False, default='NA')
stddev = models.CharField(_('standard deviation'), help_text=_('standard deviation for this field'), max_length=255, blank=False, null=True, unique=False, default='NA')
sum = models.CharField(_('sum'), help_text=_('sum value for this field'), max_length=255, blank=False, null=True, unique=False, default='NA')
unique_values = models.TextField(_('unique values for this field'), null=True, blank=True, default='NA')
    last_stats_updated = models.DateTimeField(_('last modified'), default=datetime.now, help_text=_('date when attribute statistics were last updated')) # passing the method itself, not its result, so the timestamp is evaluated when the row is saved
objects = AttributeManager()
def __str__(self):
return "%s" % self.attribute_label.encode("utf-8") if self.attribute_label else self.attribute.encode("utf-8")
def unique_values_as_list(self):
return self.unique_values.split(',')
def geoserver_pre_delete(instance, sender, **kwargs):
"""Removes the layer from GeoServer
"""
ct = ContentType.objects.get_for_model(instance)
OverallRating.objects.filter(content_type = ct, object_id = instance.id).delete()
#cascading_delete should only be called if ogc_server_settings.BACKEND_WRITE_ENABLED == True
if getattr(ogc_server_settings,"BACKEND_WRITE_ENABLED", True):
cascading_delete(Layer.objects.gs_catalog, instance.typename)
def pre_save_layer(instance, sender, **kwargs):
if kwargs.get('raw', False):
instance.owner = instance.resourcebase_ptr.owner
instance.uuid = instance.resourcebase_ptr.uuid
instance.bbox_x0 = instance.resourcebase_ptr.bbox_x0
instance.bbox_x1 = instance.resourcebase_ptr.bbox_x1
instance.bbox_y0 = instance.resourcebase_ptr.bbox_y0
instance.bbox_y1 = instance.resourcebase_ptr.bbox_y1
if instance.abstract == '' or instance.abstract is None:
instance.abstract = 'No abstract provided'
if instance.title == '' or instance.title is None:
instance.title = instance.name
def pre_delete_layer(instance, sender, **kwargs):
"""
Remove any associated style to the layer, if it is not used by other layers.
Default style will be deleted in post_delete_layer
"""
logger.debug("Going to delete the styles associated for [%s]", instance.typename.encode('utf-8'))
default_style = instance.default_style
for style in instance.styles.all():
if style.layer_styles.all().count()==1:
if style != default_style:
style.delete()
def post_delete_layer(instance, sender, **kwargs):
"""
    Remove the layer from any associated maps, if any.
Remove the layer default style.
"""
from geonode.maps.models import MapLayer
logger.debug("Going to delete associated maplayers for [%s]", instance.typename.encode('utf-8'))
MapLayer.objects.filter(name=instance.typename).delete()
logger.debug("Going to delete the default style for [%s]", instance.typename.encode('utf-8'))
if instance.default_style and Layer.objects.filter(default_style__id=instance.default_style.id).count() == 0:
instance.default_style.delete()
def geoserver_pre_save(instance, sender, **kwargs):
"""Send information to geoserver.
The attributes sent include:
* Title
* Abstract
* Name
* Keywords
* Metadata Links,
* Point of Contact name and url
"""
url = ogc_server_settings.internal_rest
try:
gs_catalog = Catalog(url, _user, _password)
gs_resource = gs_catalog.get_resource(instance.name)
except (EnvironmentError, FailedRequestError) as e:
gs_resource = None
        msg = ('Could not connect to geoserver at "%s" '
               'to save information for layer "%s"' % (
                   ogc_server_settings.LOCATION, instance.name.encode('utf-8'))
               )
        logger.warn('%s: %s' % (msg, e))
# If geoserver is not online, there is no need to continue
return
# If there is no resource returned it could mean one of two things:
# a) There is a synchronization problem in geoserver
# b) The unit tests are running and another geoserver is running in the
# background.
# For both cases it is sensible to stop processing the layer
if gs_resource is None:
logger.warn('Could not get geoserver resource for %s' % instance)
return
gs_resource.title = instance.title
gs_resource.abstract = instance.abstract
gs_resource.name= instance.name
# Get metadata links
metadata_links = []
for link in instance.link_set.metadata():
metadata_links.append((link.name, link.mime, link.url))
gs_resource.metadata_links = metadata_links
#gs_resource should only be called if ogc_server_settings.BACKEND_WRITE_ENABLED == True
if getattr(ogc_server_settings,"BACKEND_WRITE_ENABLED", True):
gs_catalog.save(gs_resource)
gs_layer = gs_catalog.get_layer(instance.name)
if instance.poc and instance.poc.user:
gs_layer.attribution = str(instance.poc.user)
profile = Profile.objects.get(user=instance.poc.user)
gs_layer.attribution_link = settings.SITEURL[:-1] + profile.get_absolute_url()
#gs_layer should only be called if ogc_server_settings.BACKEND_WRITE_ENABLED == True
if getattr(ogc_server_settings,"BACKEND_WRITE_ENABLED", True):
gs_catalog.save(gs_layer)
"""Get information from geoserver.
The attributes retrieved include:
* Bounding Box
* SRID
* Download links (WMS, WCS or WFS and KML)
* Styles (SLD)
"""
gs_resource = gs_catalog.get_resource(instance.name)
bbox = gs_resource.latlon_bbox
#FIXME(Ariel): Correct srid setting below
#self.srid = gs_resource.src
# Set bounding box values
instance.bbox_x0 = bbox[0]
instance.bbox_x1 = bbox[1]
instance.bbox_y0 = bbox[2]
instance.bbox_y1 = bbox[3]
instance.update_thumbnail(save=False)
def geoserver_post_save(instance, sender, **kwargs):
"""Save keywords to GeoServer
The way keywords are implemented requires the layer
to be saved to the database before accessing them.
"""
url = ogc_server_settings.internal_rest
try:
gs_catalog = Catalog(url, _user, _password)
gs_resource = gs_catalog.get_resource(instance.name)
except (FailedRequestError, EnvironmentError) as e:
        msg = ('Could not connect to geoserver at "%s" '
               'to save information for layer "%s"' % (
                   ogc_server_settings.LOCATION, instance.name.encode('utf-8'))
               )
        logger.warn('%s: %s' % (msg, e))
# If geoserver is not online, there is no need to continue
return
# If there is no resource returned it could mean one of two things:
# a) There is a synchronization problem in geoserver
# b) The unit tests are running and another geoserver is running in the
# background.
# For both cases it is sensible to stop processing the layer
if gs_resource is None:
logger.warn('Could not get geoserver resource for %s' % instance)
return
gs_resource.keywords = instance.keyword_list()
#gs_resource should only be called if ogc_server_settings.BACKEND_WRITE_ENABLED == True
if getattr(ogc_server_settings,"BACKEND_WRITE_ENABLED", True):
gs_catalog.save(gs_resource)
bbox = gs_resource.latlon_bbox
dx = float(bbox[1]) - float(bbox[0])
dy = float(bbox[3]) - float(bbox[2])
dataAspect = 1 if dy == 0 else dx / dy
height = 550
width = int(height * dataAspect)
# Set download links for WMS, WCS or WFS and KML
links = wms_links(ogc_server_settings.public_url + 'wms?',
instance.typename.encode('utf-8'), instance.bbox_string,
instance.srid, height, width)
for ext, name, mime, wms_url in links:
Link.objects.get_or_create(resource= instance.resourcebase_ptr,
name=ugettext(name),
defaults=dict(
extension=ext,
url=wms_url,
mime=mime,
link_type='image',
)
)
if instance.storeType == "dataStore":
links = wfs_links(ogc_server_settings.public_url + 'wfs?', instance.typename.encode('utf-8'))
for ext, name, mime, wfs_url in links:
Link.objects.get_or_create(resource= instance.resourcebase_ptr,
url=wfs_url,
defaults=dict(
extension=ext,
name=name,
mime=mime,
url=wfs_url,
link_type='data',
)
)
elif instance.storeType == 'coverageStore':
#FIXME(Ariel): This works for public layers, does it work for restricted too?
# would those end up with no geotiff links, like, forever?
permissions = {}
permissions['anonymous'] = instance.get_gen_level(ANONYMOUS_USERS)
permissions['authenticated'] = instance.get_gen_level(AUTHENTICATED_USERS)
instance.set_gen_level(ANONYMOUS_USERS,'layer_readonly')
links = wcs_links(ogc_server_settings.public_url + 'wcs?', instance.typename.encode('utf-8'),
bbox=instance.bbox[:-1], crs=instance.bbox[-1], height=height, width=width)
for ext, name, mime, wcs_url in links:
Link.objects.get_or_create(resource= instance.resourcebase_ptr,
url=wcs_url,
defaults=dict(
extension=ext,
name=name,
mime=mime,
link_type='data',
)
)
instance.set_gen_level(ANONYMOUS_USERS,permissions['anonymous'])
instance.set_gen_level(AUTHENTICATED_USERS,permissions['authenticated'])
kml_reflector_link_download = ogc_server_settings.public_url + "wms/kml?" + urllib.urlencode({
'layers': instance.typename.encode('utf-8'),
'mode': "download"
})
Link.objects.get_or_create(resource= instance.resourcebase_ptr,
url=kml_reflector_link_download,
defaults=dict(
extension='kml',
name=_("KML"),
mime='text/xml',
link_type='data',
)
)
kml_reflector_link_view = ogc_server_settings.public_url + "wms/kml?" + urllib.urlencode({
'layers': instance.typename.encode('utf-8'),
'mode': "refresh"
})
Link.objects.get_or_create(resource= instance.resourcebase_ptr,
url=kml_reflector_link_view,
defaults=dict(
extension='kml',
name=_("View in Google Earth"),
mime='text/xml',
link_type='data',
)
)
tile_url = ('%sgwc/service/gmaps?' % ogc_server_settings.public_url +
'layers=%s' % instance.typename.encode('utf-8') +
'&zoom={z}&x={x}&y={y}' +
'&format=image/png8'
)
Link.objects.get_or_create(resource= instance.resourcebase_ptr,
url=tile_url,
defaults=dict(
extension='tiles',
name=_("Tiles"),
mime='image/png',
link_type='image',
)
)
html_link_url = '%s%s' % (settings.SITEURL[:-1], instance.get_absolute_url())
Link.objects.get_or_create(resource= instance.resourcebase_ptr,
url=html_link_url,
defaults=dict(
extension='html',
name=instance.typename,
mime='text/html',
link_type='html',
)
)
#remove links that belong to and old address
for link in instance.link_set.all():
if not urlparse(settings.SITEURL).hostname == urlparse(link.url).hostname and not \
urlparse(ogc_server_settings.public_url).hostname == urlparse(link.url).hostname:
link.delete()
#Save layer attributes
set_attributes(instance)
#Save layer styles
set_styles(instance, gs_catalog)
def set_styles(layer, gs_catalog):
style_set = []
gs_layer = gs_catalog.get_layer(layer.name)
default_style = gs_layer.default_style
layer.default_style = save_style(default_style)
style_set.append(layer.default_style)
alt_styles = gs_layer.styles
for alt_style in alt_styles:
style_set.append(save_style(alt_style))
layer.styles = style_set
return layer
def save_style(gs_style):
style, created = Style.objects.get_or_create(name = gs_style.sld_name)
style.sld_title = gs_style.sld_title
style.sld_body = gs_style.sld_body
style.sld_url = gs_style.body_href()
style.save()
return style
def is_layer_attribute_aggregable(store_type, field_name, field_type):
"""
Decipher whether layer attribute is suitable for statistical derivation
"""
# must be vector layer
if store_type != 'dataStore':
return False
# must be a numeric data type
if field_type not in LAYER_ATTRIBUTE_NUMERIC_DATA_TYPES:
return False
# must not be an identifier type field
if field_name.lower() in ['id', 'identifier']:
return False
return True
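# Illustrative check (editor's addition); assumes 'xsd:int' is listed in
# LAYER_ATTRIBUTE_NUMERIC_DATA_TYPES and uses made-up attribute names.
def _example_aggregable_attributes():
    return (is_layer_attribute_aggregable('dataStore', 'population', 'xsd:int'),  # True
            is_layer_attribute_aggregable('dataStore', 'id', 'xsd:int'))          # False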
def get_attribute_statistics(layer_name, field):
"""
Generate statistics (range, mean, median, standard deviation, unique values)
for layer attribute
"""
logger.debug('Deriving aggregate statistics for attribute %s', field)
if not ogc_server_settings.WPS_ENABLED:
return None
try:
return wps_execute_layer_attribute_statistics(layer_name, field)
except Exception:
logger.exception('Error generating layer aggregate statistics')
def set_attributes(layer, overwrite=False):
"""
Retrieve layer attribute names & types from Geoserver,
then store in GeoNode database using Attribute model
"""
#Appending authorizations seems necessary to avoid 'layer not found' from GeoServer
http = httplib2.Http()
http.add_credentials(_user, _password)
_netloc = urlparse(ogc_server_settings.LOCATION).netloc
http.authorizations.append(
httplib2.BasicAuthentication(
(_user, _password),
_netloc,
ogc_server_settings.LOCATION,
{},
None,
None,
http
)
)
attribute_map = []
if layer.storeType == "dataStore":
dft_url = ogc_server_settings.LOCATION + "wfs?" + urllib.urlencode({
"service": "wfs",
"version": "1.0.0",
"request": "DescribeFeatureType",
"typename": layer.typename.encode('utf-8'),
})
try:
body = http.request(dft_url)[1]
doc = etree.fromstring(body)
path = ".//{xsd}extension/{xsd}sequence/{xsd}element".format(xsd="{http://www.w3.org/2001/XMLSchema}")
attribute_map = [[n.attrib["name"],n.attrib["type"]] for n in doc.findall(path)]
except Exception:
attribute_map = []
elif layer.storeType == "coverageStore":
dc_url = ogc_server_settings.LOCATION + "wcs?" + urllib.urlencode({
"service": "wcs",
"version": "1.1.0",
"request": "DescribeCoverage",
"identifiers": layer.typename.encode('utf-8')
})
try:
response, body = http.request(dc_url)
doc = etree.fromstring(body)
path = ".//{wcs}Axis/{wcs}AvailableKeys/{wcs}Key".format(wcs="{http://www.opengis.net/wcs/1.1.1}")
attribute_map = [[n.text,"raster"] for n in doc.findall(path)]
except Exception:
attribute_map = []
attributes = layer.attribute_set.all()
# Delete existing attributes if they no longer exist in an updated layer
for la in attributes:
lafound = False
for field, ftype in attribute_map:
if field == la.attribute:
lafound = True
if overwrite or not lafound:
logger.debug("Going to delete [%s] for [%s]", la.attribute, layer.name.encode('utf-8'))
la.delete()
# Add new layer attributes if they don't already exist
if attribute_map is not None:
iter = len(Attribute.objects.filter(layer=layer)) + 1
for field, ftype in attribute_map:
if field is not None:
la, created = Attribute.objects.get_or_create(layer=layer, attribute=field, attribute_type=ftype)
if created:
if is_layer_attribute_aggregable(layer.storeType, field, ftype):
logger.debug("Generating layer attribute statistics")
result = get_attribute_statistics(layer.name, field)
if result is not None:
la.count = result['Count']
la.min = result['Min']
la.max = result['Max']
la.average = result['Average']
la.median = result['Median']
la.stddev = result['StandardDeviation']
la.sum = result['Sum']
la.unique_values = result['unique_values']
la.last_stats_updated = datetime.now()
la.attribute_label = field.title()
la.visible = ftype.find("gml:") != 0
la.display_order = iter
la.save()
iter += 1
logger.debug("Created [%s] attribute for [%s]", field, layer.name.encode('utf-8'))
else:
logger.debug("No attributes found")
signals.pre_save.connect(pre_save_layer, sender=Layer)
signals.pre_save.connect(geoserver_pre_save, sender=Layer)
signals.pre_delete.connect(geoserver_pre_delete, sender=Layer)
signals.post_save.connect(geoserver_post_save, sender=Layer)
signals.pre_delete.connect(pre_delete_layer, sender=Layer)
signals.post_delete.connect(post_delete_layer, sender=Layer)
signals.post_save.connect(resourcebase_post_save, sender=Layer)
signals.post_delete.connect(resourcebase_post_delete, sender=Layer)
| AnnalisaS/migration_geonode | geonode/layers/models.py | Python | gpl-3.0 | 29,589 |
#-*- coding: utf-8 -*-
from django.conf import settings
from django.core import exceptions
from django.utils.importlib import import_module
def load_class(class_path, setting_name=None):
"""
Loads a class given a class_path.
The setting_name parameter is only there for pretty error output, and
therefore is optional
"""
try:
class_module, class_name = class_path.rsplit('.', 1)
except ValueError:
if setting_name:
txt = '%s isn\'t a valid module. Check your %s setting' % (
class_path, setting_name)
else:
txt = '%s isn\'t a valid module.' % class_path
raise exceptions.ImproperlyConfigured(txt)
try:
mod = import_module(class_module)
except ImportError, e:
if setting_name:
txt = 'Error importing backend %s: "%s". Check your %s setting' % (
class_module, e, setting_name)
else:
txt = 'Error importing backend %s: "%s".' % (class_module, e)
raise exceptions.ImproperlyConfigured(txt)
try:
clazz = getattr(mod, class_name)
except AttributeError:
if setting_name:
txt = ('Backend module "%s" does not define a "%s" class. Check'
' your %s setting' % (class_module, class_name,
setting_name))
else:
txt = 'Backend module "%s" does not define a "%s" class.' % (
class_module, class_name)
raise exceptions.ImproperlyConfigured(txt)
return clazz
def get_model_string(model_name):
"""
Returns the model string notation Django uses for lazily loaded ForeignKeys
(eg 'auth.User') to prevent circular imports.
This is needed to allow our crazy custom model usage.
"""
class_path = getattr(settings,
'SHOP_%s_MODEL' % model_name.upper().replace('_', ''), None)
if not class_path:
return 'shop.%s' % model_name
else:
klass = load_class(class_path)
return '%s.%s' % (klass._meta.app_label, klass.__name__)
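# Illustrative usage (editor's addition): load a class from its dotted path; the
# setting name is only used to build a friendlier error message, and the setting
# name below is made up.
def _example_load_class():
    user_cls = load_class('django.contrib.auth.models.User', 'EXAMPLE_USER_MODEL')
    return user_cls.__name__  # 'User'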
| hzlf/openbroadcast | website/shop/shop/util/loader.py | Python | gpl-3.0 | 2,073 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RouteFiltersOperations(object):
"""RouteFiltersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
route_filter_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
route_filter_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
route_filter_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.RouteFilter"
"""Gets the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param expand: Expands referenced express route bgp peering resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteFilter, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.RouteFilter
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
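    # Illustrative usage (editor's note, not part of the generated client): this
    # operation group is normally reached through NetworkManagementClient, e.g.
    #   client = NetworkManagementClient(credential, subscription_id)
    #   route_filter = client.route_filters.get('my-rg', 'my-route-filter')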
def _create_or_update_initial(
self,
resource_group_name, # type: str
route_filter_name, # type: str
route_filter_parameters, # type: "_models.RouteFilter"
**kwargs # type: Any
):
# type: (...) -> "_models.RouteFilter"
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_parameters, 'RouteFilter')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteFilter', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
route_filter_name, # type: str
route_filter_parameters, # type: "_models.RouteFilter"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.RouteFilter"]
"""Creates or updates a route filter in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param route_filter_parameters: Parameters supplied to the create or update route filter
operation.
:type route_filter_parameters: ~azure.mgmt.network.v2019_07_01.models.RouteFilter
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either RouteFilter or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.RouteFilter]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
route_filter_parameters=route_filter_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def _update_initial(
self,
resource_group_name, # type: str
route_filter_name, # type: str
route_filter_parameters, # type: "_models.PatchRouteFilter"
**kwargs # type: Any
):
# type: (...) -> "_models.RouteFilter"
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_parameters, 'PatchRouteFilter')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def begin_update(
self,
resource_group_name, # type: str
route_filter_name, # type: str
route_filter_parameters, # type: "_models.PatchRouteFilter"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.RouteFilter"]
"""Updates a route filter in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param route_filter_parameters: Parameters supplied to the update route filter operation.
:type route_filter_parameters: ~azure.mgmt.network.v2019_07_01.models.PatchRouteFilter
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either RouteFilter or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.RouteFilter]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
route_filter_parameters=route_filter_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
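    # A hedged usage sketch for the long-running update described above; the client
    # and parameter variable names are assumptions, not part of this module:
    #
    #     poller = network_client.route_filters.begin_update(
    #         "my-resource-group", "my-route-filter", route_filter_parameters)
    #     route_filter = poller.result()  # azure-core LROPoller blocks until completion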
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RouteFilterListResult"]
"""Gets all route filters in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_07_01.models.RouteFilterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters'} # type: ignore
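    # Hedged usage sketch (the client variable name is an assumption): the returned
    # ItemPaged handles server-side paging transparently, so plain iteration suffices:
    #
    #     for route_filter in network_client.route_filters.list_by_resource_group("my-rg"):
    #         print(route_filter.name)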
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RouteFilterListResult"]
"""Gets all route filters in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_07_01.models.RouteFilterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeFilters'} # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_route_filters_operations.py | Python | mit | 30,281 |
"""
Wrappers to LAPACK library
==========================
NOTE: this module is deprecated -- use scipy.linalg.lapack instead!
flapack -- wrappers for Fortran [*] LAPACK routines
clapack -- wrappers for ATLAS LAPACK routines
calc_lwork -- calculate optimal lwork parameters
get_lapack_funcs -- query for wrapper functions.
 [*] If ATLAS libraries are available then the Fortran routines
     actually use ATLAS routines and should perform as well as
     the ATLAS wrappers.
Module flapack
++++++++++++++
In the following all function names are shown without
type prefix (s,d,c,z). Optimal values for lwork can
be computed using the calc_lwork module.
Linear Equations
----------------
Drivers::
lu,piv,x,info = gesv(a,b,overwrite_a=0,overwrite_b=0)
lub,piv,x,info = gbsv(kl,ku,ab,b,overwrite_ab=0,overwrite_b=0)
c,x,info = posv(a,b,lower=0,overwrite_a=0,overwrite_b=0)
Computational routines::
lu,piv,info = getrf(a,overwrite_a=0)
x,info = getrs(lu,piv,b,trans=0,overwrite_b=0)
inv_a,info = getri(lu,piv,lwork=min_lwork,overwrite_lu=0)
c,info = potrf(a,lower=0,clean=1,overwrite_a=0)
x,info = potrs(c,b,lower=0,overwrite_b=0)
inv_a,info = potri(c,lower=0,overwrite_c=0)
inv_c,info = trtri(c,lower=0,unitdiag=0,overwrite_c=0)
Linear Least Squares (LLS) Problems
-----------------------------------
Drivers::
v,x,s,rank,info = gelss(a,b,cond=-1.0,lwork=min_lwork,overwrite_a=0,overwrite_b=0)
Computational routines::
qr,tau,info = geqrf(a,lwork=min_lwork,overwrite_a=0)
q,info = orgqr|ungqr(qr,tau,lwork=min_lwork,overwrite_qr=0,overwrite_tau=1)
Generalized Linear Least Squares (LSE and GLM) Problems
-------------------------------------------------------
Standard Eigenvalue and Singular Value Problems
-----------------------------------------------
Drivers::
w,v,info = syev|heev(a,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0)
w,v,info = syevd|heevd(a,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0)
w,v,info = syevr|heevr(a,compute_v=1,lower=0,vrange=,irange=,atol=-1.0,lwork=min_lwork,overwrite_a=0)
t,sdim,(wr,wi|w),vs,info = gees(select,a,compute_v=1,sort_t=0,lwork=min_lwork,select_extra_args=(),overwrite_a=0)
wr,(wi,vl|w),vr,info = geev(a,compute_vl=1,compute_vr=1,lwork=min_lwork,overwrite_a=0)
u,s,vt,info = gesdd(a,compute_uv=1,lwork=min_lwork,overwrite_a=0)
Computational routines::
ht,tau,info = gehrd(a,lo=0,hi=n-1,lwork=min_lwork,overwrite_a=0)
ba,lo,hi,pivscale,info = gebal(a,scale=0,permute=0,overwrite_a=0)
Generalized Eigenvalue and Singular Value Problems
--------------------------------------------------
Drivers::
w,v,info = sygv|hegv(a,b,itype=1,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0,overwrite_b=0)
w,v,info = sygvd|hegvd(a,b,itype=1,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0,overwrite_b=0)
(alphar,alphai|alpha),beta,vl,vr,info = ggev(a,b,compute_vl=1,compute_vr=1,lwork=min_lwork,overwrite_a=0,overwrite_b=0)
Auxiliary routines
------------------
a,info = lauum(c,lower=0,overwrite_c=0)
a = laswp(a,piv,k1=0,k2=len(piv)-1,off=0,inc=1,overwrite_a=0)
Module clapack
++++++++++++++
Linear Equations
----------------
Drivers::
lu,piv,x,info = gesv(a,b,rowmajor=1,overwrite_a=0,overwrite_b=0)
c,x,info = posv(a,b,lower=0,rowmajor=1,overwrite_a=0,overwrite_b=0)
Computational routines::
lu,piv,info = getrf(a,rowmajor=1,overwrite_a=0)
x,info = getrs(lu,piv,b,trans=0,rowmajor=1,overwrite_b=0)
inv_a,info = getri(lu,piv,rowmajor=1,overwrite_lu=0)
c,info = potrf(a,lower=0,clean=1,rowmajor=1,overwrite_a=0)
x,info = potrs(c,b,lower=0,rowmajor=1,overwrite_b=0)
inv_a,info = potri(c,lower=0,rowmajor=1,overwrite_c=0)
inv_c,info = trtri(c,lower=0,unitdiag=0,rowmajor=1,overwrite_c=0)
Auxiliary routines
------------------
a,info = lauum(c,lower=0,rowmajor=1,overwrite_c=0)
Module calc_lwork
+++++++++++++++++
Optimal lwork is maxwrk. Default is minwrk.
minwrk,maxwrk = gehrd(prefix,n,lo=0,hi=n-1)
minwrk,maxwrk = gesdd(prefix,m,n,compute_uv=1)
minwrk,maxwrk = gelss(prefix,m,n,nrhs)
minwrk,maxwrk = getri(prefix,n)
minwrk,maxwrk = geev(prefix,n,compute_vl=1,compute_vr=1)
minwrk,maxwrk = heev(prefix,n,lower=0)
minwrk,maxwrk = syev(prefix,n,lower=0)
minwrk,maxwrk = gees(prefix,n,compute_v=1)
minwrk,maxwrk = geqrf(prefix,m,n)
minwrk,maxwrk = gqr(prefix,m,n)
"""
from __future__ import division, print_function, absolute_import
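# A minimal, hedged sketch of the query interface documented in the docstring above;
# the array values are illustrative assumptions and scipy.linalg.lapack is the
# supported replacement for new code:
#
#     import numpy as np
#     a = np.array([[3., 1.], [1., 2.]])
#     b = np.array([1., 0.])
#     gesv, = get_lapack_funcs(('gesv',), (a, b))  # double-precision flavor is chosen
#     lu, piv, x, info = gesv(a, b)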
__all__ = ['get_lapack_funcs','calc_lwork','flapack','clapack']
from numpy import deprecate
from . import calc_lwork
# The following ensures that a possibly missing flavor (C or Fortran) is
# replaced with the available one. If none is available, an exception
# is raised at the first attempt to use the resources.
@deprecate(old_name="scipy.lib.lapack", new_name="scipy.linalg.lapack")
def _deprecated():
pass
try:
_deprecated()
except DeprecationWarning as e:
# don't fail import if DeprecationWarnings raise error -- works around
# the situation with Numpy's test framework
pass
from . import flapack
from . import clapack
_use_force_clapack = 1
if hasattr(clapack,'empty_module'):
clapack = flapack
_use_force_clapack = 0
elif hasattr(flapack,'empty_module'):
flapack = clapack
_type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'} # 'd' will be default for 'i',..
_inv_type_conv = {'s':'f','d':'d','c':'F','z':'D'}
@deprecate
def get_lapack_funcs(names,arrays=(),debug=0,force_clapack=1):
"""Return available LAPACK function objects with names.
arrays are used to determine the optimal prefix of
LAPACK routines.
    If force_clapack is True then the available ATLAS routine
    is returned for column-major stored arrays, with the
    rowmajor argument set to False.
"""
force_clapack = 0 # XXX: Don't set it true! The feature is unreliable
# and may cause incorrect results.
# See test_basic.test_solve.check_20Feb04_bug.
ordering = []
for i in range(len(arrays)):
t = arrays[i].dtype.char
if t not in _type_conv:
t = 'd'
ordering.append((t,i))
if ordering:
ordering.sort()
required_prefix = _type_conv[ordering[0][0]]
else:
required_prefix = 'd'
dtypechar = _inv_type_conv[required_prefix]
# Default lookup:
if ordering and arrays[ordering[0][1]].flags['FORTRAN']:
# prefer Fortran code for leading array with column major order
m1,m2 = flapack,clapack
else:
# in all other cases, C code is preferred
m1,m2 = clapack,flapack
if not _use_force_clapack:
force_clapack = 0
funcs = []
m1_name = m1.__name__.split('.')[-1]
m2_name = m2.__name__.split('.')[-1]
for name in names:
func_name = required_prefix + name
func = getattr(m1,func_name,None)
if func is None:
func = getattr(m2,func_name)
func.module_name = m2_name
else:
func.module_name = m1_name
if force_clapack and m1 is flapack:
func2 = getattr(m2,func_name,None)
if func2 is not None:
import new
exec(_colmajor_func_template % {'func_name':func_name})
func = new.function(func_code,{'clapack_func':func2},func_name)
func.module_name = m2_name
func.__doc__ = func2.__doc__
func.prefix = required_prefix
func.dtypechar = dtypechar
funcs.append(func)
return tuple(funcs)
_colmajor_func_template = '''\
def %(func_name)s(*args,**kws):
if "rowmajor" not in kws:
kws["rowmajor"] = 0
return clapack_func(*args,**kws)
func_code = %(func_name)s.func_code
'''
from numpy.testing import Tester
test = Tester().test
| beiko-lab/gengis | bin/Lib/site-packages/scipy/lib/lapack/__init__.py | Python | gpl-3.0 | 8,118 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2018-10-19 04:07
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
import functools
import instance.models.utils
class Migration(migrations.Migration):
dependencies = [
('instance', '0109_remove_github_admin_fields'),
]
operations = [
migrations.AlterField(
model_name='openedxappserver',
name='openstack_server_base_image',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=functools.partial(instance.models.utils._get_setting, *('OPENSTACK_SANDBOX_BASE_IMAGE',), **{}), help_text='JSON openstack base image selector, e.g. {"name": "xenial-16.04-unmodified"} Defaults to settings.OPENSTACK_SANDBOX_BASE_IMAGE on server creation.', null=True),
),
migrations.AlterField(
model_name='openedxappserver',
name='openstack_server_flavor',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=functools.partial(instance.models.utils._get_setting, *('OPENSTACK_SANDBOX_FLAVOR',), **{}), help_text='JSON openstack flavor selector, e.g. {"name": "vps-ssd-1"}. Defaults to settings.OPENSTACK_SANDBOX_FLAVOR on server creation.', null=True),
),
migrations.AlterField(
model_name='openedxinstance',
name='openstack_server_base_image',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=functools.partial(instance.models.utils._get_setting, *('OPENSTACK_SANDBOX_BASE_IMAGE',), **{}), help_text='JSON openstack base image selector, e.g. {"name": "xenial-16.04-unmodified"} Defaults to settings.OPENSTACK_SANDBOX_BASE_IMAGE on server creation.', null=True),
),
migrations.AlterField(
model_name='openedxinstance',
name='openstack_server_flavor',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=functools.partial(instance.models.utils._get_setting, *('OPENSTACK_SANDBOX_FLAVOR',), **{}), help_text='JSON openstack flavor selector, e.g. {"name": "vps-ssd-1"}. Defaults to settings.OPENSTACK_SANDBOX_FLAVOR on server creation.', null=True),
),
]
| open-craft/opencraft | instance/migrations/0110_auto_20181019_0407.py | Python | agpl-3.0 | 2,277 |
#answer: dx = (/ (1) (2))
var:
[0,1] x;
cost:
x/2
| keram88/gelpia_tests | reverse_diff_tests/x_over_2.py | Python | mit | 52 |
from ..node_tree import UMOGReferenceHolder
import bpy
class UMOGSetSceneFrameRange(bpy.types.Operator):
"""Set playback/rendering frame range to simulation range"""
bl_idname = 'umog.frame_range'
bl_label = 'Set Scene Frame Range'
bl_options = {'REGISTER', 'UNDO'}
position = bpy.props.StringProperty(default="")
def execute(self, context):
tree = context.area.spaces.active.node_tree
if self.position == 'start': context.scene.frame_start = tree.properties.StartFrame
elif self.position == 'end': context.scene.frame_end = tree.properties.EndFrame
return {'FINISHED'}
class UMOGSetSceneCurrentFrame(bpy.types.Operator):
    """Jump the current scene frame to the simulation start or end frame"""
    bl_idname = 'umog.frame_jump'
    bl_label = 'Set Scene Current Frame'
bl_options = {'REGISTER', 'UNDO'}
position = bpy.props.StringProperty(default="")
def execute(self, context):
tree = context.area.spaces.active.node_tree
if self.position == 'start': context.scene.frame_current = tree.properties.StartFrame
elif self.position == 'end': context.scene.frame_current = tree.properties.EndFrame
return {'FINISHED'} | hsab/UMOG | umog_addon/operators/frame_operators.py | Python | gpl-3.0 | 1,211 |
#!/usr/bin/env python
'''
PySQM reading program
____________________________
Copyright (c) Mireia Nievas <mnievas[at]ucm[dot]es>
This file is part of PySQM.
PySQM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PySQM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PySQM. If not, see <http://www.gnu.org/licenses/>.
____________________________
'''
import os,sys
import inspect
import time
import datetime
import numpy as np
import struct
import socket
# Default, to ignore the length of the read string.
_cal_len_ = None
_meta_len_ = None
_data_len_ = None
from pysqm.common import *
'''
This import section is only for software build purposes.
Don't worry if some of these are missing in your setup.
'''
def relaxed_import(themodule):
try: exec('import '+str(themodule))
except: pass
relaxed_import('serial')
relaxed_import('_mysql')
relaxed_import('pysqm.email')
'''
Read configuration
'''
import pysqm.settings as settings
config = settings.GlobalConfig.config
try:
DEBUG=config.DEBUG
except:
DEBUG=False
'''
Conditional imports
'''
# If the old format (SQM_LE/SQM_LU) is used, replace _ with -
config._device_type = config._device_type.replace('_','-')
if config._device_type == 'SQM-LE':
import socket
elif config._device_type == 'SQM-LU':
import serial
if config._use_mysql == True:
import _mysql
def filtered_mean(array,sigma=3):
# Our data probably contains outliers, filter them
# Notes:
# Median is more robust than mean
# Std increases if the outliers are far away from real values.
# We need to limit the amount of discrepancy we want in the data (20%?).
# We will use data masking and some operations with arrays. Convert to numpy.
array = np.array(array)
# Get the median and std.
data_median = np.median(array)
data_std = np.std(array)
# Max discrepancy we allow.
fixed_max_dev = 0.2*data_median
clip_deviation = np.min([fixed_max_dev,data_std*sigma+0.1])
# Create the filter (10% flux + variable factor)
filter_values_ok = np.abs(array-data_median)<=clip_deviation
filtered_values = array[filter_values_ok]
# Return the mean of filtered data or the median.
if np.size(filtered_values)==0:
print('Warning: High dispersion found on last measures')
filtered_mean = data_median
else:
filtered_mean = np.mean(filtered_values)
return(filtered_mean)
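# Illustrative (hypothetical) call: with readings [10.1, 10.2, 10.15, 25.0] the 25.0
# outlier falls outside the clip window around the median, so filtered_mean() returns
# the mean of the remaining values (~10.15) instead of the raw mean (~13.9).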
class device(observatory):
def standard_file_header(self):
# Data Header, at the end of this script.
header_content=RAWHeaderContent
# Update data file header with observatory data
header_content = header_content.replace(\
'$DEVICE_TYPE',str(config._device_type))
header_content = header_content.replace(\
'$DEVICE_ID',str(config._device_id))
header_content = header_content.replace(\
'$DATA_SUPPLIER',str(config._data_supplier))
header_content = header_content.replace(\
'$LOCATION_NAME',str(config._device_locationname))
header_content = header_content.replace(\
'$OBSLAT',str(config._observatory_latitude))
header_content = header_content.replace(\
'$OBSLON',str(config._observatory_longitude))
header_content = header_content.replace(\
'$OBSALT',str(config._observatory_altitude))
header_content = header_content.replace(\
'$OFFSET',str(config._offset_calibration))
if config._local_timezone==0:
header_content = header_content.replace(\
'$TIMEZONE','UTC')
elif config._local_timezone>0:
header_content = header_content.replace(\
'$TIMEZONE','UTC+'+str(config._local_timezone))
elif config._local_timezone<0:
header_content = header_content.replace(\
'$TIMEZONE','UTC'+str(config._local_timezone))
header_content = header_content.replace(\
'$PROTOCOL_NUMBER',str(self.protocol_number))
header_content = header_content.replace(\
'$MODEL_NUMBER', str(self.model_number))
header_content = header_content.replace(\
'$FEATURE_NUMBER', str(self.feature_number))
header_content = header_content.replace(\
'$SERIAL_NUMBER', str(self.serial_number))
header_content = header_content.replace(\
'$IXREADOUT', remove_linebreaks(self.ix_readout))
header_content = header_content.replace(\
'$RXREADOUT', remove_linebreaks(self.rx_readout))
header_content = header_content.replace(\
'$CXREADOUT', remove_linebreaks(self.cx_readout))
return(header_content)
def format_content(self,timeutc_mean,timelocal_mean,temp_sensor,\
freq_sensor,ticks_uC,sky_brightness):
# Format a string with data
date_time_utc_str = str(\
timeutc_mean.strftime("%Y-%m-%dT%H:%M:%S"))+'.000'
date_time_local_str = str(\
timelocal_mean.strftime("%Y-%m-%dT%H:%M:%S"))+'.000'
temp_sensor_str = str('%.2f' %temp_sensor)
ticks_uC_str = str('%.3f' %ticks_uC)
freq_sensor_str = str('%.3f' %freq_sensor)
sky_brightness_str = str('%.3f' %sky_brightness)
formatted_data = \
date_time_utc_str+";"+date_time_local_str+";"+temp_sensor_str+";"+\
ticks_uC_str+";"+freq_sensor_str+";"+sky_brightness_str+"\n"
return(formatted_data)
def define_filenames(self):
# Filenames should follow a standard based on observatory name and date.
date_time_file = self.local_datetime(\
self.read_datetime())-datetime.timedelta(hours=12)
date_file = date_time_file.date()
yearmonth = str(date_file)[0:7]
yearmonthday = str(date_file)[0:10]
self.monthly_datafile = \
config.monthly_data_directory+"/"+config._device_shorttype+\
"_"+config._observatory_name+"_"+yearmonth+".dat"
#self.daily_datafile = \
# config.daily_data_directory+"/"+config._device_shorttype+\
# "_"+config._observatory_name+"_"+yearmonthday+".dat"
self.daily_datafile = \
config.daily_data_directory+"/"+\
yearmonthday.replace('-','')+'_120000_'+\
config._device_shorttype+'-'+config._observatory_name+'.dat'
self.current_datafile = \
config.current_data_directory+"/"+config._device_shorttype+\
"_"+config._observatory_name+".dat"
def save_data(self,formatted_data):
'''
Save data to file and duplicate to current
        data file (the one that will be plotted)
'''
for each_file in [self.monthly_datafile,self.daily_datafile]:
if not os.path.exists(each_file):
datafile = open(each_file,'w')
datafile.write(self.standard_file_header())
datafile.close()
datafile = open(each_file,'a+')
datafile.write(formatted_data)
datafile.close()
self.copy_file(self.daily_datafile,self.current_datafile)
def save_data_datacenter(self,formatted_data):
'''
This function sends the data from this pysqm client to the central
node @ UCM. It saves the data there (only the SQM data file contents)
'''
# Connection details (hardcoded to avoid user changes)
DC_HOST = "muon.gae.ucm.es"
DC_PORT = 8739
DEV_ID = str(config._device_id)+"_"+str(self.serial_number)
def send_data(data):
try:
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((DC_HOST, DC_PORT))
client.sendall(data)
client.shutdown(socket.SHUT_RDWR)
client.close()
except:
return(0)
else:
return(1)
def write_buffer():
for data_line in self.DataBuffer[:]:
success = send_data(DEV_ID+";;D;;"+data_line)
if (success==1): self.DataBuffer.remove(data_line)
return(success)
'''
Send the new file initialization to the datacenter
Appends the header to the buffer (it will be sent later)
'''
if (formatted_data=="NEWFILE"):
self.DataBuffer=[\
hl+"\n" for hl in self.standard_file_header().split("\n")[:-1]]
# Try to connect with the datacenter and send the header
success = send_data(DEV_ID+";;C;;")
success = write_buffer()
return(success)
else:
'''
Send the data to the datacenter
'''
            # If the buffer is full, don't append more data.
if (len(self.DataBuffer)<10000):
self.DataBuffer.append(formatted_data)
# Try to connect with the datacenter and send the data
success = write_buffer()
return(success)
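    # Wire format used by send_data()/write_buffer() above (identifiers illustrative):
    # a new file is announced as "<device_id>_<serial>;;C;;", and each buffered header
    # or data line is then sent as "<device_id>_<serial>;;D;;<line>".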
def save_data_mysql(self,formatted_data):
'''
Use the Python MySQL API to save the
data to a database
'''
mydb = None
values = formatted_data.split(';')
try:
''' Start database connection '''
mydb = _mysql.connect(\
host = config._mysql_host,
user = config._mysql_user,
passwd = config._mysql_pass,
db = config._mysql_database,
port = config._mysql_port)
''' Insert the data '''
mydb.query(\
"INSERT INTO "+str(config._mysql_dbtable)+" VALUES (NULL,'"+\
values[0]+"','"+values[1]+"',"+\
values[2]+","+values[3]+","+\
values[4]+","+values[5]+")")
        except Exception as ex:
print(str(inspect.stack()[0][2:4][::-1])+\
' DB Error. Exception: %s' % str(ex))
if mydb != None:
mydb.close()
def data_cache(self,formatted_data,number_measures=1,niter=0):
'''
Append data to DataCache str.
If len(data)>number_measures, write to file
and flush the cache
'''
try:
self.DataCache
except:
self.DataCache=""
self.DataCache = self.DataCache+formatted_data
if len(self.DataCache.split("\n"))>=number_measures+1:
self.save_data(self.DataCache)
self.DataCache = ""
print(str(niter)+'\t'+formatted_data[:-1])
def flush_cache(self):
''' Flush the data cache '''
self.save_data(self.DataCache)
self.DataCache = ""
def copy_file(self,source,destination):
# Copy file content from source to dest.
fichero_source = open(source,'r')
contenido_source = fichero_source.read()
fichero_source.close()
# Create file and truncate it
fichero_destination = open(destination,'w')
fichero_destination.close()
# Write content
fichero_destination = open(destination,'r+')
fichero_destination.write(contenido_source)
fichero_destination.close()
def remove_currentfile(self):
# Remove a file from the host
if os.path.exists(self.current_datafile):
os.remove(self.current_datafile)
class SQM(device):
def read_photometer(self,Nmeasures=1,PauseMeasures=2):
# Initialize values
temp_sensor = []
flux_sensor = []
freq_sensor = []
ticks_uC = []
Nremaining = Nmeasures
        # Average N measures to remove jitter
timeutc_initial = self.read_datetime()
while(Nremaining>0):
InitialDateTime = datetime.datetime.now()
# Get the raw data from the photometer and process it.
raw_data = self.read_data(tries=10)
temp_sensor_i,freq_sensor_i,ticks_uC_i,sky_brightness_i = \
self.data_process(raw_data)
temp_sensor += [temp_sensor_i]
freq_sensor += [freq_sensor_i]
ticks_uC += [ticks_uC_i]
flux_sensor += [10**(-0.4*sky_brightness_i)]
Nremaining -= 1
DeltaSeconds = (datetime.datetime.now()-InitialDateTime).total_seconds()
# Just to show on screen that the program is alive and running
sys.stdout.write('.')
sys.stdout.flush()
if (Nremaining>0): time.sleep(max(1,PauseMeasures-DeltaSeconds))
timeutc_final = self.read_datetime()
timeutc_delta = timeutc_final - timeutc_initial
timeutc_mean = timeutc_initial+\
datetime.timedelta(seconds=int(timeutc_delta.seconds/2.+0.5))
timelocal_mean = self.local_datetime(timeutc_mean)
# Calculate the mean of the data.
temp_sensor = filtered_mean(temp_sensor)
freq_sensor = filtered_mean(freq_sensor)
flux_sensor = filtered_mean(flux_sensor)
ticks_uC = filtered_mean(ticks_uC)
sky_brightness = -2.5*np.log10(flux_sensor)
        # Correct for the offset (if a cover is installed on the photometer)
#sky_brightness = sky_brightness+config._offset_calibration
return(\
timeutc_mean,timelocal_mean,\
temp_sensor,freq_sensor,\
ticks_uC,sky_brightness)
def metadata_process(self,msg,sep=','):
# Separate the output array in items
msg = format_value(msg)
msg_array = msg.split(sep)
# Get Photometer identification codes
self.protocol_number = int(format_value(msg_array[1]))
self.model_number = int(format_value(msg_array[2]))
self.feature_number = int(format_value(msg_array[3]))
self.serial_number = int(format_value(msg_array[4]))
def data_process(self,msg,sep=','):
# Separate the output array in items
msg = format_value(msg)
msg_array = msg.split(sep)
# Output definition characters
mag_char = 'm'
freq_char = 'Hz'
perc_char = 'c'
pers_char = 's'
temp_char = 'C'
# Get the measures
sky_brightness = float(format_value(msg_array[1],mag_char))
freq_sensor = float(format_value(msg_array[2],freq_char))
ticks_uC = float(format_value(msg_array[3],perc_char))
period_sensor = float(format_value(msg_array[4],pers_char))
temp_sensor = float(format_value(msg_array[5],temp_char))
# For low frequencies, use the period instead
if freq_sensor<30 and period_sensor>0:
freq_sensor = 1./period_sensor
return(temp_sensor,freq_sensor,ticks_uC,sky_brightness)
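    # For reference, an 'rx' reading from the photometer typically looks like the
    # following (illustrative values, not captured from a real device):
    #     r, 19.29m,0000022921Hz,0000000020c,0000000.000s, 039.4C
    # data_process() splits on ',' and strips the unit suffixes, yielding
    # temp_sensor=39.4, freq_sensor=22921.0, ticks_uC=20.0 and sky_brightness=19.29.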
def start_connection(self):
''' Start photometer connection '''
pass
def close_connection(self):
''' End photometer connection '''
pass
def reset_device(self):
''' Restart connection'''
self.close_connection()
time.sleep(0.1)
#self.__init__()
self.start_connection()
class SQMLE(SQM):
def __init__(self):
'''
Search the photometer in the network and
read its metadata
'''
try:
print('Trying fixed device address %s ... ' %str(config._device_addr))
self.addr = config._device_addr
self.port = 10001
self.start_connection()
except:
print('Trying auto device address ...')
self.addr = self.search()
print('Found address %s ... ' %str(self.addr))
self.port = 10001
self.start_connection()
# Clearing buffer
print('Clearing buffer ... |'),
buffer_data = self.read_buffer()
print(buffer_data),
print('| ... DONE')
print('Reading test data (ix,cx,rx)...')
time.sleep(1)
self.ix_readout = self.read_metadata(tries=10)
time.sleep(1)
self.cx_readout = self.read_calibration(tries=10)
time.sleep(1)
self.rx_readout = self.read_data(tries=10)
def search(self):
        ''' Search SQM LE in the LAN. Return its address '''
self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.s.setblocking(False)
if hasattr(socket,'SO_BROADCAST'):
self.s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.s.sendto("000000f6".decode("hex"), ("255.255.255.255", 30718))
buf=''
starttime = time.time()
        print("Looking for replies; press Ctrl-C to stop.")
addr=[None,None]
while True:
try:
(buf, addr) = self.s.recvfrom(30)
if buf[3].encode("hex")=="f7":
                    print("Received from %s: MAC: %s" %
                          (addr, buf[24:30].encode("hex")))
except:
#Timeout in seconds. Allow all devices time to respond
if time.time()-starttime > 3:
break
pass
try:
assert(addr[0]!=None)
except:
print('ERR. Device not found!')
raise
else:
return(addr[0])
def start_connection(self):
''' Start photometer connection '''
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.settimeout(20)
self.s.connect((self.addr,int(self.port)))
#self.s.settimeout(1)
def close_connection(self):
''' End photometer connection '''
self.s.setsockopt(\
socket.SOL_SOCKET,\
socket.SO_LINGER,\
struct.pack('ii', 1, 0))
# Check until there is no answer from device
request = ""
r = True
while r:
r = self.read_buffer()
request += str(r)
self.s.close()
def read_buffer(self):
''' Read the data '''
msg = None
try: msg = self.s.recv(256)
except: pass
return(msg)
def reset_device(self):
''' Connection reset '''
#print('Trying to reset connection')
self.close_connection()
self.start_connection()
def read_metadata(self,tries=1):
''' Read the serial number, firmware version '''
self.s.send('ix')
time.sleep(1)
read_err = False
msg = self.read_buffer()
# Check metadata
try:
# Sanity check
assert(len(msg)==_meta_len_ or _meta_len_==None)
assert("i," in msg)
self.metadata_process(msg)
except:
tries-=1
read_err=True
if (read_err==True and tries>0):
time.sleep(1)
self.reset_device()
time.sleep(1)
msg = self.read_metadata(tries)
if (msg!=-1): read_err=False
# Check that msg contains data
if read_err==True:
print('ERR. Reading the photometer!: %s' %str(msg))
if (DEBUG): raise
return(-1)
else:
print('Sensor info: '+str(msg)),
return(msg)
def read_calibration(self,tries=1):
''' Read the calibration parameters '''
self.s.send('cx')
time.sleep(1)
read_err = False
msg = self.read_buffer()
# Check caldata
try:
# Sanity check
assert(len(msg)==_cal_len_ or _cal_len_==None)
assert("c," in msg)
except:
tries-=1
read_err=True
if (read_err==True and tries>0):
time.sleep(1)
self.reset_device()
time.sleep(1)
msg = self.read_calibration(tries)
if (msg!=-1): read_err=False
# Check that msg contains data
if read_err==True:
print('ERR. Reading the photometer!: %s' %str(msg))
if (DEBUG): raise
return(-1)
else:
print('Calibration info: '+str(msg)),
return(msg)
def read_data(self,tries=1):
''' Read the SQM and format the Temperature, Frequency and NSB measures '''
self.s.send('rx')
time.sleep(1)
read_err = False
msg = self.read_buffer()
# Check data
try:
# Sanity check
assert(len(msg)==_data_len_ or _data_len_==None)
assert("r," in msg)
self.data_process(msg)
except:
tries-=1
read_err=True
if (read_err==True and tries>0):
time.sleep(1)
self.reset_device()
time.sleep(1)
msg = self.read_data(tries)
if (msg!=-1): read_err=False
# Check that msg contains data
if read_err==True:
print('ERR. Reading the photometer!: %s' %str(msg))
if (DEBUG): raise
return(-1)
else:
if (DEBUG): print('Data msg: '+str(msg))
return(msg)
class SQMLU(SQM):
def __init__(self):
'''
Search the photometer and
read its metadata
'''
try:
print('Trying fixed device address %s ... ' %str(config._device_addr))
self.addr = config._device_addr
self.bauds = 115200
self.start_connection()
except:
print('Trying auto device address ...')
self.addr = self.search()
print('Found address %s ... ' %str(self.addr))
self.bauds = 115200
self.start_connection()
# Clearing buffer
print('Clearing buffer ... |'),
buffer_data = self.read_buffer()
print(buffer_data),
print('| ... DONE')
print('Reading test data (ix,cx,rx)...')
time.sleep(1)
self.ix_readout = self.read_metadata(tries=10)
time.sleep(1)
self.cx_readout = self.read_calibration(tries=10)
time.sleep(1)
self.rx_readout = self.read_data(tries=10)
def search(self):
'''
Photometer search.
Name of the port depends on the platform.
'''
ports_unix = ['/dev/ttyUSB'+str(num) for num in range(100)]
ports_win = ['COM'+str(num) for num in range(100)]
os_in_use = sys.platform
if os_in_use == 'linux2':
print('Detected Linux platform')
ports = ports_unix
elif os_in_use == 'win32':
print('Detected Windows platform')
ports = ports_win
used_port = None
for port in ports:
conn_test = serial.Serial(port, 115200, timeout=1)
conn_test.write('ix')
if conn_test.readline()[0] == 'i':
used_port = port
break
try:
assert(used_port!=None)
except:
print('ERR. Device not found!')
raise
else:
return(used_port)
def start_connection(self):
'''Start photometer connection '''
self.s = serial.Serial(self.addr, 115200, timeout=2)
def close_connection(self):
''' End photometer connection '''
# Check until there is no answer from device
request = ""
r = True
while r:
r = self.read_buffer()
request += str(r)
self.s.close()
def reset_device(self):
''' Connection reset '''
#print('Trying to reset connection')
self.close_connection()
self.start_connection()
def read_buffer(self):
''' Read the data '''
msg = None
try: msg = self.s.readline()
except: pass
return(msg)
def read_metadata(self,tries=1):
''' Read the serial number, firmware version '''
self.s.write('ix')
time.sleep(1)
read_err = False
msg = self.read_buffer()
# Check metadata
try:
# Sanity check
assert(len(msg)==_meta_len_ or _meta_len_==None)
assert("i," in msg)
self.metadata_process(msg)
except:
tries-=1
read_err=True
if (read_err==True and tries>0):
time.sleep(1)
self.reset_device()
time.sleep(1)
msg = self.read_metadata(tries)
if (msg!=-1): read_err=False
# Check that msg contains data
if read_err==True:
print('ERR. Reading the photometer!: %s' %str(msg))
if (DEBUG): raise
return(-1)
else:
print('Sensor info: '+str(msg)),
return(msg)
def read_calibration(self,tries=1):
''' Read the calibration data '''
self.s.write('cx')
time.sleep(1)
read_err = False
msg = self.read_buffer()
# Check caldata
try:
# Sanity check
assert(len(msg)==_cal_len_ or _cal_len_==None)
assert("c," in msg)
except:
tries-=1
read_err=True
if (read_err==True and tries>0):
time.sleep(1)
self.reset_device()
time.sleep(1)
msg = self.read_calibration(tries)
if (msg!=-1): read_err=False
# Check that msg contains data
if read_err==True:
print('ERR. Reading the photometer!: %s' %str(msg))
if (DEBUG): raise
return(-1)
else:
print('Calibration info: '+str(msg)),
return(msg)
def read_data(self,tries=1):
''' Read the SQM and format the Temperature, Frequency and NSB measures '''
self.s.write('rx')
time.sleep(1)
read_err = False
msg = self.read_buffer()
# Check data
try:
# Sanity check
assert(len(msg)==_data_len_ or _data_len_==None)
assert("r," in msg)
self.data_process(msg)
except:
tries-=1
read_err=True
if (read_err==True and tries>0):
time.sleep(1)
self.reset_device()
time.sleep(1)
msg = self.read_data(tries)
if (msg!=-1): read_err=False
# Check that msg contains data
if read_err==True:
print('ERR. Reading the photometer!: %s' %str(msg))
if (DEBUG): raise
return(-1)
else:
if (DEBUG): print('Data msg: '+str(msg))
return(msg)
| migueln/PySQM | pysqm/read.py | Python | gpl-3.0 | 26,816 |
"""Pressure util functions."""
from __future__ import annotations
from numbers import Number
from homeassistant.const import (
PRESSURE,
PRESSURE_BAR,
PRESSURE_CBAR,
PRESSURE_HPA,
PRESSURE_INHG,
PRESSURE_KPA,
PRESSURE_MBAR,
PRESSURE_PA,
PRESSURE_PSI,
UNIT_NOT_RECOGNIZED_TEMPLATE,
)
VALID_UNITS: tuple[str, ...] = (
PRESSURE_PA,
PRESSURE_HPA,
PRESSURE_KPA,
PRESSURE_BAR,
PRESSURE_CBAR,
PRESSURE_MBAR,
PRESSURE_INHG,
PRESSURE_PSI,
)
UNIT_CONVERSION: dict[str, float] = {
PRESSURE_PA: 1,
PRESSURE_HPA: 1 / 100,
PRESSURE_KPA: 1 / 1000,
PRESSURE_BAR: 1 / 100000,
PRESSURE_CBAR: 1 / 1000,
PRESSURE_MBAR: 1 / 100,
PRESSURE_INHG: 1 / 3386.389,
PRESSURE_PSI: 1 / 6894.757,
}
def convert(value: float, unit_1: str, unit_2: str) -> float:
"""Convert one unit of measurement to another."""
if unit_1 not in VALID_UNITS:
raise ValueError(UNIT_NOT_RECOGNIZED_TEMPLATE.format(unit_1, PRESSURE))
if unit_2 not in VALID_UNITS:
raise ValueError(UNIT_NOT_RECOGNIZED_TEMPLATE.format(unit_2, PRESSURE))
if not isinstance(value, Number):
raise TypeError(f"{value} is not of numeric type")
if unit_1 == unit_2:
return value
pascals = value / UNIT_CONVERSION[unit_1]
return pascals * UNIT_CONVERSION[unit_2]
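# Examples (values rounded; shown for illustration only):
#     convert(1013.25, PRESSURE_HPA, PRESSURE_INHG)  # ~29.92
#     convert(1.0, PRESSURE_BAR, PRESSURE_PSI)       # ~14.50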
| jawilson/home-assistant | homeassistant/util/pressure.py | Python | apache-2.0 | 1,358 |
#!/usr/bin/env python
#
# Copyright 2004,2007,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import math
class test_sig_source (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_const_f (self):
tb = self.tb
expected_result = (1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5)
src1 = gr.sig_source_f (1e6, gr.GR_CONST_WAVE, 0, 1.5)
op = gr.head (gr.sizeof_float, 10)
dst1 = gr.vector_sink_f ()
tb.connect (src1, op)
tb.connect (op, dst1)
tb.run ()
dst_data = dst1.data ()
self.assertEqual (expected_result, dst_data)
def test_const_i (self):
tb = self.tb
expected_result = (1, 1, 1, 1)
src1 = gr.sig_source_i (1e6, gr.GR_CONST_WAVE, 0, 1)
op = gr.head (gr.sizeof_int, 4)
dst1 = gr.vector_sink_i ()
tb.connect (src1, op)
tb.connect (op, dst1)
tb.run ()
dst_data = dst1.data ()
self.assertEqual (expected_result, dst_data)
def test_const_s (self):
tb = self.tb
expected_result = (1, 1, 1, 1)
src1 = gr.sig_source_s (1e6, gr.GR_CONST_WAVE, 0, 1)
op = gr.head (gr.sizeof_short, 4)
dst1 = gr.vector_sink_s ()
tb.connect (src1, op)
tb.connect (op, dst1)
tb.run ()
dst_data = dst1.data ()
self.assertEqual (expected_result, dst_data)
def test_sine_f (self):
tb = self.tb
sqrt2 = math.sqrt(2) / 2
expected_result = (0, sqrt2, 1, sqrt2, 0, -sqrt2, -1, -sqrt2, 0)
src1 = gr.sig_source_f (8, gr.GR_SIN_WAVE, 1.0, 1.0)
op = gr.head (gr.sizeof_float, 9)
dst1 = gr.vector_sink_f ()
tb.connect (src1, op)
tb.connect (op, dst1)
tb.run ()
dst_data = dst1.data ()
self.assertFloatTuplesAlmostEqual (expected_result, dst_data, 5)
def test_cosine_f (self):
tb = self.tb
sqrt2 = math.sqrt(2) / 2
expected_result = (1, sqrt2, 0, -sqrt2, -1, -sqrt2, 0, sqrt2, 1)
src1 = gr.sig_source_f (8, gr.GR_COS_WAVE, 1.0, 1.0)
op = gr.head (gr.sizeof_float, 9)
dst1 = gr.vector_sink_f ()
tb.connect (src1, op)
tb.connect (op, dst1)
tb.run ()
dst_data = dst1.data ()
self.assertFloatTuplesAlmostEqual (expected_result, dst_data, 5)
def test_sqr_c (self):
tb = self.tb #arg6 is a bit before -PI/2
expected_result = (1j, 1j, 0, 0, 1, 1, 1+0j, 1+1j, 1j)
src1 = gr.sig_source_c (8, gr.GR_SQR_WAVE, 1.0, 1.0)
op = gr.head (gr.sizeof_gr_complex, 9)
dst1 = gr.vector_sink_c ()
tb.connect (src1, op)
tb.connect (op, dst1)
tb.run ()
dst_data = dst1.data ()
self.assertEqual (expected_result, dst_data)
def test_tri_c (self):
tb = self.tb
expected_result = (1+.5j, .75+.75j, .5+1j, .25+.75j, 0+.5j, .25+.25j, .5+0j, .75+.25j, 1+.5j)
src1 = gr.sig_source_c (8, gr.GR_TRI_WAVE, 1.0, 1.0)
op = gr.head (gr.sizeof_gr_complex, 9)
dst1 = gr.vector_sink_c ()
tb.connect (src1, op)
tb.connect (op, dst1)
tb.run ()
dst_data = dst1.data ()
self.assertComplexTuplesAlmostEqual (expected_result, dst_data, 5)
def test_saw_c (self):
tb = self.tb
expected_result = (.5+.25j, .625+.375j, .75+.5j, .875+.625j, 0+.75j, .125+.875j, .25+1j, .375+.125j, .5+.25j)
src1 = gr.sig_source_c (8, gr.GR_SAW_WAVE, 1.0, 1.0)
op = gr.head (gr.sizeof_gr_complex, 9)
dst1 = gr.vector_sink_c ()
tb.connect (src1, op)
tb.connect (op, dst1)
tb.run ()
dst_data = dst1.data ()
self.assertComplexTuplesAlmostEqual (expected_result, dst_data, 5)
def test_sqr_f (self):
tb = self.tb
expected_result = (0, 0, 0, 0, 1, 1, 1, 1, 0)
src1 = gr.sig_source_f (8, gr.GR_SQR_WAVE, 1.0, 1.0)
op = gr.head (gr.sizeof_float, 9)
dst1 = gr.vector_sink_f ()
tb.connect (src1, op)
tb.connect (op, dst1)
tb.run ()
dst_data = dst1.data ()
self.assertEqual (expected_result, dst_data)
def test_sqr_s (self):
tb = self.tb
expected_result = (0, 0, 0, 0, 1, 1, 1, 1, 0)
src1 = gr.sig_source_s (8, gr.GR_SQR_WAVE, 1.0, 1.0)
op = gr.head (gr.sizeof_short, 9)
dst1 = gr.vector_sink_s ()
tb.connect (src1, op)
tb.connect (op, dst1)
tb.run ()
dst_data = dst1.data ()
self.assertEqual (expected_result, dst_data)
def test_tri_f (self):
tb = self.tb
expected_result = (1, .75, .5, .25, 0, .25, .5, .75, 1)
src1 = gr.sig_source_f (8, gr.GR_TRI_WAVE, 1.0, 1.0)
op = gr.head (gr.sizeof_float, 9)
dst1 = gr.vector_sink_f ()
tb.connect (src1, op)
tb.connect (op, dst1)
tb.run ()
dst_data = dst1.data ()
self.assertFloatTuplesAlmostEqual (expected_result, dst_data, 5)
def test_saw_f (self):
tb = self.tb
expected_result = (.5, .625, .75, .875, 0, .125, .25, .375, .5)
src1 = gr.sig_source_f (8, gr.GR_SAW_WAVE, 1.0, 1.0)
op = gr.head (gr.sizeof_float, 9)
dst1 = gr.vector_sink_f ()
tb.connect (src1, op)
tb.connect (op, dst1)
tb.run ()
dst_data = dst1.data ()
self.assertFloatTuplesAlmostEqual (expected_result, dst_data, 5)
if __name__ == '__main__':
gr_unittest.run(test_sig_source, "test_sig_source.xml")
| RedhawkSDR/integration-gnuhawk | qa/tests/qa_sig_source.py | Python | gpl-3.0 | 6,406 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Generated by the OpenERP plugin for Dia !
#
import l10n_pe_hr_payroll
| kailIII/emaresa | trunk.pe.bk/l10n_pe_hr_payroll/__init__.py | Python | agpl-3.0 | 1,051 |
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
from videoclases.models.course import Course
class Student(models.Model):
user = models.OneToOneField(User)
courses = models.ManyToManyField(Course, related_name='students')
changed_password = models.BooleanField(default=False)
def course_actual(self):
course_qs = self.courses.filter(year=timezone.now().year)
return course_qs[0] if course_qs.exists() else False
def __str__(self):
if self.course_actual():
return 'Course: ' + self.course_actual().name + ' ' + str(self.course_actual().year) + ' ' + \
self.user.get_full_name()
return 'Sin courses actualmente' | Videoclases/videoclases | videoclases/models/student.py | Python | gpl-3.0 | 756 |
import os.path
import multiprocessing
import utils
import functools
import sys
def check_existing_default_config(species, script_path):
species = species.lower().split(' ')
trueCoverage_config_folder = os.path.join(os.path.dirname(script_path), 'modules', 'trueCoverage_rematch', '')
config = None
reference = None
files = [f for f in os.listdir(trueCoverage_config_folder) if not f.startswith('.') and os.path.isfile(os.path.join(trueCoverage_config_folder, f))]
for file_found in files:
file_path = os.path.join(trueCoverage_config_folder, file_found)
if file_found == '_'.join(species) + '.config':
config = file_path
elif file_found == '_'.join(species) + '.fasta':
reference = file_path
return config, reference
def parse_config(config_file):
config = {'reference_file': None, 'length_extra_seq': None, 'maximum_number_absent_genes': None, 'maximum_number_genes_multiple_alleles': None, 'minimum_read_coverage': None, 'minimum_depth_presence': None, 'minimum_depth_call': None, 'minimum_depth_frequency_dominant_allele': None, 'minimum_gene_coverage': None, 'minimum_gene_identity': None}
with open(config_file, 'rtU') as reader:
field = None
for line in reader:
line = line.splitlines()[0]
if len(line) > 0:
line = line.split(' ')[0]
if line.startswith('#'):
line = line[1:].split(' ')[0]
field = line
else:
if field is not None:
if field in ['length_extra_seq', 'maximum_number_absent_genes', 'maximum_number_genes_multiple_alleles', 'minimum_read_coverage', 'minimum_depth_presence', 'minimum_depth_call', 'minimum_gene_coverage', 'minimum_gene_identity']:
line = int(line)
if field in ['minimum_gene_coverage', 'minimum_gene_identity']:
if line < 0 or line > 100:
sys.exit('minimum_gene_coverage in trueCoverage_rematch config file must be an integer between 0 and 100')
elif field == 'minimum_depth_frequency_dominant_allele':
line = float(line)
if line < 0 or line > 1:
sys.exit('minimum_depth_frequency_dominant_allele in trueCoverage_rematch config file must be a double between 0 and 1')
config[field] = line
field = None
for field in config:
if config[field] is None:
sys.exit(field + ' in trueCoverage_rematch config file is missing')
return config
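# parse_config() expects each field name on a line starting with '#' and its value on
# the following line. A hypothetical fragment of such a config file (values invented):
#
#     #maximum_number_absent_genes
#     2
#     #minimum_gene_identity
#     80
#     #minimum_depth_frequency_dominant_allele
#     0.5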
def index_fasta_samtools(fasta, region_None, region_outfile_none, print_comand_True):
command = ['samtools', 'faidx', fasta, '', '', '']
shell_true = False
if region_None is not None:
command[3] = region_None
if region_outfile_none is not None:
command[4] = '>'
command[5] = region_outfile_none
shell_true = True
run_successfully, stdout, stderr = utils.runCommandPopenCommunicate(command, shell_true, None, print_comand_True)
return run_successfully, stdout
# Indexing reference file using Bowtie2
def indexSequenceBowtie2(referenceFile, threads):
if os.path.isfile(str(referenceFile + '.1.bt2')):
run_successfully = True
else:
command = ['bowtie2-build', '--threads', str(threads), referenceFile, referenceFile]
run_successfully, stdout, stderr = utils.runCommandPopenCommunicate(command, False, None, True)
return run_successfully
# Mapping with Bowtie2
def mappingBowtie2(fastq_files, referenceFile, threads, outdir, conserved_True, numMapLoc):
sam_file = os.path.join(outdir, str('alignment.sam'))
# Index reference file
run_successfully = indexSequenceBowtie2(referenceFile, threads)
if run_successfully:
command = ['bowtie2', '-k', str(numMapLoc), '-q', '', '--threads', str(threads), '-x', referenceFile, '', '--no-unal', '-S', sam_file]
if len(fastq_files) == 1:
command[9] = '-U ' + fastq_files[0]
elif len(fastq_files) == 2:
command[9] = '-1 ' + fastq_files[0] + ' -2 ' + fastq_files[1]
else:
return False, None
if conserved_True:
command[4] = '--sensitive'
else:
command[4] = '--very-sensitive-local'
run_successfully, stdout, stderr = utils.runCommandPopenCommunicate(command, False, None, True)
if not run_successfully:
sam_file = None
return run_successfully, sam_file
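# For a paired-end, non-conserved run the command assembled above is roughly
# equivalent to the following shell invocation (file names and thread count are
# placeholders):
#     bowtie2 -k 1 -q --very-sensitive-local --threads 4 -x reference.fasta \
#         -1 reads_1.fastq -2 reads_2.fastq --no-unal -S alignment.sam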
# Sort alignment file
def sortAlignment(alignment_file, output_file, sortByName_True, threads):
outFormat_string = os.path.splitext(output_file)[1][1:].lower()
command = ['samtools', 'sort', '-o', output_file, '-O', outFormat_string, '', '-@', str(threads), alignment_file]
if sortByName_True:
command[6] = '-n'
run_successfully, stdout, stderr = utils.runCommandPopenCommunicate(command, False, None, True)
if not run_successfully:
output_file = None
return run_successfully, output_file
# Index alignment file
def indexAlignment(alignment_file):
command = ['samtools', 'index', alignment_file]
run_successfully, stdout, stderr = utils.runCommandPopenCommunicate(command, False, None, True)
return run_successfully
def mapping_reads(fastq_files, reference_file, threads, outdir, conserved_True, numMapLoc):
# Create a symbolic link to the reference_file
reference_link = os.path.join(outdir, os.path.basename(reference_file))
os.symlink(reference_file, reference_link)
bam_file = None
# Mapping reads using Bowtie2
run_successfully, sam_file = mappingBowtie2(fastq_files, reference_link, threads, outdir, conserved_True, numMapLoc)
if run_successfully:
# Convert sam to bam and sort bam
run_successfully, bam_file = sortAlignment(sam_file, str(os.path.splitext(sam_file)[0] + '.bam'), False, threads)
if run_successfully:
os.remove(sam_file)
# Index bam
run_successfully = indexAlignment(bam_file)
return run_successfully, bam_file, reference_link
def create_vcf(bam_file, sequence_to_analyse, outdir, counter, reference_file):
gene_vcf = os.path.join(outdir, 'samtools_mpileup.sequence_' + str(counter) + '.vcf')
command = ['samtools', 'mpileup', '--count-orphans', '--no-BAQ', '--min-BQ', '0', '--min-MQ', str(7), '--fasta-ref', reference_file, '--region', sequence_to_analyse, '--output', gene_vcf, '--VCF', '--uncompressed', '--output-tags', 'INFO/AD,AD,DP', bam_file]
run_successfully, stdout, stderr = utils.runCommandPopenCommunicate(command, False, None, False)
if not run_successfully:
gene_vcf = None
return run_successfully, gene_vcf
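# The mpileup call built above corresponds to a shell command of the form (sequence
# and file names are placeholders):
#     samtools mpileup --count-orphans --no-BAQ --min-BQ 0 --min-MQ 7 \
#         --fasta-ref reference.fasta --region gene_1 \
#         --output samtools_mpileup.sequence_1.vcf --VCF --uncompressed \
#         --output-tags INFO/AD,AD,DP alignment.bam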
# Read vcf file
class Vcf():
def __init__(self, vcfFile):
self.vcf = open(vcfFile, 'rtU')
self.line_read = self.vcf.readline()
while self.line_read.startswith('#'):
self.line_read = self.vcf.readline()
self.line = self.line_read
def readline(self):
self.line_stored = self.line
self.line = self.vcf.readline()
return self.line_stored
def close(self):
self.vcf.close()
def get_variants(gene_vcf):
variants = {}
vfc_file = Vcf(gene_vcf)
line = vfc_file.readline()
while len(line) > 0:
fields = line.splitlines()[0].split('\t')
if len(fields) > 0:
fields[1] = int(fields[1])
info_field = {}
for i in fields[7].split(';'):
i = i.split('=')
if len(i) > 1:
info_field[i[0]] = i[1]
else:
info_field[i[0]] = None
format_field = {}
format_field_name = fields[8].split(':')
format_data = fields[9].split(':')
for i in range(0, len(format_data)):
format_field[format_field_name[i]] = format_data[i].split(',')
fields_to_store = {'REF': fields[3], 'ALT': fields[4].split(','), 'info': info_field, 'format': format_field}
if fields[1] in variants:
variants[fields[1]][len(variants[fields[1]])] = fields_to_store
else:
variants[fields[1]] = {0: fields_to_store}
line = vfc_file.readline()
vfc_file.close()
return variants
def indel_entry(variant_position):
entry_with_indel = []
entry_with_snp = None
for i in variant_position:
keys = variant_position[i]['info'].keys()
if 'INDEL' in keys:
entry_with_indel.append(i)
else:
entry_with_snp = i
return entry_with_indel, entry_with_snp
def get_alt_noMatter(variant_position, indel_true):
dp = sum(map(int, variant_position['format']['AD']))
index_alleles_sorted_position = sorted(zip(map(int, variant_position['format']['AD']), range(0, len(variant_position['format']['AD']))), reverse=True)
index_dominant_allele = None
if not indel_true:
ad_idv = index_alleles_sorted_position[0][0]
if len([x for x in index_alleles_sorted_position if x[0] == ad_idv]) > 1:
index_alleles_sorted_position = sorted([x for x in index_alleles_sorted_position if x[0] == ad_idv])
index_dominant_allele = index_alleles_sorted_position[0][1]
if index_dominant_allele == 0:
alt = '.'
else:
alt = variant_position['ALT'][index_dominant_allele - 1]
else:
ad_idv = variant_position['info']['IDV']
if float(ad_idv) / float(dp) >= 0.5:
if len([x for x in index_alleles_sorted_position if x[0] == index_alleles_sorted_position[0][0]]) > 1:
index_alleles_sorted_position = sorted([x for x in index_alleles_sorted_position if x[0] == index_alleles_sorted_position[0][0]])
index_dominant_allele = index_alleles_sorted_position[0][1]
if index_dominant_allele == 0:
alt = '.'
else:
alt = variant_position['ALT'][index_dominant_allele - 1]
else:
ad_idv = int(variant_position['format']['AD'][0])
alt = '.'
return alt, dp, ad_idv, index_dominant_allele
def count_number_diferences(ref, alt):
number_diferences = 0
if len(ref) != len(alt):
number_diferences += 1
for i in range(0, min(len(ref), len(alt))):
if alt[i] != 'N' and ref[i] != alt[i]:
number_diferences += 1
return number_diferences
def get_alt_correct(variant_position, alt_noMatter, dp, ad_idv, index_dominant_allele, minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele):
alt = None
low_coverage = False
multiple_alleles = False
if dp >= minimum_depth_presence:
if dp < minimum_depth_call:
alt = 'N' * len(variant_position['REF'])
low_coverage = True
else:
if ad_idv < minimum_depth_call:
alt = 'N' * len(variant_position['REF'])
low_coverage = True
else:
if float(ad_idv) / float(dp) < minimum_depth_frequency_dominant_allele:
if index_dominant_allele is not None:
variants_coverage = [int(variant_position['format']['AD'][i]) for i in range(0, len(variant_position['ALT']) + 1) if i != index_dominant_allele]
if sum(variants_coverage) == 0:
alt = alt_noMatter
else:
if float(max(variants_coverage)) / float(sum(variants_coverage)) > 0.5:
multiple_alleles = True
alt = 'N' * len(variant_position['REF'])
elif float(max(variants_coverage)) / float(sum(variants_coverage)) == 0.5 and len(variants_coverage) > 2:
multiple_alleles = True
alt = 'N' * len(variant_position['REF'])
else:
alt = alt_noMatter
else:
multiple_alleles = True
alt = 'N' * len(variant_position['REF'])
else:
alt = alt_noMatter
else:
low_coverage = True
return alt, low_coverage, multiple_alleles
def get_alt_alignment(ref, alt):
if alt is None:
alt = 'N' * len(ref)
else:
if len(ref) != len(alt):
if len(alt) < len(ref):
alt += 'N' * (len(ref) - len(alt))
else:
if alt[:len(ref)] == ref:
alt = '.'
else:
alt = alt[:len(ref)]
return alt
def get_indel_more_likely(variant_position, indels_entry):
    # Pick, among the entries that describe an indel, the one supported by the most
    # reads (the IDV tag); variant_position maps entry index -> parsed VCF fields
    indel_coverage = {}
    for i in indels_entry:
        indel_coverage[i] = int(variant_position[i]['info']['IDV'])
    # Return the entry key with the maximal coverage (a dict has no .index method)
    return max(indel_coverage, key=indel_coverage.get)
def determine_variant(variant_position, minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele, indel_true):
alt_noMatter, dp, ad_idv, index_dominant_allele = get_alt_noMatter(variant_position, indel_true)
alt_correct, low_coverage, multiple_alleles = get_alt_correct(variant_position, alt_noMatter, dp, ad_idv, index_dominant_allele, minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele)
alt_alignment = get_alt_alignment(variant_position['REF'], alt_correct)
return variant_position['REF'], alt_correct, low_coverage, multiple_alleles, alt_noMatter, alt_alignment
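# Re-check the bases that follow an indel against the calls at those positions, dropping bases without a VCF entry and replacing those whose dominant allele differs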
def confirm_nucleotides_indel(ref, alt, variants, position_start_indel, minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele):
alt = list(alt)
for i in range(0, len(alt) - 1):
if alt[1 + i] == 'N':
continue
if len(alt) < len(ref):
new_position = position_start_indel + len(alt) - i
else:
if i + 1 > len(ref) - 1:
break
new_position = position_start_indel + 1 + i
if new_position not in variants:
alt[1 + i] = ''
continue
entry_with_indel, entry_with_snp = indel_entry(variants[new_position])
new_ref, alt_correct, low_coverage, multiple_alleles, alt_noMatter, alt_alignment = determine_variant(variants[new_position][entry_with_snp], minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele, False)
if alt_noMatter != '.' and alt[1 + i] != alt_noMatter:
alt[1 + i] = alt_noMatter
return ''.join(alt)
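# Resolve a position that may hold both SNP and indel entries, preferring the best supported indel and reconciling its first base with the SNP call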
def snp_indel(variants, position, minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele):
entry_with_indel, entry_with_snp = indel_entry(variants[position])
if len(entry_with_indel) == 0:
ref, alt_correct, low_coverage, multiple_alleles, alt_noMatter, alt_alignment = determine_variant(variants[position][entry_with_snp], minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele, False)
else:
ref_snp, alt_correct_snp, low_coverage_snp, multiple_alleles_snp, alt_noMatter_snp, alt_alignment_snp = determine_variant(variants[position][entry_with_snp], minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele, False)
indel_more_likely = entry_with_indel[0]
if len(entry_with_indel) > 1:
indel_more_likely = get_indel_more_likely(variants[position], entry_with_indel)
ref, alt_correct, low_coverage, multiple_alleles, alt_noMatter, alt_alignment = determine_variant(variants[position][indel_more_likely], minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele, True)
if alt_noMatter == '.':
ref, alt_correct, low_coverage, multiple_alleles, alt_noMatter, alt_alignment = ref_snp, alt_correct_snp, low_coverage_snp, multiple_alleles_snp, alt_noMatter_snp, alt_alignment_snp
else:
if alt_correct is None and alt_correct_snp is not None:
alt_correct = alt_correct_snp
elif alt_correct is not None and alt_correct_snp is not None:
if alt_correct_snp != '.' and alt_correct[0] != alt_correct_snp:
alt_correct = alt_correct_snp + alt_correct[1:] if len(alt_correct) > 1 else alt_correct_snp
if alt_noMatter_snp != '.' and alt_noMatter[0] != alt_noMatter_snp:
alt_noMatter = alt_noMatter_snp + alt_noMatter[1:] if len(alt_noMatter) > 1 else alt_noMatter_snp
if alt_alignment_snp != '.' and alt_alignment[0] != alt_alignment_snp:
alt_alignment = alt_alignment_snp + alt_alignment[1:] if len(alt_alignment) > 1 else alt_alignment_snp
if alt_noMatter != '.':
alt_noMatter = confirm_nucleotides_indel(ref, alt_noMatter, variants, position, minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele)
if alt_correct is not None and alt_correct != '.':
alt_correct = confirm_nucleotides_indel(ref, alt_correct, variants, position, minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele)
if alt_alignment != '.':
alt_alignment = confirm_nucleotides_indel(ref, alt_alignment, variants, position, minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele)
return ref, alt_correct, low_coverage, multiple_alleles, alt_noMatter, alt_alignment
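# Walk the reference sequence and build the corrected, no-matter and alignment variant dictionaries, collapsing absent stretches and recording positions with multiple alleles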
def get_true_variants(variants, minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele, sequence):
variants_correct = {}
variants_noMatter = {}
variants_alignment = {}
absent_positions = {}
last_absent_position = ''
multiple_alleles_found = []
counter = 1
while counter <= len(sequence):
if counter in variants:
ref, alt_correct, low_coverage, multiple_alleles, alt_noMatter, alt_alignment = snp_indel(variants, counter, minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele)
if alt_alignment != '.':
variants_alignment[counter] = {'REF': ref, 'ALT': alt_alignment}
if alt_noMatter != '.':
variants_noMatter[counter] = {'REF': ref, 'ALT': alt_noMatter}
if alt_correct is None:
if counter - len(last_absent_position) in absent_positions:
absent_positions[counter - len(last_absent_position)]['REF'] += ref
else:
absent_positions[counter] = {'REF': ref, 'ALT': ''}
last_absent_position += ref
else:
if alt_correct != '.':
if len(alt_correct) < len(ref):
if len(alt_correct) == 1:
absent_positions[counter + 1] = {'REF': ref[1:], 'ALT': ''}
else:
absent_positions[counter + 1] = {'REF': ref[1:], 'ALT': alt_correct[1:]}
last_absent_position = ref[1:]
else:
variants_correct[counter] = {'REF': ref, 'ALT': alt_correct}
last_absent_position = ''
else:
last_absent_position = ''
if multiple_alleles:
multiple_alleles_found.append(counter)
counter += len(ref)
else:
variants_alignment[counter] = {'REF': sequence[counter - 1], 'ALT': 'N'}
if counter - len(last_absent_position) in absent_positions:
absent_positions[counter - len(last_absent_position)]['REF'] += sequence[counter - 1]
else:
absent_positions[counter] = {'REF': sequence[counter - 1], 'ALT': ''}
last_absent_position += sequence[counter - 1]
counter += 1
for position in absent_positions:
if position == 1:
variants_correct[position] = {'REF': absent_positions[position]['REF'], 'ALT': 'N'}
if position not in variants:
variants_noMatter[position] = {'REF': absent_positions[position]['REF'], 'ALT': 'N'}
else:
if position - 1 not in variants_correct:
variants_correct[position - 1] = {'REF': sequence[position - 2] + absent_positions[position]['REF'], 'ALT': sequence[position - 2] + absent_positions[position]['ALT']}
else:
variants_correct[position - 1] = {'REF': variants_correct[position - 1]['REF'] + absent_positions[position]['REF'][len(variants_correct[position - 1]['REF']) - 1:], 'ALT': variants_correct[position - 1]['ALT'] + absent_positions[position]['ALT'][len(variants_correct[position - 1]['ALT']) - 1 if len(variants_correct[position - 1]['ALT']) > 0 else 0:]}
if position not in variants:
if position - 1 not in variants_noMatter:
variants_noMatter[position - 1] = {'REF': sequence[position - 2] + absent_positions[position]['REF'], 'ALT': sequence[position - 2] + absent_positions[position]['ALT']}
else:
variants_noMatter[position - 1] = {'REF': variants_noMatter[position - 1]['REF'] + absent_positions[position]['REF'][len(variants_noMatter[position - 1]['REF']) - 1:], 'ALT': variants_noMatter[position - 1]['ALT'] + absent_positions[position]['ALT'][len(variants_noMatter[position - 1]['ALT']) - 1 if len(variants_noMatter[position - 1]['ALT']) > 0 else 0:]}
return variants_correct, variants_noMatter, variants_alignment, multiple_alleles_found
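# Trim variants that start inside the left flanking extra sequence so they begin at the flank boundary, dropping those fully contained in the flank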
def clean_variant_in_extra_seq_left(variant_dict, position, length_extra_seq, multiple_alleles_found, number_multi_alleles):
number_diferences = 0
if position + len(variant_dict[position]['REF']) - 1 > length_extra_seq:
if multiple_alleles_found is not None and position in multiple_alleles_found:
number_multi_alleles += 1
temp_variant = variant_dict[position]
del variant_dict[position]
variant_dict[length_extra_seq] = {}
variant_dict[length_extra_seq]['REF'] = temp_variant['REF'][length_extra_seq - position:]
variant_dict[length_extra_seq]['ALT'] = temp_variant['ALT'][length_extra_seq - position:] if len(temp_variant['ALT']) > length_extra_seq - position else temp_variant['REF'][length_extra_seq - position]
number_diferences = count_number_diferences(variant_dict[length_extra_seq]['REF'], variant_dict[length_extra_seq]['ALT'])
else:
del variant_dict[position]
return variant_dict, number_multi_alleles, number_diferences
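# Trim variants that run past the right flanking extra sequence and count the REF/ALT differences of the (possibly trimmed) variant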
def clean_variant_in_extra_seq_rigth(variant_dict, position, sequence_length, length_extra_seq):
if position + len(variant_dict[position]['REF']) - 1 > sequence_length - length_extra_seq:
variant_dict[position]['REF'] = variant_dict[position]['REF'][: - (position - (sequence_length - length_extra_seq)) + 1]
variant_dict[position]['ALT'] = variant_dict[position]['ALT'][: - (position - (sequence_length - length_extra_seq)) + 1] if len(variant_dict[position]['ALT']) >= - (position - (sequence_length - length_extra_seq)) + 1 else variant_dict[position]['ALT']
    number_diferences = count_number_diferences(variant_dict[position]['REF'], variant_dict[position]['ALT'])
return variant_dict, number_diferences
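# Remove variants located in the flanking extra sequences and count multiple allele positions and differences inside the core region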
def cleanning_variants_extra_seq(variants_correct, variants_noMatter, variants_alignment, multiple_alleles_found, length_extra_seq, sequence_length):
number_multi_alleles = 0
number_diferences = 0
counter = 1
while counter <= sequence_length:
if counter <= length_extra_seq:
if counter in variants_correct:
variants_correct, number_multi_alleles, number_diferences = clean_variant_in_extra_seq_left(variants_correct, counter, length_extra_seq, multiple_alleles_found, number_multi_alleles)
if counter in variants_noMatter:
variants_noMatter, ignore, ignore = clean_variant_in_extra_seq_left(variants_noMatter, counter, length_extra_seq, None, None)
if counter in variants_alignment:
variants_alignment, ignore, ignore = clean_variant_in_extra_seq_left(variants_alignment, counter, length_extra_seq, None, None)
elif counter > length_extra_seq and counter <= sequence_length - length_extra_seq:
if counter in variants_correct:
if counter in multiple_alleles_found:
number_multi_alleles += 1
variants_correct, number_diferences_found = clean_variant_in_extra_seq_rigth(variants_correct, counter, sequence_length, length_extra_seq)
number_diferences += number_diferences_found
if counter in variants_noMatter:
variants_noMatter, ignore = clean_variant_in_extra_seq_rigth(variants_noMatter, counter, sequence_length, length_extra_seq)
if counter in variants_alignment:
variants_alignment, ignore = clean_variant_in_extra_seq_rigth(variants_alignment, counter, sequence_length, length_extra_seq)
else:
if counter in variants_correct:
del variants_correct[counter]
if counter in variants_noMatter:
del variants_noMatter[counter]
if counter in variants_alignment:
del variants_alignment[counter]
counter += 1
return variants_correct, variants_noMatter, variants_alignment, number_multi_alleles, number_diferences
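# Parse a samtools depth file into a {position: depth} dictionary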
def get_coverage(gene_coverage):
coverage = {}
with open(gene_coverage, 'rtU') as reader:
for line in reader:
line = line.splitlines()[0]
if len(line) > 0:
line = line.split('\t')
coverage[int(line[1])] = int(line[2])
return coverage
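# Report the number of absent positions, the percentage of low coverage positions and the mean depth of the core region (flanks excluded)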
def get_coverage_report(coverage, sequence_length, minimum_depth_presence, minimum_depth_call, length_extra_seq):
if len(coverage) == 0:
return sequence_length - 2 * length_extra_seq, 100.0, 0.0
count_absent = 0
count_lowCoverage = 0
sum_coverage = 0
counter = 1
while counter <= sequence_length:
if counter > length_extra_seq and counter <= sequence_length - length_extra_seq:
if coverage[counter] < minimum_depth_presence:
count_absent += 1
else:
if coverage[counter] < minimum_depth_call:
count_lowCoverage += 1
sum_coverage += coverage[counter]
counter += 1
mean_coverage = 0
percentage_lowCoverage = 0
if sequence_length - 2 * length_extra_seq - count_absent > 0:
mean_coverage = float(sum_coverage) / float(sequence_length - 2 * length_extra_seq - count_absent)
percentage_lowCoverage = float(count_lowCoverage) / float(sequence_length - 2 * length_extra_seq - count_absent) * 100
return count_absent, percentage_lowCoverage, mean_coverage
# Get genome coverage data
def compute_genome_coverage_data(alignment_file, sequence_to_analyse, outdir, counter):
genome_coverage_data_file = os.path.join(outdir, 'samtools_depth.sequence_' + str(counter) + '.tab')
command = ['samtools', 'depth', '-a', '-q', '0', '-r', sequence_to_analyse, alignment_file, '>', genome_coverage_data_file]
run_successfully, stdout, stderr = utils.runCommandPopenCommunicate(command, True, None, False)
return run_successfully, genome_coverage_data_file
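# Determine the cleaned variants for one sequence; only the counts of multiple allele positions and differences are returned (no consensus sequence is produced here)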
def create_sample_consensus_sequence(outdir, sequence_to_analyse, reference_file, variants, minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele, sequence, length_extra_seq):
variants_correct, variants_noMatter, variants_alignment, multiple_alleles_found = get_true_variants(variants, minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele, sequence)
variants_correct, variants_noMatter, variants_alignment, number_multi_alleles, number_diferences = cleanning_variants_extra_seq(variants_correct, variants_noMatter, variants_alignment, multiple_alleles_found, length_extra_seq, len(sequence))
return True, number_multi_alleles, None, number_diferences
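# Per-sequence worker: create the VCF and depth files, compute variant and coverage statistics and pickle the results for later gathering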
@utils.trace_unhandled_exceptions
def analyse_sequence_data(bam_file, sequence_information, outdir, counter, reference_file, length_extra_seq, minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele):
count_absent = None
percentage_lowCoverage = None
meanCoverage = None
number_diferences = 0
# Create vcf file (for multiple alleles check)
run_successfully, gene_vcf = create_vcf(bam_file, sequence_information['header'], outdir, counter, reference_file)
if run_successfully:
# Create coverage tab file
run_successfully, gene_coverage = compute_genome_coverage_data(bam_file, sequence_information['header'], outdir, counter)
if run_successfully:
variants = get_variants(gene_vcf)
coverage = get_coverage(gene_coverage)
run_successfully, number_multi_alleles, consensus_sequence, number_diferences = create_sample_consensus_sequence(outdir, sequence_information['header'], reference_file, variants, minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele, sequence_information['sequence'], length_extra_seq)
count_absent, percentage_lowCoverage, meanCoverage = get_coverage_report(coverage, sequence_information['length'], minimum_depth_presence, minimum_depth_call, length_extra_seq)
utils.saveVariableToPickle([run_successfully, counter, number_multi_alleles, count_absent, percentage_lowCoverage, meanCoverage, consensus_sequence, number_diferences], outdir, str('coverage_info.' + str(counter)))
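# Analyse every reference sequence in parallel and gather the per-sequence results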
def sequence_data(sample, reference_file, bam_file, outdir, threads, length_extra_seq, minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele, debug_mode_true):
sequence_data_outdir = os.path.join(outdir, 'sequence_data', '')
utils.removeDirectory(sequence_data_outdir)
os.mkdir(sequence_data_outdir)
sequences, headers = utils.get_sequence_information(reference_file, length_extra_seq)
pool = multiprocessing.Pool(processes=threads)
for sequence_counter in sequences:
sequence_dir = os.path.join(sequence_data_outdir, str(sequence_counter), '')
utils.removeDirectory(sequence_dir)
os.makedirs(sequence_dir)
pool.apply_async(analyse_sequence_data, args=(bam_file, sequences[sequence_counter], sequence_dir, sequence_counter, reference_file, length_extra_seq, minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele,))
pool.close()
pool.join()
run_successfully, sample_data, consensus_files = gather_data_together(sample, sequence_data_outdir, sequences, outdir.rsplit('/', 2)[0], debug_mode_true, length_extra_seq)
return run_successfully, sample_data, consensus_files
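# Generator: split a string into consecutive chunks of the given length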
def chunkstring(string, length):
return (string[0 + i:length + i] for i in range(0, len(string), length))
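# Collect the pickled per-sequence results into sample_data, computing per-gene coverage and identity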
def gather_data_together(sample, data_directory, sequences_information, outdir, debug_mode_true, length_extra_seq):
run_successfully = True
counter = 0
sample_data = {}
consensus_files = None
genes_directories = [d for d in os.listdir(data_directory) if not d.startswith('.') and os.path.isdir(os.path.join(data_directory, d, ''))]
for gene_dir in genes_directories:
gene_dir_path = os.path.join(data_directory, gene_dir, '')
files = [f for f in os.listdir(gene_dir_path) if not f.startswith('.') and os.path.isfile(os.path.join(gene_dir_path, f))]
for file_found in files:
if file_found.startswith('coverage_info.') and file_found.endswith('.pkl'):
file_path = os.path.join(gene_dir_path, file_found)
if run_successfully:
run_successfully, sequence_counter, multiple_alleles_found, count_absent, percentage_lowCoverage, meanCoverage, consensus_sequence, number_diferences = utils.extractVariableFromPickle(file_path)
gene_identity = 0
if sequences_information[sequence_counter]['length'] - 2 * length_extra_seq - count_absent > 0:
gene_identity = 100 - (float(number_diferences) / (sequences_information[sequence_counter]['length'] - 2 * length_extra_seq - count_absent)) * 100
sample_data[sequence_counter] = {'header': sequences_information[sequence_counter]['header'], 'gene_coverage': 100 - (float(count_absent) / (sequences_information[sequence_counter]['length'] - 2 * length_extra_seq)) * 100, 'gene_low_coverage': percentage_lowCoverage, 'gene_number_positions_multiple_alleles': multiple_alleles_found, 'gene_mean_read_coverage': meanCoverage, 'gene_identity': gene_identity}
counter += 1
if not debug_mode_true:
utils.removeDirectory(gene_dir_path)
if counter != len(sequences_information):
run_successfully = False
return run_successfully, sample_data, consensus_files
trueCoverage_timer = functools.partial(utils.timer, name='True coverage check')
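# Top-level check: map the reads, analyse the alignment data, write the trueCoverage report and compare it against the configured thresholds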
@trueCoverage_timer
def runTrueCoverage(sample, fastq_files, reference_file, threads, outdir, length_extra_seq, minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele, minimum_gene_coverage, conserved_True, debug_mode_true, numMapLoc, minimum_gene_identity, trueCoverage_config):
pass_qc = False
failing = {}
trueCoverage_folder = os.path.join(outdir, 'trueCoverage', '')
utils.removeDirectory(trueCoverage_folder)
os.mkdir(trueCoverage_folder)
# Map reads
run_successfully, bam_file, reference_file = mapping_reads(fastq_files, reference_file, threads, trueCoverage_folder, conserved_True, numMapLoc)
if run_successfully:
# Index reference file
run_successfully, stdout = index_fasta_samtools(reference_file, None, None, True)
if run_successfully:
print 'Analysing alignment data'
run_successfully, sample_data, consensus_files = sequence_data(sample, reference_file, bam_file, trueCoverage_folder, threads, length_extra_seq, minimum_depth_presence, minimum_depth_call, minimum_depth_frequency_dominant_allele, debug_mode_true)
if run_successfully:
print 'Writing report file'
number_absent_genes = 0
number_genes_multiple_alleles = 0
mean_sample_coverage = 0
with open(os.path.join(outdir, 'trueCoverage_report.txt'), 'wt') as writer:
writer.write('\t'.join(['#gene', 'percentage_gene_coverage', 'gene_mean_read_coverage', 'percentage_gene_low_coverage', 'number_positions_multiple_alleles', 'percentage_gene_identity']) + '\n')
for i in range(1, len(sample_data) + 1):
writer.write('\t'.join([sample_data[i]['header'], str(round(sample_data[i]['gene_coverage'], 2)), str(round(sample_data[i]['gene_mean_read_coverage'], 2)), str(round(sample_data[i]['gene_low_coverage'], 2)), str(sample_data[i]['gene_number_positions_multiple_alleles']), str(round(sample_data[i]['gene_identity'], 2))]) + '\n')
if sample_data[i]['gene_coverage'] < minimum_gene_coverage or sample_data[i]['gene_identity'] < minimum_gene_identity:
number_absent_genes += 1
else:
mean_sample_coverage += sample_data[i]['gene_mean_read_coverage']
if sample_data[i]['gene_number_positions_multiple_alleles'] > 0:
number_genes_multiple_alleles += 1
if len(sample_data) - number_absent_genes > 0:
mean_sample_coverage = float(mean_sample_coverage) / float(len(sample_data) - number_absent_genes)
else:
mean_sample_coverage = 0
writer.write('\n'.join(['#general', '>number_absent_genes', str(number_absent_genes), '>number_genes_multiple_alleles', str(number_genes_multiple_alleles), '>mean_sample_coverage', str(round(mean_sample_coverage, 2))]) + '\n')
print '\n'.join([str('number_absent_genes: ' + str(number_absent_genes)), str('number_genes_multiple_alleles: ' + str(number_genes_multiple_alleles)), str('mean_sample_coverage: ' + str(round(mean_sample_coverage, 2)))])
if number_absent_genes > trueCoverage_config['maximum_number_absent_genes']:
failing['absent_genes'] = 'The number of absent genes (' + str(number_absent_genes) + ') exceeds the maximum allowed (' + str(trueCoverage_config['maximum_number_absent_genes']) + ')'
if number_genes_multiple_alleles > trueCoverage_config['maximum_number_genes_multiple_alleles']:
failing['multiple_alleles'] = 'The number of genes with multiple alleles (' + str(number_genes_multiple_alleles) + ') exceeds the maximum allowed (' + str(trueCoverage_config['maximum_number_genes_multiple_alleles']) + ')'
if mean_sample_coverage < trueCoverage_config['minimum_read_coverage']:
                    failing['read_coverage'] = 'The mean read coverage for genes present (' + str(mean_sample_coverage) + ') did not meet the minimum required (' + str(trueCoverage_config['minimum_read_coverage']) + ')'
else:
failing['sample'] = 'Did not run'
else:
failing['sample'] = 'Did not run'
else:
failing['sample'] = 'Did not run'
if len(failing) == 0:
pass_qc = True
failing['sample'] = False
else:
print failing
if not debug_mode_true:
utils.removeDirectory(trueCoverage_folder)
return run_successfully, pass_qc, failing
| dorbarker/INNUca | modules/trueCoverage_rematch.py | Python | gpl-3.0 | 33,927 |
"""
Windows Meta File
"""
from construct import *
wmf_record = Struct("records",
ULInt32("size"), # size in words, including the size, function and params
Enum(ULInt16("function"),
AbortDoc = 0x0052,
Aldus_Header = 0x0001,
AnimatePalette = 0x0436,
Arc = 0x0817,
BitBlt = 0x0922,
Chord = 0x0830,
CLP_Header16 = 0x0002,
CLP_Header32 = 0x0003,
CreateBitmap = 0x06FE,
CreateBitmapIndirect = 0x02FD,
CreateBrush = 0x00F8,
CreateBrushIndirect = 0x02FC,
CreateFontIndirect = 0x02FB,
CreatePalette = 0x00F7,
CreatePatternBrush = 0x01F9,
CreatePenIndirect = 0x02FA,
CreateRegion = 0x06FF,
DeleteObject = 0x01F0,
DibBitblt = 0x0940,
DibCreatePatternBrush = 0x0142,
DibStretchBlt = 0x0B41,
DrawText = 0x062F,
Ellipse = 0x0418,
EndDoc = 0x005E,
EndPage = 0x0050,
EOF = 0x0000,
Escape = 0x0626,
ExcludeClipRect = 0x0415,
ExtFloodFill = 0x0548,
ExtTextOut = 0x0A32,
FillRegion = 0x0228,
FloodFill = 0x0419,
FrameRegion = 0x0429,
Header = 0x0004,
IntersectClipRect = 0x0416,
InvertRegion = 0x012A,
LineTo = 0x0213,
MoveTo = 0x0214,
OffsetClipRgn = 0x0220,
OffsetViewportOrg = 0x0211,
OffsetWindowOrg = 0x020F,
PaintRegion = 0x012B,
PatBlt = 0x061D,
Pie = 0x081A,
Polygon = 0x0324,
Polyline = 0x0325,
PolyPolygon = 0x0538,
RealizePalette = 0x0035,
Rectangle = 0x041B,
ResetDC = 0x014C,
ResizePalette = 0x0139,
RestoreDC = 0x0127,
RoundRect = 0x061C,
SaveDC = 0x001E,
ScaleViewportExt = 0x0412,
ScaleWindowExt = 0x0410,
SelectClipRegion = 0x012C,
SelectObject = 0x012D,
SelectPalette = 0x0234,
SetBKColor = 0x0201,
SetBKMode = 0x0102,
SetDibToDev = 0x0D33,
SelLayout = 0x0149,
SetMapMode = 0x0103,
SetMapperFlags = 0x0231,
SetPalEntries = 0x0037,
SetPixel = 0x041F,
SetPolyFillMode = 0x0106,
SetReLabs = 0x0105,
SetROP2 = 0x0104,
SetStretchBltMode = 0x0107,
SetTextAlign = 0x012E,
SetTextCharExtra = 0x0108,
SetTextColor = 0x0209,
SetTextJustification = 0x020A,
SetViewportExt = 0x020E,
SetViewportOrg = 0x020D,
SetWindowExt = 0x020C,
SetWindowOrg = 0x020B,
StartDoc = 0x014D,
StartPage = 0x004F,
StretchBlt = 0x0B23,
StretchDIB = 0x0F43,
TextOut = 0x0521,
_default_ = Pass,
),
Array(lambda ctx: ctx.size - 3, ULInt16("params")),
)
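# Optional 22-byte Aldus "placeable" header that may precede the standard WMF header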
wmf_placeable_header = Struct("placeable_header",
Const(ULInt32("key"), 0x9AC6CDD7),
ULInt16("handle"),
SLInt16("left"),
SLInt16("top"),
SLInt16("right"),
SLInt16("bottom"),
ULInt16("units_per_inch"),
Padding(4),
ULInt16("checksum")
)
wmf_file = Struct("wmf_file",
# --- optional placeable header ---
Optional(wmf_placeable_header),
# --- header ---
Enum(ULInt16("type"),
InMemory = 0,
File = 1,
),
Const(ULInt16("header_size"), 9),
ULInt16("version"),
ULInt32("size"), # file size is in words
ULInt16("number_of_objects"),
ULInt32("size_of_largest_record"),
ULInt16("number_of_params"),
# --- records ---
GreedyRange(wmf_record)
)
| PythEch/pymobiledevice | libs/python/construct/formats/graphics/wmf.py | Python | lgpl-3.0 | 3,535 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from window import MainWindow
win = MainWindow()
| CrackedP0t/ponyplayer | ponyplayer.py | Python | gpl-3.0 | 94 |
"""
Partial backport of Python 3.5's weakref module:
finalize (new in Python 3.4)
Backport modifications are marked with "XXX backport".
"""
from __future__ import absolute_import
import itertools
import sys
from weakref import ref
__all__ = ['finalize']
class finalize(object):
"""Class for finalization of weakrefable objects
finalize(obj, func, *args, **kwargs) returns a callable finalizer
object which will be called when obj is garbage collected. The
    first time the finalizer is called it evaluates func(*args, **kwargs)
and returns the result. After this the finalizer is dead, and
calling it just returns None.
When the program exits any remaining finalizers for which the
atexit attribute is true will be run in reverse order of creation.
By default atexit is true.
"""
# Finalizer objects don't have any state of their own. They are
# just used as keys to lookup _Info objects in the registry. This
# ensures that they cannot be part of a ref-cycle.
__slots__ = ()
_registry = {}
_shutdown = False
_index_iter = itertools.count()
_dirty = False
_registered_with_atexit = False
class _Info(object):
__slots__ = ("weakref", "func", "args", "kwargs", "atexit", "index")
def __init__(self, obj, func, *args, **kwargs):
if not self._registered_with_atexit:
# We may register the exit function more than once because
# of a thread race, but that is harmless
import atexit
atexit.register(self._exitfunc)
finalize._registered_with_atexit = True
info = self._Info()
info.weakref = ref(obj, self)
info.func = func
info.args = args
info.kwargs = kwargs or None
info.atexit = True
info.index = next(self._index_iter)
self._registry[self] = info
finalize._dirty = True
def __call__(self, _=None):
"""If alive then mark as dead and return func(*args, **kwargs);
otherwise return None"""
info = self._registry.pop(self, None)
if info and not self._shutdown:
return info.func(*info.args, **(info.kwargs or {}))
def detach(self):
"""If alive then mark as dead and return (obj, func, args, kwargs);
otherwise return None"""
info = self._registry.get(self)
obj = info and info.weakref()
if obj is not None and self._registry.pop(self, None):
return (obj, info.func, info.args, info.kwargs or {})
def peek(self):
"""If alive then return (obj, func, args, kwargs);
otherwise return None"""
info = self._registry.get(self)
obj = info and info.weakref()
if obj is not None:
return (obj, info.func, info.args, info.kwargs or {})
@property
def alive(self):
"""Whether finalizer is alive"""
return self in self._registry
@property
def atexit(self):
"""Whether finalizer should be called at exit"""
info = self._registry.get(self)
return bool(info) and info.atexit
@atexit.setter
def atexit(self, value):
info = self._registry.get(self)
if info:
info.atexit = bool(value)
def __repr__(self):
info = self._registry.get(self)
obj = info and info.weakref()
if obj is None:
return '<%s object at %#x; dead>' % (type(self).__name__, id(self))
else:
return '<%s object at %#x; for %r at %#x>' % \
(type(self).__name__, id(self), type(obj).__name__, id(obj))
@classmethod
def _select_for_exit(cls):
# Return live finalizers marked for exit, oldest first
L = [(f,i) for (f,i) in cls._registry.items() if i.atexit]
L.sort(key=lambda item:item[1].index)
return [f for (f,i) in L]
@classmethod
def _exitfunc(cls):
# At shutdown invoke finalizers for which atexit is true.
# This is called once all other non-daemonic threads have been
# joined.
reenable_gc = False
try:
if cls._registry:
import gc
if gc.isenabled():
reenable_gc = True
gc.disable()
pending = None
while True:
if pending is None or finalize._dirty:
pending = cls._select_for_exit()
finalize._dirty = False
if not pending:
break
f = pending.pop()
try:
# gc is disabled, so (assuming no daemonic
# threads) the following is the only line in
# this function which might trigger creation
# of a new finalizer
f()
except Exception:
sys.excepthook(*sys.exc_info())
assert f not in cls._registry
finally:
# prevent any more finalizers from executing during shutdown
finalize._shutdown = True
if reenable_gc:
gc.enable()
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/backports/weakref.py | Python | bsd-2-clause | 5,250 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals, division
from sc2reader.utils import Length
from sc2reader.events.base import Event
from sc2reader.log_utils import loggable
from itertools import chain
@loggable
class GameEvent(Event):
"""
This is the base class for all game events. The attributes below are universally available.
"""
def __init__(self, frame, pid):
#: The id of the player generating the event. This is 16 for global non-player events.
#: Prior to Heart of the Swarm this was the player id. Since HotS it is
#: now the user id (uid), we still call it pid for backwards compatibility. You shouldn't
#: ever need to use this; use :attr:`player` instead.
self.pid = pid
#: A reference to the :class:`~sc2reader.objects.Player` object representing
#: this player in the replay. Not available for global events (:attr:`is_local` = False)
self.player = None
#: The frame of the game that this event was recorded at. 16 frames per game second.
self.frame = frame
#: The second of the game that this event was recorded at. 16 frames per game second.
self.second = frame >> 4
#: A flag indicating if it is a local or global event.
self.is_local = pid != 16
#: Short cut string for event class name
self.name = self.__class__.__name__
def _str_prefix(self):
if getattr(self, "pid", 16) == 16:
player_name = "Global"
elif self.player and not self.player.name:
player_name = "Player {0} - ({1})".format(
self.player.pid, self.player.play_race
)
elif self.player:
player_name = self.player.name
else:
player_name = "no name"
return "{0}\t{1:<15} ".format(Length(seconds=int(self.frame / 16)), player_name)
def __str__(self):
return self._str_prefix() + self.name
class GameStartEvent(GameEvent):
"""
Recorded when the game starts and the frames start to roll. This is a global non-player
event.
"""
def __init__(self, frame, pid, data):
super(GameStartEvent, self).__init__(frame, pid)
#: ???
self.data = data
class PlayerLeaveEvent(GameEvent):
"""
Recorded when a player leaves the game.
"""
def __init__(self, frame, pid, data):
super(PlayerLeaveEvent, self).__init__(frame, pid)
#: ???
self.data = data
class UserOptionsEvent(GameEvent):
"""
This event is recorded for each player at the very beginning of the game before the
:class:`GameStartEvent`.
"""
def __init__(self, frame, pid, data):
super(UserOptionsEvent, self).__init__(frame, pid)
#:
self.game_fully_downloaded = data["game_fully_downloaded"]
#:
self.development_cheats_enabled = data["development_cheats_enabled"]
#:
self.multiplayer_cheats_enabled = data["multiplayer_cheats_enabled"]
#:
self.sync_checksumming_enabled = data["sync_checksumming_enabled"]
#:
self.is_map_to_map_transition = data["is_map_to_map_transition"]
#:
self.use_ai_beacons = data["use_ai_beacons"]
#: Are workers sent to auto-mine on game start
self.starting_rally = (
data["starting_rally"] if "starting_rally" in data else None
)
#:
self.debug_pause_enabled = data["debug_pause_enabled"]
#:
self.base_build_num = data["base_build_num"]
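# Factory: return the CommandEvent subclass matching the ability target type (None, TargetUnit, TargetPoint or Data)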
def create_command_event(frame, pid, data):
ability_type = data["data"][0]
if ability_type == "None":
return BasicCommandEvent(frame, pid, data)
elif ability_type == "TargetUnit":
return TargetUnitCommandEvent(frame, pid, data)
elif ability_type == "TargetPoint":
return TargetPointCommandEvent(frame, pid, data)
elif ability_type == "Data":
return DataCommandEvent(frame, pid, data)
@loggable
class CommandEvent(GameEvent):
"""
    Ability events are generated whenever a player in the game issues a command
to a unit or group of units. They are split into three subclasses of ability,
each with their own set of associated data. The attributes listed below are
shared across all ability event types.
See :class:`TargetPointCommandEvent`, :class:`TargetUnitCommandEvent`, and
:class:`DataCommandEvent` for individual details.
"""
def __init__(self, frame, pid, data):
super(CommandEvent, self).__init__(frame, pid)
#: Flags on the command???
self.flags = data["flags"]
#: A dictionary of possible ability flags. Flags are:
#:
#: * alternate
#: * queued
#: * preempt
#: * smart_click
#: * smart_rally
#: * subgroup
#: * set_autocast,
#: * set_autocast_on
#: * user
#: * data_a
#: * data_b
#: * data_passenger
#: * data_abil_queue_order_id,
#: * ai
#: * ai_ignore_on_finish
#: * is_order
#: * script
#: * homogenous_interruption,
#: * minimap
#: * repeat
#: * dispatch_to_other_unit
#: * target_self
#:
self.flag = dict(
alternate=0x1 & self.flags != 0,
queued=0x2 & self.flags != 0,
preempt=0x4 & self.flags != 0,
smart_click=0x8 & self.flags != 0,
smart_rally=0x10 & self.flags != 0,
subgroup=0x20 & self.flags != 0,
set_autocast=0x40 & self.flags != 0,
set_autocast_on=0x80 & self.flags != 0,
user=0x100 & self.flags != 0,
data_a=0x200 & self.flags != 0,
data_passenger=0x200 & self.flags != 0, # alt-name
data_b=0x400 & self.flags != 0,
data_abil_queue_order_id=0x400 & self.flags != 0, # alt-name
ai=0x800 & self.flags != 0,
ai_ignore_on_finish=0x1000 & self.flags != 0,
is_order=0x2000 & self.flags != 0,
script=0x4000 & self.flags != 0,
homogenous_interruption=0x8000 & self.flags != 0,
minimap=0x10000 & self.flags != 0,
repeat=0x20000 & self.flags != 0,
dispatch_to_other_unit=0x40000 & self.flags != 0,
target_self=0x80000 & self.flags != 0,
)
#: Flag marking that the command had ability information
self.has_ability = data["ability"] is not None
#: Link the the ability group
self.ability_link = data["ability"]["ability_link"] if self.has_ability else 0
#: The index of the ability in the ability group
self.command_index = (
data["ability"]["ability_command_index"] if self.has_ability else 0
)
#: Additional ability data.
self.ability_data = (
data["ability"]["ability_command_data"] if self.has_ability else 0
)
#: Unique identifier for the ability
self.ability_id = self.ability_link << 5 | self.command_index
#: A reference to the ability being used
self.ability = None
#: A shortcut to the name of the ability being used
self.ability_name = ""
#: The type of ability, one of: None (no target), TargetPoint, TargetUnit, or Data
self.ability_type = data["data"][0]
#: The raw data associated with this ability type
self.ability_type_data = data["data"][1]
#: Other unit id??
self.other_unit_id = data["other_unit_tag"]
#: A reference to the other unit
self.other_unit = None
def __str__(self):
string = self._str_prefix()
if self.has_ability:
string += "Ability ({0:X})".format(self.ability_id)
if self.ability:
string += " - {0}".format(self.ability.name)
else:
string += "Right Click"
if self.ability_type == "TargetUnit":
string += "; Target: {0} [{1:0>8X}]".format(
self.target.name, self.target_unit_id
)
if self.ability_type in ("TargetPoint", "TargetUnit"):
string += "; Location: {0}".format(str(self.location))
return string
class BasicCommandEvent(CommandEvent):
"""
Extends :class:`CommandEvent`
This event is recorded for events that have no extra information recorded.
Note that like all CommandEvents, the event will be recorded regardless
of whether or not the command was successful.
"""
def __init__(self, frame, pid, data):
super(BasicCommandEvent, self).__init__(frame, pid, data)
class TargetPointCommandEvent(CommandEvent):
"""
Extends :class:`CommandEvent`
    This event is recorded whenever a player issues a command that targets a location
and NOT a unit. Commands like Psistorm, Attack Move, Fungal Growth, and EMP fall
under this category.
Note that like all CommandEvents, the event will be recorded regardless
of whether or not the command was successful.
"""
def __init__(self, frame, pid, data):
super(TargetPointCommandEvent, self).__init__(frame, pid, data)
#: The x coordinate of the target. Available for TargetPoint and TargetUnit type events.
self.x = self.ability_type_data["point"].get("x", 0) / 4096.0
#: The y coordinate of the target. Available for TargetPoint and TargetUnit type events.
self.y = self.ability_type_data["point"].get("y", 0) / 4096.0
#: The z coordinate of the target. Available for TargetPoint and TargetUnit type events.
self.z = self.ability_type_data["point"].get("z", 0)
#: The location of the target. Available for TargetPoint and TargetUnit type events
self.location = (self.x, self.y, self.z)
class TargetUnitCommandEvent(CommandEvent):
"""
Extends :class:`CommandEvent`
    This event is recorded whenever a player issues a command that targets a unit.
The location of the target unit at the time of the command is also recorded. Commands like
Chronoboost, Transfuse, and Snipe fall under this category.
Note that like all CommandEvents, the event will be recorded regardless
of whether or not the command was successful.
"""
def __init__(self, frame, pid, data):
super(TargetUnitCommandEvent, self).__init__(frame, pid, data)
#: Flags set on the target unit. Available for TargetUnit type events
self.target_flags = self.ability_type_data.get("flags", None)
#: Timer?? Available for TargetUnit type events.
self.target_timer = self.ability_type_data.get("timer", None)
#: Unique id of the target unit. Available for TargetUnit type events.
#: This id can be 0 when the target unit is shrouded by fog of war.
self.target_unit_id = self.ability_type_data.get("unit_tag", None)
#: A reference to the targeted unit. When the :attr:`target_unit_id` is
#: 0 this target unit is a generic, reused fog of war unit of the :attr:`target_unit_type`
#: with an id of zero. It should not be confused with a real unit.
self.target_unit = None
#: Current integer type id of the target unit. Available for TargetUnit type events.
self.target_unit_type = self.ability_type_data.get("unit_link", None)
#: Integer player id of the controlling player. Available for TargetUnit type events starting in 19595.
#: When the targeted unit is under fog of war this id is zero.
self.control_player_id = self.ability_type_data.get("control_player_id", None)
#: Integer player id of the player paying upkeep. Available for TargetUnit type events.
self.upkeep_player_id = self.ability_type_data.get("upkeep_player_id", None)
#: The x coordinate of the target. Available for TargetPoint and TargetUnit type events.
self.x = self.ability_type_data["point"].get("x", 0) / 4096.0
#: The y coordinate of the target. Available for TargetPoint and TargetUnit type events.
self.y = self.ability_type_data["point"].get("y", 0) / 4096.0
#: The z coordinate of the target. Available for TargetPoint and TargetUnit type events.
self.z = self.ability_type_data["point"].get("z", 0)
#: The location of the target. Available for TargetPoint and TargetUnit type events
self.location = (self.x, self.y, self.z)
class UpdateTargetPointCommandEvent(TargetPointCommandEvent):
"""
Extends :class: 'TargetPointCommandEvent'
This event is generated when the user changes the point of a unit. Appears to happen
when a unit is moving and it is given a new command. It's possible there are other
instances of this occurring.
"""
name = "UpdateTargetPointCommandEvent"
class UpdateTargetUnitCommandEvent(TargetUnitCommandEvent):
"""
Extends :class:`TargetUnitCommandEvent`
This event is generated when a TargetUnitCommandEvent is updated, likely due to
changing the target unit. It is unclear if this needs to be a separate event
from TargetUnitCommandEvent, but for flexibility, it will be treated
differently.
One example of this event occurring is casting inject on a hatchery while
holding shift, and then shift clicking on a second hatchery.
"""
name = "UpdateTargetUnitCommandEvent"
class DataCommandEvent(CommandEvent):
"""
Extends :class:`CommandEvent`
    DataCommandEvents are recorded whenever a player issues a command that has no target. Commands
like Burrow, SeigeMode, Train XYZ, and Stop fall under this category.
Note that like all CommandEvents, the event will be recorded regardless
of whether or not the command was successful.
"""
def __init__(self, frame, pid, data):
super(DataCommandEvent, self).__init__(frame, pid, data)
#: Other target data. Available for Data type events.
self.target_data = self.ability_type_data.get("data", None)
@loggable
class CommandManagerStateEvent(GameEvent):
"""
These events indicated that the last :class:`CommandEvent` called has been
called again. For example, if you add three SCVs to an empty queue on a
Command Center, the first add will be generate a :class:`BasicCommandEvent`
and the two subsequent adds will each generate a
:class:`CommandManagerStateEvent`.
"""
def __init__(self, frame, pid, data):
super(CommandManagerStateEvent, self).__init__(frame, pid)
#: Always 1?
self.state = data["state"]
#: An index identifying how many events of this type have been called
self.sequence = data["sequence"]
@loggable
class SelectionEvent(GameEvent):
"""
    Selection events are generated whenever the active selection of the
player is updated. Unlike other game events, these events can also be
generated by non-player actions like unit deaths or transformations.
Starting in Starcraft 2.0.0, selection events targeting control group
buffers are also generated when control group selections are modified
by non-player actions. When a player action updates a control group
a :class:`ControlGroupEvent` is generated.
"""
def __init__(self, frame, pid, data):
super(SelectionEvent, self).__init__(frame, pid)
#: The control group being modified. 10 for active selection
self.control_group = data["control_group_index"]
#: Deprecated, use control_group
self.bank = self.control_group
#: ???
self.subgroup_index = data["subgroup_index"]
#: The type of mask to apply. One of None, Mask, OneIndices, ZeroIndices
self.mask_type = data["remove_mask"][0]
#: The data for the mask
self.mask_data = data["remove_mask"][1]
#: The unit type data for the new units
self.new_unit_types = [
(
d["unit_link"],
d["subgroup_priority"],
d["intra_subgroup_priority"],
d["count"],
)
for d in data["add_subgroups"]
]
#: The unit id data for the new units
self.new_unit_ids = data["add_unit_tags"]
# This stretches out the unit types and priorities to be zipped with ids.
unit_types = chain(
*[
[utype] * count
for (
utype,
subgroup_priority,
intra_subgroup_priority,
count,
) in self.new_unit_types
]
)
unit_subgroup_priorities = chain(
*[
[subgroup_priority] * count
for (
utype,
subgroup_priority,
intra_subgroup_priority,
count,
) in self.new_unit_types
]
)
unit_intra_subgroup_priorities = chain(
*[
[intra_subgroup_priority] * count
for (
utype,
subgroup_priority,
intra_subgroup_priority,
count,
) in self.new_unit_types
]
)
#: The combined type and id information for new units
self.new_unit_info = list(
zip(
self.new_unit_ids,
unit_types,
unit_subgroup_priorities,
unit_intra_subgroup_priorities,
)
)
#: A list of references to units added by this selection
self.new_units = None
#: Deprecated, see new_units
self.objects = None
def __str__(self):
if self.new_units:
return GameEvent.__str__(self) + str([str(u) for u in self.new_units])
else:
return GameEvent.__str__(self) + str([str(u) for u in self.new_unit_info])
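# Factory: return the ControlGroupEvent subclass for the given control group update type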
def create_control_group_event(frame, pid, data):
update_type = data["control_group_update"]
if update_type == 0:
return SetControlGroupEvent(frame, pid, data)
elif update_type == 1:
return AddToControlGroupEvent(frame, pid, data)
elif update_type == 2:
return GetControlGroupEvent(frame, pid, data)
elif update_type == 3:
# TODO: What could this be?!?
return ControlGroupEvent(frame, pid, data)
else:
# No idea what this is but we're seeing update_types of 4 and 5 in 3.0
return ControlGroupEvent(frame, pid, data)
@loggable
class ControlGroupEvent(GameEvent):
"""
    ControlGroup events are recorded whenever a player action modifies or accesses a control
group. There are three kinds of events, generated by each of the possible
player actions:
* :class:`SetControlGroup` - Recorded when a user sets a control group (ctrl+#).
* :class:`GetControlGroup` - Recorded when a user retrieves a control group (#).
* :class:`AddToControlGroup` - Recorded when a user adds to a control group (shift+ctrl+#)
All three events have the same set of data (shown below) but are interpreted differently.
See the class entry for details.
"""
def __init__(self, frame, pid, data):
super(ControlGroupEvent, self).__init__(frame, pid)
#: Index to the control group being modified
self.control_group = data["control_group_index"]
#: Deprecated, use control_group
self.bank = self.control_group
#: Deprecated, use control_group
self.hotkey = self.control_group
        #: The type of update being performed: 0 (set), 1 (add), 2 (get)
self.update_type = data["control_group_update"]
#: The type of mask to apply. One of None, Mask, OneIndices, ZeroIndices
self.mask_type = data["remove_mask"][0]
#: The data for the mask
self.mask_data = data["remove_mask"][1]
class SetControlGroupEvent(ControlGroupEvent):
"""
Extends :class:`ControlGroupEvent`
This event does a straight forward replace of the current control group contents
with the player's current selection. This event doesn't have masks set.
"""
class AddToControlGroupEvent(SetControlGroupEvent):
"""
Extends :class:`ControlGroupEvent`
This event adds the current selection to the control group.
"""
class GetControlGroupEvent(ControlGroupEvent):
"""
Extends :class:`ControlGroupEvent`
This event replaces the current selection with the contents of the control group.
The mask data is used to limit that selection to units that are currently selectable.
You might have 1 medivac and 8 marines on the control group but if the 8 marines are
inside the medivac they cannot be part of your selection.
"""
@loggable
class CameraEvent(GameEvent):
"""
    Camera events are generated whenever the player camera moves, zooms, or rotates.
It does not matter why the camera changed, this event simply records the current
state of the camera after changing.
"""
def __init__(self, frame, pid, data):
super(CameraEvent, self).__init__(frame, pid)
#: The x coordinate of the center of the camera
self.x = (data["target"]["x"] if data["target"] is not None else 0) / 256.0
#: The y coordinate of the center of the camera
self.y = (data["target"]["y"] if data["target"] is not None else 0) / 256.0
#: The location of the center of the camera
self.location = (self.x, self.y)
#: The distance to the camera target ??
self.distance = data["distance"]
#: The current pitch of the camera
self.pitch = data["pitch"]
#: The current yaw of the camera
self.yaw = data["yaw"]
def __str__(self):
return self._str_prefix() + "{0} at ({1}, {2})".format(
self.name, self.x, self.y
)
@loggable
class ResourceTradeEvent(GameEvent):
"""
    Generated when a player trades resources with another player, but not when
    fulfilling resource requests.
"""
def __init__(self, frame, pid, data):
super(ResourceTradeEvent, self).__init__(frame, pid)
#: The id of the player sending the resources
self.sender_id = pid
#: A reference to the player sending the resources
self.sender = None
#: The id of the player receiving the resources
self.recipient_id = data["recipient_id"]
#: A reference to the player receiving the resources
self.recipient = None
#: An array of resources sent
self.resources = data["resources"]
#: Amount minerals sent
self.minerals = self.resources[0] if len(self.resources) >= 1 else None
#: Amount vespene sent
self.vespene = self.resources[1] if len(self.resources) >= 2 else None
#: Amount terrazine sent
self.terrazine = self.resources[2] if len(self.resources) >= 3 else None
#: Amount custom resource sent
self.custom_resource = self.resources[3] if len(self.resources) >= 4 else None
def __str__(self):
return self._str_prefix() + " transfer {0} minerals, {1} gas, {2} terrazine, and {3} custom to {4}".format(
self.minerals,
self.vespene,
self.terrazine,
self.custom_resource,
self.recipient,
)
class ResourceRequestEvent(GameEvent):
"""
Generated when a player creates a resource request.
"""
def __init__(self, frame, pid, data):
super(ResourceRequestEvent, self).__init__(frame, pid)
#: An array of resources sent
self.resources = data["resources"]
#: Amount minerals sent
self.minerals = self.resources[0] if len(self.resources) >= 1 else None
#: Amount vespene sent
self.vespene = self.resources[1] if len(self.resources) >= 2 else None
#: Amount terrazine sent
        self.terrazine = self.resources[2] if len(self.resources) >= 3 else None
#: Amount custom resource sent
self.custom_resource = self.resources[3] if len(self.resources) >= 4 else None
def __str__(self):
return (
self._str_prefix()
+ " requests {0} minerals, {1} gas, {2} terrazine, and {3} custom".format(
self.minerals, self.vespene, self.terrazine, self.custom_resource
)
)
class ResourceRequestFulfillEvent(GameEvent):
"""
Generated when a player accepts a resource request.
"""
def __init__(self, frame, pid, data):
super(ResourceRequestFulfillEvent, self).__init__(frame, pid)
#: The id of the request being fulfilled
self.request_id = data["request_id"]
class ResourceRequestCancelEvent(GameEvent):
"""
Generated when a player cancels their resource request.
"""
def __init__(self, frame, pid, data):
super(ResourceRequestCancelEvent, self).__init__(frame, pid)
#: The id of the request being cancelled
self.request_id = data["request_id"]
class HijackReplayGameEvent(GameEvent):
"""
Generated when players take over from a replay.
"""
def __init__(self, frame, pid, data):
super(HijackReplayGameEvent, self).__init__(frame, pid)
#: The method used. Not sure what 0/1 represent
self.method = data["method"]
#: Information on the users hijacking the game
self.user_infos = data["user_infos"]
| ggtracker/sc2reader | sc2reader/events/game.py | Python | mit | 25,673 |
'''
umount.py - this file is part of S3QL (http://s3ql.googlecode.com)
Copyright (C) 2008-2009 Nikolaus Rath <[email protected]>
This program can be distributed under the terms of the GNU GPLv3.
'''
from __future__ import division, print_function, absolute_import
from .common import CTRL_NAME, setup_logging
from .parse_args import ArgumentParser
import llfuse
import logging
import os
import posixpath
import subprocess
import sys
import errno
import textwrap
import time
log = logging.getLogger("umount")
def parse_args(args):
'''Parse command line
    This function writes to stdout/stderr and may call `sys.exit()` instead
of throwing an exception if it encounters errors.
'''
parser = ArgumentParser(
description=textwrap.dedent('''\
Unmounts an S3QL file system. The command returns only after all data
has been uploaded to the backend.'''))
parser.add_debug()
parser.add_quiet()
parser.add_version()
parser.add_argument("mountpoint", metavar='<mountpoint>',
type=(lambda x: x.rstrip('/')),
help='Mount point to un-mount')
parser.add_argument('--lazy', "-z", action="store_true", default=False,
help="Lazy umount. Detaches the file system immediately, even if there "
'are still open files. The data will be uploaded in the background '
'once all open files have been closed.')
return parser.parse_args(args)
class UmountError(Exception):
"""
Base class for unmount errors.
"""
message = 'internal error'
exitcode = 3
def __init__(self, mountpoint):
super(UmountError, self).__init__()
self.mountpoint = mountpoint
def __str__(self):
return self.message
class NotMountPointError(UmountError):
message = 'Not a mountpoint.'
exitcode = 1
class NotS3qlFsError(UmountError):
message = 'Not an S3QL file system.'
exitcode = 2
class UmountSubError(UmountError):
message = 'Unmount subprocess failed.'
exitcode = 3
class MountInUseError(UmountError):
message = 'In use.'
exitcode = 4
class FSCrashedError(UmountError):
message = 'File system seems to have crashed.'
exitcode = 5
def check_mount(mountpoint):
'''Check that "mountpoint" is a mountpoint and a valid s3ql fs'''
try:
os.stat(mountpoint)
except OSError as exc:
        if exc.errno == errno.ENOTCONN:
raise FSCrashedError(mountpoint)
raise
if not posixpath.ismount(mountpoint):
raise NotMountPointError(mountpoint)
ctrlfile = os.path.join(mountpoint, CTRL_NAME)
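    # The control file is hidden from listdir() but can still be stat'ed; this
    # combination identifies a live S3QL file system.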
if not (
CTRL_NAME not in llfuse.listdir(mountpoint) and
os.path.exists(ctrlfile)
):
raise NotS3qlFsError(mountpoint)
def lazy_umount(mountpoint):
'''Invoke fusermount -u -z for mountpoint'''
if os.getuid() == 0:
umount_cmd = ('umount', '-l', mountpoint)
else:
umount_cmd = ('fusermount', '-u', '-z', mountpoint)
if subprocess.call(umount_cmd) != 0:
        raise UmountSubError(mountpoint)
def blocking_umount(mountpoint):
'''Invoke fusermount and wait for daemon to terminate.'''
devnull = open('/dev/null', 'wb')
if subprocess.call(
['fuser', '-m', mountpoint], stdout=devnull, stderr=devnull
) == 0:
raise MountInUseError(mountpoint)
ctrlfile = os.path.join(mountpoint, CTRL_NAME)
log.debug('Flushing cache...')
llfuse.setxattr(ctrlfile, b's3ql_flushcache!', b'dummy')
# Get pid
log.debug('Trying to get pid')
pid = int(llfuse.getxattr(ctrlfile, b's3ql_pid?'))
log.debug('PID is %d', pid)
# Get command line to make race conditions less-likely
with open('/proc/%d/cmdline' % pid, 'r') as fh:
cmdline = fh.readline()
log.debug('cmdline is %r', cmdline)
# Unmount
log.debug('Unmounting...')
# This seems to be necessary to prevent weird busy errors
time.sleep(3)
if os.getuid() == 0:
umount_cmd = ['umount', mountpoint]
else:
umount_cmd = ['fusermount', '-u', mountpoint]
if subprocess.call(umount_cmd) != 0:
        raise UmountSubError(mountpoint)
# Wait for daemon
log.debug('Uploading metadata...')
step = 0.5
while True:
try:
os.kill(pid, 0)
except OSError:
log.debug('Kill failed, assuming daemon has quit.')
break
# Check that the process did not terminate and the PID
# was reused by a different process
try:
with open('/proc/%d/cmdline' % pid, 'r') as fh:
if fh.readline() != cmdline:
log.debug('PID still alive, but cmdline changed')
# PID must have been reused, original process terminated
break
else:
log.debug('PID still alive and commandline unchanged.')
except OSError:
# Process must have exited by now
log.debug('Reading cmdline failed, assuming daemon has quit.')
break
# Process still exists, we wait
log.debug('Daemon seems to be alive, waiting...')
time.sleep(step)
if step < 10:
step *= 2
def umount(mountpoint, lazy=False):
'''Umount "mountpoint", blocks if not "lazy".'''
check_mount(mountpoint)
if lazy:
lazy_umount(mountpoint)
else:
blocking_umount(mountpoint)
def main(args=None):
'''Umount S3QL file system'''
if args is None:
args = sys.argv[1:]
options = parse_args(args)
setup_logging(options)
try:
umount(options.mountpoint, options.lazy)
except MountInUseError as err:
print('Cannot unmount, the following processes still access the mountpoint:',
file=sys.stderr)
subprocess.call(['fuser', '-v', '-m', options.mountpoint],
stdout=sys.stderr, stderr=sys.stderr)
sys.exit(err.exitcode)
except FSCrashedError as err:
print('%s: %s' % (options.mountpoint, err), file=sys.stderr)
print("Unmounting with the 'umount' or 'fusermount -u' command may help "
"in this situation.")
sys.exit(err.exitcode)
except UmountError as err:
print('%s: %s' % (options.mountpoint, err), file=sys.stderr)
sys.exit(err.exitcode)
sys.exit(0)
if __name__ == '__main__':
main(sys.argv[1:])
| thefirstwind/s3qloss | src/s3ql/umount.py | Python | gpl-3.0 | 6,539 |
# Copyright 2021 National Research Foundation (SARAO)
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ctypes
import gc
import struct
import threading
import time
import weakref
import numba
from numba import types
import numpy as np
import scipy
import pytest
import spead2
from spead2.numba import intp_to_voidptr
import spead2.recv as recv
from spead2.recv.numba import chunk_place_data
import spead2.send as send
HEAP_PAYLOAD_SIZE = 1024
HEAPS_PER_CHUNK = 10
CHUNK_PAYLOAD_SIZE = HEAPS_PER_CHUNK * HEAP_PAYLOAD_SIZE
# These are only applicable to packet presence mode tests
PACKETS_PER_HEAP = 2
PACKET_SIZE = HEAP_PAYLOAD_SIZE // PACKETS_PER_HEAP
def check_refcount(objlist):
"""Check that the objects in the list do not have any other references.
This is done by making sure that they are garbage collected after removal
from the list. The caller must make sure to delete any other references
(e.g. from local variables) before calling. The list is destroyed in the
process.
"""
while objlist:
weak = weakref.ref(objlist.pop())
# pypy needs multiple garbage collection passes in some cases
for i in range(10):
gc.collect()
assert weak() is None
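# A minimal usage sketch (names here are illustrative, not from the test suite):
# the caller keeps the only remaining references in a list and drops its locals
# first, exactly as the tests below do with their `objlist` pattern:
#
#   objlist = [some_object]        # hypothetical object under test
#   del some_object                # drop the local reference
#   check_refcount(objlist)        # passes only if nothing else holds a reference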
user_data_type = types.Record.make_c_struct([
('scale', types.int_), # A scale applied to heap_index
('placed_heaps_index', types.uintp) # Index at which to update stats
])
@numba.cfunc(types.void(types.CPointer(chunk_place_data), types.uintp), nopython=True)
def place_plain(data_ptr, data_size):
data = numba.carray(data_ptr, 1)
items = numba.carray(intp_to_voidptr(data[0].items), 2, dtype=np.int64)
heap_cnt = items[0]
payload_size = items[1]
if payload_size == HEAP_PAYLOAD_SIZE:
data[0].chunk_id = heap_cnt // HEAPS_PER_CHUNK
data[0].heap_index = heap_cnt % HEAPS_PER_CHUNK
data[0].heap_offset = data[0].heap_index * HEAP_PAYLOAD_SIZE
@numba.cfunc(
types.void(types.CPointer(chunk_place_data), types.uintp, types.CPointer(user_data_type)),
nopython=True)
def place_bind(data_ptr, data_size, user_data_ptr):
# Takes a np.int_ in via user_data to scale the heap index.
data = numba.carray(data_ptr, 1)
items = numba.carray(intp_to_voidptr(data[0].items), 3, dtype=np.int64)
heap_cnt = items[0]
payload_size = items[1]
packet_size = items[2]
user_data = numba.carray(user_data_ptr, 1)
if payload_size == HEAP_PAYLOAD_SIZE and packet_size == PACKET_SIZE:
data[0].chunk_id = heap_cnt // HEAPS_PER_CHUNK
heap_index = heap_cnt % HEAPS_PER_CHUNK
data[0].heap_index = heap_index * user_data[0].scale
data[0].heap_offset = heap_index * HEAP_PAYLOAD_SIZE
batch_stats = numba.carray(intp_to_voidptr(data[0].batch_stats),
user_data[0].placed_heaps_index + 1, dtype=np.uint64)
batch_stats[user_data[0].placed_heaps_index] += 1
# ctypes doesn't distinguish equivalent integer types, so we have to
# specify the signature explicitly.
place_plain_llc = scipy.LowLevelCallable(place_plain.ctypes, signature='void (void *, size_t)')
place_bind_llc = scipy.LowLevelCallable(
place_bind.ctypes, signature='void (void *, size_t, void *)')
class TestChunkStreamConfig:
def test_default_construct(self):
config = recv.ChunkStreamConfig()
assert config.items == []
assert config.max_chunks == config.DEFAULT_MAX_CHUNKS
assert config.place is None
assert config.packet_presence_payload_size == 0
def test_zero_max_chunks(self):
config = recv.ChunkStreamConfig()
with pytest.raises(ValueError):
config.max_chunks = 0
def test_set_place_none(self):
config = recv.ChunkStreamConfig(place=None)
assert config.place is None
def test_set_place_empty_tuple(self):
with pytest.raises(IndexError):
recv.ChunkStreamConfig(place=())
def test_set_place_non_tuple(self):
with pytest.raises(TypeError):
recv.ChunkStreamConfig(place=1)
def test_set_place_non_capsule(self):
with pytest.raises(TypeError):
recv.ChunkStreamConfig(place=(1,))
def test_set_place_bad_signature(self):
place = scipy.LowLevelCallable(place_plain.ctypes, signature='void (void)')
# One might expect TypeError, but ValueError is what scipy uses for
# invalid signatures.
with pytest.raises(ValueError):
recv.ChunkStreamConfig(place=place)
def test_set_place_plain(self):
config = recv.ChunkStreamConfig(place=place_plain_llc)
assert config.place == place_plain_llc
def test_set_place_bind(self):
config = recv.ChunkStreamConfig(place=place_bind_llc)
assert config.place == place_bind_llc
def make_chunk(label="a"):
return MyChunk(label, data=bytearray(10), present=bytearray(1))
class TestChunk:
def test_default_construct(self):
chunk = recv.Chunk()
assert chunk.chunk_id == -1
assert chunk.present is None
assert chunk.data is None
def test_set_properties(self):
buf1 = np.zeros(10, np.uint8)
buf2 = np.zeros(20, np.uint8)
chunk = recv.Chunk()
chunk.chunk_id = 123
chunk.present = buf1
chunk.data = buf2
assert chunk.chunk_id == 123
assert chunk.present is buf1
assert chunk.data is buf2
chunk.present = None
chunk.data = None
# Check that we didn't leak any references
objlist = [buf1, buf2]
del buf1, buf2
check_refcount(objlist)
class MyChunk(recv.Chunk):
"""Subclasses Chunk to carry extra metadata."""
def __init__(self, label, **kwargs):
super().__init__(**kwargs)
self.label = label
class TestChunkRingbuffer:
@pytest.fixture
def chunk_ringbuffer(self):
return recv.ChunkRingbuffer(3)
def test_missing_buffers(self, chunk_ringbuffer):
with pytest.raises(ValueError):
chunk_ringbuffer.put(MyChunk("a", present=bytearray(1)))
with pytest.raises(ValueError):
chunk_ringbuffer.put(MyChunk("a", data=bytearray(1)))
def test_qsize(self, chunk_ringbuffer):
"""Test qsize, maxsize, empty and full."""
assert chunk_ringbuffer.empty()
assert not chunk_ringbuffer.full()
assert chunk_ringbuffer.qsize() == 0
assert chunk_ringbuffer.maxsize == 3
chunk_ringbuffer.put(make_chunk())
assert not chunk_ringbuffer.empty()
assert not chunk_ringbuffer.full()
assert chunk_ringbuffer.qsize() == 1
for label in ["b", "c"]:
chunk_ringbuffer.put(make_chunk(label))
assert not chunk_ringbuffer.empty()
assert chunk_ringbuffer.full()
assert chunk_ringbuffer.qsize() == 3
for i in range(3):
chunk_ringbuffer.get()
assert chunk_ringbuffer.empty()
@pytest.mark.parametrize('method', [recv.ChunkRingbuffer.get, recv.ChunkRingbuffer.get_nowait])
def test_stop(self, chunk_ringbuffer, method):
chunk_ringbuffer.put(make_chunk())
chunk_ringbuffer.stop()
chunk_ringbuffer.get() # Should get the item in the queue
with pytest.raises(spead2.Stopped):
method(chunk_ringbuffer)
def test_round_trip(self, chunk_ringbuffer):
chunk = make_chunk()
data = chunk.data
present = chunk.present
chunk_ringbuffer.put(chunk)
out_chunk = chunk_ringbuffer.get()
assert out_chunk is chunk
assert out_chunk.label == "a"
assert out_chunk.data is data
assert out_chunk.present is present
# Check that we haven't leaked a reference
objlist = [chunk]
del chunk, out_chunk
check_refcount(objlist)
def test_get_nowait(self, chunk_ringbuffer):
with pytest.raises(spead2.Empty):
chunk_ringbuffer.get_nowait()
chunk = make_chunk()
chunk_ringbuffer.put(chunk)
assert chunk_ringbuffer.get_nowait() is chunk
def test_put_nowait(self, chunk_ringbuffer):
for label in ["a", "b", "c"]:
chunk_ringbuffer.put_nowait(make_chunk(label))
d = make_chunk("d")
with pytest.raises(spead2.Full):
chunk_ringbuffer.put_nowait(d)
# put_nowait moves the data into a chunk_wrapper. Check that a failed
# put has no side effects.
assert d.data is not None
assert d.present is not None
for label in ["a", "b", "c"]:
assert chunk_ringbuffer.get().label == label
def test_iterate(self, chunk_ringbuffer):
for label in ["a", "b", "c"]:
chunk_ringbuffer.put_nowait(make_chunk(label))
chunk_ringbuffer.stop()
out_labels = [chunk.label for chunk in chunk_ringbuffer]
assert out_labels == ["a", "b", "c"]
def test_block(self, chunk_ringbuffer):
def thread_code():
time.sleep(0.05)
chunk_ringbuffer.put(make_chunk())
thread = threading.Thread(target=thread_code)
thread.start()
with pytest.raises(spead2.Empty):
chunk_ringbuffer.get_nowait()
chunk = chunk_ringbuffer.get()
assert chunk.label == "a"
thread.join()
def test_add_remove_producer(self, chunk_ringbuffer):
chunk_ringbuffer.add_producer()
chunk_ringbuffer.add_producer()
# Removing first producer should not stop the ringbuffer
assert not chunk_ringbuffer.remove_producer()
with pytest.raises(spead2.Empty):
chunk_ringbuffer.get_nowait()
# Removing the second should stop it
assert chunk_ringbuffer.remove_producer()
with pytest.raises(spead2.Stopped):
chunk_ringbuffer.get_nowait()
class TestChunkRingStream:
@pytest.fixture
def data_ring(self):
return spead2.recv.ChunkRingbuffer(4)
@pytest.fixture
def free_ring(self):
ring = spead2.recv.ChunkRingbuffer(4)
while not ring.full():
ring.put(
recv.Chunk(
present=np.zeros(HEAPS_PER_CHUNK, np.uint8),
data=np.zeros(CHUNK_PAYLOAD_SIZE, np.uint8)
)
)
return ring
@pytest.fixture
def item_group(self):
ig = spead2.send.ItemGroup()
ig.add_item(0x1000, 'position', 'position in stream', (), format=[('u', 32)])
ig.add_item(0x1001, 'payload', 'payload data', (HEAP_PAYLOAD_SIZE,), dtype=np.uint8)
return ig
@pytest.fixture
def queue(self):
return spead2.InprocQueue()
@pytest.fixture
def recv_stream(self, data_ring, free_ring, queue):
stream = spead2.recv.ChunkRingStream(
spead2.ThreadPool(),
# max_heaps is artificially high to make test_packet_too_old work
spead2.recv.StreamConfig(max_heaps=128),
spead2.recv.ChunkStreamConfig(
items=[0x1000, spead2.HEAP_LENGTH_ID],
max_chunks=4,
place=place_plain_llc
),
data_ring,
free_ring
)
stream.add_inproc_reader(queue)
yield stream
stream.stop()
@pytest.fixture
def send_stream(self, queue):
return send.InprocStream(spead2.ThreadPool(), [queue], send.StreamConfig())
def make_heap_payload(self, position):
"""Construct payload data for a heap.
The payload is constructed in 4-byte pieces, where the first two
bytes are `position` and the second two increment with each 4-byte
piece in the packet.
"""
heap_payload = np.zeros(HEAP_PAYLOAD_SIZE // 2, np.uint16)
heap_payload[0::2] = position
heap_payload[1::2] = range(HEAP_PAYLOAD_SIZE // 4)
return heap_payload.view(np.uint8)
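    # Worked example of the layout described above: for position == 3 (with
    # HEAP_PAYLOAD_SIZE == 1024) the uint16 view of the payload begins
    # [3, 0, 3, 1, 3, 2, ...], i.e. each 4-byte piece is the position followed
    # by a counter running from 0 to HEAP_PAYLOAD_SIZE // 4 - 1.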
@pytest.mark.parametrize('send_end', [True, False])
def test_cleanup(self, send_stream, recv_stream, item_group, send_end):
"""Send some heaps and don't retrieve the chunks, making sure cleanup works."""
send_stream.send_heap(item_group.get_heap(descriptors='all', data='none'))
for i in range(1000):
item_group['position'].value = i
item_group['payload'].value = self.make_heap_payload(i)
send_stream.send_heap(item_group.get_heap(descriptors='none', data='all'))
if send_end:
send_stream.send_heap(item_group.get_end())
def send_heaps(self, send_stream, item_group, positions):
"""Send heaps with given numbers."""
send_stream.send_heap(item_group.get_heap(descriptors='all', data='none'))
for i in positions:
item_group['position'].value = i
item_group['payload'].value = self.make_heap_payload(i)
send_stream.send_heap(item_group.get_heap(descriptors='none', data='all'))
send_stream.send_heap(item_group.get_end())
def check_chunk(self, chunk, expected_chunk_id, expected_present):
"""Validate a chunk."""
assert chunk.chunk_id == expected_chunk_id
assert chunk.present.dtype == np.dtype(np.uint8)
np.testing.assert_equal(chunk.present, expected_present)
for i, p in enumerate(chunk.present):
if p:
position = chunk.chunk_id * HEAPS_PER_CHUNK + i
np.testing.assert_equal(
chunk.data[i * HEAP_PAYLOAD_SIZE : (i + 1) * HEAP_PAYLOAD_SIZE],
self.make_heap_payload(position)
)
def check_chunk_packets(self, chunk, expected_chunk_id, expected_present):
"""Validate a chunk from test_packet_presence."""
assert chunk.chunk_id == expected_chunk_id
assert chunk.present.dtype == np.dtype(np.uint8)
np.testing.assert_equal(chunk.present, expected_present)
for i, p in enumerate(chunk.present):
if p:
heap_index = chunk.chunk_id * HEAPS_PER_CHUNK + i // PACKETS_PER_HEAP
packet_index = i % PACKETS_PER_HEAP
start = packet_index * PACKET_SIZE
end = (packet_index + 1) * PACKET_SIZE
np.testing.assert_equal(
chunk.data[i * PACKET_SIZE : (i + 1) * PACKET_SIZE],
self.make_heap_payload(heap_index)[start:end]
)
def test_basic(self, send_stream, recv_stream, item_group):
n_heaps = 103
self.send_heaps(send_stream, item_group, range(n_heaps))
seen = 0
for i, chunk in enumerate(recv_stream.data_ringbuffer):
expected_present = np.ones(HEAPS_PER_CHUNK, np.uint8)
if i == n_heaps // HEAPS_PER_CHUNK:
# It's the last chunk
expected_present[n_heaps % HEAPS_PER_CHUNK :] = 0
self.check_chunk(chunk, i, expected_present)
seen += 1
recv_stream.add_free_chunk(chunk)
assert seen == n_heaps // HEAPS_PER_CHUNK + 1
def test_out_of_order(self, send_stream, recv_stream, item_group):
"""Test some heaps out of chunk order, but within the window."""
pos = [37, 7, 27, 47, 17, 87, 57, 77, 67]
self.send_heaps(send_stream, item_group, pos)
seen = 0
expected_present = np.array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0], np.uint8)
for i, chunk in enumerate(recv_stream.data_ringbuffer):
self.check_chunk(chunk, i, expected_present)
seen += 1
recv_stream.add_free_chunk(chunk)
assert seen == len(pos)
def test_jump(self, send_stream, recv_stream, item_group):
"""Test discontiguous jump in chunks."""
pos = [100, 200]
expected_chunks = [7, 8, 9, 10, 17, 18, 19, 20]
self.send_heaps(send_stream, item_group, pos)
seen = 0
for i, chunk in enumerate(recv_stream.data_ringbuffer):
expected_present = np.zeros(HEAPS_PER_CHUNK, np.uint8)
if expected_chunks[i] * HEAPS_PER_CHUNK in pos:
expected_present[0] = 1
self.check_chunk(chunk, expected_chunks[i], expected_present)
seen += 1
recv_stream.add_free_chunk(chunk)
assert seen == len(expected_chunks)
def test_heap_too_old(self, send_stream, recv_stream, item_group):
"""Test a heap arriving too late."""
pos = list(range(10, 41)) + [0] + list(range(41, 50)) # 0 is just too late
self.send_heaps(send_stream, item_group, pos)
seen = 0
for i, chunk in enumerate(recv_stream.data_ringbuffer):
expected_present = np.ones(HEAPS_PER_CHUNK, np.uint8)
if i == 0:
expected_present[:] = 0
self.check_chunk(chunk, i, expected_present)
seen += 1
recv_stream.add_free_chunk(chunk)
assert seen == 5 # Will see chunk 0 with no heaps, but won't see it again
recv_stream.stop() # Ensure that stats are brought up to date
assert recv_stream.stats["too_old_heaps"] == 1
assert recv_stream.stats["rejected_heaps"] == 2 # Descriptors and stop heap
def test_shared_ringbuffer(self, send_stream, recv_stream, item_group):
recv_stream2 = spead2.recv.ChunkRingStream(
spead2.ThreadPool(),
spead2.recv.StreamConfig(stream_id=1),
spead2.recv.ChunkStreamConfig(
items=[0x1000, spead2.HEAP_LENGTH_ID],
max_chunks=4,
place=place_plain_llc
),
recv_stream.data_ringbuffer,
recv_stream.free_ringbuffer
)
queue2 = spead2.InprocQueue()
recv_stream2.add_inproc_reader(queue2)
send_stream2 = send.InprocStream(spead2.ThreadPool(), [queue2], send.StreamConfig())
n_heaps = [17, 23]
self.send_heaps(send_stream, item_group, range(n_heaps[0]))
self.send_heaps(send_stream2, item_group, range(n_heaps[1]))
seen = [0, 0]
for chunk in recv_stream.data_ringbuffer:
assert 0 <= chunk.stream_id < 2
expected_present = np.ones(HEAPS_PER_CHUNK, np.uint8)
if chunk.chunk_id == n_heaps[chunk.stream_id] // HEAPS_PER_CHUNK:
# It's the last chunk for the stream
expected_present[n_heaps[chunk.stream_id] % HEAPS_PER_CHUNK :] = 0
self.check_chunk(chunk, seen[chunk.stream_id], expected_present)
seen[chunk.stream_id] += 1
recv_stream.add_free_chunk(chunk)
assert seen[0] == n_heaps[0] // HEAPS_PER_CHUNK + 1
assert seen[1] == n_heaps[1] // HEAPS_PER_CHUNK + 1
def test_missing_place_callback(self, data_ring, free_ring):
with pytest.raises(ValueError):
spead2.recv.ChunkRingStream(
spead2.ThreadPool(),
spead2.recv.StreamConfig(),
spead2.recv.ChunkStreamConfig(items=[0x1000, spead2.HEAP_LENGTH_ID]),
data_ring,
free_ring
)
def make_packet(self, position, start, end):
"""Construct a single packet.
Parameters
----------
position
Value of the "position" immediate item
start
First heap payload byte in this packet
end
Last heap payload byte in this packet (exclusive)
"""
assert 0 <= start < end <= HEAP_PAYLOAD_SIZE
heap_payload = self.make_heap_payload(position)
parts = [
# Magic, version, item ID bytes, heap address bytes, flags, number of items
struct.pack('>BBBBHH', 0x53, 4, 2, 6, 0, 6),
# Item ID (and immediate flag), item value/offset
struct.pack('>HxxI', 0x8000 | spead2.HEAP_CNT_ID, position),
struct.pack('>HxxI', 0x8000 | spead2.PAYLOAD_OFFSET_ID, start),
struct.pack('>HxxI', 0x8000 | spead2.PAYLOAD_LENGTH_ID, end - start),
struct.pack('>HxxI', 0x8000 | spead2.HEAP_LENGTH_ID, HEAP_PAYLOAD_SIZE),
struct.pack('>HxxI', 0x8000 | 0x1000, position),
struct.pack('>HxxI', 0x1001, 0),
heap_payload[start:end].tobytes()
]
return b''.join(parts)
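    # Hedged usage sketch (mirrors the calls in the tests below, no new
    # behaviour assumed): a heap may arrive as one packet covering the whole
    # payload, or split into PACKETS_PER_HEAP pieces:
    #
    #   queue.add_packet(self.make_packet(5, 0, HEAP_PAYLOAD_SIZE))            # whole heap
    #   queue.add_packet(self.make_packet(5, 0, PACKET_SIZE))                  # first half
    #   queue.add_packet(self.make_packet(5, PACKET_SIZE, 2 * PACKET_SIZE))    # second half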
def test_packet_too_old(self, recv_stream, queue):
"""Test a packet that adds to an existing heap whose chunk was already aged out."""
# Start a heap
queue.add_packet(self.make_packet(0, 0, 100))
# Age out the chunk by making a new one and filling it
for pos in range(40, 50):
queue.add_packet(self.make_packet(pos, 0, HEAP_PAYLOAD_SIZE))
# Finish the heap we started earlier
queue.add_packet(self.make_packet(0, 100, HEAP_PAYLOAD_SIZE))
# Add another chunk, so that we can validate that we didn't just stop
# with heap 0.
for pos in range(50, 60):
queue.add_packet(self.make_packet(pos, 0, HEAP_PAYLOAD_SIZE))
queue.stop()
seen = 0
for i, chunk in enumerate(recv_stream.data_ringbuffer):
expected_present = np.zeros(HEAPS_PER_CHUNK, np.uint8)
if i >= 4:
expected_present[:] = 1
self.check_chunk(chunk, i, expected_present)
seen += 1
recv_stream.add_free_chunk(chunk)
assert seen == 6
def test_packet_presence(self, data_ring, queue):
"""Test packet presence feature."""
# Each heap is split into two packets. Create a free ring where the
# chunks have space for this.
free_ring = spead2.recv.ChunkRingbuffer(4)
while not free_ring.full():
free_ring.put(
recv.Chunk(
present=np.zeros(HEAPS_PER_CHUNK * PACKETS_PER_HEAP, np.uint8),
data=np.zeros(CHUNK_PAYLOAD_SIZE, np.uint8)
)
)
stream_config = spead2.recv.StreamConfig(max_heaps=128, allow_out_of_order=True)
# Note: user_data is deliberately not assigned to a local variable, so
# that reference-counting errors are more likely to be detected.
user_data = np.zeros(1, dtype=user_data_type.dtype)
user_data["scale"] = PACKETS_PER_HEAP
user_data["placed_heaps_index"] = stream_config.add_stat("placed_heaps")
place_bind_llc = scipy.LowLevelCallable(
place_bind.ctypes,
user_data=user_data.ctypes.data_as(ctypes.c_void_p),
signature='void (void *, size_t, void *)')
stream = spead2.recv.ChunkRingStream(
spead2.ThreadPool(),
stream_config,
spead2.recv.ChunkStreamConfig(
items=[0x1000, spead2.HEAP_LENGTH_ID, spead2.PAYLOAD_LENGTH_ID],
max_chunks=4,
place=place_bind_llc,
).enable_packet_presence(PACKET_SIZE),
data_ring,
free_ring
)
stream.add_inproc_reader(queue)
queue.add_packet(self.make_packet(4, 0, PACKET_SIZE))
queue.add_packet(self.make_packet(4, PACKET_SIZE, 2 * PACKET_SIZE))
queue.add_packet(self.make_packet(17, PACKET_SIZE, 2 * PACKET_SIZE))
queue.stop()
chunks = list(stream.data_ringbuffer)
assert len(chunks) == 2
self.check_chunk_packets(
chunks[0], 0,
np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], np.uint8))
self.check_chunk_packets(
chunks[1], 1,
np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], np.uint8))
assert stream.stats["placed_heaps"] == 2
| ska-sa/spead2 | tests/test_recv_chunk_stream.py | Python | lgpl-3.0 | 24,263 |
''' Created by Trevor Batty
Date: May 14th 2017
Creates the origin section of the GUI.
'''
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox as tkMessageBox  # Python 3 location of the Tk message box module
import subprocess as sp
import tools
class Origin(ttk.Frame):
def __init__(self,root,mainFrame):
# Create Origin frame
ttk.Frame.__init__(self,mainFrame,padding="3 3 3 3")
# Create Origin label
self.dispLabel = ttk.Label(self, text="ORIGIN", font=(None,16)).grid(column=0, row=0, sticky=tk.W)
# Create Lat,Lon label
self.latLabel = ttk.Label(self, text="Latitude").grid(column=0, row=1, sticky=tk.W)
self.latLabel = ttk.Label(self, text="Longitude").grid(column=0, row=2, sticky=tk.W)
# Create Lat,Lon Entry Boxes
self.latVar = tk.StringVar()
self.latEntry = tk.Entry(self,textvariable=self.latVar,width=10)
self.latVar.set('-37.958926')
self.latEntry.grid(column=1, row=1, sticky=tk.W)
self.lonVar = tk.StringVar()
self.lonEntry = tk.Entry(self,textvariable=self.lonVar,width=10)
self.lonVar.set(145.238343)
self.lonEntry.grid(column=1, row=2, sticky=tk.W)
# Create Altitude, Heading label
self.grid_columnconfigure(2,minsize=50)
self.altLabel = ttk.Label(self, text="Altitude").grid(column=3, row=1, sticky=tk.W)
self.headLabel = ttk.Label(self, text="Heading").grid(column=3, row=2, sticky=tk.W)
# Create Altitude, Heading Entry Boxes
self.altVar = tk.StringVar()
self.altEntry = tk.Entry(self,textvariable=self.altVar,width=10)
self.altVar.set(44)
self.altEntry.grid(column=4, row=1, sticky=tk.W)
self.headVar = tk.StringVar()
self.headEntry = tk.Entry(self,textvariable=self.headVar,width=10)
self.headVar.set(0)
self.headEntry.grid(column=4, row=2, sticky=tk.W)
# Add Traces
self.latVar.trace("w",self.on_lat_changed)
self.lonVar.trace("w",self.on_lon_changed)
self.altVar.trace("w",self.on_alt_changed)
self.headVar.trace("w",self.on_head_changed)
# Volume Frame
self.volFrame = None
def updateMapOnOriginMove(self):
# Update the map limits on moving the origin
# Change axes limits
if (self.volFrame is not None):
# Difference to current origin
lat = float(self.latVar.get())
lon = float(self.lonVar.get())
alt = float(self.altVar.get())
head = float(self.headVar.get())
# Set Axes Limits
self.volFrame.origin = [lat, lon, alt, head]
scale = self.volFrame.latLonScale(self.volFrame.origin[0])
diff = (2*0.0025)*scale/2.0
self.volFrame.axes.set_xlim(self.volFrame.origin[1]-0.0025-diff,self.volFrame.origin[1]+0.0025+diff)
self.volFrame.axes.set_ylim(self.volFrame.origin[0]-0.0025,self.volFrame.origin[0]+0.0025)
# Adjust Limits
self.volFrame.checkLimits()
# Check Tiles
self.volFrame.checkRequiredTiles()
# Redraw
for polyRow in self.volFrame.polygonRows:
polyRow.polygon.reDrawPolyPoints()
# Redraw Canvas
self.volFrame.canvas.draw()
def on_lat_changed(self,*args):
# Latitude Changed
latStr = self.latVar.get()
valid, lat = tools.validateFloat(latStr)
if not valid:
tkMessageBox.showerror(message="Latitude must be between -90 and 90.")
self.latVar.set(0)
else:
if (lat<-90) or (lat>90):
tkMessageBox.showerror(message="Latitude must be between -90 and 90.")
                self.latVar.set(90 if lat > 0 else -90)  # clamp to the nearer limit (cmp() does not exist in Python 3)
else:
# Update Map Region
self.updateMapOnOriginMove()
def on_lon_changed(self,*args):
# Longitude Changed
lonStr = self.lonVar.get()
valid, lon = tools.validateFloat(lonStr)
if not valid:
tkMessageBox.showerror(message="Longitude must be between -180 and 180.")
self.lonVar.set(0)
else:
if (lon<-180) or (lon>180):
tkMessageBox.showerror(message="Longitude must be between -180 and 180.")
                self.lonVar.set(180 if lon > 0 else -180)  # clamp to the nearer limit
def on_alt_changed(self,*args):
# Altitude Changed
altStr = self.altVar.get()
valid, alt = tools.validateFloat(altStr)
if not valid:
tkMessageBox.showerror(message="Altitude must be a float.")
self.altVar.set(0)
def on_head_changed(self,*args):
# Heading Changed
headStr = self.headVar.get()
valid, head = tools.validateFloat(headStr)
if not valid:
tkMessageBox.showerror(message="Heading must be between 0 and 360.")
self.headVar.set(0)
else:
if (head<0):
tkMessageBox.showerror(message="Heading must be between 0 and 360.")
self.headVar.set('0')
elif (head>360):
tkMessageBox.showerror(message="Heading must be between 0 and 360.")
self.headVar.set('360')
def checkEmptyEntry(self,str):
# Checks if the entry is empty or '-'
if (str == "") or (str == "-"):
val = "0"
else:
val = str
return val
def writeConfig(self,f):
        # Writes the origin section of the config to file
# Check for empty entries
lat = self.checkEmptyEntry(self.latVar.get())
lon = self.checkEmptyEntry(self.lonVar.get())
alt = self.checkEmptyEntry(self.altVar.get())
head = self.checkEmptyEntry(self.headVar.get())
# Write to file
f.write("# Origin Settings\n")
f.write("origin ")
f.write(lat + " ")
f.write(lon + " ")
f.write(alt + " ")
f.write(head)
f.write("\n\n")
| tbattz/configureOpenGLMap | src/origin.py | Python | gpl-3.0 | 6,145 |
from __future__ import absolute_import
from django.conf import settings
from django.http import Http404, HttpResponseRedirect
from django.views.generic import View
from sentry import options
class OutView(View):
def get(self, request):
if not settings.SENTRY_ONPREMISE:
raise Http404
install_id = options.get('sentry:install-id')
if install_id:
query = '?install_id=' + install_id
else:
query = ''
return HttpResponseRedirect('https://sentry.io/from/self-hosted/' + query)
| JackDanger/sentry | src/sentry/web/frontend/out.py | Python | bsd-3-clause | 558 |
"""
We acquire the python information by running an interrogation script via subprocess trigger. This operation is not
cheap, especially not on Windows. To not have to pay this hefty cost every time we apply multiple levels of
caching.
"""
from __future__ import absolute_import, unicode_literals
import logging
import os
import pipes
import sys
from collections import OrderedDict
from virtualenv.app_data import AppDataDisabled
from virtualenv.discovery.py_info import PythonInfo
from virtualenv.info import PY2
from virtualenv.util.path import Path
from virtualenv.util.six import ensure_text
from virtualenv.util.subprocess import Popen, subprocess
_CACHE = OrderedDict()
_CACHE[Path(sys.executable)] = PythonInfo()
def from_exe(cls, app_data, exe, raise_on_error=True, ignore_cache=False):
""""""
result = _get_from_cache(cls, app_data, exe, ignore_cache=ignore_cache)
if isinstance(result, Exception):
if raise_on_error:
raise result
else:
logging.info("%s", str(result))
result = None
return result
def _get_from_cache(cls, app_data, exe, ignore_cache=True):
# note here we cannot resolve symlinks, as the symlink may trigger different prefix information if there's a
    # pyvenv.cfg somewhere alongside on python3.4+
exe_path = Path(exe)
if not ignore_cache and exe_path in _CACHE: # check in the in-memory cache
result = _CACHE[exe_path]
else: # otherwise go through the app data cache
py_info = _get_via_file_cache(cls, app_data, exe_path, exe)
result = _CACHE[exe_path] = py_info
# independent if it was from the file or in-memory cache fix the original executable location
if isinstance(result, PythonInfo):
result.executable = exe
return result
def _get_via_file_cache(cls, app_data, path, exe):
path_text = ensure_text(str(path))
try:
path_modified = path.stat().st_mtime
except OSError:
path_modified = -1
if app_data is None:
app_data = AppDataDisabled()
py_info, py_info_store = None, app_data.py_info(path)
with py_info_store.locked():
if py_info_store.exists(): # if exists and matches load
data = py_info_store.read()
of_path, of_st_mtime, of_content = data["path"], data["st_mtime"], data["content"]
if of_path == path_text and of_st_mtime == path_modified:
py_info = cls._from_dict({k: v for k, v in of_content.items()})
else:
py_info_store.remove()
if py_info is None: # if not loaded run and save
failure, py_info = _run_subprocess(cls, exe, app_data)
if failure is None:
data = {"st_mtime": path_modified, "path": path_text, "content": py_info._to_dict()}
py_info_store.write(data)
else:
py_info = failure
return py_info
def _run_subprocess(cls, exe, app_data):
py_info_script = Path(os.path.abspath(__file__)).parent / "py_info.py"
with app_data.ensure_extracted(py_info_script) as py_info_script:
cmd = [exe, str(py_info_script)]
# prevent sys.prefix from leaking into the child process - see https://bugs.python.org/issue22490
env = os.environ.copy()
env.pop("__PYVENV_LAUNCHER__", None)
logging.debug("get interpreter info via cmd: %s", LogCmd(cmd))
try:
process = Popen(
cmd,
universal_newlines=True,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
env=env,
)
out, err = process.communicate()
code = process.returncode
except OSError as os_error:
out, err, code = "", os_error.strerror, os_error.errno
result, failure = None, None
if code == 0:
result = cls._from_json(out)
result.executable = exe # keep original executable as this may contain initialization code
else:
msg = "failed to query {} with code {}{}{}".format(
exe,
code,
" out: {!r}".format(out) if out else "",
" err: {!r}".format(err) if err else "",
)
failure = RuntimeError(msg)
return failure, result
class LogCmd(object):
def __init__(self, cmd, env=None):
self.cmd = cmd
self.env = env
def __repr__(self):
def e(v):
return v.decode("utf-8") if isinstance(v, bytes) else v
cmd_repr = e(" ").join(pipes.quote(e(c)) for c in self.cmd)
if self.env is not None:
cmd_repr += e(" env of {!r}").format(self.env)
if PY2:
return cmd_repr.encode("utf-8")
return cmd_repr
def __unicode__(self):
raw = repr(self)
if PY2:
return raw.decode("utf-8")
return raw
def clear(app_data):
app_data.py_info_clear()
_CACHE.clear()
__all__ = (
"from_exe",
"clear",
"LogCmd",
)
| TeamSPoon/logicmoo_workspace | packs_web/butterfly/lib/python3.7/site-packages/virtualenv/discovery/cached_py_info.py | Python | mit | 5,043 |
def counting_sort(input):
    # Counting sort: O(n + k) time and O(n + k) extra space,
    # where k is the range of values (max - min + 1).
    output = [0] * len(input)
    # Find the minimum and maximum values to size the count array
    max_val = input[0]
    min_val = input[0]
    for i in range(1, len(input)):
        if input[i] > max_val:
            max_val = input[i]
        elif input[i] < min_val:
            min_val = input[i]
    k = max_val - min_val + 1
    # Count occurrences of each value, offset by min_val
    count_list = [0] * k
    for i in range(0, len(input)):
        count_list[input[i] - min_val] += 1
    # Prefix sums: count_list[i] becomes the number of elements <= min_val + i
    for i in range(1, k):
        count_list[i] += count_list[i - 1]
    # Place each element at its final position in the output array
    # (iterating the input in reverse here would make the sort stable)
    for i in range(0, len(input)):
        output[count_list[input[i] - min_val] - 1] = input[i]
        count_list[input[i] - min_val] -= 1
    # Copy the sorted result back into the input list (in-place interface)
    for i in range(0, len(input)):
        input[i] = output[i]
input = [1, 5, 2, 7, 3, 4, 4, 1, 5]
counting_sort(input)
print("Sorted list :", end = " ")
for i in range(0, len(input)):
print(input[i], end = " ")
''' Output
Sorted list : 1 1 2 3 4 4 5 5 7
'''
| algobook/Algo_Ds_Notes | Counting_Sort/Counting_Sort.py | Python | gpl-3.0 | 850 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import pandas as pd
import polyline
from sqlalchemy import String, Text
from superset import db
from superset.utils.core import get_example_database
from .helpers import get_example_data, TBL
def load_bart_lines(only_metadata=False, force=False):
tbl_name = "bart_lines"
database = get_example_database()
table_exists = database.has_table_by_name(tbl_name)
if not only_metadata and (not table_exists or force):
content = get_example_data("bart-lines.json.gz")
df = pd.read_json(content, encoding="latin-1")
df["path_json"] = df.path.map(json.dumps)
df["polyline"] = df.path.map(polyline.encode)
del df["path"]
df.to_sql(
tbl_name,
database.get_sqla_engine(),
if_exists="replace",
chunksize=500,
dtype={
"color": String(255),
"name": String(255),
"polyline": Text,
"path_json": Text,
},
index=False,
)
print("Creating table {} reference".format(tbl_name))
tbl = db.session.query(TBL).filter_by(table_name=tbl_name).first()
if not tbl:
tbl = TBL(table_name=tbl_name)
tbl.description = "BART lines"
tbl.database = database
db.session.merge(tbl)
db.session.commit()
tbl.fetch_metadata()
| zhouyao1994/incubator-superset | superset/examples/bart_lines.py | Python | apache-2.0 | 2,148 |